import numpy as np
from showml.losses import MeanSquaredError, BinaryCrossEntropy
from showml.losses.loss_functions import CrossEntropy
def r2_score(y: np.ndarray, z: np.ndarray) -> float:
"""Calculate the r^2 (coefficient of determination) score of the model.
Args:
y (np.ndarray): The true values.
z (np.ndarray): The predicted values.
Returns:
float: The r^2 score.
"""
rss = np.sum(np.square(y - z))
tss = np.sum(np.square(y - np.mean(y)))
r_2 = 1 - (rss / tss)
return r_2
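# Example (illustrative values): with y = [1.0, 2.0, 3.0] and z = [1.1, 1.9, 3.2],
# rss = 0.06 and tss = 2.0, so r2_score returns 1 - 0.06 / 2.0 = 0.97.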
def accuracy(y: np.ndarray, z: np.ndarray) -> float:
"""Compute the classification accuracy of the model.
Args:
y (np.ndarray): The true labels.
z (np.ndarray): The predicted labels.
Returns:
float: The classification accuracy of the model.
"""
if y.ndim == 1:
        # y and z are not one-hot encoded; threshold the predicted probabilities at 0.5
        true_class = y
        predicted_class = np.where(z > 0.5, 1, 0)
else:
        # y and z are one-hot encoded
true_class = np.argmax(y, axis=1)
predicted_class = np.argmax(z, axis=1)
return np.sum(true_class == predicted_class) / len(true_class)
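# Example (illustrative values): accuracy(np.array([1, 0, 1]), np.array([0.9, 0.2, 0.6]))
# thresholds z at 0.5, yielding predictions [1, 0, 1] and an accuracy of 1.0.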
def mean_squared_error(y: np.ndarray, z: np.ndarray) -> float:
"""Computes the Mean Squared Error (MSE).
Args:
y (np.ndarray): The true labels.
z (np.ndarray): The predicted labels.
Returns:
float: The MSE value.
"""
return MeanSquaredError().objective(y, z)
def binary_cross_entropy(y: np.ndarray, z: np.ndarray) -> float:
"""Computes the Binary Cross Entropy value (BCE).
Args:
y (np.ndarray): The true labels.
z (np.ndarray): The predicted labels.
Returns:
float: the BCE value.
"""
return BinaryCrossEntropy().objective(y, z)
def cross_entropy(y: np.ndarray, z: np.ndarray) -> float:
"""Computes the Cross Entropy value (CE).
Args:
y (np.ndarray): The true labels.
z (np.ndarray): The predicted labels.
Returns:
float: the CE value.
"""
return CrossEntropy().objective(y, z)
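# Example (illustrative values, assuming each loss objective returns the mean over samples):
#   mean_squared_error(np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2]))
#   # -> 0.06 / 3 = 0.02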
|
{"hexsha": "a5a1200754607888b5baaa1bb56fdc831d3056a4", "size": 2098, "ext": "py", "lang": "Python", "max_stars_repo_path": "showml/losses/metrics.py", "max_stars_repo_name": "shubhomoy/ShowML", "max_stars_repo_head_hexsha": "9fbc366941ad910f1fbd7d91da823616c34fd400", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2022-01-02T13:53:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T16:12:20.000Z", "max_issues_repo_path": "showml/losses/metrics.py", "max_issues_repo_name": "shubhomoy/ShowML", "max_issues_repo_head_hexsha": "9fbc366941ad910f1fbd7d91da823616c34fd400", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-11-10T15:38:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T15:52:36.000Z", "max_forks_repo_path": "showml/losses/metrics.py", "max_forks_repo_name": "shubhomoy/ShowML", "max_forks_repo_head_hexsha": "9fbc366941ad910f1fbd7d91da823616c34fd400", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-04T14:24:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T14:33:38.000Z", "avg_line_length": 25.9012345679, "max_line_length": 75, "alphanum_fraction": 0.6163012393, "include": true, "reason": "import numpy", "num_tokens": 534}
|
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore
from PyQt5.QtCore import *
import numpy as np
class QThumbnail(QLabel):
mpsignal = pyqtSignal(list, int)
def __init__(self, parent):
        super().__init__(parent)
self.setMinimumSize(1, 1)
self.setMouseTracking(False)
self.processedImage = None
self.imgr, self.imgc = None, None
        # Decide which paintEvent type to use; 'general' means the default behaviour
self.type = 'general'
self.coord = []
self.index = None
def mousePressEvent(self, event: QMouseEvent):
self.mpsignal.emit(self.coord, self.index)
def display_image(self, window=1):
self.imgr, self.imgc = self.processedImage.shape[0:2]
qformat = QImage.Format_Indexed8
if len(self.processedImage.shape) == 3: # rows[0], cols[1], channels[2]
if (self.processedImage.shape[2]) == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(self.processedImage, self.processedImage.shape[1], self.processedImage.shape[0],
self.processedImage.strides[0], qformat)
w, h = self.width(), self.height()
if window == 1:
self.setScaledContents(True)
backlash = self.lineWidth() * 2
self.setPixmap(QPixmap.fromImage(img).scaled(w - backlash, h - backlash, Qt.IgnoreAspectRatio))
self.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.update()
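# Minimal usage sketch (hypothetical parent widget and image, for illustration only):
#
#   thumb = QThumbnail(parent_widget)
#   thumb.processedImage = frame            # e.g. an H x W x 3 uint8 numpy array
#   thumb.coord, thumb.index = [10, 20], 0
#   thumb.display_image()
#   thumb.mpsignal.connect(lambda coord, index: print(coord, index))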
|
{"hexsha": "999d19a8a43649274503401a733cff8ecc32a78f", "size": 1568, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/qthumbnail.py", "max_stars_repo_name": "wenyalintw/Nodule-CADx", "max_stars_repo_head_hexsha": "dd0b3d1d672141f8dfabde1a05ef33f87681f8e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-07-29T01:39:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T16:26:38.000Z", "max_issues_repo_path": "src/qthumbnail.py", "max_issues_repo_name": "wenyalintw/Nodule-CADx", "max_issues_repo_head_hexsha": "dd0b3d1d672141f8dfabde1a05ef33f87681f8e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-07-29T01:50:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-09T00:32:38.000Z", "max_forks_repo_path": "src/qthumbnail.py", "max_forks_repo_name": "wenyalintw/Nodule-CADx", "max_forks_repo_head_hexsha": "dd0b3d1d672141f8dfabde1a05ef33f87681f8e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-31T11:37:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-13T02:57:20.000Z", "avg_line_length": 34.8444444444, "max_line_length": 107, "alphanum_fraction": 0.6275510204, "include": true, "reason": "import numpy", "num_tokens": 383}
|
import math
import random
import numpy as np
from parse.ast_node import ASTNode
# From here on, classes describing various mathematical operations
# TODO: minScale, scale, trimScale, widthBucket
class Abs(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return abs(self.exp.execute(table, tree))
class Cbrt(ASTNode): # TODO CHECK GRAMMAR, It receives an array and grammar probably doesn't support it
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return np.cbrt(self.exp.execute(table, tree))
class Ceil(ASTNode): # Same for ceiling. Only receives float value, check in grammar or semantic error?
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.ceil(self.exp.execute(table, tree))
class Degrees(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.degrees(self.exp.execute(table, tree))
class Div(ASTNode):
def __init__(self, exp1, exp2, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return self.exp1.execute(table, tree) // self.exp2.execute(table, tree)
class Exp(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.exp(self.exp.execute(table, tree))
class Factorial(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.factorial(self.exp.execute(table, tree))
class Floor(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.floor(self.exp.execute(table, tree))
class Gcd(ASTNode):
def __init__(self, exp1, exp2, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.gcd(self.exp1.execute(table, tree), self.exp2.execute(table, tree))
class Lcm(ASTNode):  # math.lcm requires Python 3.9+
def __init__(self, exp1, exp2, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.lcm(self.exp1.execute(table, tree), self.exp2.execute(table, tree))
class Ln(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
        return math.log(self.exp.execute(table, tree))  # natural logarithm
class Log(ASTNode):  # TODO: math.log here is the natural log; PostgreSQL's log() is base 10
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.log(self.exp.execute(table, tree))
class Log10(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.log10(self.exp.execute(table, tree))
class MinScale(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return True
class Mod(ASTNode):
def __init__(self, exp1, exp2, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.fmod(self.exp1.execute(table, tree), self.exp2.execute(table, tree))
class PI(ASTNode):
def __init__(self, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.pi
class Power(ASTNode):
def __init__(self, exp1, exp2, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.pow(self.exp1.execute(table, tree), self.exp2.execute(table, tree))
class Radians(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.radians(self.exp.execute(table, tree))
class Random(ASTNode):  # returns a value in [0.0, 1.0), matching PostgreSQL's random()
def __init__(self, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return random.random()
class Round(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return round(self.exp.execute(table, tree))
class Scale(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
    def execute(self, table, tree):
        super().execute(table, tree)
        r = self.exp.execute(table, tree)
        if isinstance(r, float):
            parts = str(r).split(".")
            if len(parts) == 2:
                return len(parts[1])
        return 0
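# Example: evaluating Scale over the float 3.14 yields 2 (digits after the
# decimal point); non-float results yield 0.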
class SetSeed(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return random.seed(self.exp.execute(table, tree))
class Sign(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return np.sign(self.exp.execute(table, tree))
class Sqrt(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.sqrt(self.exp.execute(table, tree))
class TrimScale(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return True
class Trunc(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return math.trunc(self.exp.execute(table, tree))
class WithBucket(ASTNode):
def __init__(self, exp1, exp2, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return True
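# Minimal usage sketch (the 'Literal' leaf node below is hypothetical, for illustration):
#
#   class Literal(ASTNode):
#       def __init__(self, value, line=0, column=0):
#           ASTNode.__init__(self, line, column)
#           self.value = value
#       def execute(self, table, tree):
#           return self.value
#
#   Power(Literal(2), Literal(10), 0, 0, None).execute(table, tree)  # -> 1024.0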
|
{"hexsha": "e2bfa611c84add6622f0b2960d1eb0e8d5a5c5d3", "size": 9223, "ext": "py", "lang": "Python", "max_stars_repo_path": "parser/team03/parse/expressions/expressions_math.py", "max_stars_repo_name": "18SebastianVC/tytus", "max_stars_repo_head_hexsha": "2b22f4339356b6cf46e3235a5219f68e5ba5573b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-09T05:32:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-09T05:32:35.000Z", "max_issues_repo_path": "parser/team03/parse/expressions/expressions_math.py", "max_issues_repo_name": "XiomRB/tytus", "max_issues_repo_head_hexsha": "0873e4bdce5c110bee6ef2aa98240be6a93ae024", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parser/team03/parse/expressions/expressions_math.py", "max_forks_repo_name": "XiomRB/tytus", "max_forks_repo_head_hexsha": "0873e4bdce5c110bee6ef2aa98240be6a93ae024", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1867088608, "max_line_length": 108, "alphanum_fraction": 0.6305974195, "include": true, "reason": "import numpy", "num_tokens": 2316}
|
function ME = myfunc(x)
% Break Test
ME = [];  % ensure ME is defined even when no exception is caught
for i = 1:100
try
x = x - 1;
if x < 0
error('x has become negative')
end
catch ME
        fprintf('x has become negative\n')
break
end
end
|
{"author": "wme7", "repo": "Aero-matlab", "sha": "9430008f2e3b84f28633775a44dff534e780fbac", "save_path": "github-repos/MATLAB/wme7-Aero-matlab", "path": "github-repos/MATLAB/wme7-Aero-matlab/Aero-matlab-9430008f2e3b84f28633775a44dff534e780fbac/NumericalMethods/myfunc.m"}
|
section \<open> Blocks (Abstract Local Variables) \<close>
theory utp_blocks
imports utp_rel_laws utp_wp
begin
subsection \<open> Extending and Contracting Substitutions \<close>
definition subst_ext :: "('\<alpha> \<Longrightarrow> '\<beta>) \<Rightarrow> ('\<alpha>, '\<beta>) psubst" ("ext\<^sub>s") where
\<comment> \<open> Extend state space, setting local state to an arbitrary value \<close>
[upred_defs]: "ext\<^sub>s a = \<lparr>&a \<mapsto>\<^sub>s &\<^bold>v\<rparr>"
definition subst_con :: "('\<alpha> \<Longrightarrow> '\<beta>) \<Rightarrow> ('\<beta>, '\<alpha>) psubst" ("con\<^sub>s") where
\<comment> \<open> Contract the state space using the lens get function \<close>
[upred_defs]: "con\<^sub>s a = &a"
lemma subst_con_alt_def: "con\<^sub>s a = \<lparr>\<^bold>v \<mapsto>\<^sub>s &a\<rparr>"
unfolding subst_con_def by (rel_auto)
lemma subst_ext_con [usubst]: "mwb_lens a \<Longrightarrow> con\<^sub>s a \<circ>\<^sub>s ext\<^sub>s a = id\<^sub>s"
by (rel_simp)
lemma subst_apply_con [usubst]: "\<langle>con\<^sub>s a\<rangle>\<^sub>s x = &a:x"
by (rel_simp)
text \<open> Variables in the global state space will be retained after a state is contracted \<close>
lemma subst_con_update_sublens [usubst]:
"\<lbrakk> mwb_lens a; x \<subseteq>\<^sub>L a \<rbrakk> \<Longrightarrow> con\<^sub>s a \<circ>\<^sub>s subst_upd \<sigma> x v = subst_upd (con\<^sub>s a \<circ>\<^sub>s \<sigma>) (x /\<^sub>L a) v"
by (simp add: subst_con_def usubst alpha, rel_simp)
text \<open> Variables in the local state space will be lost after a state is contracted \<close>
lemma subst_con_update_indep [usubst]:
"\<lbrakk> mwb_lens x; mwb_lens a; a \<bowtie> x \<rbrakk> \<Longrightarrow> con\<^sub>s a \<circ>\<^sub>s subst_upd \<sigma> x v = (con\<^sub>s a \<circ>\<^sub>s \<sigma>)"
by (simp add: subst_con_alt_def usubst alpha)
lemma subst_ext_apply [usubst]: "\<langle>ext\<^sub>s a\<rangle>\<^sub>s x = &x \<restriction>\<^sub>e a"
apply (rel_simp)
oops
subsection \<open> Generic Blocks \<close>
text \<open> We ensure that the initial values of the local variables are arbitrarily chosen using the
  non-deterministic choice operator. \<close>
definition block_open :: "(<'a, 'c> \<Longleftrightarrow> 'b) \<Rightarrow> ('a, 'b) urel" ("open\<^bsub>_\<^esub>") where
[upred_defs]: "block_open a = \<langle>ext\<^sub>s \<V>\<^bsub>a\<^esub>\<rangle>\<^sub>a ;; \<C>[a] := *"
lemma block_open_alt_def:
"sym_lens a \<Longrightarrow> block_open a = \<langle>ext\<^sub>s \<V>\<^bsub>a\<^esub>\<rangle>\<^sub>a ;; ($\<V>[a]\<acute> =\<^sub>u $\<V>[a])"
by (rel_auto, metis lens_indep_vwb_iff sym_lens.put_region_coregion_cover sym_lens_def)
definition block_close :: "(<'a, 'c> \<Longleftrightarrow> 'b) \<Rightarrow> ('b, 'a) urel" ("close\<^bsub>_\<^esub>") where
[upred_defs]: "block_close a = \<langle>con\<^sub>s \<V>\<^bsub>a\<^esub>\<rangle>\<^sub>a"
lemma wp_open_block [wp]: "psym_lens a \<Longrightarrow> open\<^bsub>a\<^esub> wp b = (\<^bold>\<exists> v \<bullet> \<lparr>&\<V>[a] \<mapsto>\<^sub>s &\<^bold>v, &\<C>[a] \<mapsto>\<^sub>s \<guillemotleft>v\<guillemotright>\<rparr> \<dagger> b)"
by (simp add: block_open_def subst_ext_def wp usubst unrest)
lemma wp_close_block [wp]: "psym_lens a \<Longrightarrow> close\<^bsub>a\<^esub> wp b = con\<^sub>s \<V>\<^bsub>a\<^esub> \<dagger> b"
by (simp add: block_close_def subst_ext_def wp usubst unrest)
lemma block_open_conv:
"sym_lens a \<Longrightarrow> open\<^bsub>a\<^esub>\<^sup>- = close\<^bsub>a\<^esub>"
by (rel_auto, metis lens_indep_def sym_lens.put_region_coregion_cover sym_lens_def)
lemma block_open_close:
"psym_lens a \<Longrightarrow> open\<^bsub>a\<^esub> ;; close\<^bsub>a\<^esub> = II"
by (rel_auto)
text \<open> I needed this property for the assignment open law below. \<close>
lemma usubst_prop: "\<sigma> \<oplus>\<^sub>s a = [a \<mapsto>\<^sub>s &a \<dagger> \<sigma>]"
by (rel_simp)
lemma block_assigns_open:
"psym_lens a \<Longrightarrow> \<langle>\<sigma>\<rangle>\<^sub>a ;; open\<^bsub>a\<^esub> = open\<^bsub>a\<^esub> ;; \<langle>\<sigma> \<oplus>\<^sub>s \<V>\<^bsub>a\<^esub>\<rangle>\<^sub>a"
apply (wp_calc)
apply (simp add: usubst_prop usubst)
apply (rel_auto)
done
lemma block_assign_open:
"psym_lens a \<Longrightarrow> x := v ;; open\<^bsub>a\<^esub> = open\<^bsub>a\<^esub> ;; \<V>[a]:x := (v \<oplus>\<^sub>p \<V>\<^bsub>a\<^esub>)"
by (simp add: block_assigns_open, rel_auto)
lemma block_assign_local_close:
"\<V>\<^bsub>a\<^esub> \<bowtie> x \<Longrightarrow> x := v ;; close\<^bsub>a\<^esub> = close\<^bsub>a\<^esub>"
by (rel_auto)
lemma block_assign_global_close:
"\<lbrakk> psym_lens a; x \<subseteq>\<^sub>L \<V>\<^bsub>a\<^esub> ; \<V>[a] \<natural> v \<rbrakk> \<Longrightarrow> (x := v) ;; close\<^bsub>a\<^esub> = close\<^bsub>a\<^esub> ;; (x\<restriction>\<V>[a] := (v \<restriction>\<^sub>e \<V>\<^bsub>a\<^esub>))"
by (rel_simp)
lemma block_assign_global_close':
"\<lbrakk> sym_lens a; x \<subseteq>\<^sub>L \<V>\<^bsub>a\<^esub> ; \<C>[a] \<sharp> v \<rbrakk> \<Longrightarrow> (x := v) ;; close\<^bsub>a\<^esub> = close\<^bsub>a\<^esub> ;; (x\<restriction>\<V>[a] := (v \<restriction>\<^sub>e \<V>\<^bsub>a\<^esub>))"
by (rule block_assign_global_close, simp_all add: sym_lens_unrest')
lemma hoare_block [hoare_safe]:
assumes "psym_lens a"
shows "\<lbrace>p \<oplus>\<^sub>p \<V>\<^bsub>a\<^esub>\<rbrace>P\<lbrace>q \<oplus>\<^sub>p \<V>\<^bsub>a\<^esub>\<rbrace>\<^sub>u \<Longrightarrow> \<lbrace>p\<rbrace>open\<^bsub>a\<^esub> ;; P ;; close\<^bsub>a\<^esub>\<lbrace>q\<rbrace>\<^sub>u"
using assms by (rel_simp)
lemma "vwb_lens a \<Longrightarrow> a:[P]\<^sup>+ = a:[\<langle>con\<^sub>s a\<rangle>\<^sub>a ;; P ;; \<langle>ext\<^sub>s a\<rangle>\<^sub>a ;; ($a\<acute> =\<^sub>u $a)]"
by (rel_auto)
end
|
{"author": "isabelle-utp", "repo": "utp-main", "sha": "27bdf3aee6d4fc00c8fe4d53283d0101857e0d41", "save_path": "github-repos/isabelle/isabelle-utp-utp-main", "path": "github-repos/isabelle/isabelle-utp-utp-main/utp-main-27bdf3aee6d4fc00c8fe4d53283d0101857e0d41/utp/utp_blocks.thy"}
|
from abbrev import abbreviations
from absl import logging
import csv
import numpy as np
import spacy
import tensorflow_hub as hub
nlp = spacy.load("en_core_web_lg")
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4" # @param ["https://tfhub.dev/google/universal-sentence-encoder/4", "https://tfhub.dev/google/universal-sentence-encoder-large/5"]
model = hub.load(module_url)
print("module %s loaded" % module_url)
def embed(texts):
    return model(texts)
LINES_CSV = 'lines.csv'
MVR_CSV = 'mvr.csv'
OUT_CSV = 'out.csv'
WORD_LEN = 5
INJURY_WORDS = {'injury', 'fatal', 'pi', 'homicide', 'death', 'inj'}
CORR_THRESH = 0.57
lines_defs = []
mvr_defs = []
with open(LINES_CSV, newline='') as lines_csvfile:
lines_reader = csv.DictReader(lines_csvfile, delimiter='\t')
for lines_row in lines_reader:
lines_words = [
''.join(ch for ch in x if ch.isalnum())
for x in lines_row['line_def'].split(' ')
]
lines_defs.append(' '.join(lines_words))
with open(MVR_CSV, newline='') as mvr_csvfile:
mvr_reader = csv.DictReader(mvr_csvfile, delimiter='\t')
for mvr_row in mvr_reader:
mvr_tokens = list(nlp(mvr_row['desc']))
mvr_desc_abbreviations_removed = ' '.join([abbreviations.get(str(e), str(e)) for e in mvr_tokens])
mvr_defs.append(mvr_desc_abbreviations_removed)
messages = lines_defs + mvr_defs
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
message_embeddings = embed(messages)
# Correlation matrix
corr = np.inner(message_embeddings, message_embeddings)
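# Universal Sentence Encoder vectors are approximately unit length, so these inner
# products behave like cosine similarities between every pair of messages.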
mvr_range = range(len(lines_defs), len(lines_defs) + len(mvr_defs))
max_corr = CORR_THRESH
with open(OUT_CSV, 'w', newline='') as out_csvfile:
fieldnames = ['svc_code', 'description', 'augusta_risk_type', 'bodily_injury', 'correlation']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
with open(MVR_CSV, newline='') as mvr_csvfile:
mvr_reader = csv.DictReader(mvr_csvfile, delimiter='\t')
        for i, mvr_row in enumerate(mvr_reader):
            matching_indexes = []
            correlation = []
            # index of this MVR row within the combined embedding/similarity matrix
            mvr_idx = i + len(lines_defs)
            injury_flag = bool(set(messages[mvr_idx].split(' ')) & INJURY_WORDS)
            for j in range(len(lines_defs)):
                if corr[mvr_idx][j] >= CORR_THRESH:
                    matching_indexes.append(str(j + 1))
                    correlation.append(str(corr[mvr_idx][j]))
                    if corr[mvr_idx][j] > max_corr:
                        max_corr = corr[mvr_idx][j]
                        print("Max Correlation: {}".format(max_corr))
                        print("MVR: {}".format(mvr_row['desc']))
                        print("Line: {}".format(messages[j]))
                        print(' ')
if not matching_indexes:
matching_indexes.append('indeterminate')
writer.writerow({
'svc_code': mvr_row['svc_code'],
'description': mvr_row['desc'].upper(),
'augusta_risk_type': ', '.join(matching_indexes),
'bodily_injury': injury_flag,
'correlation': ', '.join(correlation)
})
|
{"hexsha": "1de069ca97ab84ed39568422fdaaf547e8a18ef8", "size": 3278, "ext": "py", "lang": "Python", "max_stars_repo_path": "script_tf.py", "max_stars_repo_name": "SombiriX/csvcompare", "max_stars_repo_head_hexsha": "26d7d9288b702af8e350fed7f832f6360cefdaaf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script_tf.py", "max_issues_repo_name": "SombiriX/csvcompare", "max_issues_repo_head_hexsha": "26d7d9288b702af8e350fed7f832f6360cefdaaf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script_tf.py", "max_forks_repo_name": "SombiriX/csvcompare", "max_forks_repo_head_hexsha": "26d7d9288b702af8e350fed7f832f6360cefdaaf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8314606742, "max_line_length": 199, "alphanum_fraction": 0.6226357535, "include": true, "reason": "import numpy", "num_tokens": 796}
|
# This file is a part of BAT.jl, licensed under the MIT License (MIT).
include("bat_sample.jl")
include("mcmc/mcmc.jl")
include("sampled_density.jl")
include("importance/importance_sampler.jl")
include("partitioned_sampling/partitioned_sampling.jl")
|
{"hexsha": "7bb94d0e24ee8ea7650a8fc12f0d9facd7fe4400", "size": 251, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/samplers/samplers.jl", "max_stars_repo_name": "Cornelius-G/BAT.jl", "max_stars_repo_head_hexsha": "1bb577c8d976066c1f52070984d86020728f599c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-09T06:50:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T17:09:42.000Z", "max_issues_repo_path": "src/samplers/samplers.jl", "max_issues_repo_name": "Cornelius-G/BAT.jl", "max_issues_repo_head_hexsha": "1bb577c8d976066c1f52070984d86020728f599c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/samplers/samplers.jl", "max_forks_repo_name": "Cornelius-G/BAT.jl", "max_forks_repo_head_hexsha": "1bb577c8d976066c1f52070984d86020728f599c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.375, "max_line_length": 70, "alphanum_fraction": 0.7848605578, "num_tokens": 65}
|
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import pandas as pd
import trappy
from utils_tests import TestBART
from bart.common.signal import SignalCompare
import numpy as np
class TestSignalCompare(TestBART):
def __init__(self, *args, **kwargs):
super(TestSignalCompare, self).__init__(*args, **kwargs)
def test_conditional_compare(self):
"""Test conditional_compare"""
# Refer to the example in
# bart.common.signal.SignalCompare.conditional_compare
# doc-strings which explains the calculation for the
# data set below
A = [0, 0, 0, 3, 3, 0, 0, 0]
B = [0, 0, 2, 2, 2, 2, 1, 1]
trace = trappy.BareTrace()
df = pd.DataFrame({"A": A, "B": B})
trace.add_parsed_event("event", df)
s = SignalCompare(trace, "event:A", "event:B")
expected = (1.5, 2.0/7)
self.assertEqual(
s.conditional_compare(
"event:A > event:B",
method="rect"),
expected)
def test_get_overshoot(self):
"""Test get_overshoot"""
A = [0, 0, 0, 3, 3, 0, 0, 0]
B = [0, 0, 2, 2, 2, 2, 1, 1]
trace = trappy.BareTrace()
df = pd.DataFrame({"A": A, "B": B})
trace.add_parsed_event("event", df)
s = SignalCompare(trace, "event:A", "event:B")
expected = (1.5, 2.0/7)
self.assertEqual(
s.get_overshoot(method="rect"),
expected)
A = [0, 0, 0, 1, 1, 0, 0, 0]
B = [0, 0, 2, 2, 2, 2, 1, 1]
df = pd.DataFrame({"A": A, "B": B})
trace.event.data_frame = df
s = SignalCompare(trace, "event:A", "event:B")
expected = (float("nan"), 0.0)
result = s.get_overshoot(method="rect")
self.assertTrue(np.isnan(result[0]))
self.assertEqual(result[1], expected[1])
def test_get_undershoot(self):
"""Test get_undershoot"""
A = [0, 0, 0, 1, 1, 1, 1, 1]
B = [2, 2, 2, 2, 2, 2, 2, 2]
trace = trappy.BareTrace()
df = pd.DataFrame({"A": A, "B": B})
trace.add_parsed_event("event", df)
s = SignalCompare(trace, "event:A", "event:B")
expected = (4.0/14.0, 1.0)
self.assertEqual(
s.get_undershoot(method="rect"),
expected)
A = [3, 3, 3, 3, 3, 3, 3, 3]
B = [2, 2, 2, 2, 2, 2, 1, 1]
df = pd.DataFrame({"A": A, "B": B})
trace.event.data_frame = df
s = SignalCompare(trace, "event:A", "event:B")
expected = (float("nan"), 0.0)
result = s.get_undershoot(method="rect")
self.assertTrue(np.isnan(result[0]))
self.assertEqual(result[1], expected[1])
|
{"hexsha": "71f2c5a28c12ce7413ec652b98ffbc067a57a4ff", "size": 3360, "ext": "py", "lang": "Python", "max_stars_repo_path": "external/bart/tests/test_signal.py", "max_stars_repo_name": "JaimeVHArm/lisa", "max_stars_repo_head_hexsha": "e5dcb7d54f73d57d4071da87c7c8095ba351a899", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-30T16:14:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-30T16:14:02.000Z", "max_issues_repo_path": "external/bart/tests/test_signal.py", "max_issues_repo_name": "JaimeVHArm/lisa", "max_issues_repo_head_hexsha": "e5dcb7d54f73d57d4071da87c7c8095ba351a899", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "external/bart/tests/test_signal.py", "max_forks_repo_name": "JaimeVHArm/lisa", "max_forks_repo_head_hexsha": "e5dcb7d54f73d57d4071da87c7c8095ba351a899", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1111111111, "max_line_length": 74, "alphanum_fraction": 0.5794642857, "include": true, "reason": "import numpy", "num_tokens": 997}
|
# Author: Bichen Wu (bichen@berkeley.edu) 02/20/2017
# -*- coding: utf-8 -*-
"""Utility functions."""
import numpy as np
import time
# ed: colorizes the coordinates in label/pred_cls whose class has been decided and returns the result (visualization)
def visualize_seg(label_map, mc, one_hot=False):
if one_hot:
label_map = np.argmax(label_map, axis=-1)
out = np.zeros(
(label_map.shape[0], label_map.shape[1], label_map.shape[2], 3))
for l in range(1, mc.NUM_CLASS):
out[label_map==l, :] = mc.CLS_COLOR_MAP[l]
return out
def bgr_to_rgb(ims):
"""Convert a list of images from BGR format to RGB format."""
out = []
for im in ims:
out.append(im[:,:,::-1])
return out
class Timer(object):
def __init__(self):
self.total_time = 0.0
self.calls = 0
self.start_time = 0.0
self.duration = 0.0
self.average_time = 0.0
def tic(self):
self.start_time = time.time()
def toc(self, average=True):
self.duration = time.time() - self.start_time
self.total_time += self.duration
self.calls += 1
self.average_time = self.total_time/self.calls
if average:
return self.average_time
else:
return self.duration
def conf_error_rate_at_thresh_fn(mask, conf, thresh):
return np.mean((conf>thresh) != mask)
def rmse_fn(diff, nnz):
return np.sqrt(np.sum(diff**2)/nnz)
def abs_accuracy_at_thresh_fn(diff, thresh, mask):
return np.sum((np.abs(diff) < thresh)*mask)/float(np.sum(mask))
def rel_accuracy_at_thresh_fn(pred_ogm, gt_ogm, mask, thresh):
return np.sum(
mask * (np.maximum(pred_ogm, gt_ogm) /
np.minimum(gt_ogm, pred_ogm) < thresh)
)/float(np.sum(mask))
# ed: function that computes the IoU
def evaluate_iou(label, pred, n_class, epsilon=1e-12):
"""Evaluation script to compute pixel level IoU.
Args:
label: N-d array of shape [batch, W, H], where each element is a class
index.
pred: N-d array of shape [batch, W, H], the each element is the predicted
class index.
n_class: number of classes
epsilon: a small value to prevent division by 0
Returns:
    IoU: array of length n_class, where each element is the average IoU for this
class.
tps: same shape as IoU, where each element is the number of TP for each
class.
fps: same shape as IoU, where each element is the number of FP for each
class.
fns: same shape as IoU, where each element is the number of FN for each
class.
"""
assert label.shape == pred.shape, \
'label and pred shape mismatch: {} vs {}'.format(
label.shape, pred.shape)
ious = np.zeros(n_class)
tps = np.zeros(n_class)
fns = np.zeros(n_class)
fps = np.zeros(n_class)
  # ed: since this is point-wise labeling rather than a true region IoU,
  # the true positives, false negatives, etc. are computed per class as below
for cls_id in range(n_class):
tp = np.sum(pred[label == cls_id] == cls_id)
fp = np.sum(label[pred == cls_id] != cls_id)
fn = np.sum(pred[label == cls_id] != cls_id)
ious[cls_id] = tp/(tp+fn+fp+epsilon)
tps[cls_id] = tp
fps[cls_id] = fp
fns[cls_id] = fn
return ious, tps, fps, fns
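# Example (illustrative 1x2x2 maps, n_class=2):
#   label = np.array([[[0, 1], [1, 1]]]); pred = np.array([[[0, 1], [0, 1]]])
#   evaluate_iou(label, pred, 2) -> ious ~ [0.5, 0.667]
#   (class 0: tp=1, fp=1, fn=0; class 1: tp=2, fp=0, fn=1)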
def condensing_matrix(size_z, size_a, in_channel):
assert size_z % 2 == 1 and size_a % 2==1, \
'size_z and size_a should be odd number'
half_filter_dim = (size_z*size_a)//2
  # moving neighboring pixels to the channel dimension
nbr2ch_mat = np.zeros(
(size_z, size_a, in_channel, size_z*size_a*in_channel),
dtype=np.float32
)
for z in range(size_z):
for a in range(size_a):
for ch in range(in_channel):
nbr2ch_mat[z, a, ch, z*(size_a*in_channel) + a*in_channel + ch] = 1
# exclude the channel index corresponding to the center position
nbr2ch_mat = np.concatenate(
[nbr2ch_mat[:, :, :, :in_channel*half_filter_dim],
nbr2ch_mat[:, :, :, in_channel*(half_filter_dim+1):]],
axis=3
)
assert nbr2ch_mat.shape == \
(size_z, size_a, in_channel, (size_a*size_z-1)*in_channel), \
'error with the shape of nbr2ch_mat after removing center position'
return nbr2ch_mat
def angular_filter_kernel(size_z, size_a, in_channel, theta_sqs):
"""Compute a gaussian kernel.
Args:
size_z: size on the z dimension.
size_a: size on the a dimension.
in_channel: input (and output) channel size
theta_sqs: an array with length == in_channel. Contains variance for
gaussian kernel for each channel.
Returns:
    kernel: ND array of size [size_z, size_a, in_channel, in_channel], which is
      just gaussian kernel parameters for each channel.
"""
assert size_z % 2 == 1 and size_a % 2==1, \
'size_z and size_a should be odd number'
assert len(theta_sqs) == in_channel, \
'length of theta_sqs and in_channel does no match'
# gaussian kernel
kernel = np.zeros((size_z, size_a, in_channel, in_channel), dtype=np.float32)
for k in range(in_channel):
kernel_2d = np.zeros((size_z, size_a), dtype=np.float32)
for i in range(size_z):
for j in range(size_a):
diff = np.sum(
(np.array([i-size_z//2, j-size_a//2]))**2)
kernel_2d[i, j] = np.exp(-diff/2/theta_sqs[k])
# exclude the center position
kernel_2d[size_z//2, size_a//2] = 0
kernel[:, :, k, k] = kernel_2d
return kernel
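# Example (illustrative): angular_filter_kernel(3, 5, 2, [0.1, 0.2]) returns a
# (3, 5, 2, 2) float32 kernel; channel k holds a 2-D gaussian with variance
# theta_sqs[k] and its centre tap zeroed out.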
|
{"hexsha": "07a9b83673409e274443703dae6c16a2ce8ac6ad", "size": 5274, "ext": "py", "lang": "Python", "max_stars_repo_path": "DEEPLEARNING/DL_SQUEEZESEG/src/squeezeseg_cpp_preprocessing/script/squeezeseg/utils/util.py", "max_stars_repo_name": "Hqss/DINK", "max_stars_repo_head_hexsha": "5fecaa65e2f9da48eb8ac38ef709aa555fca8766", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 189, "max_stars_repo_stars_event_min_datetime": "2019-01-16T03:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-14T14:54:16.000Z", "max_issues_repo_path": "DEEPLEARNING/DL_SQUEEZESEG/src/squeezeseg_cpp_preprocessing/script/squeezeseg/utils/util.py", "max_issues_repo_name": "jtpils/DINK", "max_issues_repo_head_hexsha": "5f6b3eaba279126f79ae6607f965311002d7451c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-02-11T06:20:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-05T07:03:53.000Z", "max_forks_repo_path": "DEEPLEARNING/DL_SQUEEZESEG/src/squeezeseg_cpp_preprocessing/script/squeezeseg/utils/util.py", "max_forks_repo_name": "jtpils/DINK", "max_forks_repo_head_hexsha": "5f6b3eaba279126f79ae6607f965311002d7451c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2019-01-16T03:05:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-04T21:07:53.000Z", "avg_line_length": 27.46875, "max_line_length": 79, "alphanum_fraction": 0.6528251801, "include": true, "reason": "import numpy", "num_tokens": 1566}
|
[STATEMENT]
lemma less_setsD: "\<lbrakk>A \<lless> B; a \<in> A; b \<in> B\<rbrakk> \<Longrightarrow> a < b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>A \<lless> B; a \<in> A; b \<in> B\<rbrakk> \<Longrightarrow> a < b
[PROOF STEP]
by (auto simp: less_sets_def)
|
{"llama_tokens": 124, "file": "Nash_Williams_Nash_Extras", "length": 1}
|
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import numpy
from PyQt5.QtGui import QImage, qRed, qGreen, qBlue
from PyQt5.QtCore import Qt
from UM.Mesh.MeshReader import MeshReader
from UM.Mesh.MeshBuilder import MeshBuilder
from UM.Math.Vector import Vector
from UM.Job import Job
from UM.Logger import Logger
from .ImageReaderUI import ImageReaderUI
from cura.Scene.CuraSceneNode import CuraSceneNode as SceneNode
class ImageReader(MeshReader):
def __init__(self) -> None:
super().__init__()
self._supported_extensions = [".jpg", ".jpeg", ".bmp", ".gif", ".png"]
self._ui = ImageReaderUI(self)
def preRead(self, file_name, *args, **kwargs):
img = QImage(file_name)
if img.isNull():
Logger.log("e", "Image is corrupt.")
return MeshReader.PreReadResult.failed
width = img.width()
depth = img.height()
largest = max(width, depth)
width = width / largest * self._ui.default_width
depth = depth / largest * self._ui.default_depth
self._ui.setWidthAndDepth(width, depth)
self._ui.showConfigUI()
self._ui.waitForUIToClose()
if self._ui.getCancelled():
return MeshReader.PreReadResult.cancelled
return MeshReader.PreReadResult.accepted
def _read(self, file_name):
size = max(self._ui.getWidth(), self._ui.getDepth())
return self._generateSceneNode(file_name, size, self._ui.peak_height, self._ui.base_height, self._ui.smoothing, 512, self._ui.image_color_invert)
def _generateSceneNode(self, file_name, xz_size, peak_height, base_height, blur_iterations, max_size, image_color_invert):
scene_node = SceneNode()
mesh = MeshBuilder()
img = QImage(file_name)
if img.isNull():
Logger.log("e", "Image is corrupt.")
return None
width = max(img.width(), 2)
height = max(img.height(), 2)
aspect = height / width
if img.width() < 2 or img.height() < 2:
img = img.scaled(width, height, Qt.IgnoreAspectRatio)
base_height = max(base_height, 0)
peak_height = max(peak_height, -base_height)
xz_size = max(xz_size, 1)
scale_vector = Vector(xz_size, peak_height, xz_size)
if width > height:
scale_vector = scale_vector.set(z=scale_vector.z * aspect)
elif height > width:
scale_vector = scale_vector.set(x=scale_vector.x / aspect)
if width > max_size or height > max_size:
scale_factor = max_size / width
if height > width:
scale_factor = max_size / height
width = int(max(round(width * scale_factor), 2))
height = int(max(round(height * scale_factor), 2))
img = img.scaled(width, height, Qt.IgnoreAspectRatio)
width_minus_one = width - 1
height_minus_one = height - 1
Job.yieldThread()
texel_width = 1.0 / (width_minus_one) * scale_vector.x
texel_height = 1.0 / (height_minus_one) * scale_vector.z
height_data = numpy.zeros((height, width), dtype=numpy.float32)
for x in range(0, width):
for y in range(0, height):
qrgb = img.pixel(x, y)
avg = float(qRed(qrgb) + qGreen(qrgb) + qBlue(qrgb)) / (3 * 255)
height_data[y, x] = avg
Job.yieldThread()
if image_color_invert:
height_data = 1 - height_data
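        # Each blur pass below sums a pixel with its 8 edge-padded neighbours
        # (9 terms in total) and divides by 9, i.e. a 3x3 mean filter applied
        # blur_iterations times.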
for _ in range(0, blur_iterations):
copy = numpy.pad(height_data, ((1, 1), (1, 1)), mode= "edge")
height_data += copy[1:-1, 2:]
height_data += copy[1:-1, :-2]
height_data += copy[2:, 1:-1]
height_data += copy[:-2, 1:-1]
height_data += copy[2:, 2:]
height_data += copy[:-2, 2:]
height_data += copy[2:, :-2]
height_data += copy[:-2, :-2]
height_data /= 9
Job.yieldThread()
height_data *= scale_vector.y
height_data += base_height
heightmap_face_count = 2 * height_minus_one * width_minus_one
total_face_count = heightmap_face_count + (width_minus_one * 2) * (height_minus_one * 2) + 2
mesh.reserveFaceCount(total_face_count)
# initialize to texel space vertex offsets.
# 6 is for 6 vertices for each texel quad.
heightmap_vertices = numpy.zeros((width_minus_one * height_minus_one, 6, 3), dtype = numpy.float32)
heightmap_vertices = heightmap_vertices + numpy.array([[
[0, base_height, 0],
[0, base_height, texel_height],
[texel_width, base_height, texel_height],
[texel_width, base_height, texel_height],
[texel_width, base_height, 0],
[0, base_height, 0]
]], dtype = numpy.float32)
offsetsz, offsetsx = numpy.mgrid[0: height_minus_one, 0: width - 1]
offsetsx = numpy.array(offsetsx, numpy.float32).reshape(-1, 1) * texel_width
offsetsz = numpy.array(offsetsz, numpy.float32).reshape(-1, 1) * texel_height
# offsets for each texel quad
heightmap_vertex_offsets = numpy.concatenate([offsetsx, numpy.zeros((offsetsx.shape[0], offsetsx.shape[1]), dtype=numpy.float32), offsetsz], 1)
heightmap_vertices += heightmap_vertex_offsets.repeat(6, 0).reshape(-1, 6, 3)
# apply height data to y values
heightmap_vertices[:, 0, 1] = heightmap_vertices[:, 5, 1] = height_data[:-1, :-1].reshape(-1)
heightmap_vertices[:, 1, 1] = height_data[1:, :-1].reshape(-1)
heightmap_vertices[:, 2, 1] = heightmap_vertices[:, 3, 1] = height_data[1:, 1:].reshape(-1)
heightmap_vertices[:, 4, 1] = height_data[:-1, 1:].reshape(-1)
heightmap_indices = numpy.array(numpy.mgrid[0:heightmap_face_count * 3], dtype=numpy.int32).reshape(-1, 3)
mesh._vertices[0:(heightmap_vertices.size // 3), :] = heightmap_vertices.reshape(-1, 3)
mesh._indices[0:(heightmap_indices.size // 3), :] = heightmap_indices
mesh._vertex_count = heightmap_vertices.size // 3
mesh._face_count = heightmap_indices.size // 3
geo_width = width_minus_one * texel_width
geo_height = height_minus_one * texel_height
# bottom
mesh.addFaceByPoints(0, 0, 0, 0, 0, geo_height, geo_width, 0, geo_height)
mesh.addFaceByPoints(geo_width, 0, geo_height, geo_width, 0, 0, 0, 0, 0)
# north and south walls
for n in range(0, width_minus_one):
x = n * texel_width
nx = (n + 1) * texel_width
hn0 = height_data[0, n]
hn1 = height_data[0, n + 1]
hs0 = height_data[height_minus_one, n]
hs1 = height_data[height_minus_one, n + 1]
mesh.addFaceByPoints(x, 0, 0, nx, 0, 0, nx, hn1, 0)
mesh.addFaceByPoints(nx, hn1, 0, x, hn0, 0, x, 0, 0)
mesh.addFaceByPoints(x, 0, geo_height, nx, 0, geo_height, nx, hs1, geo_height)
mesh.addFaceByPoints(nx, hs1, geo_height, x, hs0, geo_height, x, 0, geo_height)
# west and east walls
for n in range(0, height_minus_one):
y = n * texel_height
ny = (n + 1) * texel_height
hw0 = height_data[n, 0]
hw1 = height_data[n + 1, 0]
he0 = height_data[n, width_minus_one]
he1 = height_data[n + 1, width_minus_one]
mesh.addFaceByPoints(0, 0, y, 0, 0, ny, 0, hw1, ny)
mesh.addFaceByPoints(0, hw1, ny, 0, hw0, y, 0, 0, y)
mesh.addFaceByPoints(geo_width, 0, y, geo_width, 0, ny, geo_width, he1, ny)
mesh.addFaceByPoints(geo_width, he1, ny, geo_width, he0, y, geo_width, 0, y)
mesh.calculateNormals(fast=True)
scene_node.setMeshData(mesh.build())
return scene_node
|
{"hexsha": "5195b61595b002c82ac4e53e139a517eff13c2b3", "size": 7928, "ext": "py", "lang": "Python", "max_stars_repo_path": "Fracktory3-3.0_b11/plugins/ImageReader/ImageReader.py", "max_stars_repo_name": "ganeshmev/Fracktory3-3.0_b11_KLE", "max_stars_repo_head_hexsha": "16066e6993b96a880aa1a2f044a27930cbd0787d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Fracktory3-3.0_b11/plugins/ImageReader/ImageReader.py", "max_issues_repo_name": "ganeshmev/Fracktory3-3.0_b11_KLE", "max_issues_repo_head_hexsha": "16066e6993b96a880aa1a2f044a27930cbd0787d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Fracktory3-3.0_b11/plugins/ImageReader/ImageReader.py", "max_forks_repo_name": "ganeshmev/Fracktory3-3.0_b11_KLE", "max_forks_repo_head_hexsha": "16066e6993b96a880aa1a2f044a27930cbd0787d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8744186047, "max_line_length": 153, "alphanum_fraction": 0.6086024218, "include": true, "reason": "import numpy", "num_tokens": 2163}
|
import os,sys
import os.path
import numpy as np
import pandas as pd
import torch
import torch.utils.data
from torchvision import datasets,transforms
from sklearn.utils import shuffle
import urllib.request
from PIL import Image
import pickle
import utils
########################################################################################################################
def _load_dataset(dloader, tfs, name, ncla, size=None, expand_channels=False):
    dat={}
    # check if resize transform should be applied
    if size is not None:
        pass
    # load the datasets
    dat['train']=dloader('../dat/',train=True,download=True,transform=tfs())
    dat['test']=dloader('../dat/',train=False,download=True,transform=tfs())
data={}
data['name']=name
data['ncla']=ncla
for s in ['train','test']:
loader=torch.utils.data.DataLoader(dat[s],batch_size=1,shuffle=False)
data[s]={'x': [],'y': []}
for image,target in loader:
# check if channels should be expanded
if expand_channels is True:
image=image.expand(1,3,image.size(2),image.size(3)) # Create 3 equal channels
# add to dataset
data[s]['x'].append(image)
data[s]['y'].append(target.numpy()[0])
return data
def _load_cifar10():
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(datasets.CIFAR10, tfs, "cifar10", 10)
def _load_cifar100():
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(datasets.CIFAR100, tfs, "cifar100", 100)
def _load_mnist():
#mean=(0.1307,) # Mean and std without including the padding
#std=(0.3081,)
mean=(0.1,) # Mean and std including the padding
std=(0.2752,)
tfs = lambda: transforms.Compose([transforms.Pad(padding=2,fill=0),transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(datasets.MNIST, tfs, "mnist", 10, expand_channels=True)
def _load_fashion_mnist():
mean=(0.2190,) # Mean and std including the padding
std=(0.3318,)
tfs = lambda: transforms.Compose([transforms.Pad(padding=2,fill=0),transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(datasets.FashionMNIST, tfs, "fashion_mnist", 10, expand_channels=True)
def _load_not_mnist():
mean=(0.4254,)
std=(0.4501,)
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(notMNIST, tfs, "not_mnist", 10, expand_channels=True)
def _load_svhn():
mean=[0.4377,0.4438,0.4728]
std=[0.198,0.201,0.197]
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
loader = lambda p, train, download, transform: datasets.SVHN(p, split="train" if train is True else "test", download=download, transform=transform)
return _load_dataset(loader, tfs, "svhn", 10)
def _load_traffic_signs():
mean=[0.3398,0.3117,0.3210]
std=[0.2755,0.2647,0.2712]
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(TrafficSigns, tfs, "traffic_signs", 43)
def _load_cub200():
mean=[x/255 for x in [90.6379,93.2626,80.7344]]
std=[x/255 for x in [71.4966,71.0943,72.5651]]
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(Cub200, tfs, "cub200", 200)
def _load_facescrub():
mean=[0.5163,0.5569,0.4695]
std=[0.2307,0.2272,0.2479]
tfs = lambda: transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])
return _load_dataset(Facescrub, tfs, "facescrub", 100)
def get(seed=0,fixed_order=False,pc_valid=0.15,num_data=8,sample=False):
data={}
taskcla=[]
# output size for training (channels first)
# TODO: update resize options
size=[3,32,32]
# create dataset list
idata=np.arange(num_data)
if sample is True:
idata = np.random.choice(np.arange(9), num_data, replace=False)
# TODO: update dataset collection
# shuffle if activated
if not fixed_order:
idata=list(shuffle(idata,random_state=seed))
print('Task order =',idata)
# iterate through all data and save in binary format (for faster loading)
if not os.path.isdir('../dat/binary_mixture/'):
os.makedirs('../dat/binary_mixture')
# Pre-load
for n,idx in enumerate(idata):
# check if data already exists
if os.path.isfile(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+'trainx.bin')):
data[n] = dict.fromkeys(['name','ncla','train','test'])
if idx==0:
data[n]['name']='cifar10'
data[n]['ncla']=10
elif idx==1:
data[n]['name']='cifar100'
data[n]['ncla']=100
elif idx==2:
data[n]['name']='mnist'
data[n]['ncla']=10
elif idx==3:
data[n]['name']='svhn'
data[n]['ncla']=10
elif idx==4:
data[n]['name']='fashion_mnist'
data[n]['ncla']=10
elif idx==5:
data[n]['name']='traffic_signs'
data[n]['ncla']=43
elif idx==6:
data[n]['name']='facescrub'
data[n]['ncla']=100
elif idx==7:
data[n]['name']='not_mnist'
data[n]['ncla']=10
elif idx==8:
data[n]['name']='cub200'
data[n]['ncla']=200
else:
print('ERROR: Undefined data set',n)
sys.exit()
# Load
for s in ['train','test']:
data[n][s]={'x':[],'y':[]}
data[n][s]['x'] = torch.load(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'x.bin'))
data[n][s]['y'] = torch.load(os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'y.bin'))
else:
# check the item
if idx==0:
# CIFAR10
data[n] = _load_cifar10()
elif idx==1:
# CIFAR100
data[n] = _load_cifar100()
elif idx==2:
# MNIST
data[n]= _load_mnist()
elif idx == 3:
# SVHN
data[n] = _load_svhn()
elif idx == 4:
# FashionMNIST
data[n] = _load_fashion_mnist()
elif idx == 5:
# TrafficSigns
data[n] = _load_traffic_signs()
elif idx == 6:
# Facescrub 100 faces
data[n] = _load_facescrub()
elif idx == 7:
# notMNIST A-J letters
data[n] = _load_not_mnist()
elif idx == 8:
# CUB 200
data[n] = _load_cub200()
else:
print('ERROR: Undefined data set',n)
sys.exit()
#print(n,data[n]['name'],data[n]['ncla'],len(data[n]['train']['x']))
# TODO: apply complexity before saving (for easier filter later on)
# "Unify" and save
for s in ['train','test']:
data[n][s]['x']=torch.stack(data[n][s]['x']).view(-1,size[0],size[1],size[2])
data[n][s]['y']=torch.LongTensor(np.array(data[n][s]['y'],dtype=int)).view(-1)
torch.save(data[n][s]['x'], os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'x.bin'))
torch.save(data[n][s]['y'], os.path.join(os.path.expanduser('../dat/binary_mixture'),'data'+str(idx)+s+'y.bin'))
# Validation
for t in data.keys():
r=np.arange(data[t]['train']['x'].size(0))
r=np.array(shuffle(r,random_state=seed),dtype=int)
nvalid=int(pc_valid*len(r))
ivalid=torch.LongTensor(r[:nvalid])
itrain=torch.LongTensor(r[nvalid:])
data[t]['valid']={}
data[t]['valid']['x']=data[t]['train']['x'][ivalid].clone()
data[t]['valid']['y']=data[t]['train']['y'][ivalid].clone()
data[t]['train']['x']=data[t]['train']['x'][itrain].clone()
data[t]['train']['y']=data[t]['train']['y'][itrain].clone()
# Others
n=0
for t in data.keys():
taskcla.append((t,data[t]['ncla']))
n+=data[t]['ncla']
data['ncla']=n
return data,taskcla,size
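# Minimal usage sketch (illustrative):
#   data, taskcla, size = get(seed=0, pc_valid=0.15, num_data=8)
#   x, y = data[0]['train']['x'], data[0]['train']['y']  # tensors for the first task
#   # taskcla is a list of (task_id, n_classes) pairs; size is [3, 32, 32]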
########################################################################################################################
class FashionMNIST(datasets.MNIST):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
########################################################################################################################
class TrafficSigns(torch.utils.data.Dataset):
"""`German Traffic Signs <http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset>`_ Dataset.
Args:
root (string): Root directory of dataset where directory ``Traffic signs`` exists.
        train (bool, optional): If True, load the training split, otherwise the test split.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.
If dataset is already downloaded, it is not downloaded again.
"""
def __init__(self, root, train=True,transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.filename = "traffic_signs_dataset.zip"
self.url = "https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580d53ce_traffic-sign-data/traffic-sign-data.zip"
# Other options for the same 32x32 pickled dataset
# url="https://d17h27t6h515a5.cloudfront.net/topher/2016/November/581faac4_traffic-signs-data/traffic-signs-data.zip"
# url_train="https://drive.google.com/open?id=0B5WIzrIVeL0WR1dsTC1FdWEtWFE"
# url_test="https://drive.google.com/open?id=0B5WIzrIVeL0WLTlPNlR2RG95S3c"
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
self.download()
training_file = 'lab 2 data/train.p'
testing_file = 'lab 2 data/test.p'
if train:
with open(os.path.join(root,training_file), mode='rb') as f:
train = pickle.load(f)
self.data = train['features']
self.labels = train['labels']
else:
with open(os.path.join(root,testing_file), mode='rb') as f:
test = pickle.load(f)
self.data = test['features']
self.labels = test['labels']
self.data = np.transpose(self.data, (0, 3, 1, 2))
#print(self.data.shape); sys.exit()
def __getitem__(self, index):
"""
Args: index (int): Index
Returns: tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.data)
def download(self):
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
########################################################################################################################
class Facescrub(torch.utils.data.Dataset):
"""Subset of the Facescrub cropped from the official Megaface challenge page: http://megaface.cs.washington.edu/participate/challenge.html, resized to 38x38
Args:
        root (string): Root directory where the dataset archive is stored or downloaded.
        train (bool, optional): If True, load the training split, otherwise the test split.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.
If dataset is already downloaded, it is not downloaded again.
"""
def __init__(self, root, train=True,transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.filename = "facescrub_100.zip"
self.url = "https://github.com/nkundiushuti/facescrub_subset/blob/master/data/facescrub_100.zip?raw=true"
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
self.download()
training_file = 'facescrub_train_100.pkl'
testing_file = 'facescrub_test_100.pkl'
if train:
with open(os.path.join(root,training_file),'rb') as f:
# u = pickle._Unpickler(f)
# u.encoding = 'latin1'
# train = u.load()
train = pickle.load(f)
self.data = train['features'].astype(np.uint8)
self.labels = train['labels'].astype(np.uint8)
"""
print(self.data.shape)
print(self.data.mean())
print(self.data.std())
print(self.labels.max())
#"""
else:
with open(os.path.join(root,testing_file),'rb') as f:
# u = pickle._Unpickler(f)
# u.encoding = 'latin1'
# test = u.load()
test = pickle.load(f)
self.data = test['features'].astype(np.uint8)
self.labels = test['labels'].astype(np.uint8)
def __getitem__(self, index):
"""
Args: index (int): Index
Returns: tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.data)
def download(self):
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
########################################################################################################################
class notMNIST(torch.utils.data.Dataset):
"""The notMNIST dataset is a image recognition dataset of font glypyhs for the letters A through J useful with simple neural networks. It is quite similar to the classic MNIST dataset of handwritten digits 0 through 9.
Args:
root (string): Root directory of dataset where directory ``Traffic signs`` exists.
split (string): One of {'train', 'test'}.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory.
If dataset is already downloaded, it is not downloaded again.
"""
def __init__(self, root, train=True,transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.filename = "notmnist.zip"
self.url = "https://github.com/nkundiushuti/notmnist_convert/blob/master/notmnist.zip?raw=true"
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
self.download()
training_file = 'notmnist_train.pkl'
testing_file = 'notmnist_test.pkl'
if train:
with open(os.path.join(root,training_file),'rb') as f:
# u = pickle._Unpickler(f)
# u.encoding = 'latin1'
# train = u.load()
train = pickle.load(f)
self.data = train['features'].astype(np.uint8)
self.labels = train['labels'].astype(np.uint8)
else:
with open(os.path.join(root,testing_file),'rb') as f:
# u = pickle._Unpickler(f)
# u.encoding = 'latin1'
# test = u.load()
test = pickle.load(f)
self.data = test['features'].astype(np.uint8)
self.labels = test['labels'].astype(np.uint8)
def __getitem__(self, index):
"""
Args: index (int): Index
Returns: tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img[0])
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.data)
def download(self):
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
########################################################################################################################
class Cub200(torch.utils.data.Dataset):
'''Loads the CUB200-2011 Dataset.
Reference: Wah C., Branson S., Welinder P., Perona P., Belongie S. “The Caltech-UCSD Birds-200-2011 Dataset.” Computation & Neural Systems Technical Report, CNS-TR-2011-001.
'''
def __init__(self, root, train=True, transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.filename = "CUB_200_2011.tgz"
self.url = "http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz"
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
self.download()
# load the data information
root = os.path.join(root, 'CUB_200_2011')
train_test = pd.read_csv(os.path.join(root, "train_test_split.txt"), sep=' ', header=None, names=['id', 'is_train'])
images = pd.read_csv(os.path.join(root, "images.txt"), sep=' ', header=None, names=['id', 'path'])
cls_label = pd.read_csv(os.path.join(root, "image_class_labels.txt"), sep=' ', header=None, names=['id', 'class_id'])
cls_names = pd.read_csv(os.path.join(root, "classes.txt"), sep=' ', header=None, names=['class_id', 'class_name'])
# merge the data
min_cid = np.min(cls_names['class_id'])
cls_label['class_id'] = cls_label['class_id'] - min_cid
cls_names['class_id'] = cls_names['class_id'] - min_cid
df_images = pd.merge(train_test, images, on='id')
df_images = pd.merge(df_images, cls_label, on='id')
num_classes = len(cls_names.index)
cid = np.array(cls_names['class_id'])
# shuffle the dataset
# TODO: updated random state?
df_images = df_images.sample(frac=1, random_state=123)
# select the correct data
if train is True:
df_images = df_images[df_images['is_train'] == 1]
else:
df_images = df_images[df_images['is_train'] == 0]
# load into data
tmp_data = []
tmp_lbls = []
for idx, row in df_images.iterrows():
# load the image and label
img = Image.open(os.path.join(root, 'images', row['path'])).convert("RGB")
cla = row['class_id']
# resize the image to common input
img, scale = utils.resize_and_pad(img, (32, 32), 'fit_center')
# append data
tmp_data.append(np.array(img).astype('float'))
tmp_lbls.append(cla)
# combine data
self.data = np.stack(tmp_data, axis=0)
self.labels = np.stack(tmp_lbls, axis=0)
# convert to channels first
self.data = np.transpose(self.data, (0, 3, 1, 2))
def __getitem__(self, index):
"""
Args: index (int): Index
Returns: tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.data)
def download(self):
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import tarfile
tar_ref = tarfile.open(fpath, 'r:gz')
tar_ref.extractall(root)
tar_ref.close()
# CHECK: check if zip needs to be extracted
########################################################################################################################
|
{"hexsha": "25c1ea73571470ae024588a9bebd0500e536a68b", "size": 25096, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dataloaders/mixture.py", "max_stars_repo_name": "felixnext/dwa", "max_stars_repo_head_hexsha": "a37ea57ac247f00c5bf2d2b32a3a3cf9c2597b9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dataloaders/mixture.py", "max_issues_repo_name": "felixnext/dwa", "max_issues_repo_head_hexsha": "a37ea57ac247f00c5bf2d2b32a3a3cf9c2597b9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dataloaders/mixture.py", "max_forks_repo_name": "felixnext/dwa", "max_forks_repo_head_hexsha": "a37ea57ac247f00c5bf2d2b32a3a3cf9c2597b9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0903426791, "max_line_length": 223, "alphanum_fraction": 0.5496891935, "include": true, "reason": "import numpy", "num_tokens": 5833}
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from torch.utils.data import Dataset
from pythia.common.registry import registry
class MultiTask(Dataset):
def __init__(self, dataset_type, config):
super(MultiTask, self).__init__()
self.config = config
self.dataset_type = dataset_type
self.task_names = map(lambda x: x.strip(), self.config["tasks"].split(","))
self.tasks = []
self.tasks_lens = []
for task_name in self.task_names:
task_class = registry.get_task_class(task_name)
if task_class is None:
print("[Error] %s not present in our mapping" % task_name)
return
if task_name not in self.config["task_attributes"]:
print(
"[Error] No attributes present for task %s in config."
" Skipping" % task_name
)
continue
task_attributes = self.config["task_attributes"][task_name]
task_attributes["dataset_type"] = self.dataset_type
task = task_class()
task.load(**task_attributes)
self.tasks.append(task)
self.tasks_lens.append(len(task))
# uniform probabilities by default (np.random.choice requires p to sum to 1)
self.task_probabilities = [1 / len(self.tasks) for _ in self.tasks]
self.num_tasks = len(self.tasks)
training_parameters = self.config["training_parameters"]
if training_parameters["task_size_proportional_sampling"]:
self.task_probabilities = self.tasks_lens[:]
len_sum = sum(self.tasks_lens)
self.task_probabilities = [
prob / len_sum for prob in self.task_probabilities
]
self.change_task()
def change_task(self):
self.selected_task = np.random.choice(
self.num_tasks, 1, p=self.task_probabilities
)[0]
self.chosen_task = self.tasks[self.selected_task]
self.chosen_task.change_dataset()
def get_tasks(self):
return self.tasks
def verbose_dump(self, *args):
self.chosen_task.verbose_dump(*args)
def __len__(self):
return sum(self.tasks_lens)
def __getitem__(self, idx):
idx = idx % self.tasks_lens[self.selected_task]
item = self.chosen_task[idx]
return item
def update_registry_for_model(self, config):
for task in self.tasks:
task.update_registry_for_model(config)
def prepare_batch(self, batch):
return self.chosen_task.prepare_batch(batch)
def init_args(self, parser):
for task in self.tasks:
task.init_args(parser)
def clean_config(self, config):
for task in self.tasks:
task.clean_config(config)
return config
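# Illustration (hedged sketch with made-up lengths): with
# task_size_proportional_sampling enabled, a task is drawn with probability
# proportional to its length, e.g. lengths 8000 and 2000 give [0.8, 0.2]:
#
# import numpy as np
# tasks_lens = [8000, 2000]
# probs = [l / sum(tasks_lens) for l in tasks_lens]   # [0.8, 0.2]
# selected = np.random.choice(len(tasks_lens), 1, p=probs)[0]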
|
{"hexsha": "d15759bcc617ea80d39b5da691628842f81dafd1", "size": 2755, "ext": "py", "lang": "Python", "max_stars_repo_path": "pythia/tasks/multi_task.py", "max_stars_repo_name": "mandliya/pythia_updated", "max_stars_repo_head_hexsha": "e986c4dff7cc3a9f6b85ffe8e7d45ea53ab36e95", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2020-03-06T13:05:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-30T15:12:00.000Z", "max_issues_repo_path": "pythia/tasks/multi_task.py", "max_issues_repo_name": "Bunlong/pythia", "max_issues_repo_head_hexsha": "1bed85e59a753bec73e6d3fcf1461651d45c791b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-05T10:11:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-05T10:11:24.000Z", "max_forks_repo_path": "pythia/tasks/multi_task.py", "max_forks_repo_name": "Bunlong/pythia", "max_forks_repo_head_hexsha": "1bed85e59a753bec73e6d3fcf1461651d45c791b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-03-07T08:10:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-24T05:39:36.000Z", "avg_line_length": 29.623655914, "max_line_length": 83, "alphanum_fraction": 0.6145190563, "include": true, "reason": "import numpy", "num_tokens": 556}
|
import numpy as np
import tensorflow as tf
import vgg16
import utils
import cv2
BATCH_SIZE = 100
def mkbatch():
with open('material_dataset.txt') as f:
files = f.readlines()
cnt = len(files) // BATCH_SIZE
if len(files) % BATCH_SIZE != 0:
cnt += 1
files = [item.split()[0] for item in files]
batchlist = []
for i in range(cnt):
batchlist.append(files[i*BATCH_SIZE:(i+1)*BATCH_SIZE])
return batchlist
def getbatchdata(filenames):
imgs = []
for filename in filenames:
try:
im = cv2.imread('data/'+filename)
im = np.asarray(im[248:472, 368:592], dtype='float32') / 255.
except Exception:
# cv2.imread returns None for unreadable files; fall back to a black frame
print("fileerror", 'data/'+filename)
im = np.zeros((224, 224, 3), dtype='float32')
imgs.append(im)
return np.stack(imgs, 0)
if __name__ == "__main__":
# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
with tf.Session() as sess:
images = tf.placeholder("float", [None, 224, 224, 3])
vgg = vgg16.Vgg16()
with tf.name_scope("content_vgg"):
vgg.build(images)
batchlst = mkbatch()
out_fea = []
feature = vgg.relu7
for idx, batch in enumerate(batchlst):
inp = getbatchdata(batch)
fea = sess.run(feature, feed_dict={images: inp})
out_fea.append(fea)
# print(fea[0])
if idx % 10 == 0:
print(idx, '/', len(batchlst))
out_fea = np.concatenate(out_fea, 0)
print(out_fea.shape)
np.savez('vgg_fea', out_fea)
# prob = sess.run(vgg.prob, feed_dict={images: batch})
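# Hedged note on reading the saved features back: np.savez called with a
# positional array stores it under the default key 'arr_0' in 'vgg_fea.npz'.
# The 4096-dim shape assumes vgg.relu7 is the standard VGG16 fc7 activation.
#
# fea = np.load('vgg_fea.npz')['arr_0']  # expected shape: (n_images, 4096)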
|
{"hexsha": "8dfeaf609be1c4a844154e82e40952e3ca079f75", "size": 1707, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/tensorflow-vgg-master/vgg16_fabri.py", "max_stars_repo_name": "leix28/ML-Fabri", "max_stars_repo_head_hexsha": "6776f1b93cc84ab40569af3052ffc30bee7f8910", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "baselines/tensorflow-vgg-master/vgg16_fabri.py", "max_issues_repo_name": "leix28/ML-Fabri", "max_issues_repo_head_hexsha": "6776f1b93cc84ab40569af3052ffc30bee7f8910", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baselines/tensorflow-vgg-master/vgg16_fabri.py", "max_forks_repo_name": "leix28/ML-Fabri", "max_forks_repo_head_hexsha": "6776f1b93cc84ab40569af3052ffc30bee7f8910", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.671875, "max_line_length": 119, "alphanum_fraction": 0.5746924429, "include": true, "reason": "import numpy", "num_tokens": 459}
|
# For extracting unique MMSI from multiple input files
ExtractUniqueMMSI <- function(infiles){
uniqueMMSI <- c() # dataframe to store unique MMSIs
shipcount <- 0 #counter for unique ships
shiplist <- NA # start with a null ship list to compare first ship to
for(i in 1:length(infiles)){ # for each file
infile <- read.csv(infiles[i])
fileships <- unique(infile$MMSI)
addships <- which(is.element(fileships, shiplist) == F)
addcount <- length(addships)
if(addcount > 0){
shiplist[(shipcount+1):(shipcount+addcount)] <- fileships[addships]
}
shipcount <- shipcount + addcount
print(paste("done file #", i, ": ", filelist[i], sep = ""))
print(paste("Ships added:", addcount))
print(paste("Ship count:", shipcount))
print(paste("% ships new:", round(addcount/length(fileships)*100, 2)))
}
print(paste(length(which(nchar(shiplist) < 7)),"MMSI with < 7 digits were excluded"))
print(paste(length(which(nchar(shiplist) > 9)),"MMSI with > 9 digits were excluded"))
keepMMSI <- which(nchar(shiplist) >= 7 & nchar(shiplist) <= 9) # because all MMSIs in clean list have 7-9 characters (see below)
shiplist <- shiplist[keepMMSI]
return (shiplist)
#write.csv(shiplist, file = outfile, row.names = FALSE)
}
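# Usage sketch (hypothetical file names, for illustration only):
# infiles <- c("ais_2019_01.csv", "ais_2019_02.csv")
# mmsi <- ExtractUniqueMMSI(infiles)
# write.csv(data.frame(MMSI = mmsi), "unique_mmsi.csv", row.names = FALSE)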
|
{"hexsha": "07bbe1e280ab839dc594edceb70c59c40bf9d96c", "size": 1349, "ext": "r", "lang": "R", "max_stars_repo_path": "R_Functions/ExtractUniqueMMSI.r", "max_stars_repo_name": "Pacific-CEBP/AIS-processing", "max_stars_repo_head_hexsha": "6704511cf69ae51fb14b61c4d53771031b5ac962", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-20T18:35:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-18T01:49:52.000Z", "max_issues_repo_path": "R_Functions/ExtractUniqueMMSI.r", "max_issues_repo_name": "Pacific-CEBP/AIS-processing", "max_issues_repo_head_hexsha": "6704511cf69ae51fb14b61c4d53771031b5ac962", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R_Functions/ExtractUniqueMMSI.r", "max_forks_repo_name": "Pacific-CEBP/AIS-processing", "max_forks_repo_head_hexsha": "6704511cf69ae51fb14b61c4d53771031b5ac962", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-31T21:42:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T10:17:02.000Z", "avg_line_length": 25.9423076923, "max_line_length": 130, "alphanum_fraction": 0.6367679763, "num_tokens": 401}
|
# -*- coding: utf-8 -*-
"""
This script makes plots of relevant data.
@author: Jonathan Dumas
"""
import yaml
import os
import pandas as pd
import energyscope as es
import numpy as np
import matplotlib.pyplot as plt
from sys import platform
from energyscope.utils import make_dir, load_config, get_FEC_from_sankey
from energyscope.postprocessing import get_total_einv
def compute_einv_res(cs: str, all_data: dict):
"""
Compute the Einv by RESOURCES part (Einv_op).
:param cs: case study path
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames
"""
# Load Einv data
df_einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
# Define the RESOURCES list
RESOURCES = list(all_data['Resources'].index)
return df_einv.loc[RESOURCES].copy()['Einv_op']
def compute_einv_tech(cs: str, all_data: dict):
"""
Compute the Einv by TECHNOLOGIES part (Einv_const).
:param cs: case study path
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames
"""
# Load Einv data
df_einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
# Define the TECHNOLOGIES list
TECHNOLOGIES = list(all_data['Technologies'].index)
return df_einv.loc[TECHNOLOGIES].copy()['Einv_constr']
def retrieve_einv_const_by_categories(range_val, all_data: dict, dir: str, user_data: str):
"""
Retrieve the Einv_const values for all case studies classed by categories of technologies.
:param range_val: range of GWP constrained values.
:param all_data: the data into a dict of pd.DataFrames.
:param dir: case study path and name.
:param user_data: user_data directory.
:return: dict with keys being the categories of technologies. For each category, a pd.DataFrame with Einv_const values for all scenarios.
"""
# Retrieve all Einv_const values for all case studies
einv_tech = []
for run in ['run_' + str(i) for i in range_val]:
cs_temp = dir + '/' + run
einv_tech.append(compute_einv_tech(cs=cs_temp, all_data=all_data))
df_einv_tech = pd.concat(einv_tech, axis=1)
df_einv_tech.columns = [i for i in range_val]
# Retrieve the technologies categories:
df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)
# tech_cat = ['Electricity', 'Heat', 'Mobility', 'Infrastructure', 'Synthetic fuels', 'Storage']
tech_cat = list(df_aux_tech['Category'].values)
tech_cat = list(dict.fromkeys(tech_cat)) # remove duplicate
# Class the technologies by categories into a dict
tech_by_cat = dict()
for cat in tech_cat:
tech_by_cat[cat] = list(df_aux_tech['Category'][df_aux_tech['Category'] == cat].index)
# Retrieve the values of Einv_const per category of technology (and remove tech where Einv_const is always 0)
tech_classed_by_cat = dict()
for cat in tech_by_cat.keys():
tech_classed_by_cat[cat] = retrieve_non_zero_val(df=df_einv_tech.loc[tech_by_cat[cat]].transpose()) / 1000  # TWh
return tech_classed_by_cat
def compute_einv_details(cs: str, user_data: str, all_data: dict):
"""
Compute the Einv by RESOURCES and TECHNOLOGIES, it details the breakdown by subcategories of RESOURCES and categories of TECHNOLOGIES.
:param cs: case study path
:param user_data: user_data directory
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames
"""
# Load Einv data
df_einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
# Define the RESOURCES and TECHNOLOGIES lists
RESOURCES = list(all_data['Resources'].index)
TECHNOLOGIES = list(all_data['Technologies'].index)
df_inv_res = df_einv.loc[RESOURCES].copy()
df_inv_tech = df_einv.loc[TECHNOLOGIES].copy()
# Get the category and subcategory indexes
df_aux_res = pd.read_csv(user_data + "/aux_resources.csv", index_col=0)
df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)
# 1. Compute the Einv by subcategory of resources
res_subcat = list(df_aux_res['Subcategory'].values)
res_subcat = list(dict.fromkeys(res_subcat)) # remove duplicate
res_by_subcat = dict()
for sub_cat in res_subcat:
res_by_subcat[sub_cat] = list(df_aux_res['Subcategory'][df_aux_res['Subcategory'] == sub_cat].index)
einv_res_by_subcat = dict()
for sub_cat in res_by_subcat.keys():
einv_res_by_subcat[sub_cat] = df_inv_res.loc[res_by_subcat[sub_cat]]
df_inv_res_by_subcat = pd.DataFrame(
data=[einv_res_by_subcat[sub_cat].sum().sum() for sub_cat in einv_res_by_subcat.keys()],
index=einv_res_by_subcat.keys(), columns=['RESSOURCES'])
# 2. Compute the Einv by category of technologies
tech_cat = list(df_aux_tech['Category'].values)
tech_cat = list(dict.fromkeys(tech_cat)) # remove duplicate
tech_by_cat = dict()
for cat in tech_cat:
tech_by_cat[cat] = list(df_aux_tech['Category'][df_aux_tech['Category'] == cat].index)
einv_tech_by_cat = dict()
for cat in tech_by_cat.keys():
einv_tech_by_cat[cat] = df_inv_tech.loc[tech_by_cat[cat]]
df_inv_tech_by_cat = pd.DataFrame(data=[einv_tech_by_cat[cat].sum().sum() for cat in einv_tech_by_cat.keys()],
index=einv_tech_by_cat.keys(), columns=['TECHNOLOGIES'])
return df_inv_res_by_subcat, df_inv_tech_by_cat
def compute_primary_energy(cs: str, user_data: str, run: str, all_data: dict):
"""
Compute the primary energy for a given case study.
:param cs: case study path.
:param user_data: user_data directory
:param run: run name.
:param all_data: the data into a dict of pd.DataFrames.
:return: the data into pd.DataFrames.
"""
# load year_balance.csv
df_y_balance = pd.read_csv(f"{cs}/output/year_balance.csv", index_col=0)
# list the resources
RESOURCES = list(all_data['Resources'][all_data['Resources']['Category'] != 'Others'].index)  # remove resources related to CO2
RESOURCES.remove('CO2_EMISSIONS')
# select primary energy from the year_balance.csv into a pd.DataFrame
# df_temp = df_y_balance.loc[RESOURCES].sum().loc[['ELECTRICITY', 'GASOLINE', 'DIESEL', 'LFO', 'GAS', 'WOOD',
# 'WET_BIOMASS', 'COAL', 'URANIUM', 'WASTE', 'H2', 'AMMONIA',
# 'METHANOL',
# 'RES_WIND', 'RES_SOLAR', 'RES_HYDRO', 'RES_GEO']] / 1000 # TWh
df_temp = df_y_balance.loc[RESOURCES].sum(axis=1) / 1000 # TWh
df_primary_energy = pd.DataFrame(data=df_temp.values, index=df_temp.index, columns=['RESSOURCES'])
# Label each resource by its subcategory: ['Other non-renewable', 'Fossil fuel', 'Biomass', 'Non-biomass']
df_primary_energy['Subcategory'] = ''
df_aux_res = pd.read_csv(user_data + "/aux_resources.csv", index_col=0)
for ind in df_primary_energy.index:
df_primary_energy['Subcategory'].loc[ind] = df_aux_res.loc[ind]['Subcategory']
# List of the subcategories into a list
res_subcat = list(df_primary_energy['Subcategory'].values)
res_subcat = list(dict.fromkeys(res_subcat)) # remove duplicate
# aggregate the primary energy by subcategory
primary_dict = dict()
for subcat in res_subcat:
primary_dict[subcat] = df_primary_energy[df_primary_energy['Subcategory'] == subcat]['RESSOURCES'].sum()
return pd.DataFrame(data=primary_dict.values(), index=primary_dict.keys(), columns=[run]), df_primary_energy.sort_values(by=['Subcategory'])
def fec_given_tech(tech: str, data: pd.DataFrame, prod_corr: float):
"""
Compute the FEC related to a given EUD and TECHNO.
:param tech: technology type to satisfy this EUD type such as IND_COGEN_GAS if EUD = HEAT_HIGH_T
:param data: dataframe with the year_balance.csv
:param prod_corr: corrected production (GWh) of the EUD to attribute to this technology.
:return: FEC value
"""
# get the inputs for a given technology: electricity, gas, H2, etc.
inputs_tech = data.loc[tech][data.loc[tech] < 0].copy()
# get the outputs for a given technology: electricity, heat high T, heat low T FHN, etc.
outputs_tech = data.loc[tech][data.loc[tech] > 0].copy()
if outputs_tech.sum() == 0:
return
else:
# remove C02 emissions
outputs_labels = list(outputs_tech.index)
for lab in ['CO2_ATM', 'CO2_INDUSTRY', 'CO2_CAPTURED']:
if lab in outputs_labels:
outputs_tech = outputs_tech.drop([lab], axis=0)
# Ex: eud = 'HEAT_HIGH_T' and tech = 'IND_COGEN_GAS'
# IND_COGEN_GAS inputs: gas with 2.1739
# IND_COGEN_GAS outputs: electricity with 0.9565 and HEAT_HIGH_T with 1
# -> FEC = (1 * (1+0.9565)) * (2.1739)
# Warning a technology may have several inputs such as CAR_PHEV with 0.1376 of ELECTRICITY and 0.1087 of GASOLINE for 1 of MOB_PRIVATE
return (prod_corr / outputs_tech.sum()) * (-inputs_tech.sum())
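# Worked example (illustrative numbers only, matching the comment above):
# outputs sum to 1 + 0.9565 = 1.9565 GWh, inputs sum to 2.1739 GWh of GAS,
# so with prod_corr = 1 the function returns
# (1 / 1.9565) * 2.1739 ~= 1.111 GWh of FEC attributed to that 1 GWh of heat.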
def compute_fec(data: pd.DataFrame, user_data: str):
"""
Compute the system FEC for a given simulation in GWh.
:param data: year_balance.csv
:param user_data: user_data directory.
:return: FEC detailed by EUD and technologies into fec_details dict, and FEC aggregated by EUD into fec_tot dict.
Assumption: FEC ELECTRICITY = EUD ELECTRICITY
See the FEC computation details for a given EUD in the function fec_given_tech(tech=tech, data=data, prod_corr=prod_corr)
"""
EUD_types = ['HEAT_HIGH_T', 'HEAT_LOW_T_DHN', 'HEAT_LOW_T_DECEN', 'MOB_PUBLIC', 'MOB_PRIVATE', 'MOB_FREIGHT_RAIL',
'MOB_FREIGHT_BOAT', 'MOB_FREIGHT_ROAD', 'HVC', 'AMMONIA', 'METHANOL']
df_aux_res = pd.read_csv(user_data + "/aux_resources.csv", index_col=0)
RESOURCES = list(df_aux_res.index)
fec_details = dict()
fec_tot = dict()
prod_tech_EUD = dict()
for eud in EUD_types:
fec_EUD = []
# list of tech that produced this eud
prod_tech_EUD[eud] = data[eud].drop(index=['END_USES_DEMAND'])[data[eud] > 0]
prod_sum = prod_tech_EUD[eud].sum()
# total consumption of this energy
conso_sum = -data[eud].drop(index=['END_USES_DEMAND'])[data[eud] < 0].sum()
# Note: conso_sum + eud = prod_sum
# We compute the FEC of the eud only, not of conso_sum + eud -> a correction factor is required
for tech in list(prod_tech_EUD[eud].index):
# correction factor to calculate the FEC corresponding at the consumption of the eud
corr_factor = prod_tech_EUD[eud][tech] / prod_sum
prod_corr = prod_tech_EUD[eud][tech] - conso_sum * corr_factor
if tech not in RESOURCES:
fec_tech_corr = fec_given_tech(tech=tech, data=data, prod_corr=prod_corr)
# fec_tech = fec_given_tech(tech=tech, data=data, prod_corr=prod_tech_EUD[eud][tech])
else:
fec_tech_corr = prod_corr
# fec_tech = prod_tech_EUD[eud][tech]
# print('%s %s %.1f %.1f %.1f' %(eud, tech, fec_tech, fec_tech_corr, corr_factor))
fec_EUD.append([tech, fec_tech_corr])
fec_details[eud] = pd.DataFrame(fec_EUD)
fec_tot[eud] = pd.DataFrame(fec_EUD)[1].sum()
fec_details['ELECTRICITY'] = data['ELECTRICITY'].loc['END_USES_DEMAND']
fec_tot['ELECTRICITY'] = data['ELECTRICITY'].loc['END_USES_DEMAND']
return fec_details, fec_tot
def eroi_computation(dir: str, user_data: str, range_val):
"""
EROI, Einv, and FEC computation for several case studies.
:param dir: directory to the case studies.
:param user_data: user_data directory.
:param range_val: range of GWP constrained values.
:return: results into pd.DataFrame.
"""
fec_tot_list = []
eroi_list = []
for run in ['run_' + str(i) for i in range_val]:
dir_temp = dir + '/' + run
df_year_balance = pd.read_csv(dir_temp + "/output/year_balance.csv", index_col=0)
fec_details, fec_tot = compute_fec(data=df_year_balance, user_data=user_data)
fec_temp = sum(fec_tot.values())
einv_temp = get_total_einv(dir_temp)
eroi_temp = fec_temp / einv_temp
fec_tot_list.append(pd.DataFrame(data=fec_tot.values(), index=fec_tot.keys(), columns=[run]))
eroi_list.append([eroi_temp, fec_temp / 1000, einv_temp / 1000])
df_fec_details = pd.concat(fec_tot_list, axis=1) / 1000 # TWh
df_fec_details.columns = [i for i in range_val]
df_eroi = pd.DataFrame(data=np.asarray(eroi_list), index=[i for i in range_val], columns=['EROI', 'FEC', 'Einv'])
return df_eroi, df_fec_details
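# Numerical illustration (hedged, made-up values): a scenario with
# FEC = 400 TWh/y and Einv = 50 TWh/y yields EROI = 400 / 50 = 8.0,
# i.e. eight units of final energy delivered per unit of energy invested.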
def res_details(range_val, all_data: dict, dir: str, user_data: str):
"""
Compute the Einv and primary energy details.
:param range_val: range of GWP constrained values.
:param all_data: the data into a dict of pd.DataFrames.
:param dir: case study path and name.
:param user_data: user_data directory.
:return: Einv and primary energy results in pd.DataFrames.
"""
Einv_Res_cat_list = []
Einv_Tech_cat_list = []
Einv_res_list = []
EI_by_cat_list = []
EI_list = []
for run in ['run_' + str(i) for i in range_val]:
cs_temp = dir + '/' + run
# Compute the Einv details divided into resources and technologies by categories
df_Einv_RES_cat_temp, df_Einv_TECH_cat_temp = compute_einv_details(cs=cs_temp,
user_data=user_data,
all_data=all_data)
Einv_Res_cat_list.append(df_Einv_RES_cat_temp)
Einv_Tech_cat_list.append(df_Einv_TECH_cat_temp)
# Einv_op only
Einv_res_list.append(compute_einv_res(cs=cs_temp, all_data=all_data))
# Compute the primary energy
df_EI_cat_temp, df_EI_temp = compute_primary_energy(cs=cs_temp, user_data=user_data, run=run, all_data=all_data)
EI_by_cat_list.append(df_EI_cat_temp)
EI_list.append(df_EI_temp.drop(columns=['Subcategory']))
cols = [i for i in range_val]
df_Einv_op = pd.concat(Einv_res_list, axis=1) / 1000 # TWh
df_Einv_op.columns = cols
df_EI = pd.concat(EI_list, axis=1)
df_EI.columns = cols
df_EI['Subcategory'] = df_EI_temp['Subcategory'].copy()
df_Einv_RES_cat = pd.concat(Einv_Res_cat_list, axis=1) / 1000 # TWh
df_Einv_RES_cat.columns = cols
df_Einv_tech_cat = pd.concat(Einv_Tech_cat_list, axis=1) / 1000 # TWh
df_Einv_tech_cat.columns = cols
df_EI_cat = pd.concat(EI_by_cat_list, axis=1)
df_EI_cat.columns = cols
return df_Einv_op, df_Einv_RES_cat, df_Einv_tech_cat, df_EI_cat, df_EI
def get_gwp(cs: str):
"""
Get the GWP from gwp_breakdown.csv.
:param cs: directory name.
:return: GWP value.
"""
gwp = pd.read_csv(f"{cs}/output/gwp_breakdown.csv", index_col=0, sep=',')
return gwp.sum()
def get_cost(cs: str):
"""
Get the cost from cost_breakdown.csv.
:param cs: directory name.
:return: cost values breakdown between C_inv, C_maint, and C_op.
"""
cost = pd.read_csv(f"{cs}/output/cost_breakdown.csv", index_col=0, sep=',')
return cost.sum()
def gwp_computation(dir: str, range_val):
"""
GWP computation for several case studies.
:param dir: directory to the case studies.
:param range_val: range of GWP constrained values.
:return: GWP in MtCO2eq/y
"""
GWP_list = []
for run in ['run_' + str(i) for i in range_val]:
dir_temp = dir + '/' + run
GWP_val = get_gwp(cs=dir_temp)
GWP_list.append([GWP_val['GWP_constr'], GWP_val['GWP_op']])
return pd.DataFrame(data=np.asarray(GWP_list)/1000, index=[i for i in range_val], columns=['GWP_cons', 'GWP_op'])
def cost_computation(dir: str, range_val):
"""
Cost computation for several case studies.
:param dir: directory to the case studies.
:param range_val: range of GWP constrained values.
:return: Cost in bEUR/y
"""
cost_list = []
for run in ['run_' + str(i) for i in range_val]:
dir_temp = dir + '/' + run
cost_val = get_cost(cs=dir_temp)
cost_list.append([cost_val['C_inv'], cost_val['C_maint'], cost_val['C_op']])
return pd.DataFrame(data=np.asarray(cost_list)/1000, index=[i for i in range_val], columns=['C_inv', 'C_maint', 'C_op'])
def gwp_breakdown(dir: str, range_val):
"""
GWP breakdown for several scenarios.
:param dir: directory to the case studies.
:param range_val: scenario values.
:return: GWP_const and GWP_op into pd.DataFrame
"""
gwp_const_list = []
gwp_op_list = []
for run in ['run_' + str(i) for i in range_val]:
dir_temp = dir + '/' + run
gwp = pd.read_csv(f"{dir_temp}/output/gwp_breakdown.csv", index_col=0, sep=',')
gwp_const_list.append(gwp['GWP_constr'])
gwp_op_list.append(gwp['GWP_op'])
df_gwp_const = pd.concat(gwp_const_list, axis=1)
df_gwp_const.columns = [i for i in range_val]
df_gwp_op = pd.concat(gwp_op_list, axis=1)
df_gwp_op.columns = [i for i in range_val]
return df_gwp_const / 1000, df_gwp_op / 1000  # MtCO2/y
def cost_breakdown(dir: str, range_val):
"""
Cost breakdown for several scenarios.
:param dir: directory to the case studies.
:param range_val: scenario values.
:return: C_inv, C_maint, and C_op into pd.DataFrames
"""
cost_inv_list = []
cost_maint_list = []
cost_op_list = []
for run in ['run_' + str(i) for i in range_val]:
dir_temp = dir + '/' + run
gwp = pd.read_csv(f"{dir_temp}/output/cost_breakdown.csv", index_col=0, sep=',')
cost_inv_list.append(gwp['C_inv'])
cost_maint_list.append(gwp['C_maint'])
cost_op_list.append(gwp['C_op'])
df_cost_inv = pd.concat(cost_inv_list, axis=1)
df_cost_inv.columns = [i for i in range_val]
df_cost_maint = pd.concat(cost_maint_list, axis=1)
df_cost_maint.columns = [i for i in range_val]
df_cost_op = pd.concat(cost_op_list, axis=1)
df_cost_op.columns = [i for i in range_val]
return df_cost_inv / 1000, df_cost_maint / 1000, df_cost_op / 1000  # bEUR/y
def gwp_const_per_category(df_gwp_const: pd.DataFrame, user_data: str):
"""
Build a dict with technology categories as keys.
In each category a pd.DataFrame lists the GWP_const of the corresponding technologies for several scenarios.
:param df_gwp_const: GWP_const raw data for several scenarios.
:param user_data: path to user_data.
:return: dict.
"""
df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)
# Retrieve the list subcategory of technologies
tech_subcategory_list = list(dict.fromkeys(list(df_aux_tech['Subcategory'])))
tech_by_subcategory = dict()
for cat in tech_subcategory_list:
tech_by_subcategory[cat] = list(df_aux_tech[df_aux_tech['Subcategory'] == cat].index)
# Select per technology category the GWP_const
gwp_const_by_tech_cat = dict()
for cat in tech_by_subcategory.keys():
temp_list = []
for tech in tech_by_subcategory[cat]:
if tech in list(df_gwp_const.columns):
temp_list.append(df_gwp_const[tech])
if len(temp_list) > 0:
gwp_const_by_tech_cat[cat] = pd.concat(temp_list, axis=1)
else:
gwp_const_by_tech_cat[cat] = None
return gwp_const_by_tech_cat
def retrieve_non_zero_val(df: pd.DataFrame):
"""
Drop the columns of a DataFrame whose values are 0 for all rows.
:param df: DataFrame of shape (n_scenarios, n_cols).
:return: DataFrame of shape (n_scenarios, n_cols_new) with n_cols_new <= n_cols.
"""
return df.loc[:, (df != 0).any(axis=0)].copy()
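# Illustration (hedged toy data):
# df = pd.DataFrame({'a': [0, 0], 'b': [1, 0]})
# retrieve_non_zero_val(df)  # keeps only column 'b'; 'a' is all zeros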
def res_assets_capacity(range_val, dir: str):
"""
Retrieve the asset installed capacities.
:param range_val: range of GWP constrained values.
:param dir: case study path and name.
:return: Asset installed capacities into a pd.DataFrame.
"""
assets_list = []
for run in ['run_' + str(i) for i in range_val]:
df_asset_temp = pd.read_csv(dir + '/' + run + "/output/assets.csv", index_col=0)
assets_list.append(df_asset_temp['f'])
df_assets = pd.concat(assets_list, axis=1)
df_assets.index.name = ''
df_assets.columns = [i for i in range_val]
return df_assets.drop(index='UNITS').astype(float)
if __name__ == '__main__':
# Load configuration into a dict
config = load_config(config_fn='config.yaml')
# Loading data
all_data = es.import_data(user_data_dir=config['user_data'], developer_data_dir=config['developer_data'])
# Modify the minimum capacities of some technologies
for tech in config['Technologies']['f_min']:
all_data['Technologies']['f_min'].loc[tech] = config['Technologies']['f_min'][tech]
GWP_tot = True
if GWP_tot:
dir_name = 're_be_GWP_tot'
else:
dir_name = 're_be_GWP_op'
# Read case study name
run = 'run_100'
cs_test = f"{config['case_studies_dir']}/{dir_name + '_0/' + run}"
# Compute the FEC from the year_balance.csv
df_year_balance = pd.read_csv(f"{cs_test}/output/year_balance.csv", index_col=0)
fec_details, fec_tot = compute_fec(data=df_year_balance, user_data=config['user_data'])
fec_tot_val = sum(fec_tot.values()) / 1000 # TWh
# Compute the FEC from SANKEY
ef = get_FEC_from_sankey(case_study_dir=cs_test, col=run)
fec_sankey = ef.sum()
einv = get_total_einv(cs_test) / 1000 # TWh
print('FEC SANKEY %.2f vs year_balance %.2f [TWh/y]' % (fec_sankey, fec_tot_val))
print('EROI %.2f %.2f' % (fec_sankey / einv, fec_tot_val / einv))
GWP_val = get_gwp(cs=cs_test)
print('GWP_cons %.1f GWP_op %.1f [ktC02/y]' %(GWP_val['GWP_constr'], GWP_val['GWP_op']))
# Compute Einv by resources and technologies
df_inv_res_by_subcat, df_inv_tech_by_cat = compute_einv_details(cs=cs_test, user_data=config['user_data'], all_data=all_data)
# Primary Energy by subcategory
df_primary_energy_subcat, df_primary_energy = compute_primary_energy(cs=cs_test, user_data=config['user_data'], run=run, all_data=all_data)
|
{"hexsha": "b92d0e01927bfa275063b4d7bc9b792c38c12242", "size": 22145, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/eroi_study/utils_res.py", "max_stars_repo_name": "energyscope/EnergyScope_multi_criteria", "max_stars_repo_head_hexsha": "438ca2d3a8502110ce45ed6a1165eb0ff7c2d57c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-13T11:53:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T11:53:45.000Z", "max_issues_repo_path": "projects/eroi_study/utils_res.py", "max_issues_repo_name": "energyscope/EnergyScope_multi_criteria", "max_issues_repo_head_hexsha": "438ca2d3a8502110ce45ed6a1165eb0ff7c2d57c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projects/eroi_study/utils_res.py", "max_forks_repo_name": "energyscope/EnergyScope_multi_criteria", "max_forks_repo_head_hexsha": "438ca2d3a8502110ce45ed6a1165eb0ff7c2d57c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3365949119, "max_line_length": 144, "alphanum_fraction": 0.6746895462, "include": true, "reason": "import numpy", "num_tokens": 6098}
|
from pocovidnet.utils_butterfly_data import (
get_processing_info, get_paths, label_to_dir
)
import os
import cv2
import numpy as np
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-data', type=str, default="butterfly")
parser.add_argument('-out', type=str, default="pocus_videos/convex")
parser.add_argument('-json', type=str, default="data_from_butterfly.json")
args = parser.parse_args()
butterfly_dir = args.data
out_dir = args.out
actual_names, labels = get_paths(args.json)
# manually add the ones which I know are in the data
files_to_process, labs_to_process = get_processing_info(
butterfly_dir, actual_names, labels
)
del_upper = 100
for i in range(1, len(files_to_process)):
vid_arr = []
fp = files_to_process[i]
fn = fp.split(os.sep)[-1]
cap = cv2.VideoCapture(fp) # capturing the video from the given path
# frame rate
n_frames = cap.get(7)
frameRate = cap.get(5)
out_path = os.path.join(
out_dir,
label_to_dir(labs_to_process[i]).split(os.sep)[1][:3]
)
print(out_path)
print(
"PROCESS", fn, labs_to_process[i], "framerate", int(cap.get(5)),
"width", cap.get(3), "height", cap.get(4), "number frames:",
cap.get(7)
)
out_file = out_path + "_Butterfly_" + fn.split(".")[0] + ".avi"
if os.path.exists(out_file):
print("already done, ", out_file)
continue
nr_selected = 0
while cap.isOpened():
frameId = cap.get(1) # current frame number
ret, frame = cap.read()
if not ret:
break
frame = np.asarray(frame).astype(int)
# crop
width_border = int(cap.get(3) * 0.15)
width_box = int(cap.get(3)) - 2 * width_border
if width_box + del_upper > cap.get(4):
width_box = int(cap.get(4) - del_upper)
width_border = int(cap.get(3) / 2 - width_box / 2)
frame = frame[del_upper:width_box +
del_upper, width_border:width_box + width_border]
# detect green point
green_point = frame[:, :, 1] - frame[:, :, 0]
# get first frame for green point deletion:
if frameId == 0:
frame_start = green_point
# skip the green moving points
if np.any((green_point - frame_start) > 100):
# plt.imshow(green_point)
# plt.show()
print("VID WITH GREEN DOT")
break
# delete blue symbol
blue_symbol = np.where(green_point < -50)
frame[blue_symbol] = frame[0, 0]
# delete green symbol
if np.any(green_point > 220):
green_symbol = np.where(green_point > 50)
frame[green_symbol] = frame[0, 0]
# resize
frame = np.asarray(frame).astype(np.uint8)
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# frame = cv2.resize(frame, (240, 240))
# if frameId == 0:
# plt.imshow(frame)
# plt.show()
vid_arr.append(frame)
cap.release()
vid_arr = np.asarray(vid_arr)
# SAVE VIDEO
if len(vid_arr) > 5:
curr_size = vid_arr.shape[1:3]
print("output video size", curr_size)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter(
out_file, fourcc,
20.0, tuple(curr_size)
)
for x in vid_arr:
writer.write(x.astype("uint8"))
writer.release()
# io.vwrite(
# out_path + "_Butterfly_" + fn.split(".")[0] + ".mpeg",
# vid_arr,
# outputdict={"-vcodec": "mpeg2video"}
# )
print("DONE", vid_arr.shape)
else:
print("GREEN DOT:", fn)
|
{"hexsha": "9754a64995c3ed348715e0ca4d17da52f2480b54", "size": 4194, "ext": "py", "lang": "Python", "max_stars_repo_path": "pocovidnet/scripts/process_butterfly_videos.py", "max_stars_repo_name": "983632847/covid19_pocus_ultrasound", "max_stars_repo_head_hexsha": "3625e95bbf189926dbd12966ef59ee71ed10e453", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-24T07:40:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-24T07:40:40.000Z", "max_issues_repo_path": "pocovidnet/scripts/process_butterfly_videos.py", "max_issues_repo_name": "983632847/covid19_pocus_ultrasound", "max_issues_repo_head_hexsha": "3625e95bbf189926dbd12966ef59ee71ed10e453", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pocovidnet/scripts/process_butterfly_videos.py", "max_forks_repo_name": "983632847/covid19_pocus_ultrasound", "max_forks_repo_head_hexsha": "3625e95bbf189926dbd12966ef59ee71ed10e453", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8461538462, "max_line_length": 78, "alphanum_fraction": 0.5250357654, "include": true, "reason": "import numpy", "num_tokens": 1012}
|
[STATEMENT]
lemma "\<lfloor>P \<^bold>\<rightarrow> \<^bold>O\<^sub>aP\<rfloor>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>c. P c \<sqsubseteq> (\<^bold>O\<^sub>aP) c
[PROOF STEP]
nitpick
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>c. P c \<sqsubseteq> (\<^bold>O\<^sub>aP) c
[PROOF STEP]
oops \<comment> \<open> (actual) deontic modal collapse is countersatisfiable \<close>
|
{"llama_tokens": 170, "file": "GewirthPGCProof_CJDDLplus", "length": 2}
|
[STATEMENT]
lemma diamond_fin_word_inf_word:
assumes "Ind (set v) (sset w)" "path v p" "run w p"
shows "run w (fold ex v p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. run w (target v p)
[PROOF STEP]
using diamond_inf_word_step assms
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>Ind {?a} (sset ?w); en ?a ?p; run ?w ?p\<rbrakk> \<Longrightarrow> run ?w (ex ?a ?p)
Ind (set v) (sset w)
path v p
run w p
goal (1 subgoal):
1. run w (target v p)
[PROOF STEP]
by (induct v arbitrary: p, auto)
|
{"llama_tokens": 225, "file": "Partial_Order_Reduction_Transition_System_Traces", "length": 2}
|
from .truthdiscoverer import TruthDiscoverer
import pandas as pd
class MajorityVoting(TruthDiscoverer):
"""Find truths by majority voting."""
def discover(self, claims, auxiliary_data=None):
return (self._majority_vote(claims), None)
def _majority_vote(self, claims):
"""Perform truth discovery using majority voting
Parameters
----------
claims: pd.DataFrame
a data frame that has columns [source_id, object_id, value]
Returns
-------
discovered_truths: pd.DataFrame
a data frame that has [object_id, value]
"""
c_df = claims[['source_id', 'object_id', 'value']].copy()
discovered_truths = c_df.groupby(['object_id']).apply(lambda x: self.elect(x))
discovered_truths = pd.DataFrame(discovered_truths)
discovered_truths = discovered_truths.rename(columns={0: 'value'}).reset_index()
return discovered_truths
def elect(self, x):
"""compute the truth value based on voting; the value received the most votes (by sources) is returned
Parameters
----------
x: pd.DataFrame
Returns
-------
discovered_truth: pd.DataFrame
the discovered truth
"""
return x.value.value_counts().idxmax()
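# Usage sketch (hedged; the claims below are made-up toy data):
# import pandas as pd
# claims = pd.DataFrame({
#     'source_id': [1, 2, 3],
#     'object_id': ['o1', 'o1', 'o1'],
#     'value': ['x', 'x', 'y'],
# })
# truths, _ = MajorityVoting().discover(claims)
# # truths has one row per object_id with the majority value ('x' for 'o1')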
|
{"hexsha": "2836306a6ed223dedec1667c791104ac7931f32f", "size": 1435, "ext": "py", "lang": "Python", "max_stars_repo_path": "spectrum/judge/majority.py", "max_stars_repo_name": "totucuong/spectrum", "max_stars_repo_head_hexsha": "77628c14251f3078b83a505260d71e46ec56775b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-07-14T00:37:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:35:34.000Z", "max_issues_repo_path": "spectrum/judge/majority.py", "max_issues_repo_name": "totucuong/spectrum", "max_issues_repo_head_hexsha": "77628c14251f3078b83a505260d71e46ec56775b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-12-07T11:23:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-17T09:59:08.000Z", "max_forks_repo_path": "spectrum/judge/majority.py", "max_forks_repo_name": "totucuong/spectrum", "max_forks_repo_head_hexsha": "77628c14251f3078b83a505260d71e46ec56775b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-24T06:14:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-05T04:50:07.000Z", "avg_line_length": 31.8888888889, "max_line_length": 110, "alphanum_fraction": 0.5825783972, "include": true, "reason": "import numpy", "num_tokens": 287}
|
/**
* @file llfloaterregioninfo.cpp
* @author Aaron Brashears
* @brief Implementation of the region info and controls floater and panels.
*
* $LicenseInfo:firstyear=2004&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "llviewerprecompiledheaders.h"
#include "llfloaterregioninfo.h"
#include <algorithm>
#include <functional>
#include "lldir.h"
#include "lldispatcher.h"
#include "llglheaders.h"
#include "llregionflags.h"
#include "llstl.h"
#include "llvfile.h"
#include "llxfermanager.h"
#include "indra_constants.h"
#include "message.h"
#include "llloadingindicator.h"
#include "llradiogroup.h"
#include "llsd.h"
#include "llsdserialize.h"
#include "llagent.h"
#include "llappviewer.h"
#include "llavatarname.h"
#include "llfloateravatarpicker.h"
#include "llbutton.h"
#include "llcheckboxctrl.h"
#include "llclipboard.h"
#include "llcombobox.h"
#include "llestateinfomodel.h"
#include "llfilepicker.h"
#include "llfloatergodtools.h" // for send_sim_wide_deletes()
#include "llfloatertopobjects.h" // added to fix SL-32336
#include "llfloatergroups.h"
#include "llfloaterreg.h"
#include "llfloaterregiondebugconsole.h"
#include "llfloatertelehub.h"
#include "llinventorymodel.h"
#include "lllineeditor.h"
#include "llnamelistctrl.h"
#include "llnotifications.h"
#include "llnotificationsutil.h"
#include "llregioninfomodel.h"
#include "llscrolllistitem.h"
#include "llsliderctrl.h"
#include "llslurl.h"
#include "llspinctrl.h"
#include "lltabcontainer.h"
#include "lltextbox.h"
#include "llinventory.h"
#include "lltexturectrl.h"
#include "lltrans.h"
#include "llviewercontrol.h"
#include "lluictrlfactory.h"
#include "llviewerinventory.h"
#include "llviewertexture.h"
#include "llviewertexturelist.h"
#include "llviewerregion.h"
#include "llviewerstats.h"
#include "llviewertexteditor.h"
#include "llviewerwindow.h"
#include "llvlcomposition.h"
#include "lltrans.h"
#include "llagentui.h"
#include "llmeshrepository.h"
#include "llfloaterregionrestarting.h"
#include "llpanelexperiencelisteditor.h"
#include <boost/function.hpp>
#include "llpanelexperiencepicker.h"
#include "llexperiencecache.h"
#include "llpanelexperiences.h"
#include "llcorehttputil.h"
#include "llavatarnamecache.h"
#include "llenvironment.h"
// <FS:CR> Aurora Sim - Region Settings Console
#include "llviewernetwork.h"
#include "llworld.h"
#include "llstartup.h"
// </FS:CR> Aurora Sim - Region Settings Console
#include "llviewermenufile.h"
const S32 TERRAIN_TEXTURE_COUNT = 4;
const S32 CORNER_COUNT = 4;
const U32 MAX_LISTED_NAMES = 100;
#define TMP_DISABLE_WLES // STORM-1180
///----------------------------------------------------------------------------
/// Local class declaration
///----------------------------------------------------------------------------
class LLDispatchEstateUpdateInfo : public LLDispatchHandler
{
public:
LLDispatchEstateUpdateInfo() {}
virtual ~LLDispatchEstateUpdateInfo() {}
virtual bool operator()(
const LLDispatcher* dispatcher,
const std::string& key,
const LLUUID& invoice,
const sparam_t& strings);
};
class LLDispatchSetEstateAccess : public LLDispatchHandler
{
public:
LLDispatchSetEstateAccess() {}
virtual ~LLDispatchSetEstateAccess() {}
virtual bool operator()(
const LLDispatcher* dispatcher,
const std::string& key,
const LLUUID& invoice,
const sparam_t& strings);
};
class LLDispatchSetEstateExperience : public LLDispatchHandler
{
public:
virtual bool operator()(
const LLDispatcher* dispatcher,
const std::string& key,
const LLUUID& invoice,
const sparam_t& strings);
LLSD getIDs( sparam_t::const_iterator it, sparam_t::const_iterator end, S32 count );
};
/*
void unpack_request_params(
LLMessageSystem* msg,
LLDispatcher::sparam_t& strings,
LLDispatcher::iparam_t& integers)
{
char str_buf[MAX_STRING];
S32 str_count = msg->getNumberOfBlocksFast(_PREHASH_StringData);
S32 i;
for (i = 0; i < str_count; ++i)
{
// we treat the SParam as binary data (since it might be an
// LLUUID in compressed form which may have embedded \0's,)
str_buf[0] = '\0';
S32 data_size = msg->getSizeFast(_PREHASH_StringData, i, _PREHASH_SParam);
if (data_size >= 0)
{
msg->getBinaryDataFast(_PREHASH_StringData, _PREHASH_SParam,
str_buf, data_size, i, MAX_STRING - 1);
strings.push_back(std::string(str_buf, data_size));
}
}
U32 int_buf;
S32 int_count = msg->getNumberOfBlocksFast(_PREHASH_IntegerData);
for (i = 0; i < int_count; ++i)
{
msg->getU32("IntegerData", "IParam", int_buf, i);
integers.push_back(int_buf);
}
}
*/
class LLPanelRegionEnvironment : public LLPanelEnvironmentInfo
{
public:
LLPanelRegionEnvironment();
virtual ~LLPanelRegionEnvironment();
virtual void refresh() override;
virtual bool isRegion() const override { return true; }
virtual LLParcel * getParcel() override { return nullptr; }
virtual bool canEdit() override { return LLEnvironment::instance().canAgentUpdateRegionEnvironment(); }
virtual bool isLargeEnough() override { return true; } // regions are always large enough.
bool refreshFromRegion(LLViewerRegion* region);
virtual BOOL postBuild() override;
virtual void onOpen(const LLSD& key) override {};
virtual S32 getParcelId() override { return INVALID_PARCEL_ID; }
protected:
static const U32 DIRTY_FLAG_OVERRIDE;
virtual void refreshFromSource() override;
bool confirmUpdateEstateEnvironment(const LLSD& notification, const LLSD& response);
void onChkAllowOverride(bool value);
private:
bool mAllowOverrideRestore;
connection_t mCommitConnect;
};
bool estate_dispatch_initialized = false;
///----------------------------------------------------------------------------
/// LLFloaterRegionInfo
///----------------------------------------------------------------------------
//S32 LLFloaterRegionInfo::sRequestSerial = 0;
LLUUID LLFloaterRegionInfo::sRequestInvoice;
LLFloaterRegionInfo::LLFloaterRegionInfo(const LLSD& seed)
: LLFloater(seed),
mEnvironmentPanel(NULL),
mRegionChangedCallback()
{}
BOOL LLFloaterRegionInfo::postBuild()
{
mTab = getChild<LLTabContainer>("region_panels");
mTab->setCommitCallback(boost::bind(&LLFloaterRegionInfo::onTabSelected, this, _2));
// construct the panels
LLPanelRegionInfo* panel;
panel = new LLPanelEstateInfo;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_estate.xml");
mTab->addTabPanel(LLTabContainer::TabPanelParams().panel(panel).select_tab(true));
panel = new LLPanelEstateAccess;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_access.xml");
mTab->addTabPanel(panel);
panel = new LLPanelEstateCovenant;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_covenant.xml");
mTab->addTabPanel(panel);
panel = new LLPanelRegionGeneralInfo;
mInfoPanels.push_back(panel);
panel->getCommitCallbackRegistrar().add("RegionInfo.ManageTelehub", boost::bind(&LLPanelRegionInfo::onClickManageTelehub, panel));
panel->buildFromFile("panel_region_general.xml");
mTab->addTabPanel(panel);
// <FS:CR> Aurora Sim - Region Settings Console
// We only use this panel on Aurora-based sims
std::string url = gAgent.getRegionCapability("DispatchOpenRegionSettings");
if (!url.empty())
{
panel = new LLPanelRegionOpenSettingsInfo;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_open_region_settings.xml");
mTab->addTabPanel(panel);
}
// </FS:CR> Aurora Sim - Region Settings Console
panel = new LLPanelRegionTerrainInfo;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_terrain.xml");
mTab->addTabPanel(panel);
mEnvironmentPanel = new LLPanelRegionEnvironment;
mEnvironmentPanel->buildFromFile("panel_region_environment.xml");
// mEnvironmentPanel->configureForRegion();
mTab->addTabPanel(mEnvironmentPanel);
panel = new LLPanelRegionDebugInfo;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_debug.xml");
mTab->addTabPanel(panel);
if(gDisconnected)
{
return TRUE;
}
if(!gAgent.getRegionCapability("RegionExperiences").empty())
{
panel = new LLPanelRegionExperiences;
mInfoPanels.push_back(panel);
panel->buildFromFile("panel_region_experiences.xml");
mTab->addTabPanel(panel);
}
gMessageSystem->setHandlerFunc(
"EstateOwnerMessage",
&processEstateOwnerRequest);
// Request region info when agent region changes.
mRegionChangedCallback = gAgent.addRegionChangedCallback(boost::bind(&LLFloaterRegionInfo::onRegionChanged, this));
return TRUE;
}
LLFloaterRegionInfo::~LLFloaterRegionInfo()
{
if (mRegionChangedCallback.connected())
{
mRegionChangedCallback.disconnect();
}
}
void LLFloaterRegionInfo::onOpen(const LLSD& key)
{
if(gDisconnected)
{
disableTabCtrls();
return;
}
refreshFromRegion(gAgent.getRegion());
requestRegionInfo();
requestMeshRezInfo();
if (!mGodLevelChangeSlot.connected())
{
mGodLevelChangeSlot = gAgent.registerGodLevelChanageListener(boost::bind(&LLFloaterRegionInfo::onGodLevelChange, this, _1));
}
}
void LLFloaterRegionInfo::onClose(bool app_quitting)
{
if (mGodLevelChangeSlot.connected())
{
mGodLevelChangeSlot.disconnect();
}
}
void LLFloaterRegionInfo::onRegionChanged()
{
if (getVisible()) // otherwise onOpen() will do the request
{
requestRegionInfo();
}
}
// static
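// Disables the region tab controls and asks the simulator for fresh
// RegionInfo data; the reply is handled by processRegionInfo().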
void LLFloaterRegionInfo::requestRegionInfo()
{
LLTabContainer* tab = findChild<LLTabContainer>("region_panels");
if (tab)
{
tab->getChild<LLPanel>("General")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Debug")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Terrain")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Estate")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Access")->setCtrlsEnabled(FALSE);
}
// Must allow anyone to request the RegionInfo data
// so non-owners/non-gods can see the values.
// Therefore can't use an EstateOwnerMessage JC
LLMessageSystem* msg = gMessageSystem;
msg->newMessage("RequestRegionInfo");
msg->nextBlock("AgentData");
msg->addUUID("AgentID", gAgent.getID());
msg->addUUID("SessionID", gAgent.getSessionID());
gAgent.sendReliableMessage();
}
// static
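// Unpacks an incoming EstateOwnerMessage and routes it through the estate
// dispatcher; replies whose invoice does not match the last request are dropped.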
void LLFloaterRegionInfo::processEstateOwnerRequest(LLMessageSystem* msg,void**)
{
static LLDispatcher dispatch;
// <FS:Ansariel> FIRE-22573: Always refresh windlight even if floater not open
//LLFloaterRegionInfo* floater = LLFloaterReg::findTypedInstance<LLFloaterRegionInfo>("region_info");
//if(!floater)
//{
// return;
//}
// </FS:Ansariel>
if (!estate_dispatch_initialized)
{
LLPanelEstateInfo::initDispatch(dispatch);
}
// <FS:Ansariel> FIRE-22573: Always refresh windlight even if floater not open
//LLPanelEstateInfo* panel = LLFloaterRegionInfo::getPanelEstate();
// unpack the message
std::string request;
LLUUID invoice;
LLDispatcher::sparam_t strings;
LLDispatcher::unpackMessage(msg, request, invoice, strings);
if(invoice != getLastInvoice())
{
LL_WARNS() << "Mismatched Estate message: " << request << LL_ENDL;
return;
}
//dispatch the message
dispatch.dispatch(request, invoice, strings);
// <FS:Ansariel> FIRE-22573: Always refresh windlight even if floater not open
LLFloaterRegionInfo* floater = LLFloaterReg::findTypedInstance<LLFloaterRegionInfo>("region_info");
if(!floater)
{
return;
}
LLPanelEstateInfo* panel = LLFloaterRegionInfo::getPanelEstate();
// </FS:Ansariel>
if (panel)
{
panel->updateControls(gAgent.getRegion());
}
}
// static
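// Handles the RegionInfo reply: extracts the message fields and fills in
// the General, Debug and Terrain panels.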
void LLFloaterRegionInfo::processRegionInfo(LLMessageSystem* msg)
{
LLPanel* panel;
LLFloaterRegionInfo* floater = LLFloaterReg::findTypedInstance<LLFloaterRegionInfo>("region_info");
if(!floater)
{
return;
}
#if 0
// We need to re-request environment setting here,
// otherwise after we apply (send) updated region settings we won't get them back,
// so our environment won't be updated.
// This is also the way to know about externally changed region environment.
LLEnvManagerNew::instance().requestRegionSettings();
#endif
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLViewerRegion* region = gAgent.getRegion();
BOOL allow_modify = gAgent.isGodlike() || (region && region->canManageEstate());
// *TODO: Replace parsing msg with accessing the region info model.
LLRegionInfoModel& region_info = LLRegionInfoModel::instance();
// extract message
std::string sim_name;
std::string sim_type = LLTrans::getString("land_type_unknown");
U64 region_flags;
U8 agent_limit;
S32 hard_agent_limit;
F32 object_bonus_factor;
U8 sim_access;
F32 water_height;
F32 terrain_raise_limit;
F32 terrain_lower_limit;
BOOL use_estate_sun;
F32 sun_hour;
msg->getString("RegionInfo", "SimName", sim_name);
msg->getU8("RegionInfo", "MaxAgents", agent_limit);
msg->getS32("RegionInfo2", "HardMaxAgents", hard_agent_limit);
msg->getF32("RegionInfo", "ObjectBonusFactor", object_bonus_factor);
msg->getU8("RegionInfo", "SimAccess", sim_access);
msg->getF32Fast(_PREHASH_RegionInfo, _PREHASH_WaterHeight, water_height);
msg->getF32Fast(_PREHASH_RegionInfo, _PREHASH_TerrainRaiseLimit, terrain_raise_limit);
msg->getF32Fast(_PREHASH_RegionInfo, _PREHASH_TerrainLowerLimit, terrain_lower_limit);
msg->getBOOL("RegionInfo", "UseEstateSun", use_estate_sun);
// actually the "last set" sun hour, not the current sun hour. JC
msg->getF32("RegionInfo", "SunHour", sun_hour);
// the only reasonable way to decide if we actually have any data is to
// check to see if any of these fields have nonzero sizes
if (msg->getSize("RegionInfo2", "ProductSKU") > 0 ||
msg->getSize("RegionInfo2", "ProductName") > 0)
{
msg->getString("RegionInfo2", "ProductName", sim_type);
LLTrans::findString(sim_type, sim_type); // try localizing sim product name
}
if (msg->has(_PREHASH_RegionInfo3))
{
msg->getU64("RegionInfo3", "RegionFlagsExtended", region_flags);
}
else
{
U32 flags = 0;
msg->getU32("RegionInfo", "RegionFlags", flags);
region_flags = flags;
}
// GENERAL PANEL
panel = tab->getChild<LLPanel>("General");
panel->getChild<LLUICtrl>("region_text")->setValue(LLSD(sim_name));
panel->getChild<LLUICtrl>("region_type")->setValue(LLSD(sim_type));
panel->getChild<LLUICtrl>("version_channel_text")->setValue(gLastVersionChannel);
panel->getChild<LLUICtrl>("block_terraform_check")->setValue((region_flags & REGION_FLAGS_BLOCK_TERRAFORM) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("block_fly_check")->setValue((region_flags & REGION_FLAGS_BLOCK_FLY) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("block_fly_over_check")->setValue((region_flags & REGION_FLAGS_BLOCK_FLYOVER) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("allow_damage_check")->setValue((region_flags & REGION_FLAGS_ALLOW_DAMAGE) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("restrict_pushobject")->setValue((region_flags & REGION_FLAGS_RESTRICT_PUSHOBJECT) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("allow_land_resell_check")->setValue((region_flags & REGION_FLAGS_BLOCK_LAND_RESELL) ? FALSE : TRUE );
panel->getChild<LLUICtrl>("allow_parcel_changes_check")->setValue((region_flags & REGION_FLAGS_ALLOW_PARCEL_CHANGES) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("block_parcel_search_check")->setValue((region_flags & REGION_FLAGS_BLOCK_PARCEL_SEARCH) ? TRUE : FALSE );
panel->getChild<LLUICtrl>("agent_limit_spin")->setValue(LLSD((F32)agent_limit) );
panel->getChild<LLUICtrl>("object_bonus_spin")->setValue(LLSD(object_bonus_factor) );
panel->getChild<LLUICtrl>("access_combo")->setValue(LLSD(sim_access) );
panel->getChild<LLSpinCtrl>("agent_limit_spin")->setMaxValue(hard_agent_limit);
LLPanelRegionGeneralInfo* panel_general = LLFloaterRegionInfo::getPanelGeneral();
if (panel_general)
{
panel_general->setObjBonusFactor(object_bonus_factor);
}
// detect teen grid for maturity
U32 parent_estate_id;
msg->getU32("RegionInfo", "ParentEstateID", parent_estate_id);
BOOL teen_grid = (parent_estate_id == 5); // *TODO add field to estate table and test that
panel->getChildView("access_combo")->setEnabled(gAgent.isGodlike() || (region && region->canManageEstate() && !teen_grid));
panel->setCtrlsEnabled(allow_modify);
// <FS:Zi> Add estate ID and region grid position to Region panel
S32 grid_pos_x=-1;
S32 grid_pos_y=-1;
U32 estate_id;
if(region)
{
//compute the grid position of the region
LLVector3d global_pos = region->getPosGlobalFromRegion(LLVector3::zero);
grid_pos_x = (S32) (global_pos.mdV[VX]/256.0f);
grid_pos_y = (S32) (global_pos.mdV[VY]/256.0f);
}
msg->getU32Fast(_PREHASH_RegionInfo, _PREHASH_EstateID, estate_id);
panel->getChild<LLLineEditor>("estate_id")->setValue(LLSD((F32) estate_id));
panel->getChild<LLLineEditor>("grid_position_x")->setValue(LLSD((F32) grid_pos_x));
panel->getChild<LLLineEditor>("grid_position_y")->setValue(LLSD((F32) grid_pos_y));
// </FS:Zi>
// DEBUG PANEL
panel = tab->getChild<LLPanel>("Debug");
panel->getChild<LLUICtrl>("region_text")->setValue(LLSD(sim_name) );
panel->getChild<LLUICtrl>("disable_scripts_check")->setValue(LLSD((BOOL)((region_flags & REGION_FLAGS_SKIP_SCRIPTS) ? TRUE : FALSE )) );
panel->getChild<LLUICtrl>("disable_collisions_check")->setValue(LLSD((BOOL)((region_flags & REGION_FLAGS_SKIP_COLLISIONS) ? TRUE : FALSE )) );
panel->getChild<LLUICtrl>("disable_physics_check")->setValue(LLSD((BOOL)((region_flags & REGION_FLAGS_SKIP_PHYSICS) ? TRUE : FALSE )) );
panel->setCtrlsEnabled(allow_modify);
// TERRAIN PANEL
panel = tab->getChild<LLPanel>("Terrain");
panel->getChild<LLUICtrl>("region_text")->setValue(LLSD(sim_name));
panel->getChild<LLUICtrl>("water_height_spin")->setValue(region_info.mWaterHeight);
panel->getChild<LLUICtrl>("terrain_raise_spin")->setValue(region_info.mTerrainRaiseLimit);
panel->getChild<LLUICtrl>("terrain_lower_spin")->setValue(region_info.mTerrainLowerLimit);
panel->setCtrlsEnabled(allow_modify);
if (floater->getVisible())
{
// Note: the RegionInfo message also triggers LLRegionInfoModel::instance().update(msg) -> requestRegion() -> "changed" message.
// We would need to know the environment version both here and in update(msg) to decide when to request and when to filter "changed".
floater->refreshFromRegion(gAgent.getRegion());
} // otherwise onOpen() will re-request the region info anyway
}
// static
LLPanelEstateInfo* LLFloaterRegionInfo::getPanelEstate()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLPanelEstateInfo* panel = (LLPanelEstateInfo*)tab->getChild<LLPanel>("Estate");
return panel;
}
// static
LLPanelEstateAccess* LLFloaterRegionInfo::getPanelAccess()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLPanelEstateAccess* panel = (LLPanelEstateAccess*)tab->getChild<LLPanel>("Access");
return panel;
}
// static
LLPanelEstateCovenant* LLFloaterRegionInfo::getPanelCovenant()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLPanelEstateCovenant* panel = (LLPanelEstateCovenant*)tab->getChild<LLPanel>("Covenant");
return panel;
}
// static
LLPanelRegionGeneralInfo* LLFloaterRegionInfo::getPanelGeneral()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLPanelRegionGeneralInfo* panel = (LLPanelRegionGeneralInfo*)tab->getChild<LLPanel>("General");
return panel;
}
// static
LLPanelRegionEnvironment* LLFloaterRegionInfo::getPanelEnvironment()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLPanelRegionEnvironment* panel = (LLPanelRegionEnvironment*)tab->getChild<LLPanel>("panel_env_info");
return panel;
}
// static
LLPanelRegionTerrainInfo* LLFloaterRegionInfo::getPanelRegionTerrain()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater)
{
llassert(floater);
return NULL;
}
LLTabContainer* tab_container = floater->getChild<LLTabContainer>("region_panels");
LLPanelRegionTerrainInfo* panel =
dynamic_cast<LLPanelRegionTerrainInfo*>(tab_container->getChild<LLPanel>("Terrain"));
llassert(panel);
return panel;
}
LLPanelRegionExperiences* LLFloaterRegionInfo::getPanelExperiences()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
return (LLPanelRegionExperiences*)tab->getChild<LLPanel>("Experiences");
}
void LLFloaterRegionInfo::disableTabCtrls()
{
LLTabContainer* tab = getChild<LLTabContainer>("region_panels");
tab->getChild<LLPanel>("General")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Debug")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Terrain")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("panel_env_info")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Estate")->setCtrlsEnabled(FALSE);
tab->getChild<LLPanel>("Access")->setCtrlsEnabled(FALSE);
}
// <FS:CR> Aurora Sim - Region Settings Console
// static
LLPanelRegionOpenSettingsInfo* LLFloaterRegionInfo::getPanelOpenSettings()
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (!floater) return NULL;
LLTabContainer* tab = floater->getChild<LLTabContainer>("region_panels");
LLPanelRegionOpenSettingsInfo* panel = (LLPanelRegionOpenSettingsInfo*)tab->getChild<LLPanel>("RegionSettings");
return panel;
}
// </FS:CR> Aurora Sim - Region Settings Console
void LLFloaterRegionInfo::onTabSelected(const LLSD& param)
{
LLPanel* active_panel = getChild<LLPanel>(param.asString());
active_panel->onOpen(LLSD());
}
void LLFloaterRegionInfo::refreshFromRegion(LLViewerRegion* region)
{
if (!region)
{
return;
}
// call refresh from region on all panels
for (info_panels_t::iterator iter = mInfoPanels.begin();
iter != mInfoPanels.end(); ++iter)
{
(*iter)->refreshFromRegion(region);
}
mEnvironmentPanel->refreshFromRegion(region);
}
// public
void LLFloaterRegionInfo::refresh()
{
for(info_panels_t::iterator iter = mInfoPanels.begin();
iter != mInfoPanels.end(); ++iter)
{
(*iter)->refresh();
}
mEnvironmentPanel->refresh();
}
void LLFloaterRegionInfo::enableTopButtons()
{
getChildView("top_colliders_btn")->setEnabled(true);
getChildView("top_scripts_btn")->setEnabled(true);
}
void LLFloaterRegionInfo::disableTopButtons()
{
getChildView("top_colliders_btn")->setEnabled(false);
getChildView("top_scripts_btn")->setEnabled(false);
}
void LLFloaterRegionInfo::onGodLevelChange(U8 god_level)
{
LLFloaterRegionInfo* floater = LLFloaterReg::getTypedInstance<LLFloaterRegionInfo>("region_info");
if (floater && floater->getVisible())
{
refreshFromRegion(gAgent.getRegion());
}
}
///----------------------------------------------------------------------------
/// Local class implementation
///----------------------------------------------------------------------------
//
// LLPanelRegionInfo
//
LLPanelRegionInfo::LLPanelRegionInfo()
: LLPanel()
{
}
void LLPanelRegionInfo::onBtnSet()
{
if (sendUpdate())
{
disableButton("apply_btn");
}
}
void LLPanelRegionInfo::onChangeChildCtrl(LLUICtrl* ctrl)
{
updateChild(ctrl); // virtual function
}
// Enables the "set" button if it is not already enabled
void LLPanelRegionInfo::onChangeAnything()
{
enableButton("apply_btn");
refresh();
}
// static
// Enables set button on change to line editor
void LLPanelRegionInfo::onChangeText(LLLineEditor* caller, void* user_data)
{
LLPanelRegionInfo* panel = dynamic_cast<LLPanelRegionInfo*>(caller->getParent());
if(panel)
{
panel->enableButton("apply_btn");
panel->refresh();
}
}
// virtual
BOOL LLPanelRegionInfo::postBuild()
{
// If the panel has an Apply button, set a callback for it.
LLUICtrl* apply_btn = findChild<LLUICtrl>("apply_btn");
if (apply_btn)
{
apply_btn->setCommitCallback(boost::bind(&LLPanelRegionInfo::onBtnSet, this));
}
refresh();
return TRUE;
}
// virtual
void LLPanelRegionInfo::updateChild(LLUICtrl* child_ctrl)
{
}
// virtual
bool LLPanelRegionInfo::refreshFromRegion(LLViewerRegion* region)
{
if (region) mHost = region->getHost();
return true;
}
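// Assembles an EstateOwnerMessage with the given method name, invoice and
// parameter list, and sends it reliably to the region host. An empty
// parameter list still adds a single empty ParamList block.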
void LLPanelRegionInfo::sendEstateOwnerMessage(
LLMessageSystem* msg,
const std::string& request,
const LLUUID& invoice,
const strings_t& strings)
{
LL_INFOS() << "Sending estate request '" << request << "'" << LL_ENDL;
msg->newMessage("EstateOwnerMessage");
msg->nextBlockFast(_PREHASH_AgentData);
msg->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
msg->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());
msg->addUUIDFast(_PREHASH_TransactionID, LLUUID::null); //not used
msg->nextBlock("MethodData");
msg->addString("Method", request);
msg->addUUID("Invoice", invoice);
if(strings.empty())
{
msg->nextBlock("ParamList");
msg->addString("Parameter", NULL);
}
else
{
strings_t::const_iterator it = strings.begin();
strings_t::const_iterator end = strings.end();
for(; it != end; ++it)
{
msg->nextBlock("ParamList");
msg->addString("Parameter", *it);
}
}
msg->sendReliable(mHost);
}
void LLPanelRegionInfo::enableButton(const std::string& btn_name, BOOL enable)
{
LLView* button = findChildView(btn_name);
if (button) button->setEnabled(enable);
}
void LLPanelRegionInfo::disableButton(const std::string& btn_name)
{
LLView* button = findChildView(btn_name);
if (button) button->setEnabled(FALSE);
}
void LLPanelRegionInfo::initCtrl(const std::string& name)
{
getChild<LLUICtrl>(name)->setCommitCallback(boost::bind(&LLPanelRegionInfo::onChangeAnything, this));
}
void LLPanelRegionInfo::onClickManageTelehub()
{
LLFloaterReg::hideInstance("region_info");
LLFloaterReg::showInstance("telehubs");
}
/////////////////////////////////////////////////////////////////////////////
// LLPanelRegionGeneralInfo
//
bool LLPanelRegionGeneralInfo::refreshFromRegion(LLViewerRegion* region)
{
BOOL allow_modify = gAgent.isGodlike() || (region && region->canManageEstate());
setCtrlsEnabled(allow_modify);
getChildView("apply_btn")->setEnabled(FALSE);
getChildView("access_text")->setEnabled(allow_modify);
// getChildView("access_combo")->setEnabled(allow_modify);
// now set in processRegionInfo for teen grid detection
getChildView("kick_btn")->setEnabled(allow_modify);
getChildView("kick_all_btn")->setEnabled(allow_modify);
getChildView("im_btn")->setEnabled(allow_modify);
getChildView("manage_telehub_btn")->setEnabled(allow_modify);
// Data gets filled in by processRegionInfo
return LLPanelRegionInfo::refreshFromRegion(region);
}
BOOL LLPanelRegionGeneralInfo::postBuild()
{
// Enable the "Apply" button if something is changed. JC
initCtrl("block_terraform_check");
initCtrl("block_fly_check");
initCtrl("block_fly_over_check");
initCtrl("allow_damage_check");
initCtrl("allow_land_resell_check");
initCtrl("allow_parcel_changes_check");
initCtrl("agent_limit_spin");
initCtrl("object_bonus_spin");
initCtrl("access_combo");
initCtrl("restrict_pushobject");
initCtrl("block_parcel_search_check");
childSetAction("kick_btn", boost::bind(&LLPanelRegionGeneralInfo::onClickKick, this));
childSetAction("kick_all_btn", onClickKickAll, this);
childSetAction("im_btn", onClickMessage, this);
// childSetAction("manage_telehub_btn", onClickManageTelehub, this);
LLUICtrl* apply_btn = findChild<LLUICtrl>("apply_btn");
if (apply_btn)
{
apply_btn->setCommitCallback(boost::bind(&LLPanelRegionGeneralInfo::onBtnSet, this));
}
refresh();
return TRUE;
}
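// If the object bonus factor is unchanged, apply immediately; otherwise ask
// the user to confirm first, since changing it affects parcel prim capacity.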
void LLPanelRegionGeneralInfo::onBtnSet()
{
if(mObjBonusFactor == getChild<LLUICtrl>("object_bonus_spin")->getValue().asReal())
{
if (sendUpdate())
{
disableButton("apply_btn");
}
}
else
{
LLNotificationsUtil::add("ChangeObjectBonusFactor", LLSD(), LLSD(), boost::bind(&LLPanelRegionGeneralInfo::onChangeObjectBonus, this, _1, _2));
}
}
bool LLPanelRegionGeneralInfo::onChangeObjectBonus(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option == 0)
{
if (sendUpdate())
{
disableButton("apply_btn");
}
}
return false;
}
void LLPanelRegionGeneralInfo::onClickKick()
{
LL_INFOS() << "LLPanelRegionGeneralInfo::onClickKick" << LL_ENDL;
// this depends on the grandparent view being a floater
// in order to set up floater dependency
LLView * button = findChild<LLButton>("kick_btn");
LLFloater* parent_floater = gFloaterView->getParentFloater(this);
LLFloater* child_floater = LLFloaterAvatarPicker::show(boost::bind(&LLPanelRegionGeneralInfo::onKickCommit, this, _1),
FALSE, TRUE, FALSE, parent_floater->getName(), button);
if (child_floater)
{
parent_floater->addDependentFloater(child_floater);
}
}
void LLPanelRegionGeneralInfo::onKickCommit(const uuid_vec_t& ids)
{
if (ids.empty()) return;
if(ids[0].notNull())
{
strings_t strings;
// [0] = our agent id
// [1] = target agent id
std::string buffer;
gAgent.getID().toString(buffer);
strings.push_back(buffer);
ids[0].toString(buffer);
strings.push_back(strings_t::value_type(buffer));
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "teleporthomeuser", invoice, strings);
}
}
// static
void LLPanelRegionGeneralInfo::onClickKickAll(void* userdata)
{
LL_INFOS() << "LLPanelRegionGeneralInfo::onClickKickAll" << LL_ENDL;
LLNotificationsUtil::add("KickUsersFromRegion",
LLSD(),
LLSD(),
boost::bind(&LLPanelRegionGeneralInfo::onKickAllCommit, (LLPanelRegionGeneralInfo*)userdata, _1, _2));
}
bool LLPanelRegionGeneralInfo::onKickAllCommit(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option == 0)
{
strings_t strings;
// [0] = our agent id
std::string buffer;
gAgent.getID().toString(buffer);
strings.push_back(buffer);
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
// historical message name
sendEstateOwnerMessage(gMessageSystem, "teleporthomeallusers", invoice, strings);
}
return false;
}
// static
void LLPanelRegionGeneralInfo::onClickMessage(void* userdata)
{
LL_INFOS() << "LLPanelRegionGeneralInfo::onClickMessage" << LL_ENDL;
LLNotificationsUtil::add("MessageRegion",
LLSD(),
LLSD(),
boost::bind(&LLPanelRegionGeneralInfo::onMessageCommit, (LLPanelRegionGeneralInfo*)userdata, _1, _2));
}
// static
bool LLPanelRegionGeneralInfo::onMessageCommit(const LLSD& notification, const LLSD& response)
{
if(LLNotificationsUtil::getSelectedOption(notification, response) != 0) return false;
std::string text = response["message"].asString();
if (text.empty()) return false;
LL_INFOS() << "Message to everyone: " << text << LL_ENDL;
strings_t strings;
// [0] grid_x, unused here
// [1] grid_y, unused here
// [2] agent_id of sender
// [3] sender name
// [4] message
strings.push_back("-1");
strings.push_back("-1");
std::string buffer;
gAgent.getID().toString(buffer);
strings.push_back(buffer);
std::string name;
LLAgentUI::buildFullname(name);
strings.push_back(strings_t::value_type(name));
strings.push_back(strings_t::value_type(text));
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "simulatormessage", invoice, strings);
return false;
}
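// Asks the simulator console whether mesh rez is enabled on this region.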
void LLFloaterRegionInfo::requestMeshRezInfo()
{
std::string sim_console_url = gAgent.getRegionCapability("SimConsoleAsync");
if (!sim_console_url.empty())
{
std::string request_str = "get mesh_rez_enabled";
LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(sim_console_url, LLSD(request_str),
"Requested mesh_rez_enabled", "Error requesting mesh_rez_enabled");
}
}
// setregioninfo
// strings[0] = 'Y' - block terraform, 'N' - not
// strings[1] = 'Y' - block fly, 'N' - not
// strings[2] = 'Y' - allow damage, 'N' - not
// strings[3] = 'Y' - allow land sale, 'N' - not
// strings[4] = agent limit
// strings[5] = object bonus
// strings[6] = sim access (0 = unknown, 13 = PG, 21 = Mature, 42 = Adult)
// strings[7] = restrict pushobject
// strings[8] = 'Y' - allow parcel subdivide, 'N' - not
// strings[9] = 'Y' - block parcel search, 'N' - allow
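// Note: the legacy message path below sends only strings[0..8]; block parcel
// search (strings[9]) and block_fly_over are only sent via the
// DispatchRegionInfo capability.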
BOOL LLPanelRegionGeneralInfo::sendUpdate()
{
LL_INFOS() << "LLPanelRegionGeneralInfo::sendUpdate()" << LL_ENDL;
// <FS:Ansariel> Crash fix
if (!gAgent.getRegion())
{
return FALSE;
}
// </FS:Ansariel>
// First try using a Cap. If that fails use the old method.
LLSD body;
std::string url = gAgent.getRegionCapability("DispatchRegionInfo");
if (!url.empty())
{
body["block_terraform"] = getChild<LLUICtrl>("block_terraform_check")->getValue();
body["block_fly"] = getChild<LLUICtrl>("block_fly_check")->getValue();
body["block_fly_over"] = getChild<LLUICtrl>("block_fly_over_check")->getValue();
body["allow_damage"] = getChild<LLUICtrl>("allow_damage_check")->getValue();
body["allow_land_resell"] = getChild<LLUICtrl>("allow_land_resell_check")->getValue();
body["agent_limit"] = getChild<LLUICtrl>("agent_limit_spin")->getValue();
body["prim_bonus"] = getChild<LLUICtrl>("object_bonus_spin")->getValue();
body["sim_access"] = getChild<LLUICtrl>("access_combo")->getValue();
body["restrict_pushobject"] = getChild<LLUICtrl>("restrict_pushobject")->getValue();
body["allow_parcel_changes"] = getChild<LLUICtrl>("allow_parcel_changes_check")->getValue();
body["block_parcel_search"] = getChild<LLUICtrl>("block_parcel_search_check")->getValue();
LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(url, body,
"Region info update posted.", "Region info update not posted.");
}
else
{
strings_t strings;
std::string buffer;
buffer = llformat("%s", (getChild<LLUICtrl>("block_terraform_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(strings_t::value_type(buffer));
buffer = llformat("%s", (getChild<LLUICtrl>("block_fly_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(strings_t::value_type(buffer));
buffer = llformat("%s", (getChild<LLUICtrl>("allow_damage_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(strings_t::value_type(buffer));
buffer = llformat("%s", (getChild<LLUICtrl>("allow_land_resell_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(strings_t::value_type(buffer));
F32 value = (F32)getChild<LLUICtrl>("agent_limit_spin")->getValue().asReal();
buffer = llformat("%f", value);
strings.push_back(strings_t::value_type(buffer));
value = (F32)getChild<LLUICtrl>("object_bonus_spin")->getValue().asReal();
buffer = llformat("%f", value);
strings.push_back(strings_t::value_type(buffer));
buffer = llformat("%d", getChild<LLUICtrl>("access_combo")->getValue().asInteger());
strings.push_back(strings_t::value_type(buffer));
buffer = llformat("%s", (getChild<LLUICtrl>("restrict_pushobject")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(strings_t::value_type(buffer));
buffer = llformat("%s", (getChild<LLUICtrl>("allow_parcel_changes_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(strings_t::value_type(buffer));
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "setregioninfo", invoice, strings);
}
// if we changed access levels, tell user about it
LLViewerRegion* region = gAgent.getRegion();
if (region && (getChild<LLUICtrl>("access_combo")->getValue().asInteger() != region->getSimAccess()) )
{
LLNotificationsUtil::add("RegionMaturityChange");
}
return TRUE;
}
// <FS:CR> Aurora Sim - Region Settings Panel
/////////////////////////////////////////////////////////////////////////////
// LLPanelRegionOpenSettingsInfo
/////////////////////////////////////////////////////////////////////////////
bool LLPanelRegionOpenSettingsInfo::refreshFromRegion(LLViewerRegion* region)
{
// Data gets filled in by hippo manager
BOOL allow_modify = gAgent.isGodlike() || (region && region->canManageEstate());
LLWorld *regionlimits = LLWorld::getInstance();
childSetValue("draw_distance", LLSD(regionlimits->getDrawDistance()));
childSetValue("force_draw_distance", LLSD(regionlimits->getLockedDrawDistance()));
childSetValue("allow_minimap", LLSD(regionlimits->getAllowMinimap()));
childSetValue("allow_physical_prims", LLSD(regionlimits->getAllowPhysicalPrims()));
childSetValue("max_drag_distance", LLSD(regionlimits->getMaxDragDistance()));
childSetValue("min_hole_size", LLSD(regionlimits->getRegionMinHoleSize()));
childSetValue("max_hollow_size", LLSD(regionlimits->getRegionMaxHollowSize()));
childSetValue("max_inventory_items_transfer", LLSD(regionlimits->getMaxInventoryItemsTransfer()));
childSetValue("max_link_count", LLSD((LLSD::Integer)regionlimits->getMaxLinkedPrims()));
childSetValue("max_link_count_phys", LLSD(regionlimits->getMaxPhysLinkedPrims()));
childSetValue("max_phys_prim_scale", LLSD(regionlimits->getMaxPhysPrimScale()));
childSetValue("max_prim_scale", LLSD(regionlimits->getRegionMaxPrimScale()));
childSetValue("min_prim_scale", LLSD(regionlimits->getRegionMinPrimScale()));
childSetValue("render_water", LLSD(regionlimits->getAllowRenderWater()));
childSetValue("show_tags", LLSD(regionlimits->getAllowRenderName()));
childSetValue("max_groups", LLSD(gMaxAgentGroups));
childSetValue("allow_parcel_windlight", LLSD(regionlimits->getAllowParcelWindLight()));
childSetValue("enable_teen_mode", LLSD(regionlimits->getEnableTeenMode()));
childSetValue("enforce_max_build", LLSD(regionlimits->getEnforceMaxBuild()));
childSetValue("terrain_detail_scale", LLSD(regionlimits->getTerrainDetailScale()));
setCtrlsEnabled(allow_modify);
return LLPanelRegionInfo::refreshFromRegion(region);
}
BOOL LLPanelRegionOpenSettingsInfo::postBuild()
{
// Enable the "Apply" button if something is changed. JC
initCtrl("draw_distance");
initCtrl("force_draw_distance");
initCtrl("max_drag_distance");
initCtrl("max_prim_scale");
initCtrl("min_prim_scale");
initCtrl("max_phys_prim_scale");
initCtrl("max_hollow_size");
initCtrl("min_hole_size");
initCtrl("max_link_count");
initCtrl("max_link_count_phys");
initCtrl("max_inventory_items_transfer");
initCtrl("max_groups");
initCtrl("render_water");
initCtrl("allow_minimap");
initCtrl("allow_physical_prims");
initCtrl("enable_teen_mode");
initCtrl("show_tags");
initCtrl("allow_parcel_windlight");
initCtrl("terrain_detail_scale");
childSetAction("apply_ors_btn", onClickOrs, this);
refreshFromRegion(gAgent.getRegion());
return LLPanelRegionInfo::postBuild();
}
void LLPanelRegionOpenSettingsInfo::onClickHelp(void* data)
{
std::string* xml_alert = (std::string*)data;
LLNotifications::instance().add(*xml_alert);
}
void LLPanelRegionOpenSettingsInfo::onClickOrs(void* userdata)
{
LLPanelRegionOpenSettingsInfo* self = (LLPanelRegionOpenSettingsInfo*)userdata;
LL_INFOS() << "LLPanelRegionOpenSettingsInfo::onClickOrs()" << LL_ENDL;
LLSD body;
std::string url = gAgent.getRegionCapability("DispatchOpenRegionSettings");
if (!url.empty())
{
body["draw_distance"] = (LLSD::Integer)self->childGetValue("draw_distance");
body["force_draw_distance"] = (LLSD::Boolean)self->childGetValue("force_draw_distance");
body["allow_minimap"] = (LLSD::Boolean)self->childGetValue("allow_minimap");
body["allow_physical_prims"] = (LLSD::Boolean)self->childGetValue("allow_physical_prims");
body["max_drag_distance"] = (LLSD::Real)self->childGetValue("max_drag_distance");
body["min_hole_size"] = (LLSD::Real)self->childGetValue("min_hole_size");
body["max_hollow_size"] = (LLSD::Real)self->childGetValue("max_hollow_size");
body["max_inventory_items_transfer"] = (LLSD::Integer)self->childGetValue("max_inventory_items_transfer");
body["max_link_count"] = (LLSD::Real)self->childGetValue("max_link_count");
body["max_link_count_phys"] = (LLSD::Real)self->childGetValue("max_link_count_phys");
body["max_phys_prim_scale"] = (LLSD::Real)self->childGetValue("max_phys_prim_scale");
body["max_prim_scale"] = (LLSD::Real)self->childGetValue("max_prim_scale");
body["min_prim_scale"] = (LLSD::Real)self->childGetValue("min_prim_scale");
body["render_water"] = (LLSD::Boolean)self->childGetValue("render_water");
body["terrain_detail_scale"] = (LLSD::Real)self->childGetValue("terrain_detail_scale");
body["show_tags"] = (LLSD::Real)self->childGetValue("show_tags");
body["max_groups"] = (LLSD::Real)self->childGetValue("max_groups");
body["allow_parcel_windlight"] = (LLSD::Boolean)self->childGetValue("allow_parcel_windlight");
body["enable_teen_mode"] = (LLSD::Boolean)self->childGetValue("enable_teen_mode");
body["enforce_max_build"] = (LLSD::Boolean)self->childGetValue("enforce_max_build");
LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(url, body, "Posted onClickOrs", "Error posting onClickOrs");
//LL_INFOS() << "data: " << LLSDXMLStreamer(body) << LL_ENDL;
}
}
// </FS:CR> Aurora Sim - Region Settings Console
/////////////////////////////////////////////////////////////////////////////
// LLPanelRegionDebugInfo
/////////////////////////////////////////////////////////////////////////////
BOOL LLPanelRegionDebugInfo::postBuild()
{
LLPanelRegionInfo::postBuild();
initCtrl("disable_scripts_check");
initCtrl("disable_collisions_check");
initCtrl("disable_physics_check");
childSetAction("choose_avatar_btn", boost::bind(&LLPanelRegionDebugInfo::onClickChooseAvatar, this));
childSetAction("return_btn", onClickReturn, this);
childSetAction("top_colliders_btn", onClickTopColliders, this);
childSetAction("top_scripts_btn", onClickTopScripts, this);
childSetAction("restart_btn", onClickRestart, this);
childSetAction("cancel_restart_btn", onClickCancelRestart, this);
childSetAction("region_debug_console_btn", onClickDebugConsole, this);
return TRUE;
}
// virtual
bool LLPanelRegionDebugInfo::refreshFromRegion(LLViewerRegion* region)
{
BOOL allow_modify = gAgent.isGodlike() || (region && region->canManageEstate());
setCtrlsEnabled(allow_modify);
getChildView("apply_btn")->setEnabled(FALSE);
getChildView("target_avatar_name")->setEnabled(FALSE);
getChildView("choose_avatar_btn")->setEnabled(allow_modify);
getChildView("return_scripts")->setEnabled(allow_modify && !mTargetAvatar.isNull());
getChildView("return_other_land")->setEnabled(allow_modify && !mTargetAvatar.isNull());
getChildView("return_estate_wide")->setEnabled(allow_modify && !mTargetAvatar.isNull());
getChildView("return_btn")->setEnabled(allow_modify && !mTargetAvatar.isNull());
getChildView("top_colliders_btn")->setEnabled(allow_modify);
getChildView("top_scripts_btn")->setEnabled(allow_modify);
getChildView("restart_btn")->setEnabled(allow_modify);
getChildView("cancel_restart_btn")->setEnabled(allow_modify);
getChildView("region_debug_console_btn")->setEnabled(allow_modify);
return LLPanelRegionInfo::refreshFromRegion(region);
}
// virtual
BOOL LLPanelRegionDebugInfo::sendUpdate()
{
LL_INFOS() << "LLPanelRegionDebugInfo::sendUpdate" << LL_ENDL;
strings_t strings;
std::string buffer;
buffer = llformat("%s", (getChild<LLUICtrl>("disable_scripts_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(buffer);
buffer = llformat("%s", (getChild<LLUICtrl>("disable_collisions_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(buffer);
buffer = llformat("%s", (getChild<LLUICtrl>("disable_physics_check")->getValue().asBoolean() ? "Y" : "N"));
strings.push_back(buffer);
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "setregiondebug", invoice, strings);
return TRUE;
}
void LLPanelRegionDebugInfo::onClickChooseAvatar()
{
LLView * button = findChild<LLButton>("choose_avatar_btn");
LLFloater* parent_floater = gFloaterView->getParentFloater(this);
LLFloater * child_floater = LLFloaterAvatarPicker::show(boost::bind(&LLPanelRegionDebugInfo::callbackAvatarID, this, _1, _2),
FALSE, TRUE, FALSE, parent_floater->getName(), button);
if (child_floater)
{
parent_floater->addDependentFloater(child_floater);
}
}
void LLPanelRegionDebugInfo::callbackAvatarID(const uuid_vec_t& ids, const std::vector<LLAvatarName>& names)
{
if (ids.empty() || names.empty()) return;
mTargetAvatar = ids[0];
getChild<LLUICtrl>("target_avatar_name")->setValue(LLSD(names[0].getCompleteName()));
refreshFromRegion( gAgent.getRegion() );
}
// static
void LLPanelRegionDebugInfo::onClickReturn(void* data)
{
LLPanelRegionDebugInfo* panelp = (LLPanelRegionDebugInfo*) data;
if (panelp->mTargetAvatar.isNull()) return;
LLSD args;
args["USER_NAME"] = panelp->getChild<LLUICtrl>("target_avatar_name")->getValue().asString();
LLSD payload;
payload["avatar_id"] = panelp->mTargetAvatar;
U32 flags = SWD_ALWAYS_RETURN_OBJECTS;
if (panelp->getChild<LLUICtrl>("return_scripts")->getValue().asBoolean())
{
flags |= SWD_SCRIPTED_ONLY;
}
if (panelp->getChild<LLUICtrl>("return_other_land")->getValue().asBoolean())
{
flags |= SWD_OTHERS_LAND_ONLY;
}
payload["flags"] = int(flags);
payload["return_estate_wide"] = panelp->getChild<LLUICtrl>("return_estate_wide")->getValue().asBoolean();
LLNotificationsUtil::add("EstateObjectReturn", args, payload,
boost::bind(&LLPanelRegionDebugInfo::callbackReturn, panelp, _1, _2));
}
bool LLPanelRegionDebugInfo::callbackReturn(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option != 0) return false;
LLUUID target_avatar = notification["payload"]["avatar_id"].asUUID();
if (!target_avatar.isNull())
{
U32 flags = notification["payload"]["flags"].asInteger();
bool return_estate_wide = notification["payload"]["return_estate_wide"];
if (return_estate_wide)
{
// send as estate message - routed by spaceserver to all regions in estate
strings_t strings;
strings.push_back(llformat("%d", flags));
strings.push_back(target_avatar.asString());
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "estateobjectreturn", invoice, strings);
}
else
{
// send to this simulator only
send_sim_wide_deletes(target_avatar, flags);
}
}
return false;
}
// static
void LLPanelRegionDebugInfo::onClickTopColliders(void* data)
{
LLPanelRegionDebugInfo* self = (LLPanelRegionDebugInfo*)data;
strings_t strings;
strings.push_back("1"); // one physics step
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
LLFloaterTopObjects* instance = LLFloaterReg::getTypedInstance<LLFloaterTopObjects>("top_objects");
if(!instance) return;
LLFloaterReg::showInstance("top_objects");
instance->clearList();
instance->disableRefreshBtn();
self->getChildView("top_colliders_btn")->setEnabled(false);
self->getChildView("top_scripts_btn")->setEnabled(false);
self->sendEstateOwnerMessage(gMessageSystem, "colliders", invoice, strings);
}
// static
void LLPanelRegionDebugInfo::onClickTopScripts(void* data)
{
LLPanelRegionDebugInfo* self = (LLPanelRegionDebugInfo*)data;
strings_t strings;
strings.push_back("6"); // top 5 scripts
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
LLFloaterTopObjects* instance = LLFloaterReg::getTypedInstance<LLFloaterTopObjects>("top_objects");
if(!instance) return;
LLFloaterReg::showInstance("top_objects");
instance->clearList();
instance->disableRefreshBtn();
self->getChildView("top_colliders_btn")->setEnabled(false);
self->getChildView("top_scripts_btn")->setEnabled(false);
self->sendEstateOwnerMessage(gMessageSystem, "scripts", invoice, strings);
}
// static
void LLPanelRegionDebugInfo::onClickRestart(void* data)
{
// <Ansariel FIRE-1073>
LLPanelRegionDebugInfo* self = (LLPanelRegionDebugInfo*)data;
LLSD delay;
if (self) delay = self->getChild<LLSpinCtrl>("restart_delay")->getValue();
else delay = LLSD(120);
// </Ansariel FIRE-1073>
LLNotificationsUtil::add("ConfirmRestart", LLSD(), LLSD(),
boost::bind(&LLPanelRegionDebugInfo::callbackRestart, (LLPanelRegionDebugInfo*)data, _1, _2, delay));
}
bool LLPanelRegionDebugInfo::callbackRestart(const LLSD& notification, const LLSD& response, const LLSD& seconds)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option != 0) return false;
strings_t strings;
strings.push_back(seconds.asString());
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "restart", invoice, strings);
return false;
}
// static
void LLPanelRegionDebugInfo::onClickCancelRestart(void* data)
{
LLPanelRegionDebugInfo* self = (LLPanelRegionDebugInfo*)data;
strings_t strings;
strings.push_back("-1");
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
self->sendEstateOwnerMessage(gMessageSystem, "restart", invoice, strings);
}
// static
void LLPanelRegionDebugInfo::onClickDebugConsole(void* data)
{
LLFloaterReg::showInstance("region_debug_console");
}
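// Rejects terrain detail textures that are not 24-bit RGB or that exceed
// the maximum allowed dimensions, telling the user which texture is at fault.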
BOOL LLPanelRegionTerrainInfo::validateTextureSizes()
{
for(S32 i = 0; i < TERRAIN_TEXTURE_COUNT; ++i)
{
std::string buffer;
buffer = llformat("texture_detail_%d", i);
LLTextureCtrl* texture_ctrl = getChild<LLTextureCtrl>(buffer);
if (!texture_ctrl) continue;
LLUUID image_asset_id = texture_ctrl->getImageAssetID();
LLViewerTexture* img = LLViewerTextureManager::getFetchedTexture(image_asset_id);
if (!img) continue; // texture may not be available yet
S32 components = img->getComponents();
// Must ask for highest resolution version's width. JC
S32 width = img->getFullWidth();
S32 height = img->getFullHeight();
//LL_INFOS() << "texture detail " << i << " is " << width << "x" << height << "x" << components << LL_ENDL;
if (components != 3)
{
LLSD args;
args["TEXTURE_NUM"] = i+1;
args["TEXTURE_BIT_DEPTH"] = llformat("%d",components * 8);
LLNotificationsUtil::add("InvalidTerrainBitDepth", args);
return FALSE;
}
// <FS:Ansariel> Allow terrain textures up to 1024x1024 pixels
// as in Phoenix (FIRE-2319)
//if (width > 512 || height > 512)
if (width > 1024 || height > 1024)
{
LLSD args;
args["TEXTURE_NUM"] = i+1;
args["TEXTURE_SIZE_X"] = width;
args["TEXTURE_SIZE_Y"] = height;
LLNotificationsUtil::add("InvalidTerrainSize", args);
return FALSE;
}
}
return TRUE;
}
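// Returns FALSE if any corner's height start value exceeds its height range value.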
BOOL LLPanelRegionTerrainInfo::validateTextureHeights()
{
for (S32 i = 0; i < CORNER_COUNT; ++i)
{
std::string low = llformat("height_start_spin_%d", i);
std::string high = llformat("height_range_spin_%d", i);
if (getChild<LLUICtrl>(low)->getValue().asReal() > getChild<LLUICtrl>(high)->getValue().asReal())
{
return FALSE;
}
}
return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
// LLPanelRegionTerrainInfo
/////////////////////////////////////////////////////////////////////////////
// Initialize statics
BOOL LLPanelRegionTerrainInfo::postBuild()
{
LLPanelRegionInfo::postBuild();
initCtrl("water_height_spin");
initCtrl("terrain_raise_spin");
initCtrl("terrain_lower_spin");
std::string buffer;
for(S32 i = 0; i < TERRAIN_TEXTURE_COUNT; ++i)
{
buffer = llformat("texture_detail_%d", i);
initCtrl(buffer);
}
for(S32 i = 0; i < CORNER_COUNT; ++i)
{
buffer = llformat("height_start_spin_%d", i);
initCtrl(buffer);
buffer = llformat("height_range_spin_%d", i);
initCtrl(buffer);
}
childSetAction("download_raw_btn", onClickDownloadRaw, this);
childSetAction("upload_raw_btn", onClickUploadRaw, this);
childSetAction("bake_terrain_btn", onClickBakeTerrain, this);
mAskedTextureHeights = false;
mConfirmedTextureHeights = false;
return LLPanelRegionInfo::postBuild();
}
// virtual
bool LLPanelRegionTerrainInfo::refreshFromRegion(LLViewerRegion* region)
{
BOOL owner_or_god = gAgent.isGodlike()
|| (region && (region->getOwner() == gAgent.getID()));
BOOL owner_or_god_or_manager = owner_or_god
|| (region && region->isEstateManager());
setCtrlsEnabled(owner_or_god_or_manager);
getChildView("apply_btn")->setEnabled(FALSE);
if (region)
{
getChild<LLUICtrl>("region_text")->setValue(LLSD(region->getName()));
LLVLComposition* compp = region->getComposition();
LLTextureCtrl* texture_ctrl;
std::string buffer;
for(S32 i = 0; i < TERRAIN_TEXTURE_COUNT; ++i)
{
buffer = llformat("texture_detail_%d", i);
texture_ctrl = getChild<LLTextureCtrl>(buffer);
if(texture_ctrl)
{
LL_DEBUGS() << "Detail Texture " << i << ": "
<< compp->getDetailTextureID(i) << LL_ENDL;
LLUUID tmp_id(compp->getDetailTextureID(i));
texture_ctrl->setImageAssetID(tmp_id);
}
}
for(S32 i = 0; i < CORNER_COUNT; ++i)
{
buffer = llformat("height_start_spin_%d", i);
getChild<LLUICtrl>(buffer)->setValue(LLSD(compp->getStartHeight(i)));
buffer = llformat("height_range_spin_%d", i);
getChild<LLUICtrl>(buffer)->setValue(LLSD(compp->getHeightRange(i)));
}
}
else
{
LL_DEBUGS() << "no region set" << LL_ENDL;
getChild<LLUICtrl>("region_text")->setValue(LLSD(""));
}
getChildView("download_raw_btn")->setEnabled(owner_or_god);
getChildView("upload_raw_btn")->setEnabled(owner_or_god);
getChildView("bake_terrain_btn")->setEnabled(owner_or_god);
return LLPanelRegionInfo::refreshFromRegion(region);
}
// virtual
BOOL LLPanelRegionTerrainInfo::sendUpdate()
{
LL_INFOS() << "LLPanelRegionTerrainInfo::sendUpdate" << LL_ENDL;
std::string buffer;
strings_t strings;
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
// update the model
LLRegionInfoModel& region_info = LLRegionInfoModel::instance();
region_info.mWaterHeight = (F32) getChild<LLUICtrl>("water_height_spin")->getValue().asReal();
region_info.mTerrainRaiseLimit = (F32) getChild<LLUICtrl>("terrain_raise_spin")->getValue().asReal();
region_info.mTerrainLowerLimit = (F32) getChild<LLUICtrl>("terrain_lower_spin")->getValue().asReal();
// and sync the region with it
region_info.sendRegionTerrain(invoice);
// =======================================
// Assemble and send texturedetail message
// Make sure user hasn't chosen wacky textures unless we're on Aurora-sim.
// <FS:CR> Aurora Sim - Region Settings Console
#ifdef OPENSIM
if (!validateTextureSizes() && !LLGridManager::getInstance()->isInAuroraSim())
{
return FALSE;
}
#else
if (!validateTextureSizes())
{
return FALSE;
}
#endif // OPENSIM
// </FS:CR> Aurora Sim - Region Settings Console
// Check if terrain Elevation Ranges are correct
if (gSavedSettings.getBOOL("RegionCheckTextureHeights") && !validateTextureHeights())
{
if (!mAskedTextureHeights)
{
LLNotificationsUtil::add("ConfirmTextureHeights", LLSD(), LLSD(), boost::bind(&LLPanelRegionTerrainInfo::callbackTextureHeights, this, _1, _2));
mAskedTextureHeights = true;
return FALSE;
}
else if (!mConfirmedTextureHeights)
{
return FALSE;
}
}
LLTextureCtrl* texture_ctrl;
std::string id_str;
LLMessageSystem* msg = gMessageSystem;
for(S32 i = 0; i < TERRAIN_TEXTURE_COUNT; ++i)
{
buffer = llformat("texture_detail_%d", i);
texture_ctrl = getChild<LLTextureCtrl>(buffer);
if(texture_ctrl)
{
LLUUID tmp_id(texture_ctrl->getImageAssetID());
tmp_id.toString(id_str);
buffer = llformat("%d %s", i, id_str.c_str());
strings.push_back(buffer);
}
}
sendEstateOwnerMessage(msg, "texturedetail", invoice, strings);
strings.clear();
// ========================================
// Assemble and send textureheights message
for(S32 i = 0; i < CORNER_COUNT; ++i)
{
buffer = llformat("height_start_spin_%d", i);
std::string buffer2 = llformat("height_range_spin_%d", i);
std::string buffer3 = llformat("%d %f %f", i, (F32)getChild<LLUICtrl>(buffer)->getValue().asReal(), (F32)getChild<LLUICtrl>(buffer2)->getValue().asReal());
strings.push_back(buffer3);
}
sendEstateOwnerMessage(msg, "textureheights", invoice, strings);
strings.clear();
// ========================================
// Send texturecommit message
sendEstateOwnerMessage(msg, "texturecommit", invoice, strings);
return TRUE;
}
bool LLPanelRegionTerrainInfo::callbackTextureHeights(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option == 0) // ok
{
mConfirmedTextureHeights = true;
}
else if (option == 1) // cancel
{
mConfirmedTextureHeights = false;
}
else if (option == 2) // don't ask
{
gSavedSettings.setBOOL("RegionCheckTextureHeights", FALSE);
mConfirmedTextureHeights = true;
}
onBtnSet();
mAskedTextureHeights = false;
return false;
}
// static
// <FS:Ansariel> Threaded filepickers
//void LLPanelRegionTerrainInfo::onClickDownloadRaw(void* data)
//{
// LLFilePicker& picker = LLFilePicker::instance();
// if (!picker.getSaveFile(LLFilePicker::FFSAVE_RAW, "terrain.raw"))
// {
// LL_WARNS() << "No file" << LL_ENDL;
// return;
// }
// std::string filepath = picker.getFirstFile();
// gXferManager->expectFileForRequest(filepath);
//
// LLPanelRegionTerrainInfo* self = (LLPanelRegionTerrainInfo*)data;
// strings_t strings;
// strings.push_back("download filename");
// strings.push_back(filepath);
// self->sendEstateOwnerMessage(gMessageSystem, "terrain", invoice, strings);
//}
//
//// static
//void LLPanelRegionTerrainInfo::onClickUploadRaw(void* data)
//{
// LLFilePicker& picker = LLFilePicker::instance();
// if (!picker.getOpenFile(LLFilePicker::FFLOAD_RAW))
// {
// LL_WARNS() << "No file" << LL_ENDL;
// return;
// }
// std::string filepath = picker.getFirstFile();
// gXferManager->expectFileForTransfer(filepath);
//
// LLPanelRegionTerrainInfo* self = (LLPanelRegionTerrainInfo*)data;
// strings_t strings;
// strings.push_back("upload filename");
// strings.push_back(filepath);
// LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
// self->sendEstateOwnerMessage(gMessageSystem, "terrain", invoice, strings);
//
// LLNotificationsUtil::add("RawUploadStarted");
//}
void LLPanelRegionTerrainInfo::onClickDownloadRaw(void* data)
{
LLPanelRegionTerrainInfo* self = (LLPanelRegionTerrainInfo*)data;
(new LLFilePickerReplyThread(boost::bind(&LLPanelRegionTerrainInfo::onDownloadRawFilepickerCB, self, _1), LLFilePicker::FFSAVE_RAW, "terrain.raw"))->getFile();
}
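// Reply from the threaded file picker: register the chosen path with the
// xfer manager and ask the simulator to send the terrain as a raw file.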
void LLPanelRegionTerrainInfo::onDownloadRawFilepickerCB(const std::vector<std::string>& filenames)
{
if (filenames.empty()) return;
std::string filepath = filenames[0];
gXferManager->expectFileForRequest(filepath);
strings_t strings;
strings.push_back("download filename");
strings.push_back(filepath);
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "terrain", invoice, strings);
}
// static
void LLPanelRegionTerrainInfo::onClickUploadRaw(void* data)
{
LLPanelRegionTerrainInfo* self = (LLPanelRegionTerrainInfo*)data;
(new LLFilePickerReplyThread(boost::bind(&LLPanelRegionTerrainInfo::onUploadRawFilepickerCB, self, _1), LLFilePicker::FFLOAD_RAW, false))->getFile();
}
void LLPanelRegionTerrainInfo::onUploadRawFilepickerCB(const std::vector<std::string>& filenames)
{
if (filenames.empty()) return;
std::string filepath = filenames[0];
gXferManager->expectFileForTransfer(filepath);
strings_t strings;
strings.push_back("upload filename");
strings.push_back(filepath);
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "terrain", invoice, strings);
LLNotificationsUtil::add("RawUploadStarted");
}
// </FS:Ansariel> Threaded filepickers
// static
void LLPanelRegionTerrainInfo::onClickBakeTerrain(void* data)
{
LLNotificationsUtil::add("ConfirmBakeTerrain", LLSD(), LLSD(), boost::bind(&LLPanelRegionTerrainInfo::callbackBakeTerrain, (LLPanelRegionTerrainInfo*)data, _1, _2));
}
bool LLPanelRegionTerrainInfo::callbackBakeTerrain(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option != 0) return false;
strings_t strings;
strings.push_back("bake");
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "terrain", invoice, strings);
return false;
}
/////////////////////////////////////////////////////////////////////////////
// LLPanelEstateInfo
//
LLPanelEstateInfo::LLPanelEstateInfo()
: LLPanelRegionInfo(),
mEstateID(0) // invalid
{
LLEstateInfoModel& estate_info = LLEstateInfoModel::instance();
estate_info.setCommitCallback(boost::bind(&LLPanelEstateInfo::refreshFromEstate, this));
estate_info.setUpdateCallback(boost::bind(&LLPanelEstateInfo::refreshFromEstate, this));
}
// static
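// Registers the static handlers for the estate dispatch messages
// ("estateupdateinfo", "setaccess", "setexperience").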
void LLPanelEstateInfo::initDispatch(LLDispatcher& dispatch)
{
std::string name;
name.assign("estateupdateinfo");
static LLDispatchEstateUpdateInfo estate_update_info;
dispatch.addHandler(name, &estate_update_info);
name.assign("setaccess");
static LLDispatchSetEstateAccess set_access;
dispatch.addHandler(name, &set_access);
name.assign("setexperience");
static LLDispatchSetEstateExperience set_experience;
dispatch.addHandler(name, &set_experience);
estate_dispatch_initialized = true;
}
//---------------------------------------------------------------------------
// Kick from estate methods
//---------------------------------------------------------------------------
void LLPanelEstateInfo::onClickKickUser()
{
// this depends on the grandparent view being a floater
// in order to set up floater dependency
LLView * button = findChild<LLButton>("kick_user_from_estate_btn");
LLFloater* parent_floater = gFloaterView->getParentFloater(this);
LLFloater* child_floater = LLFloaterAvatarPicker::show(boost::bind(&LLPanelEstateInfo::onKickUserCommit, this, _1),
FALSE, TRUE, FALSE, parent_floater->getName(), button);
if (child_floater)
{
parent_floater->addDependentFloater(child_floater);
}
}
void LLPanelEstateInfo::onKickUserCommit(const uuid_vec_t& ids)
{
if (ids.empty()) return;
//Bring up a confirmation dialog
LLSD args;
args["EVIL_USER"] = LLSLURL("agent", ids[0], "completename").getSLURLString();
LLSD payload;
payload["agent_id"] = ids[0];
LLNotificationsUtil::add("EstateKickUser", args, payload, boost::bind(&LLPanelEstateInfo::kickUserConfirm, this, _1, _2));
}
bool LLPanelEstateInfo::kickUserConfirm(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
switch(option)
{
case 0:
{
//Kick User
strings_t strings;
strings.push_back(notification["payload"]["agent_id"].asString());
sendEstateOwnerMessage(gMessageSystem, "kickestate", LLFloaterRegionInfo::getLastInvoice(), strings);
break;
}
default:
break;
}
return false;
}
//---------------------------------------------------------------------------
// Core Add/Remove estate access methods
// TODO: INTERNATIONAL: don't build message text here;
// instead, create multiple translatable messages and choose
// one based on the status.
//---------------------------------------------------------------------------
std::string all_estates_text()
{
LLPanelEstateInfo* panel = LLFloaterRegionInfo::getPanelEstate();
if (!panel) return "(" + LLTrans::getString("RegionInfoError") + ")";
LLStringUtil::format_map_t args;
std::string owner = panel->getOwnerName();
LLViewerRegion* region = gAgent.getRegion();
if (gAgent.isGodlike())
{
args["[OWNER]"] = owner.c_str();
return LLTrans::getString("RegionInfoAllEstatesOwnedBy", args);
}
else if (region && region->getOwner() == gAgent.getID())
{
return LLTrans::getString("RegionInfoAllEstatesYouOwn");
}
else if (region && region->isEstateManager())
{
args["[OWNER]"] = owner.c_str();
return LLTrans::getString("RegionInfoAllEstatesYouManage", args);
}
else
{
return "(" + LLTrans::getString("RegionInfoError") + ")";
}
}
// static
bool LLPanelEstateInfo::isLindenEstate()
{
U32 estate_id = LLEstateInfoModel::instance().getID();
return (estate_id <= ESTATE_LAST_LINDEN);
}
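// Bundles one pending estate access-list change (the operation flag plus the
// affected agent/group IDs and optional names) for round-tripping through a
// notification payload as LLSD.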
struct LLEstateAccessChangeInfo
{
LLEstateAccessChangeInfo(const LLSD& sd)
{
mDialogName = sd["dialog_name"].asString();
mOperationFlag = (U32)sd["operation"].asInteger();
LLSD::array_const_iterator end_it = sd["allowed_ids"].endArray();
for (LLSD::array_const_iterator id_it = sd["allowed_ids"].beginArray();
id_it != end_it;
++id_it)
{
mAgentOrGroupIDs.push_back(id_it->asUUID());
}
}
const LLSD asLLSD() const
{
LLSD sd;
sd["name"] = mDialogName;
sd["operation"] = (S32)mOperationFlag;
for (U32 i = 0; i < mAgentOrGroupIDs.size(); ++i)
{
sd["allowed_ids"].append(mAgentOrGroupIDs[i]);
if (mAgentNames.size() > i)
{
sd["allowed_names"].append(mAgentNames[i].asLLSD());
}
}
return sd;
}
U32 mOperationFlag; // ESTATE_ACCESS_BANNED_AGENT_ADD, _REMOVE, etc.
std::string mDialogName;
uuid_vec_t mAgentOrGroupIDs; // List of agent IDs to apply to this change
std::vector<LLAvatarName> mAgentNames; // Optional list of the agent names for notifications
};
// static
void LLPanelEstateInfo::updateEstateOwnerName(const std::string& name)
{
LLPanelEstateInfo* panelp = LLFloaterRegionInfo::getPanelEstate();
if (panelp)
{
panelp->setOwnerName(name);
}
}
// static
void LLPanelEstateInfo::updateEstateName(const std::string& name)
{
LLPanelEstateInfo* panelp = LLFloaterRegionInfo::getPanelEstate();
if (panelp)
{
panelp->getChildRef<LLTextBox>("estate_name").setText(name);
}
}
void LLPanelEstateInfo::updateControls(LLViewerRegion* region)
{
BOOL god = gAgent.isGodlike();
BOOL owner = (region && (region->getOwner() == gAgent.getID()));
BOOL manager = (region && region->isEstateManager());
setCtrlsEnabled(god || owner || manager);
getChildView("apply_btn")->setEnabled(FALSE);
getChildView("message_estate_btn")->setEnabled(god || owner || manager);
getChildView("kick_user_from_estate_btn")->setEnabled(god || owner || manager);
refresh();
}
bool LLPanelEstateInfo::refreshFromRegion(LLViewerRegion* region)
{
updateControls(region);
// let the parent class handle the general data collection.
bool rv = LLPanelRegionInfo::refreshFromRegion(region);
// We want estate info. To make sure replies work across region boundaries
// and multiple packets, the request is tagged with an invoice UUID that is
// checked against getLastInvoice() when the reply arrives.
strings_t strings;
//integers_t integers;
//LLFloaterRegionInfo::incrementSerial();
LLFloaterRegionInfo::nextInvoice();
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "getinfo", invoice, strings);
refresh();
return rv;
}
void LLPanelEstateInfo::updateChild(LLUICtrl* child_ctrl)
{
// Ensure the appropriate state of the management UI.
updateControls(gAgent.getRegion());
}
bool LLPanelEstateInfo::estateUpdate(LLMessageSystem* msg)
{
LL_INFOS() << "LLPanelEstateInfo::estateUpdate()" << LL_ENDL;
return false;
}
BOOL LLPanelEstateInfo::postBuild()
{
// set up the callbacks for the generic controls
initCtrl("externally_visible_radio");
initCtrl("allow_direct_teleport");
initCtrl("limit_payment");
initCtrl("limit_age_verified");
initCtrl("voice_chat_check");
initCtrl("parcel_access_override");
childSetAction("message_estate_btn", boost::bind(&LLPanelEstateInfo::onClickMessageEstate, this));
childSetAction("kick_user_from_estate_btn", boost::bind(&LLPanelEstateInfo::onClickKickUser, this));
getChild<LLUICtrl>("parcel_access_override")->setCommitCallback(boost::bind(&LLPanelEstateInfo::onChangeAccessOverride, this));
getChild<LLUICtrl>("externally_visible_radio")->setFocus(TRUE);
return LLPanelRegionInfo::postBuild();
}
void LLPanelEstateInfo::refresh()
{
// Disable access restriction controls if they make no sense.
bool public_access = ("estate_public_access" == getChild<LLUICtrl>("externally_visible_radio")->getValue().asString());
// <FS:Ansariel> Does not exist as of 16-06-2017
// getChildView("Only Allow")->setEnabled(public_access);
getChildView("limit_payment")->setEnabled(public_access);
getChildView("limit_age_verified")->setEnabled(public_access);
// if this is set to false, then the limit fields are meaningless and should be turned off
if (!public_access)
{
getChild<LLUICtrl>("limit_payment")->setValue(false);
getChild<LLUICtrl>("limit_age_verified")->setValue(false);
}
}
void LLPanelEstateInfo::refreshFromEstate()
{
const LLEstateInfoModel& estate_info = LLEstateInfoModel::instance();
getChild<LLUICtrl>("estate_name")->setValue(estate_info.getName());
setOwnerName(LLSLURL("agent", estate_info.getOwnerID(), "inspect").getSLURLString());
getChild<LLUICtrl>("externally_visible_radio")->setValue(estate_info.getIsExternallyVisible() ? "estate_public_access" : "estate_restricted_access");
getChild<LLUICtrl>("voice_chat_check")->setValue(estate_info.getAllowVoiceChat());
getChild<LLUICtrl>("allow_direct_teleport")->setValue(estate_info.getAllowDirectTeleport());
getChild<LLUICtrl>("limit_payment")->setValue(estate_info.getDenyAnonymous());
getChild<LLUICtrl>("limit_age_verified")->setValue(estate_info.getDenyAgeUnverified());
getChild<LLUICtrl>("parcel_access_override")->setValue(estate_info.getAllowAccessOverride());
// Ensure appropriate state of the management UI
updateControls(gAgent.getRegion());
refresh();
}
BOOL LLPanelEstateInfo::sendUpdate()
{
LL_INFOS() << "LLPanelEsateInfo::sendUpdate()" << LL_ENDL;
LLNotification::Params params("ChangeLindenEstate");
params.functor.function(boost::bind(&LLPanelEstateInfo::callbackChangeLindenEstate, this, _1, _2));
if (isLindenEstate())
{
// trying to change reserved estate, warn
LLNotifications::instance().add(params);
}
else
{
// for normal estates, just make the change
LLNotifications::instance().forceResponse(params, 0);
}
return TRUE;
}
bool LLPanelEstateInfo::callbackChangeLindenEstate(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
switch(option)
{
case 0:
{
LLEstateInfoModel& estate_info = LLEstateInfoModel::instance();
// update model
estate_info.setUseFixedSun(false); // we don't support fixed sun estates anymore
estate_info.setIsExternallyVisible("estate_public_access" == getChild<LLUICtrl>("externally_visible_radio")->getValue().asString());
estate_info.setAllowDirectTeleport(getChild<LLUICtrl>("allow_direct_teleport")->getValue().asBoolean());
estate_info.setDenyAnonymous(getChild<LLUICtrl>("limit_payment")->getValue().asBoolean());
estate_info.setDenyAgeUnverified(getChild<LLUICtrl>("limit_age_verified")->getValue().asBoolean());
estate_info.setAllowVoiceChat(getChild<LLUICtrl>("voice_chat_check")->getValue().asBoolean());
estate_info.setAllowAccessOverride(getChild<LLUICtrl>("parcel_access_override")->getValue().asBoolean());
// send the update to sim
estate_info.sendEstateInfo();
}
// we don't want to do this because we'll get it automatically from the sim
// after the spaceserver processes it
// else
// {
// // caps method does not automatically send this info
// LLFloaterRegionInfo::requestRegionInfo();
// }
break;
case 1:
default:
// do nothing
break;
}
return false;
}
/*
// Request = "getowner"
// SParam[0] = "" (empty string)
// IParam[0] = serial
void LLPanelEstateInfo::getEstateOwner()
{
// TODO -- disable the panel
// and call this function whenever we cross a region boundary
// re-enable when owner matches, and get new estate info
LLMessageSystem* msg = gMessageSystem;
msg->newMessageFast(_PREHASH_EstateOwnerRequest);
msg->nextBlockFast(_PREHASH_AgentData);
msg->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
msg->nextBlockFast(_PREHASH_RequestData);
msg->addStringFast(_PREHASH_Request, "getowner");
// we send an empty string so that the variable block is not empty
msg->nextBlockFast(_PREHASH_StringData);
msg->addStringFast(_PREHASH_SParam, "");
msg->nextBlockFast(_PREHASH_IntegerData);
msg->addS32Fast(_PREHASH_IParam, LLFloaterRegionInfo::getSerial());
gAgent.sendMessage();
}
*/
const std::string LLPanelEstateInfo::getOwnerName() const
{
return getChild<LLUICtrl>("estate_owner")->getValue().asString();
}
void LLPanelEstateInfo::setOwnerName(const std::string& name)
{
getChild<LLUICtrl>("estate_owner")->setValue(LLSD(name));
}
// static
void LLPanelEstateInfo::onClickMessageEstate(void* userdata)
{
LL_INFOS() << "LLPanelEstateInfo::onClickMessageEstate" << LL_ENDL;
LLNotificationsUtil::add("MessageEstate", LLSD(), LLSD(), boost::bind(&LLPanelEstateInfo::onMessageCommit, (LLPanelEstateInfo*)userdata, _1, _2));
}
bool LLPanelEstateInfo::onMessageCommit(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
std::string text = response["message"].asString();
if(option != 0) return false;
if(text.empty()) return false;
LL_INFOS() << "Message to everyone: " << text << LL_ENDL;
strings_t strings;
//integers_t integers;
std::string name;
LLAgentUI::buildFullname(name);
strings.push_back(strings_t::value_type(name));
strings.push_back(strings_t::value_type(text));
LLUUID invoice(LLFloaterRegionInfo::getLastInvoice());
sendEstateOwnerMessage(gMessageSystem, "instantmessage", invoice, strings);
return false;
}
void LLPanelEstateInfo::onChangeAccessOverride()
{
if (!getChild<LLUICtrl>("parcel_access_override")->getValue().asBoolean())
{
LLNotificationsUtil::add("EstateParcelAccessOverride");
}
}
LLPanelEstateCovenant::LLPanelEstateCovenant()
:
mCovenantID(LLUUID::null),
mAssetStatus(ASSET_ERROR)
{
}
// virtual
bool LLPanelEstateCovenant::refreshFromRegion(LLViewerRegion* region)
{
LLTextBox* region_name = getChild<LLTextBox>("region_name_text");
if (region_name)
{
region_name->setText(region->getName());
}
LLTextBox* resellable_clause = getChild<LLTextBox>("resellable_clause");
if (resellable_clause)
{
if (region->getRegionFlag(REGION_FLAGS_BLOCK_LAND_RESELL))
{
resellable_clause->setText(getString("can_not_resell"));
}
else
{
resellable_clause->setText(getString("can_resell"));
}
}
LLTextBox* changeable_clause = getChild<LLTextBox>("changeable_clause");
if (changeable_clause)
{
if (region->getRegionFlag(REGION_FLAGS_ALLOW_PARCEL_CHANGES))
{
changeable_clause->setText(getString("can_change"));
}
else
{
changeable_clause->setText(getString("can_not_change"));
}
}
LLTextBox* region_maturity = getChild<LLTextBox>("region_maturity_text");
if (region_maturity)
{
region_maturity->setText(region->getSimAccessString());
}
LLTextBox* region_landtype = getChild<LLTextBox>("region_landtype_text");
region_landtype->setText(region->getLocalizedSimProductName());
// let the parent class handle the general data collection.
bool rv = LLPanelRegionInfo::refreshFromRegion(region);
LLMessageSystem *msg = gMessageSystem;
msg->newMessage("EstateCovenantRequest");
msg->nextBlockFast(_PREHASH_AgentData);
msg->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
msg->addUUIDFast(_PREHASH_SessionID,gAgent.getSessionID());
msg->sendReliable(region->getHost());
return rv;
}
// virtual
bool LLPanelEstateCovenant::estateUpdate(LLMessageSystem* msg)
{
LL_INFOS() << "LLPanelEstateCovenant::estateUpdate()" << LL_ENDL;
return true;
}
// virtual
BOOL LLPanelEstateCovenant::postBuild()
{
mEstateNameText = getChild<LLTextBox>("estate_name_text");
mEstateOwnerText = getChild<LLTextBox>("estate_owner_text");
mLastModifiedText = getChild<LLTextBox>("covenant_timestamp_text");
mEditor = getChild<LLViewerTextEditor>("covenant_editor");
LLButton* reset_button = getChild<LLButton>("reset_covenant");
reset_button->setEnabled(gAgent.canManageEstate());
reset_button->setClickedCallback(LLPanelEstateCovenant::resetCovenantID, NULL);
return LLPanelRegionInfo::postBuild();
}
// virtual
void LLPanelEstateCovenant::updateChild(LLUICtrl* child_ctrl)
{
}
// virtual
BOOL LLPanelEstateCovenant::handleDragAndDrop(S32 x, S32 y, MASK mask, BOOL drop,
EDragAndDropType cargo_type,
void* cargo_data,
EAcceptance* accept,
std::string& tooltip_msg)
{
LLInventoryItem* item = (LLInventoryItem*)cargo_data;
if (!gAgent.canManageEstate())
{
*accept = ACCEPT_NO;
return TRUE;
}
switch(cargo_type)
{
case DAD_NOTECARD:
*accept = ACCEPT_YES_COPY_SINGLE;
if (item && drop)
{
LLSD payload;
payload["item_id"] = item->getUUID();
LLNotificationsUtil::add("EstateChangeCovenant", LLSD(), payload,
LLPanelEstateCovenant::confirmChangeCovenantCallback);
}
break;
default:
*accept = ACCEPT_NO;
break;
}
return TRUE;
}
// static
bool LLPanelEstateCovenant::confirmChangeCovenantCallback(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
LLInventoryItem* item = gInventory.getItem(notification["payload"]["item_id"].asUUID());
LLPanelEstateCovenant* self = LLFloaterRegionInfo::getPanelCovenant();
if (!item || !self) return false;
switch(option)
{
case 0:
self->loadInvItem(item);
break;
default:
break;
}
return false;
}
// static
void LLPanelEstateCovenant::resetCovenantID(void* userdata)
{
LLNotificationsUtil::add("EstateChangeCovenant", LLSD(), LLSD(), confirmResetCovenantCallback);
}
// static
bool LLPanelEstateCovenant::confirmResetCovenantCallback(const LLSD& notification, const LLSD& response)
{
LLPanelEstateCovenant* self = LLFloaterRegionInfo::getPanelCovenant();
if (!self) return false;
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
switch(option)
{
case 0:
self->loadInvItem(NULL);
break;
default:
break;
}
return false;
}
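// Kicks off an asset download for the new covenant notecard; passing a
// NULL item instead clears the covenant (shows the "no covenant" text and
// sends a null covenant id to the simulator).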
void LLPanelEstateCovenant::loadInvItem(LLInventoryItem *itemp)
{
const BOOL high_priority = TRUE;
if (itemp)
{
gAssetStorage->getInvItemAsset(gAgent.getRegionHost(),
gAgent.getID(),
gAgent.getSessionID(),
itemp->getPermissions().getOwner(),
LLUUID::null,
itemp->getUUID(),
itemp->getAssetUUID(),
itemp->getType(),
onLoadComplete,
(void*)this,
high_priority);
mAssetStatus = ASSET_LOADING;
}
else
{
mAssetStatus = ASSET_LOADED;
setCovenantTextEditor(LLTrans::getString("RegionNoCovenant"));
sendChangeCovenantID(LLUUID::null);
}
}
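// Asset-storage callback: on success, imports the notecard text into the
// covenant editor and forwards the new covenant id to the simulator; on
// failure, raises the notification matching the error status.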
// static
void LLPanelEstateCovenant::onLoadComplete(LLVFS *vfs,
const LLUUID& asset_uuid,
LLAssetType::EType type,
void* user_data, S32 status, LLExtStat ext_status)
{
LL_INFOS() << "LLPanelEstateCovenant::onLoadComplete()" << LL_ENDL;
LLPanelEstateCovenant* panelp = (LLPanelEstateCovenant*)user_data;
if( panelp )
{
if(0 == status)
{
LLVFile file(vfs, asset_uuid, type, LLVFile::READ);
S32 file_length = file.getSize();
std::vector<char> buffer(file_length+1);
file.read((U8*)&buffer[0], file_length);
// put an EOS (null terminator) at the end
buffer[file_length] = 0;
if( (file_length > 19) && !strncmp( &buffer[0], "Linden text version", 19 ) )
{
if( !panelp->mEditor->importBuffer( &buffer[0], file_length+1 ) )
{
LL_WARNS() << "Problem importing estate covenant." << LL_ENDL;
LLNotificationsUtil::add("ProblemImportingEstateCovenant");
}
else
{
panelp->sendChangeCovenantID(asset_uuid);
}
}
else
{
// Version 0 (just text, doesn't include version number)
panelp->sendChangeCovenantID(asset_uuid);
}
}
else
{
if( LL_ERR_ASSET_REQUEST_NOT_IN_DATABASE == status ||
LL_ERR_FILE_EMPTY == status)
{
LLNotificationsUtil::add("MissingNotecardAssetID");
}
else if (LL_ERR_INSUFFICIENT_PERMISSIONS == status)
{
LLNotificationsUtil::add("NotAllowedToViewNotecard");
}
else
{
LLNotificationsUtil::add("UnableToLoadNotecardAsset");
}
LL_WARNS() << "Problem loading notecard: " << status << LL_ENDL;
}
panelp->mAssetStatus = ASSET_LOADED;
panelp->setCovenantID(asset_uuid);
}
}
// key = "estatechangecovenantid"
// strings[0] = str(estate_id) (added by simulator before relay - not here)
// strings[1] = str(covenant_id)
void LLPanelEstateCovenant::sendChangeCovenantID(const LLUUID &asset_id)
{
if (asset_id != getCovenantID())
{
setCovenantID(asset_id);
LLMessageSystem* msg = gMessageSystem;
msg->newMessage("EstateOwnerMessage");
msg->nextBlockFast(_PREHASH_AgentData);
msg->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
msg->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());
msg->addUUIDFast(_PREHASH_TransactionID, LLUUID::null); //not used
msg->nextBlock("MethodData");
msg->addString("Method", "estatechangecovenantid");
msg->addUUID("Invoice", LLFloaterRegionInfo::getLastInvoice());
msg->nextBlock("ParamList");
msg->addString("Parameter", getCovenantID().asString());
gAgent.sendReliableMessage();
}
}
// virtual
BOOL LLPanelEstateCovenant::sendUpdate()
{
return TRUE;
}
std::string LLPanelEstateCovenant::getEstateName() const
{
return mEstateNameText->getText();
}
void LLPanelEstateCovenant::setEstateName(const std::string& name)
{
mEstateNameText->setText(name);
}
// static
void LLPanelEstateCovenant::updateCovenantText(const std::string& string, const LLUUID& asset_id)
{
LLPanelEstateCovenant* panelp = LLFloaterRegionInfo::getPanelCovenant();
if( panelp )
{
panelp->mEditor->setText(string);
panelp->setCovenantID(asset_id);
}
}
// static
void LLPanelEstateCovenant::updateEstateName(const std::string& name)
{
LLPanelEstateCovenant* panelp = LLFloaterRegionInfo::getPanelCovenant();
if( panelp )
{
panelp->mEstateNameText->setText(name);
}
}
// static
void LLPanelEstateCovenant::updateLastModified(const std::string& text)
{
LLPanelEstateCovenant* panelp = LLFloaterRegionInfo::getPanelCovenant();
if( panelp )
{
panelp->mLastModifiedText->setText(text);
}
}
// static
void LLPanelEstateCovenant::updateEstateOwnerName(const std::string& name)
{
LLPanelEstateCovenant* panelp = LLFloaterRegionInfo::getPanelCovenant();
if( panelp )
{
panelp->mEstateOwnerText->setText(name);
}
}
std::string LLPanelEstateCovenant::getOwnerName() const
{
return mEstateOwnerText->getText();
}
void LLPanelEstateCovenant::setOwnerName(const std::string& name)
{
mEstateOwnerText->setText(name);
}
void LLPanelEstateCovenant::setCovenantTextEditor(const std::string& text)
{
mEditor->setText(text);
}
// key = "estateupdateinfo"
// strings[0] = estate name
// strings[1] = str(owner_id)
// strings[2] = str(estate_id)
// strings[3] = str(estate_flags)
// strings[4] = str((S32)(sun_hour * 1024))
// strings[5] = str(parent_estate_id)
// strings[6] = str(covenant_id)
// strings[7] = str(covenant_timestamp)
// strings[8] = str(send_to_agent_only)
// strings[9] = str(abuse_email_addr)
bool LLDispatchEstateUpdateInfo::operator()(
const LLDispatcher* dispatcher,
const std::string& key,
const LLUUID& invoice,
const sparam_t& strings)
{
LL_DEBUGS() << "Received estate update" << LL_ENDL;
// Update estate info model.
// This will call LLPanelEstateInfo::refreshFromEstate().
// *TODO: Move estate message handling stuff to llestateinfomodel.cpp.
LLEstateInfoModel::instance().update(strings);
return true;
}
bool LLDispatchSetEstateAccess::operator()(
const LLDispatcher* dispatcher,
const std::string& key,
const LLUUID& invoice,
const sparam_t& strings)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (panel && panel->getPendingUpdate())
{
panel->setPendingUpdate(false);
panel->updateLists();
}
return true;
}
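// Copies 'count' binary UUIDs (UUID_BYTES each) from the string parameter
// list into an LLSD array. The iterator is taken by value, so callers
// offset it themselves for each block (see operator() below).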
LLSD LLDispatchSetEstateExperience::getIDs( sparam_t::const_iterator it, sparam_t::const_iterator end, S32 count )
{
LLSD idList = LLSD::emptyArray();
LLUUID id;
while (count-- > 0)
{
memcpy(id.mData, (*(it++)).data(), UUID_BYTES);
idList.append(id);
}
return idList;
}
// key = "setexperience"
// strings[0] = str(estate_id)
// strings[1] = str(send_to_agent_only)
// strings[2] = str(num blocked)
// strings[3] = str(num trusted)
// strings[4] = str(num allowed)
// strings[8] = bin(uuid) ...
// ...
bool LLDispatchSetEstateExperience::operator()(
const LLDispatcher* dispatcher,
const std::string& key,
const LLUUID& invoice,
const sparam_t& strings)
{
LLPanelRegionExperiences* panel = LLFloaterRegionInfo::getPanelExperiences();
if (!panel) return true;
sparam_t::const_iterator it = strings.begin();
++it; // U32 estate_id = strtol((*it).c_str(), NULL, 10);
++it; // U32 send_to_agent_only = strtoul((*(++it)).c_str(), NULL, 10);
LLUUID id;
S32 num_blocked = strtol((*(it++)).c_str(), NULL, 10);
S32 num_trusted = strtol((*(it++)).c_str(), NULL, 10);
S32 num_allowed = strtol((*(it++)).c_str(), NULL, 10);
LLSD ids = LLSD::emptyMap()
.with("blocked", getIDs(it, strings.end(), num_blocked))
.with("trusted", getIDs(it + (num_blocked), strings.end(), num_trusted))
.with("allowed", getIDs(it + (num_blocked+num_trusted), strings.end(), num_allowed));
panel->processResponse(ids);
return true;
}
BOOL LLPanelRegionExperiences::postBuild()
{
mAllowed = setupList("panel_allowed", ESTATE_EXPERIENCE_ALLOWED_ADD, ESTATE_EXPERIENCE_ALLOWED_REMOVE);
mTrusted = setupList("panel_trusted", ESTATE_EXPERIENCE_TRUSTED_ADD, ESTATE_EXPERIENCE_TRUSTED_REMOVE);
mBlocked = setupList("panel_blocked", ESTATE_EXPERIENCE_BLOCKED_ADD, ESTATE_EXPERIENCE_BLOCKED_REMOVE);
getChild<LLLayoutPanel>("trusted_layout_panel")->setVisible(TRUE);
getChild<LLTextBox>("experiences_help_text")->setText(getString("estate_caption"));
getChild<LLTextBox>("trusted_text_help")->setText(getString("trusted_estate_text"));
getChild<LLTextBox>("allowed_text_help")->setText(getString("allowed_estate_text"));
getChild<LLTextBox>("blocked_text_help")->setText(getString("blocked_estate_text"));
return LLPanelRegionInfo::postBuild();
}
LLPanelExperienceListEditor* LLPanelRegionExperiences::setupList( const char* control_name, U32 add_id, U32 remove_id )
{
LLPanelExperienceListEditor* child = findChild<LLPanelExperienceListEditor>(control_name);
if(child)
{
child->getChild<LLTextBox>("text_name")->setText(child->getString(control_name));
child->setMaxExperienceIDs(ESTATE_MAX_EXPERIENCE_IDS);
child->setAddedCallback( boost::bind(&LLPanelRegionExperiences::itemChanged, this, add_id, _1));
child->setRemovedCallback(boost::bind(&LLPanelRegionExperiences::itemChanged, this, remove_id, _1));
}
return child;
}
void LLPanelRegionExperiences::processResponse( const LLSD& content )
{
if(content.has("default"))
{
mDefaultExperience = content["default"].asUUID();
}
mAllowed->setExperienceIds(content["allowed"]);
mBlocked->setExperienceIds(content["blocked"]);
LLSD trusted = content["trusted"];
if(mDefaultExperience.notNull())
{
mTrusted->setStickyFunction(boost::bind(LLPanelExperiencePicker::FilterMatching, _1, mDefaultExperience));
trusted.append(mDefaultExperience);
}
mTrusted->setExperienceIds(trusted);
mAllowed->refreshExperienceCounter();
mBlocked->refreshExperienceCounter();
mTrusted->refreshExperienceCounter();
}
// Used for both access add and remove operations, depending on the flag
// passed in (ESTATE_EXPERIENCE_ALLOWED_ADD, ESTATE_EXPERIENCE_ALLOWED_REMOVE, etc.)
// static
bool LLPanelRegionExperiences::experienceCoreConfirm(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
const U32 originalFlags = (U32)notification["payload"]["operation"].asInteger();
LLViewerRegion* region = gAgent.getRegion();
LLSD::array_const_iterator end_it = notification["payload"]["allowed_ids"].endArray();
for (LLSD::array_const_iterator iter = notification["payload"]["allowed_ids"].beginArray();
iter != end_it;
iter++)
{
U32 flags = originalFlags;
if (iter + 1 != end_it)
flags |= ESTATE_ACCESS_NO_REPLY;
const LLUUID id = iter->asUUID();
switch(option)
{
case 0:
// This estate
sendEstateExperienceDelta(flags, id);
break;
case 1:
{
// All estates that I either own or manage for this owner.
// This will be verified on simulator. JC
if (!region) break;
if (region->getOwner() == gAgent.getID()
|| gAgent.isGodlike())
{
flags |= ESTATE_ACCESS_APPLY_TO_ALL_ESTATES;
sendEstateExperienceDelta(flags, id);
}
else if (region->isEstateManager())
{
flags |= ESTATE_ACCESS_APPLY_TO_MANAGED_ESTATES;
sendEstateExperienceDelta(flags, id);
}
break;
}
case 2:
default:
break;
}
}
return false;
}
// Send the actual "estateexperiencedelta" message
void LLPanelRegionExperiences::sendEstateExperienceDelta(U32 flags, const LLUUID& experience_id)
{
strings_t str(3, std::string());
gAgent.getID().toString(str[0]);
str[1] = llformat("%u", flags);
experience_id.toString(str[2]);
LLPanelRegionExperiences* panel = LLFloaterRegionInfo::getPanelExperiences();
if (panel)
{
panel->sendEstateOwnerMessage(gMessageSystem, "estateexperiencedelta", LLFloaterRegionInfo::getLastInvoice(), str);
}
}
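// Trampoline for the region experiences capability reply: forwards the
// response to the panel only if it still exists (the handle guards against
// the panel having been destroyed while the request was in flight).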
void LLPanelRegionExperiences::infoCallback(LLHandle<LLPanelRegionExperiences> handle, const LLSD& content)
{
if(handle.isDead())
return;
LLPanelRegionExperiences* floater = handle.get();
if (floater)
{
floater->processResponse(content);
}
}
/*static*/
std::string LLPanelRegionExperiences::regionCapabilityQuery(LLViewerRegion* region, const std::string &cap)
{
// region->getHandle() How to get a region * from a handle?
return region->getCapability(cap);
}
bool LLPanelRegionExperiences::refreshFromRegion(LLViewerRegion* region)
{
BOOL allow_modify = gAgent.isGodlike() || (region && region->canManageEstate());
mAllowed->loading();
mAllowed->setReadonly(!allow_modify);
// remove grid-wide experiences
mAllowed->addFilter(boost::bind(LLPanelExperiencePicker::FilterWithProperty, _1, LLExperienceCache::PROPERTY_GRID));
// remove default experience
mAllowed->addFilter(boost::bind(LLPanelExperiencePicker::FilterMatching, _1, mDefaultExperience));
mBlocked->loading();
mBlocked->setReadonly(!allow_modify);
// only grid-wide experiences
mBlocked->addFilter(boost::bind(LLPanelExperiencePicker::FilterWithoutProperty, _1, LLExperienceCache::PROPERTY_GRID));
// but not privileged ones
mBlocked->addFilter(boost::bind(LLPanelExperiencePicker::FilterWithProperty, _1, LLExperienceCache::PROPERTY_PRIVILEGED));
// remove default experience
mBlocked->addFilter(boost::bind(LLPanelExperiencePicker::FilterMatching, _1, mDefaultExperience));
mTrusted->loading();
mTrusted->setReadonly(!allow_modify);
LLExperienceCache::instance().getRegionExperiences(boost::bind(&LLPanelRegionExperiences::regionCapabilityQuery, region, _1),
boost::bind(&LLPanelRegionExperiences::infoCallback, getDerivedHandle<LLPanelRegionExperiences>(), _1));
return LLPanelRegionInfo::refreshFromRegion(region);
}
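// Collects the experience ids shown in a list editor into a flat LLSD
// array, ready to be posted back through the region experiences capability.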
LLSD LLPanelRegionExperiences::addIds(LLPanelExperienceListEditor* panel)
{
LLSD ids;
const uuid_list_t& id_list = panel->getExperienceIds();
for(uuid_list_t::const_iterator it = id_list.begin(); it != id_list.end(); ++it)
{
ids.append(*it);
}
return ids;
}
BOOL LLPanelRegionExperiences::sendUpdate()
{
LLViewerRegion* region = gAgent.getRegion();
LLSD content;
content["allowed"]=addIds(mAllowed);
content["blocked"]=addIds(mBlocked);
content["trusted"]=addIds(mTrusted);
LLExperienceCache::instance().setRegionExperiences(boost::bind(&LLPanelRegionExperiences::regionCapabilityQuery, region, _1),
content, boost::bind(&LLPanelRegionExperiences::infoCallback, getDerivedHandle<LLPanelRegionExperiences>(), _1));
return TRUE;
}
void LLPanelRegionExperiences::itemChanged( U32 event_type, const LLUUID& id )
{
std::string dialog_name;
switch (event_type)
{
case ESTATE_EXPERIENCE_ALLOWED_ADD:
dialog_name = "EstateAllowedExperienceAdd";
break;
case ESTATE_EXPERIENCE_ALLOWED_REMOVE:
dialog_name = "EstateAllowedExperienceRemove";
break;
case ESTATE_EXPERIENCE_TRUSTED_ADD:
dialog_name = "EstateTrustedExperienceAdd";
break;
case ESTATE_EXPERIENCE_TRUSTED_REMOVE:
dialog_name = "EstateTrustedExperienceRemove";
break;
case ESTATE_EXPERIENCE_BLOCKED_ADD:
dialog_name = "EstateBlockedExperienceAdd";
break;
case ESTATE_EXPERIENCE_BLOCKED_REMOVE:
dialog_name = "EstateBlockedExperienceRemove";
break;
default:
return;
}
LLSD payload;
payload["operation"] = (S32)event_type;
payload["dialog_name"] = dialog_name;
payload["allowed_ids"].append(id);
LLSD args;
args["ALL_ESTATES"] = all_estates_text();
LLNotification::Params params(dialog_name);
params.payload(payload)
.substitutions(args)
.functor.function(LLPanelRegionExperiences::experienceCoreConfirm);
if (LLPanelEstateInfo::isLindenEstate())
{
LLNotifications::instance().forceResponse(params, 0);
}
else
{
LLNotifications::instance().add(params);
}
onChangeAnything();
}
LLPanelEstateAccess::LLPanelEstateAccess()
: LLPanelRegionInfo(), mPendingUpdate(false)
{}
BOOL LLPanelEstateAccess::postBuild()
{
getChild<LLUICtrl>("allowed_avatar_name_list")->setCommitCallback(boost::bind(&LLPanelEstateInfo::onChangeChildCtrl, this, _1));
LLNameListCtrl *avatar_name_list = getChild<LLNameListCtrl>("allowed_avatar_name_list");
if (avatar_name_list)
{
avatar_name_list->setCommitOnSelectionChange(TRUE);
avatar_name_list->setMaxItemCount(ESTATE_MAX_ACCESS_IDS);
}
getChild<LLUICtrl>("allowed_search_input")->setCommitCallback(boost::bind(&LLPanelEstateAccess::onAllowedSearchEdit, this, _2));
childSetAction("add_allowed_avatar_btn", boost::bind(&LLPanelEstateAccess::onClickAddAllowedAgent, this));
childSetAction("remove_allowed_avatar_btn", boost::bind(&LLPanelEstateAccess::onClickRemoveAllowedAgent, this));
childSetAction("copy_allowed_list_btn", boost::bind(&LLPanelEstateAccess::onClickCopyAllowedList, this));
getChild<LLUICtrl>("allowed_group_name_list")->setCommitCallback(boost::bind(&LLPanelEstateInfo::onChangeChildCtrl, this, _1));
LLNameListCtrl* group_name_list = getChild<LLNameListCtrl>("allowed_group_name_list");
if (group_name_list)
{
group_name_list->setCommitOnSelectionChange(TRUE);
group_name_list->setMaxItemCount(ESTATE_MAX_ACCESS_IDS);
}
getChild<LLUICtrl>("allowed_group_search_input")->setCommitCallback(boost::bind(&LLPanelEstateAccess::onAllowedGroupsSearchEdit, this, _2));
getChild<LLUICtrl>("add_allowed_group_btn")->setCommitCallback(boost::bind(&LLPanelEstateAccess::onClickAddAllowedGroup, this));
childSetAction("remove_allowed_group_btn", boost::bind(&LLPanelEstateAccess::onClickRemoveAllowedGroup, this));
childSetAction("copy_allowed_group_list_btn", boost::bind(&LLPanelEstateAccess::onClickCopyAllowedGroupList, this));
getChild<LLUICtrl>("banned_avatar_name_list")->setCommitCallback(boost::bind(&LLPanelEstateInfo::onChangeChildCtrl, this, _1));
LLNameListCtrl* banned_name_list = getChild<LLNameListCtrl>("banned_avatar_name_list");
if (banned_name_list)
{
banned_name_list->setCommitOnSelectionChange(TRUE);
banned_name_list->setMaxItemCount(ESTATE_MAX_ACCESS_IDS);
}
getChild<LLUICtrl>("banned_search_input")->setCommitCallback(boost::bind(&LLPanelEstateAccess::onBannedSearchEdit, this, _2));
childSetAction("add_banned_avatar_btn", boost::bind(&LLPanelEstateAccess::onClickAddBannedAgent, this));
childSetAction("remove_banned_avatar_btn", boost::bind(&LLPanelEstateAccess::onClickRemoveBannedAgent, this));
childSetAction("copy_banned_list_btn", boost::bind(&LLPanelEstateAccess::onClickCopyBannedList, this));
getChild<LLUICtrl>("estate_manager_name_list")->setCommitCallback(boost::bind(&LLPanelEstateInfo::onChangeChildCtrl, this, _1));
LLNameListCtrl* manager_name_list = getChild<LLNameListCtrl>("estate_manager_name_list");
if (manager_name_list)
{
manager_name_list->setCommitOnSelectionChange(TRUE);
manager_name_list->setMaxItemCount(ESTATE_MAX_MANAGERS * 4); // Allow extras for dupe issue
}
childSetAction("add_estate_manager_btn", boost::bind(&LLPanelEstateAccess::onClickAddEstateManager, this));
childSetAction("remove_estate_manager_btn", boost::bind(&LLPanelEstateAccess::onClickRemoveEstateManager, this));
return TRUE;
}
void LLPanelEstateAccess::updateControls(LLViewerRegion* region)
{
BOOL god = gAgent.isGodlike();
BOOL owner = (region && (region->getOwner() == gAgent.getID()));
BOOL manager = (region && region->isEstateManager());
BOOL enable_controls = god || owner || manager;
setCtrlsEnabled(enable_controls);
BOOL has_allowed_avatar = getChild<LLNameListCtrl>("allowed_avatar_name_list")->getFirstSelected() ? TRUE : FALSE;
BOOL has_allowed_group = getChild<LLNameListCtrl>("allowed_group_name_list")->getFirstSelected() ? TRUE : FALSE;
BOOL has_banned_agent = getChild<LLNameListCtrl>("banned_avatar_name_list")->getFirstSelected() ? TRUE : FALSE;
BOOL has_estate_manager = getChild<LLNameListCtrl>("estate_manager_name_list")->getFirstSelected() ? TRUE : FALSE;
getChildView("add_allowed_avatar_btn")->setEnabled(enable_controls);
getChildView("remove_allowed_avatar_btn")->setEnabled(has_allowed_avatar && enable_controls);
getChildView("allowed_avatar_name_list")->setEnabled(enable_controls);
getChildView("add_allowed_group_btn")->setEnabled(enable_controls);
getChildView("remove_allowed_group_btn")->setEnabled(has_allowed_group && enable_controls);
getChildView("allowed_group_name_list")->setEnabled(enable_controls);
// Can't ban people from the mainland, orientation islands, etc., because
// doing so creates too much network traffic and server load.
// Disable their accounts in the CSR tool instead.
bool linden_estate = LLPanelEstateInfo::isLindenEstate();
bool enable_ban = enable_controls && !linden_estate;
getChildView("add_banned_avatar_btn")->setEnabled(enable_ban);
getChildView("remove_banned_avatar_btn")->setEnabled(has_banned_agent && enable_ban);
getChildView("banned_avatar_name_list")->setEnabled(enable_controls);
// estate managers can't add estate managers
getChildView("add_estate_manager_btn")->setEnabled(god || owner);
getChildView("remove_estate_manager_btn")->setEnabled(has_estate_manager && (god || owner));
getChildView("estate_manager_name_list")->setEnabled(god || owner);
if (enable_controls != mCtrlsEnabled)
{
mCtrlsEnabled = enable_controls;
updateLists(); // update the lists on the agent's access level change
}
}
//---------------------------------------------------------------------------
// Add/Remove estate access button callbacks
//---------------------------------------------------------------------------
void LLPanelEstateAccess::onClickAddAllowedAgent()
{
LLCtrlListInterface *list = childGetListInterface("allowed_avatar_name_list");
if (!list) return;
if (list->getItemCount() >= ESTATE_MAX_ACCESS_IDS)
{
//args
LLSD args;
args["MAX_AGENTS"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
LLNotificationsUtil::add("MaxAllowedAgentOnRegion", args);
return;
}
accessAddCore(ESTATE_ACCESS_ALLOWED_AGENT_ADD, "EstateAllowedAgentAdd");
}
void LLPanelEstateAccess::onClickRemoveAllowedAgent()
{
accessRemoveCore(ESTATE_ACCESS_ALLOWED_AGENT_REMOVE, "EstateAllowedAgentRemove", "allowed_avatar_name_list");
}
void LLPanelEstateAccess::onClickAddAllowedGroup()
{
LLCtrlListInterface *list = childGetListInterface("allowed_group_name_list");
if (!list) return;
if (list->getItemCount() >= ESTATE_MAX_ACCESS_IDS)
{
LLSD args;
args["MAX_GROUPS"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
LLNotificationsUtil::add("MaxAllowedGroupsOnRegion", args);
return;
}
LLNotification::Params params("ChangeLindenAccess");
params.functor.function(boost::bind(&LLPanelEstateAccess::addAllowedGroup, this, _1, _2));
if (LLPanelEstateInfo::isLindenEstate())
{
LLNotifications::instance().add(params);
}
else
{
LLNotifications::instance().forceResponse(params, 0);
}
}
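// Confirmation callback for the ChangeLindenAccess warning: on OK, opens
// the group picker docked next to the region floater and wires its
// selection through to addAllowedGroup2().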
bool LLPanelEstateAccess::addAllowedGroup(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option != 0) return false;
LLFloater* parent_floater = gFloaterView->getParentFloater(this);
LLFloaterGroupPicker* widget = LLFloaterReg::showTypedInstance<LLFloaterGroupPicker>("group_picker", LLSD(gAgent.getID()));
if (widget)
{
widget->removeNoneOption();
widget->setSelectGroupCallback(boost::bind(&LLPanelEstateAccess::addAllowedGroup2, this, _1));
if (parent_floater)
{
LLRect new_rect = gFloaterView->findNeighboringPosition(parent_floater, widget);
widget->setOrigin(new_rect.mLeft, new_rect.mBottom);
parent_floater->addDependentFloater(widget);
}
}
return false;
}
void LLPanelEstateAccess::onClickRemoveAllowedGroup()
{
accessRemoveCore(ESTATE_ACCESS_ALLOWED_GROUP_REMOVE, "EstateAllowedGroupRemove", "allowed_group_name_list");
}
void LLPanelEstateAccess::onClickAddBannedAgent()
{
LLCtrlListInterface *list = childGetListInterface("banned_avatar_name_list");
if (!list) return;
if (list->getItemCount() >= ESTATE_MAX_ACCESS_IDS)
{
LLSD args;
args["MAX_BANNED"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
LLNotificationsUtil::add("MaxBannedAgentsOnRegion", args);
return;
}
accessAddCore(ESTATE_ACCESS_BANNED_AGENT_ADD, "EstateBannedAgentAdd");
}
void LLPanelEstateAccess::onClickRemoveBannedAgent()
{
accessRemoveCore(ESTATE_ACCESS_BANNED_AGENT_REMOVE, "EstateBannedAgentRemove", "banned_avatar_name_list");
}
void LLPanelEstateAccess::onClickCopyAllowedList()
{
copyListToClipboard("allowed_avatar_name_list");
}
void LLPanelEstateAccess::onClickCopyAllowedGroupList()
{
copyListToClipboard("allowed_group_name_list");
}
void LLPanelEstateAccess::onClickCopyBannedList()
{
copyListToClipboard("banned_avatar_name_list");
}
// static
void LLPanelEstateAccess::onClickAddEstateManager()
{
LLCtrlListInterface *list = childGetListInterface("estate_manager_name_list");
if (!list) return;
if (list->getItemCount() >= ESTATE_MAX_MANAGERS)
{ // Tell user they can't add more managers
LLSD args;
args["MAX_MANAGER"] = llformat("%d", ESTATE_MAX_MANAGERS);
LLNotificationsUtil::add("MaxManagersOnRegion", args);
}
else
{ // Go pick managers to add
accessAddCore(ESTATE_ACCESS_MANAGER_ADD, "EstateManagerAdd");
}
}
// static
void LLPanelEstateAccess::onClickRemoveEstateManager()
{
accessRemoveCore(ESTATE_ACCESS_MANAGER_REMOVE, "EstateManagerRemove", "estate_manager_name_list");
}
// Special case callback for groups, since it has different callback format than names
void LLPanelEstateAccess::addAllowedGroup2(LLUUID id)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (panel)
{
LLNameListCtrl* group_list = panel->getChild<LLNameListCtrl>("allowed_group_name_list");
LLScrollListItem* item = group_list->getNameItemByAgentId(id);
if (item)
{
LLSD args;
args["GROUP"] = item->getColumn(0)->getValue().asString();
LLNotificationsUtil::add("GroupIsAlreadyInList", args);
return;
}
}
LLSD payload;
payload["operation"] = (S32)ESTATE_ACCESS_ALLOWED_GROUP_ADD;
payload["dialog_name"] = "EstateAllowedGroupAdd";
payload["allowed_ids"].append(id);
LLSD args;
args["ALL_ESTATES"] = all_estates_text();
LLNotification::Params params("EstateAllowedGroupAdd");
params.payload(payload)
.substitutions(args)
.functor.function(accessCoreConfirm);
if (LLPanelEstateInfo::isLindenEstate())
{
LLNotifications::instance().forceResponse(params, 0);
}
else
{
LLNotifications::instance().add(params);
}
}
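// Common entry point for the allowed/banned/manager "add" buttons: stashes
// the operation flag and dialog name in a payload, warns first on Linden
// estates, then continues to the avatar picker via accessAddCore2().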
// static
void LLPanelEstateAccess::accessAddCore(U32 operation_flag, const std::string& dialog_name)
{
LLSD payload;
payload["operation"] = (S32)operation_flag;
payload["dialog_name"] = dialog_name;
// agent id filled in after avatar picker
LLNotification::Params params("ChangeLindenAccess");
params.payload(payload)
.functor.function(accessAddCore2);
if (LLPanelEstateInfo::isLindenEstate())
{
LLNotifications::instance().add(params);
}
else
{
// same as clicking "OK"
LLNotifications::instance().forceResponse(params, 0);
}
}
// static
bool LLPanelEstateAccess::accessAddCore2(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option != 0)
{
// abort change
return false;
}
LLEstateAccessChangeInfo* change_info = new LLEstateAccessChangeInfo(notification["payload"]);
//Get parent floater name
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
LLFloater* parent_floater = panel ? gFloaterView->getParentFloater(panel) : NULL;
const std::string& parent_floater_name = parent_floater ? parent_floater->getName() : "";
//Determine the button that triggered opening of the avatar picker
//(so that a shadow frustum from the button to the avatar picker can be created)
LLView* button = NULL;
// 'panel' can legitimately be NULL here (see the parent_floater lookup
// above), so guard the child lookups against it.
if (panel)
{
switch (change_info->mOperationFlag)
{
case ESTATE_ACCESS_ALLOWED_AGENT_ADD:
button = panel->findChild<LLButton>("add_allowed_avatar_btn");
break;
case ESTATE_ACCESS_BANNED_AGENT_ADD:
button = panel->findChild<LLButton>("add_banned_avatar_btn");
break;
case ESTATE_ACCESS_MANAGER_ADD:
button = panel->findChild<LLButton>("add_estate_manager_btn");
break;
}
}
// avatar picker yes multi-select, yes close-on-select
LLFloater* child_floater = LLFloaterAvatarPicker::show(boost::bind(&LLPanelEstateAccess::accessAddCore3, _1, _2, (void*)change_info),
TRUE, TRUE, FALSE, parent_floater_name, button);
//Allows the closed parent floater to close the child floater (avatar picker)
if (child_floater && parent_floater)
{
parent_floater->addDependentFloater(child_floater);
}
return false;
}
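// Avatar picker callback: drops ids that are already on the target list
// (and, for bans, ids that are estate managers), enforces the list size
// cap, then raises the per-operation confirmation dialog.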
// static
void LLPanelEstateAccess::accessAddCore3(const uuid_vec_t& ids, std::vector<LLAvatarName> names, void* data)
{
LLEstateAccessChangeInfo* change_info = (LLEstateAccessChangeInfo*)data;
if (!change_info) return;
if (ids.empty())
{
// User didn't select a name.
delete change_info;
change_info = NULL;
return;
}
// User did select a name.
change_info->mAgentOrGroupIDs = ids;
// Can't put estate owner on ban list
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLViewerRegion* region = gAgent.getRegion();
if (!region) return;
if (change_info->mOperationFlag & ESTATE_ACCESS_ALLOWED_AGENT_ADD)
{
LLNameListCtrl* name_list = panel->getChild<LLNameListCtrl>("allowed_avatar_name_list");
int currentCount = (name_list ? name_list->getItemCount() : 0);
if (ids.size() + currentCount > ESTATE_MAX_ACCESS_IDS)
{
LLSD args;
args["NUM_ADDED"] = llformat("%d", ids.size());
args["MAX_AGENTS"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
args["LIST_TYPE"] = LLTrans::getString("RegionInfoListTypeAllowedAgents");
args["NUM_EXCESS"] = llformat("%d", (ids.size() + currentCount) - ESTATE_MAX_ACCESS_IDS);
LLNotificationsUtil::add("MaxAgentOnRegionBatch", args);
delete change_info;
return;
}
uuid_vec_t ids_allowed;
std::vector<LLAvatarName> names_allowed;
std::string already_allowed;
bool single = true;
for (U32 i = 0; i < ids.size(); ++i)
{
LLScrollListItem* item = name_list->getNameItemByAgentId(ids[i]);
if (item)
{
if (!already_allowed.empty())
{
already_allowed += ", ";
single = false;
}
already_allowed += item->getColumn(0)->getValue().asString();
}
else
{
ids_allowed.push_back(ids[i]);
names_allowed.push_back(names[i]);
}
}
if (!already_allowed.empty())
{
LLSD args;
args["AGENT"] = already_allowed;
args["LIST_TYPE"] = LLTrans::getString("RegionInfoListTypeAllowedAgents");
LLNotificationsUtil::add(single ? "AgentIsAlreadyInList" : "AgentsAreAlreadyInList", args);
if (ids_allowed.empty())
{
delete change_info;
return;
}
}
change_info->mAgentOrGroupIDs = ids_allowed;
change_info->mAgentNames = names_allowed;
}
if (change_info->mOperationFlag & ESTATE_ACCESS_BANNED_AGENT_ADD)
{
LLNameListCtrl* name_list = panel->getChild<LLNameListCtrl>("banned_avatar_name_list");
LLNameListCtrl* em_list = panel->getChild<LLNameListCtrl>("estate_manager_name_list");
int currentCount = (name_list ? name_list->getItemCount() : 0);
if (ids.size() + currentCount > ESTATE_MAX_ACCESS_IDS)
{
LLSD args;
args["NUM_ADDED"] = llformat("%d", ids.size());
args["MAX_AGENTS"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
args["LIST_TYPE"] = LLTrans::getString("RegionInfoListTypeBannedAgents");
args["NUM_EXCESS"] = llformat("%d", (ids.size() + currentCount) - ESTATE_MAX_ACCESS_IDS);
LLNotificationsUtil::add("MaxAgentOnRegionBatch", args);
delete change_info;
return;
}
uuid_vec_t ids_allowed;
std::vector<LLAvatarName> names_allowed;
std::string already_banned;
std::string em_ban;
bool single = true;
for (U32 i = 0; i < ids.size(); ++i)
{
bool is_allowed = true;
LLScrollListItem* em_item = em_list->getNameItemByAgentId(ids[i]);
if (em_item)
{
if (!em_ban.empty())
{
em_ban += ", ";
}
em_ban += em_item->getColumn(0)->getValue().asString();
is_allowed = false;
}
LLScrollListItem* item = name_list->getNameItemByAgentId(ids[i]);
if (item)
{
if (!already_banned.empty())
{
already_banned += ", ";
single = false;
}
already_banned += item->getColumn(0)->getValue().asString();
is_allowed = false;
}
if (is_allowed)
{
ids_allowed.push_back(ids[i]);
names_allowed.push_back(names[i]);
}
}
if (!em_ban.empty())
{
LLSD args;
args["AGENT"] = em_ban;
LLNotificationsUtil::add("ProblemBanningEstateManager", args);
if (ids_allowed.empty())
{
delete change_info;
return;
}
}
if (!already_banned.empty())
{
LLSD args;
args["AGENT"] = already_banned;
args["LIST_TYPE"] = LLTrans::getString("RegionInfoListTypeBannedAgents");
LLNotificationsUtil::add(single ? "AgentIsAlreadyInList" : "AgentsAreAlreadyInList", args);
if (ids_allowed.empty())
{
delete change_info;
return;
}
}
change_info->mAgentOrGroupIDs = ids_allowed;
change_info->mAgentNames = names_allowed;
}
LLSD args;
args["ALL_ESTATES"] = all_estates_text();
LLNotification::Params params(change_info->mDialogName);
params.substitutions(args)
.payload(change_info->asLLSD())
.functor.function(accessCoreConfirm);
if (LLPanelEstateInfo::isLindenEstate())
{
// just apply to this estate
LLNotifications::instance().forceResponse(params, 0);
}
else
{
// ask if this estate or all estates with this owner
LLNotifications::instance().add(params);
}
}
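// Common entry point for the "remove" buttons: collects the selected ids
// from the named list into a payload and routes through the Linden-estate
// warning before confirming the removal.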
// static
void LLPanelEstateAccess::accessRemoveCore(U32 operation_flag, const std::string& dialog_name, const std::string& list_ctrl_name)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLNameListCtrl* name_list = panel->getChild<LLNameListCtrl>(list_ctrl_name);
if (!name_list) return;
std::vector<LLScrollListItem*> list_vector = name_list->getAllSelected();
if (list_vector.size() == 0)
return;
LLSD payload;
payload["operation"] = (S32)operation_flag;
payload["dialog_name"] = dialog_name;
for (std::vector<LLScrollListItem*>::const_iterator iter = list_vector.begin();
iter != list_vector.end();
iter++)
{
LLScrollListItem *item = (*iter);
payload["allowed_ids"].append(item->getUUID());
}
LLNotification::Params params("ChangeLindenAccess");
params.payload(payload)
.functor.function(accessRemoveCore2);
if (LLPanelEstateInfo::isLindenEstate())
{
// warn on change linden estate
LLNotifications::instance().add(params);
}
else
{
// just proceed, as if clicking OK
LLNotifications::instance().forceResponse(params, 0);
}
}
// static
bool LLPanelEstateAccess::accessRemoveCore2(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
if (option != 0)
{
// abort
return false;
}
// If Linden estate, can only apply to "this" estate, not all estates
// owned by NULL.
if (LLPanelEstateInfo::isLindenEstate())
{
accessCoreConfirm(notification, response);
}
else
{
LLSD args;
args["ALL_ESTATES"] = all_estates_text();
LLNotificationsUtil::add(notification["payload"]["dialog_name"],
args,
notification["payload"],
accessCoreConfirm);
}
return false;
}
// Used for both access add and remove operations, depending on the mOperationFlag
// passed in (ESTATE_ACCESS_BANNED_AGENT_ADD, ESTATE_ACCESS_ALLOWED_AGENT_REMOVE, etc.)
// static
bool LLPanelEstateAccess::accessCoreConfirm(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
const U32 originalFlags = (U32)notification["payload"]["operation"].asInteger();
U32 flags = originalFlags;
LLViewerRegion* region = gAgent.getRegion();
if (option == 2) // cancel
{
return false;
}
else if (option == 1)
{
// All estates that I either own or manage for this owner.
// This will be verified on simulator. JC
if (!region) return false;
if (region->getOwner() == gAgent.getID()
|| gAgent.isGodlike())
{
flags |= ESTATE_ACCESS_APPLY_TO_ALL_ESTATES;
}
else if (region->isEstateManager())
{
flags |= ESTATE_ACCESS_APPLY_TO_MANAGED_ESTATES;
}
}
std::string names;
U32 listed_names = 0;
for (U32 i = 0; i < notification["payload"]["allowed_ids"].size(); ++i)
{
if (i + 1 != notification["payload"]["allowed_ids"].size())
{
flags |= ESTATE_ACCESS_NO_REPLY;
}
else
{
flags &= ~ESTATE_ACCESS_NO_REPLY;
}
const LLUUID id = notification["payload"]["allowed_ids"][i].asUUID();
if (((U32)notification["payload"]["operation"].asInteger() & ESTATE_ACCESS_BANNED_AGENT_ADD)
&& region && (region->getOwner() == id))
{
LLNotificationsUtil::add("OwnerCanNotBeDenied");
break;
}
sendEstateAccessDelta(flags, id);
if ((flags & (ESTATE_ACCESS_ALLOWED_GROUP_ADD | ESTATE_ACCESS_ALLOWED_GROUP_REMOVE)) == 0)
{
// fill the name list for confirmation
if (listed_names < MAX_LISTED_NAMES)
{
if (!names.empty())
{
names += ", ";
}
if (!notification["payload"]["allowed_names"][i]["display_name"].asString().empty())
{
names += notification["payload"]["allowed_names"][i]["display_name"].asString();
}
else
{ //try to get an agent name from cache
LLAvatarName av_name;
if (LLAvatarNameCache::get(id, &av_name))
{
names += av_name.getCompleteName();
}
}
}
listed_names++;
}
}
if (listed_names > MAX_LISTED_NAMES)
{
LLSD args;
args["EXTRA_COUNT"] = llformat("%d", listed_names - MAX_LISTED_NAMES);
names += " " + LLTrans::getString("AndNMore", args);
}
if (!names.empty()) // show the confirmation
{
LLSD args;
args["AGENT"] = names;
if (flags & (ESTATE_ACCESS_ALLOWED_AGENT_ADD | ESTATE_ACCESS_ALLOWED_AGENT_REMOVE))
{
args["LIST_TYPE"] = LLTrans::getString("RegionInfoListTypeAllowedAgents");
}
else if (flags & (ESTATE_ACCESS_BANNED_AGENT_ADD | ESTATE_ACCESS_BANNED_AGENT_REMOVE))
{
args["LIST_TYPE"] = LLTrans::getString("RegionInfoListTypeBannedAgents");
}
if (flags & ESTATE_ACCESS_APPLY_TO_ALL_ESTATES)
{
args["ESTATE"] = LLTrans::getString("RegionInfoAllEstates");
}
else if (flags & ESTATE_ACCESS_APPLY_TO_MANAGED_ESTATES)
{
args["ESTATE"] = LLTrans::getString("RegionInfoManagedEstates");
}
else
{
args["ESTATE"] = LLTrans::getString("RegionInfoThisEstate");
}
bool single = (listed_names == 1);
if (flags & (ESTATE_ACCESS_ALLOWED_AGENT_ADD | ESTATE_ACCESS_BANNED_AGENT_ADD))
{
LLNotificationsUtil::add(single ? "AgentWasAddedToList" : "AgentsWereAddedToList", args);
}
else if (flags & (ESTATE_ACCESS_ALLOWED_AGENT_REMOVE | ESTATE_ACCESS_BANNED_AGENT_REMOVE))
{
LLNotificationsUtil::add(single ? "AgentWasRemovedFromList" : "AgentsWereRemovedFromList", args);
}
}
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (panel)
{
panel->setPendingUpdate(true);
}
return false;
}
// key = "estateaccessdelta"
// str(estate_id) will be added to front of list by forward_EstateOwnerRequest_to_dataserver
// str[0] = str(agent_id) requesting the change
// str[1] = str(flags) (ESTATE_ACCESS_DELTA_*)
// str[2] = str(agent_id) to add or remove
// static
void LLPanelEstateAccess::sendEstateAccessDelta(U32 flags, const LLUUID& agent_or_group_id)
{
LLMessageSystem* msg = gMessageSystem;
msg->newMessage("EstateOwnerMessage");
msg->nextBlockFast(_PREHASH_AgentData);
msg->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
msg->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());
msg->addUUIDFast(_PREHASH_TransactionID, LLUUID::null); //not used
msg->nextBlock("MethodData");
msg->addString("Method", "estateaccessdelta");
msg->addUUID("Invoice", LLFloaterRegionInfo::getLastInvoice());
std::string buf;
gAgent.getID().toString(buf);
msg->nextBlock("ParamList");
msg->addString("Parameter", buf);
buf = llformat("%u", flags);
msg->nextBlock("ParamList");
msg->addString("Parameter", buf);
agent_or_group_id.toString(buf);
msg->nextBlock("ParamList");
msg->addString("Parameter", buf);
gAgent.sendReliableMessage();
}
void LLPanelEstateAccess::updateChild(LLUICtrl* child_ctrl)
{
// Ensure appropriate state of the management ui.
updateControls(gAgent.getRegion());
}
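// Refreshes all four access lists from the simulator via the "EstateAccess"
// capability; does nothing if the region doesn't expose that cap.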
void LLPanelEstateAccess::updateLists()
{
std::string cap_url = gAgent.getRegionCapability("EstateAccess");
if (!cap_url.empty())
{
LLCoros::instance().launch("LLFloaterRegionInfo::requestEstateGetAccessCoro", boost::bind(LLPanelEstateAccess::requestEstateGetAccessCoro, cap_url));
}
}
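// Coroutine: GETs the EstateAccess capability and repopulates the allowed
// agents/groups, banned agents, and estate manager lists, including the
// per-list count labels.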
void LLPanelEstateAccess::requestEstateGetAccessCoro(std::string url)
{
LLCore::HttpRequest::policy_t httpPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID);
LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter(new LLCoreHttpUtil::HttpCoroutineAdapter("requestEstateGetAccessCoro", httpPolicy));
LLCore::HttpRequest::ptr_t httpRequest(new LLCore::HttpRequest);
LLSD result = httpAdapter->getAndSuspend(httpRequest, url);
LLSD httpResults = result[LLCoreHttpUtil::HttpCoroutineAdapter::HTTP_RESULTS];
LLCore::HttpStatus status = LLCoreHttpUtil::HttpCoroutineAdapter::getStatusFromLLSD(httpResults);
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLNameListCtrl* allowed_agent_name_list = panel->getChild<LLNameListCtrl>("allowed_avatar_name_list");
if (allowed_agent_name_list && result.has("AllowedAgents"))
{
LLStringUtil::format_map_t args;
args["[ALLOWEDAGENTS]"] = llformat("%d", result["AllowedAgents"].size());
args["[MAXACCESS]"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
std::string msg = LLTrans::getString("RegionInfoAllowedResidents", args);
panel->getChild<LLUICtrl>("allow_resident_label")->setValue(LLSD(msg));
allowed_agent_name_list->clearSortOrder();
allowed_agent_name_list->deleteAllItems();
for (LLSD::array_const_iterator it = result["AllowedAgents"].beginArray(); it != result["AllowedAgents"].endArray(); ++it)
{
LLUUID id = (*it)["id"].asUUID();
allowed_agent_name_list->addNameItem(id);
}
allowed_agent_name_list->sortByName(TRUE);
}
LLNameListCtrl* banned_agent_name_list = panel->getChild<LLNameListCtrl>("banned_avatar_name_list");
if (banned_agent_name_list && result.has("BannedAgents"))
{
LLStringUtil::format_map_t args;
args["[BANNEDAGENTS]"] = llformat("%d", result["BannedAgents"].size());
args["[MAXBANNED]"] = llformat("%d", ESTATE_MAX_ACCESS_IDS);
std::string msg = LLTrans::getString("RegionInfoBannedResidents", args);
panel->getChild<LLUICtrl>("ban_resident_label")->setValue(LLSD(msg));
banned_agent_name_list->clearSortOrder();
banned_agent_name_list->deleteAllItems();
for (LLSD::array_const_iterator it = result["BannedAgents"].beginArray(); it != result["BannedAgents"].endArray(); ++it)
{
LLSD item;
item["id"] = (*it)["id"].asUUID();
LLSD& columns = item["columns"];
columns[0]["column"] = "name"; // to be populated later
columns[1]["column"] = "last_login_date";
columns[1]["value"] = (*it)["last_login_date"].asString().substr(0, 16); // cut the seconds
std::string ban_date = (*it)["ban_date"].asString();
columns[2]["column"] = "ban_date";
columns[2]["value"] = ban_date[0] != '0' ? ban_date.substr(0, 16) : LLTrans::getString("na"); // server returns the "0000-00-00 00:00:00" date in case it doesn't know it
columns[3]["column"] = "bannedby";
LLUUID banning_id = (*it)["banning_id"].asUUID();
LLAvatarName av_name;
if (banning_id.isNull())
{
columns[3]["value"] = LLTrans::getString("na");
}
else if (LLAvatarNameCache::get(banning_id, &av_name))
{
columns[3]["value"] = av_name.getCompleteName(); //TODO: fetch the name if it wasn't cached
}
banned_agent_name_list->addElement(item);
}
banned_agent_name_list->sortByName(TRUE);
}
LLNameListCtrl* allowed_group_name_list = panel->getChild<LLNameListCtrl>("allowed_group_name_list");
if (allowed_group_name_list && result.has("AllowedGroups"))
{
LLStringUtil::format_map_t args;
args["[ALLOWEDGROUPS]"] = llformat("%d", result["AllowedGroups"].size());
args["[MAXACCESS]"] = llformat("%d", ESTATE_MAX_GROUP_IDS);
std::string msg = LLTrans::getString("RegionInfoAllowedGroups", args);
panel->getChild<LLUICtrl>("allow_group_label")->setValue(LLSD(msg));
allowed_group_name_list->clearSortOrder();
allowed_group_name_list->deleteAllItems();
for (LLSD::array_const_iterator it = result["AllowedGroups"].beginArray(); it != result["AllowedGroups"].endArray(); ++it)
{
LLUUID id = (*it)["id"].asUUID();
allowed_group_name_list->addGroupNameItem(id);
}
allowed_group_name_list->sortByName(TRUE);
}
LLNameListCtrl* estate_manager_name_list = panel->getChild<LLNameListCtrl>("estate_manager_name_list");
if (estate_manager_name_list && result.has("Managers"))
{
LLStringUtil::format_map_t args;
args["[ESTATEMANAGERS]"] = llformat("%d", result["Managers"].size());
args["[MAXMANAGERS]"] = llformat("%d", ESTATE_MAX_MANAGERS);
std::string msg = LLTrans::getString("RegionInfoEstateManagers", args);
panel->getChild<LLUICtrl>("estate_manager_label")->setValue(LLSD(msg));
estate_manager_name_list->clearSortOrder();
estate_manager_name_list->deleteAllItems();
for (LLSD::array_const_iterator it = result["Managers"].beginArray(); it != result["Managers"].endArray(); ++it)
{
LLUUID id = (*it)["agent_id"].asUUID();
estate_manager_name_list->addNameItem(id);
}
estate_manager_name_list->sortByName(TRUE);
}
panel->updateControls(gAgent.getRegion());
}
//---------------------------------------------------------------------------
// Access lists search
//---------------------------------------------------------------------------
void LLPanelEstateAccess::onAllowedSearchEdit(const std::string& search_string)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLNameListCtrl* allowed_agent_name_list = panel->getChild<LLNameListCtrl>("allowed_avatar_name_list");
searchAgent(allowed_agent_name_list, search_string);
}
void LLPanelEstateAccess::onAllowedGroupsSearchEdit(const std::string& search_string)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLNameListCtrl* allowed_group_name_list = panel->getChild<LLNameListCtrl>("allowed_group_name_list");
searchAgent(allowed_group_name_list, search_string);
}
void LLPanelEstateAccess::onBannedSearchEdit(const std::string& search_string)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLNameListCtrl* banned_agent_name_list = panel->getChild<LLNameListCtrl>("banned_avatar_name_list");
searchAgent(banned_agent_name_list, search_string);
}
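// Selects the first row whose name column starts with search_string;
// an empty filter clears the selection instead.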
void LLPanelEstateAccess::searchAgent(LLNameListCtrl* listCtrl, const std::string& search_string)
{
if (!listCtrl) return;
if (!search_string.empty())
{
listCtrl->setSearchColumn(0); // name column
listCtrl->selectItemByPrefix(search_string, FALSE);
}
else
{
listCtrl->deselectAllItems(TRUE);
}
}
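// Copies the name column of the given list to the clipboard, one entry
// per line.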
void LLPanelEstateAccess::copyListToClipboard(std::string list_name)
{
LLPanelEstateAccess* panel = LLFloaterRegionInfo::getPanelAccess();
if (!panel) return;
LLNameListCtrl* name_list = panel->getChild<LLNameListCtrl>(list_name);
if (!name_list) return;
std::vector<LLScrollListItem*> list_vector = name_list->getAllData();
if (list_vector.size() == 0) return;
LLSD::String list_to_copy;
for (std::vector<LLScrollListItem*>::const_iterator iter = list_vector.begin();
iter != list_vector.end();
iter++)
{
LLScrollListItem *item = (*iter);
if (item)
{
list_to_copy += item->getColumn(0)->getValue().asString();
}
if (std::next(iter) != list_vector.end())
{
list_to_copy += "\n";
}
}
LLClipboard::instance().copyToClipboard(utf8str_to_wstring(list_to_copy), 0, list_to_copy.length());
}
bool LLPanelEstateAccess::refreshFromRegion(LLViewerRegion* region)
{
updateLists();
return LLPanelRegionInfo::refreshFromRegion(region);
}
//=========================================================================
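// Panel-local dirty bit for the estate-wide "allow parcel owners to
// override" checkbox; shifted past the low bits, which the base
// LLPanelEnvironmentInfo presumably reserves for its own flags.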
const U32 LLPanelRegionEnvironment::DIRTY_FLAG_OVERRIDE(0x01 << 4);
LLPanelRegionEnvironment::LLPanelRegionEnvironment():
LLPanelEnvironmentInfo(),
mAllowOverrideRestore(false)
{
}
LLPanelRegionEnvironment::~LLPanelRegionEnvironment()
{
if (mCommitConnect.connected())
mCommitConnect.disconnect();
}
BOOL LLPanelRegionEnvironment::postBuild()
{
LLEstateInfoModel& estate_info = LLEstateInfoModel::instance();
if (!LLPanelEnvironmentInfo::postBuild())
return FALSE;
getChild<LLUICtrl>(BTN_USEDEFAULT)->setLabelArg("[USEDEFAULT]", getString(STR_LABEL_USEDEFAULT));
getChild<LLUICtrl>(CHK_ALLOWOVERRIDE)->setVisible(TRUE);
getChild<LLUICtrl>(PNL_ENVIRONMENT_ALTITUDES)->setVisible(TRUE);
getChild<LLUICtrl>(CHK_ALLOWOVERRIDE)->setCommitCallback([this](LLUICtrl *, const LLSD &value){ onChkAllowOverride(value.asBoolean()); });
mCommitConnect = estate_info.setCommitCallback(boost::bind(&LLPanelRegionEnvironment::refreshFromEstate, this));
return TRUE;
}
void LLPanelRegionEnvironment::refresh()
{
commitDayLenOffsetChanges(false); // commit unsaved changes if any
if (!mCurrentEnvironment)
{
if (mCurEnvVersion <= INVALID_PARCEL_ENVIRONMENT_VERSION)
{
refreshFromSource(); // will immediately set mCurEnvVersion
} // else - already requesting
return;
}
LLPanelEnvironmentInfo::refresh();
getChild<LLUICtrl>(CHK_ALLOWOVERRIDE)->setValue(mAllowOverride);
}
bool LLPanelRegionEnvironment::refreshFromRegion(LLViewerRegion* region)
{
if (!region)
{
setNoSelection(true);
setControlsEnabled(false);
mCurEnvVersion = INVALID_PARCEL_ENVIRONMENT_VERSION;
getChild<LLUICtrl>("region_text")->setValue(LLSD(""));
return false; // no region to query; avoid dereferencing NULL below
}
getChild<LLUICtrl>("region_text")->setValue(LLSD(region->getName()));
setNoSelection(false);
if (gAgent.getRegion()->getRegionID() != region->getRegionID())
{
setCrossRegion(true);
mCurEnvVersion = INVALID_PARCEL_ENVIRONMENT_VERSION;
}
else
{
setCrossRegion(false);
}
refreshFromSource();
return true;
}
void LLPanelRegionEnvironment::refreshFromSource()
{
LL_DEBUGS("ENVIRONMENT") << "Requesting environment for region, known version " << mCurEnvVersion << LL_ENDL;
LLHandle<LLPanel> that_h = getHandle();
if (mCurEnvVersion < UNSET_PARCEL_ENVIRONMENT_VERSION)
{
// to mark as requesting
mCurEnvVersion = UNSET_PARCEL_ENVIRONMENT_VERSION;
}
LLEnvironment::instance().requestRegion(
[that_h](S32 parcel_id, LLEnvironment::EnvironmentInfo::ptr_t envifo) { _onEnvironmentReceived(that_h, parcel_id, envifo); });
setControlsEnabled(false);
}
bool LLPanelRegionEnvironment::confirmUpdateEstateEnvironment(const LLSD& notification, const LLSD& response)
{
S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
switch (option)
{
case 0:
{
LLEstateInfoModel& estate_info = LLEstateInfoModel::instance();
// update model
estate_info.setAllowEnvironmentOverride(mAllowOverride);
// send the update to sim
estate_info.sendEstateInfo();
clearDirtyFlag(DIRTY_FLAG_OVERRIDE);
}
break;
case 1:
mAllowOverride = mAllowOverrideRestore;
getChild<LLUICtrl>(CHK_ALLOWOVERRIDE)->setValue(mAllowOverride);
break;
default:
break;
}
return false;
}
void LLPanelRegionEnvironment::onChkAllowOverride(bool value)
{
setDirtyFlag(DIRTY_FLAG_OVERRIDE);
mAllowOverrideRestore = mAllowOverride;
mAllowOverride = value;
std::string notification("EstateParcelEnvironmentOverride");
if (LLPanelEstateInfo::isLindenEstate())
notification = "ChangeLindenEstate";
LLSD args;
args["ESTATENAME"] = LLEstateInfoModel::instance().getName();
LLNotification::Params params(notification);
params.substitutions(args);
params.functor.function([this](const LLSD& notification, const LLSD& response) { confirmUpdateEstateEnvironment(notification, response); });
if (!value || LLPanelEstateInfo::isLindenEstate())
{ // warn if turning off or a Linden Estate
LLNotifications::instance().add(params);
}
else
{
LLNotifications::instance().forceResponse(params, 0);
}
}
|
{"hexsha": "4f81af2755400b58db1a6c7570bb831fe023e30f", "size": 131558, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "indra/newview/llfloaterregioninfo.cpp", "max_stars_repo_name": "SaladDais/LLUDP-Encryption", "max_stars_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_stars_repo_licenses": ["ISC"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-29T07:10:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T07:10:03.000Z", "max_issues_repo_path": "indra/newview/llfloaterregioninfo.cpp", "max_issues_repo_name": "SaladDais/LLUDP-Encryption", "max_issues_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "indra/newview/llfloaterregioninfo.cpp", "max_forks_repo_name": "SaladDais/LLUDP-Encryption", "max_forks_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_forks_repo_licenses": ["ISC"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-10-01T22:22:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T22:22:27.000Z", "avg_line_length": 32.1421939897, "max_line_length": 172, "alphanum_fraction": 0.7314188419, "num_tokens": 34095}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 26 14:21:02 2016
@author: Sebastijan Mrak <smrak@gmail.com>
"""
import numpy as np
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
#%% Keograms
def plotKeogram(t, y, kg, title=None, legend=None, ylim=None, pcolorbar=None,
ytick=None, cmap=None, cbartick=None, cbartitle=None,
xtick=None, xlim=None):
"""
Sebastijan Mrak
    Function plotKeogram takes x and y grid values, where the x axis is meant to
    be time in datetime.datetime format. It plots keogram values 'kg' on the given
    grid with the pcolormesh function. There are also many supporting parameters to
enrich the figure.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
ax = fig.add_subplot(111)
if cmap is not None:
plt.pcolormesh(t, y, np.nan_to_num(kg.T), cmap=cmap)
else:
plt.pcolormesh(t, y, np.nan_to_num(kg.T))
plt.xlabel('UT')
plt.ylabel('Elevation [deg]')
if title is not None:
plt.title(title)
if ylim is not None:
ax.set_ylim(ylim)
if legend is not None:
plt.legend()
if pcolorbar is not None:
if cbartick is not None:
cbar = plt.colorbar(ticks=cbartick)
cbar.ax.set_ylabel(cbartitle)
else:
plt.colorbar()
if ytick is not None:
ax.set_yticks(ytick)
if xtick is not None:
ax.set_xticks(xtick)
if xlim is not None:
ax.set_xlim(xlim)
ax.xaxis.set(major_formatter=formatter)
plt.show()
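# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical call to plotKeogram with synthetic data; the grid
# sizes, tick values and titles below are illustrative assumptions only.
def _example_plot_keogram():
    import datetime
    t = [datetime.datetime(2016, 11, 26, 14) + datetime.timedelta(minutes=m)
         for m in range(60)]
    el = np.linspace(0, 90, 46)               # elevation grid [deg]
    kg = np.random.rand(len(t), len(el))      # keogram array: time x elevation
    plotKeogram(t, el, kg, title='Synthetic keogram', pcolorbar=True,
                cbartick=[0, 0.5, 1.0], cbartitle='Relative intensity')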
def plot2Keogram(t1, t2, y1, y2, kg1, kg2, title1=None, title2=None,
legend=None, ylim=None, pcolorbar=None, ytick1=None, ytick2=None,
cmap=None, cbartick1=None, cbartick2=None,
cbartitle1=None, cbartitle2=None, xtick=None, xlim=None):
"""
Sebastijan Mrak
    Function plot2Keogram takes x and y grid values, where the x axis is meant to
    be time in datetime.datetime format. It plots keogram values 'kg' on the given
    grid with the pcolormesh function. It takes 2 different sets of input data and
    plots them on separate subplots, where the x-axis is shared among subplots.
    There are also many supporting parameters to enrich the figure.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
plt.rc('axes', labelsize=20)
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
ax1 = fig.add_subplot(211)
if cmap is not None:
plt.pcolormesh(t1, y1, np.nan_to_num(kg1.T), cmap=cmap)
else:
plt.pcolormesh(t1, y1, np.nan_to_num(kg1.T))
plt.setp(ax1.get_xticklabels(), visible=False)
if title1 is not None:
plt.title(title1)
if legend is not None:
plt.legend()
ax2 = fig.add_subplot(212, sharex=ax1)
if cmap is not None:
plt.pcolormesh(t2, y2, np.nan_to_num(kg2.T), cmap=cmap)
else:
plt.pcolormesh(t2, y2, np.nan_to_num(kg2.T))
plt.xlabel('UT')
if title2 is not None:
plt.title(title2)
if legend is not None:
plt.legend()
if ylim is not None:
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
if ytick1 is not None:
ax1.set_yticks(ytick1)
ax2.set_yticks(ytick2)
if xtick is not None:
ax1.set_xticks(xtick)
if xlim is not None:
ax1.set_xlim(xlim)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
p1 = ax1.get_position()
pos1 = p1.get_points()
pos1[0][0] = 0.1
pos1[1][0] = 0.86
p1.set_points(pos1)
ax1.set_position(p1)
p2 = ax2.get_position()
pos2 = p2.get_points()
pos2[0][0] = 0.1
pos2[1][0] = 0.86
p2.set_points(pos2)
ax2.set_position(p2)
if pcolorbar is not None:
if cbartick1 is not None:
p1 = ax1.get_position()
pos1 = p1.get_points()
cbaxes = fig.add_axes([0.88, pos1[0][1]+0.01, 0.01, pos1[1][1]-pos1[0][1]-0.01])
            cbar = plt.colorbar(ticks=cbartick1, cax=cbaxes)
cbar.ax.set_ylabel(cbartitle1)
if cbartick2 is not None:
p2 = ax2.get_position()
pos2 = p2.get_points()
cbaxes = fig.add_axes([0.88, pos2[0][1]+0.01, 0.01, pos2[1][1]-pos2[0][1]-0.01])
cbar = plt.colorbar(ticks=cbartick2, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle2)
fig.text(0.04, 0.5, 'Elevation [deg]', va='center', rotation='vertical', fontsize=20)
def plot3Keogram(t1, t2, t3, y1, y2, y3, kg1, kg2, kg3, title1=None,
title2=None, title3=None, legend=None, ylim=None,
pcolorbar=None, ytick=None, xtick=None, cmap=None,
cbartick1=None, cbartick2=None, cbartick3=None,
cbartitle1=None, cbartitle2=None, cbartitle3=None,
xlim=None):
"""
Sebastijan Mrak
    Function plot3Keogram takes x and y grid values, where the x axis is meant to
    be time in datetime.datetime format. It plots keogram values 'kg' on the given
    grid with the pcolormesh function. It takes 3 different sets of input data and
    plots them on separate subplots, where the x-axis is shared among subplots.
    There are also many supporting parameters to enrich the figure.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
ax1 = fig.add_subplot(311)
if cmap is not None:
plt.pcolormesh(t1, y1, np.nan_to_num(kg1.T), cmap=cmap)
else:
plt.pcolormesh(t1, y1, np.nan_to_num(kg1.T))
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel('Elevation [deg]')
if title1 is not None:
plt.title(title1)
if legend is not None:
plt.legend()
if pcolorbar is not None:
if cbartick1 is not None:
cbar = plt.colorbar(ticks=cbartick1)
cbar.ax.set_ylabel(cbartitle1)
else:
plt.colorbar()
ax2 = fig.add_subplot(312, sharex=ax1)
if cmap is not None:
plt.pcolormesh(t2, y2, np.nan_to_num(kg2.T), cmap=cmap)
else:
plt.pcolormesh(t2, y2, np.nan_to_num(kg2.T))
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylabel('Elevation [deg]')
if title2 is not None:
plt.title(title2)
if legend is not None:
plt.legend()
if pcolorbar is not None:
if cbartick2 is not None:
cbar2 = plt.colorbar(ticks=cbartick2)
cbar2.ax.set_ylabel(cbartitle2)
else:
plt.colorbar()
ax3 = fig.add_subplot(313, sharex=ax1)
if cmap is not None:
plt.pcolormesh(t3, y3, np.nan_to_num(kg3.T), cmap=cmap)
else:
plt.pcolormesh(t3, y3, np.nan_to_num(kg3.T))
plt.xlabel('UT')
plt.ylabel('Elevation [deg]')
if title3 is not None:
plt.title(title3)
if legend is not None:
plt.legend()
if pcolorbar is not None:
if cbartick2 is not None:
cbar3 = plt.colorbar(ticks=cbartick3)
cbar3.ax.set_ylabel(cbartitle3)
else:
plt.colorbar()
if ylim is not None:
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
ax3.set_ylim(ylim)
if ytick is not None:
ax1.set_yticks(ytick)
ax2.set_yticks(ytick)
ax3.set_yticks(ytick)
if xtick is not None:
ax1.set_xticks(xtick)
if xlim is not None:
ax1.set_xlim(xlim)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
plt.show()
def plot4Keogram(t1, t2, t3, t4, y1, y2, y3, y4, kg1, kg2, kg3, kg4,
title1=None, title2=None, title3=None, title4=None,
legend=None, ylim=None, pcolorbar=None, ytick1=None,
ytick2=None, ytick3=None, ytick4=None, xtick=None, cmap=None,
cbartick1=None, cbartick2=None, cbartick3=None,cbartick4=None,
cbartitle1=None, cbartitle2=None, cbartitle3=None, cbartitle4=None,
xlim=None, elevation=None, obstimes=None, lli=None):
"""
Sebastijan Mrak
    Function plot4Keogram takes x and y grid values, where the x axis is meant to
    be time in datetime.datetime format. It plots keogram values 'kg' on the given
    grid with the pcolormesh function. It takes 4 different sets of input data and
    plots them on separate subplots, where the x-axis is shared among subplots.
    There are also many supporting parameters to enrich the figure.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure(figsize=(8,6), dpi=150)
plt.rc('axes', labelsize=12)
plt.rc('xtick', labelsize=8) # fontsize of the tick labels
plt.rc('ytick', labelsize=8) # fontsize of the tick labels
ax1 = fig.add_subplot(411)
if cmap is not None:
plt.pcolormesh(t1, y1, np.nan_to_num(kg1.T), cmap=cmap)
else:
plt.pcolormesh(t1, y1, np.nan_to_num(kg1.T))
if title1 is not None:
plt.title(title1)
if legend is not None:
plt.legend()
plt.setp(ax1.get_xticklabels(), visible=False)
################################################################################
ax2 = fig.add_subplot(412, sharex=ax1)
if cmap is not None:
plt.pcolormesh(t2, y2, np.nan_to_num(kg2.T), cmap=cmap)
else:
plt.pcolormesh(t2, y2, np.nan_to_num(kg2.T))
plt.setp(ax2.get_xticklabels(), visible=False)
if title2 is not None:
plt.title(title2)
if legend is not None:
plt.legend()
################################################################################
ax3 = fig.add_subplot(413, sharex=ax1)
if cmap is not None:
plt.pcolormesh(t3, y3, np.nan_to_num(kg3.T), cmap=cmap)
else:
plt.pcolormesh(t3, y3, np.nan_to_num(kg3.T))
if title3 is not None:
plt.title(title3)
if legend is not None:
plt.legend()
plt.setp(ax3.get_xticklabels(), visible=False)
################################################################################
ax4 = fig.add_subplot(414, sharex=ax1)
if cmap is not None:
plt.pcolormesh(t4, y4, np.nan_to_num(kg4.T), cmap=cmap)
else:
plt.pcolormesh(t4, y4, np.nan_to_num(kg4.T))
# if elevation is not None:
# plt.plot(obstimes, elevation, 'r', lw=1)
# if lli is not None:
# idx = np.where((lli%2) == 1)[0]
# plt.scatter(obstimes[idx], elevation[idx], facecolors='none', edgecolors='m', s=10)
plt.xlabel('UT')
if title4 is not None:
plt.title(title4)
if legend is not None:
plt.legend()
################################################################################
if ylim is not None:
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
ax3.set_ylim(ylim)
ax4.set_ylim(ylim)
if ytick1 is not None:
ax1.set_yticks(ytick1)
if ytick2 is not None:
ax2.set_yticks(ytick2)
if ytick3 is not None:
ax3.set_yticks(ytick3)
if ytick4 is not None:
ax4.set_yticks(ytick4)
if xtick is not None:
ax1.set_xticks(xtick)
if xlim is not None:
ax1.set_xlim(xlim)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
############################################################################
p1 = ax1.get_position()
pos1 = p1.get_points()
pos1[0][0] = 0.1
pos1[1][0] = 0.86
p1.set_points(pos1)
ax1.set_position(p1)
p2 = ax2.get_position()
pos2 = p2.get_points()
pos2[0][0] = 0.1
pos2[1][0] = 0.86
p2.set_points(pos2)
ax2.set_position(p2)
p3 = ax3.get_position()
pos3 = p3.get_points()
pos3[0][0] = 0.1
pos3[1][0] = 0.86
p3.set_points(pos3)
ax3.set_position(p3)
p4 = ax4.get_position()
pos4 = p4.get_points()
pos4[0][0] = 0.1
pos4[1][0] = 0.86
p4.set_points(pos4)
ax4.set_position(p4)
if pcolorbar is not None:
if cbartick1 is not None:
p1 = ax1.get_position()
pos1 = p1.get_points()
cbaxes = fig.add_axes([0.88, pos1[0][1]+0.01, 0.01, pos1[1][1]-pos1[0][1]-0.01])
            cbar = plt.colorbar(ticks=cbartick1, cax=cbaxes)
cbar.ax.set_ylabel(cbartitle1)
if cbartick2 is not None:
p2 = ax2.get_position()
pos2 = p2.get_points()
cbaxes = fig.add_axes([0.88, pos2[0][1]+0.01, 0.01, pos2[1][1]-pos2[0][1]-0.01])
cbar = plt.colorbar(ticks=cbartick2, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle2)
if cbartick3 is not None:
p3 = ax3.get_position()
pos3 = p3.get_points()
cbaxes = fig.add_axes([0.88, pos3[0][1]+0.01, 0.01, pos3[1][1]-pos3[0][1]-0.01])
cbar = plt.colorbar(ticks=cbartick3, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle3)
if cbartick4 is not None:
p4 = ax4.get_position()
pos4 = p4.get_points()
cbaxes = fig.add_axes([0.88, pos4[0][1]+0.01, 0.01, pos4[1][1]-pos4[0][1]-0.01])
cbar = plt.colorbar(ticks=cbartick4, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle4)
else:
plt.colorbar()
if elevation is not None:
ax1.plot(obstimes, elevation, 'r', lw=1)
ax4.plot(obstimes, elevation, 'r', lw=1)
if lli is not None:
idx = np.where((lli%2) == 1)[0]
ax1.scatter(obstimes[idx], elevation[idx], facecolors='none', edgecolors='m', s=10)
ax4.scatter(obstimes[idx], elevation[idx], facecolors='none', edgecolors='m', s=10)
fig.text(0.04, 0.5, 'Elevation [deg]', va='center', rotation='vertical')
#%% GPS stuff
def plotTEC(t, y, xlabel=None, ylabel=None, title=None, xlim=None, ylim=None,
color='b', ytick=None, xtick=None):
"""
Sebastijan Mrak
    Plot a single-track t-y plot which is meant to be a time-dependent graph. Time
    has to be in datetime.datetime format. It offers some optional parameters to
    shape the figure.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(t, y, color)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if xtick is not None:
ax.set_xticks(xtick)
if ytick is not None:
ax.set_yticks(ytick)
ax.xaxis.set(major_formatter=formatter)
plt.show()
def plotTECLossOfLock(t, y, l, lli='x', xlabel=None, ylabel=None, title=None, xlim=None, ylim=None,
color='b', colorx='xr', ms=10):
"""
Sebastijan Mrak
    Plot a single-track t-y plot which is meant to be a time-dependent graph,
    including loss-of-lock indicators on the plot. Time has to be in
    datetime.datetime format. It offers some optional parameters to shape the figure.
"""
idx = np.where((l%2)==1)[0]
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(t, y, color)
if lli == 'x':
plt.plot(t[idx], y[idx], colorx, ms=ms)
elif lli == 'line':
lli_range = ax.get_ylim()
for ix in idx:
plt.plot([t[ix], t[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
if xlim is not None:
ax.set_xlim(xlim)
    if ylim is not None:
        ax.set_ylim(ylim)
ax.xaxis.set(major_formatter=formatter)
plt.show()
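# --- Usage sketch (not part of the original module) ---------------------------
# A hypothetical call showing both loss-of-lock styles; all data are synthetic
# and purely illustrative.
def _example_loss_of_lock():
    import datetime
    t = np.array([datetime.datetime(2016, 11, 26) + datetime.timedelta(seconds=30 * i)
                  for i in range(200)])
    tec = 10.0 + 0.1 * np.cumsum(np.random.randn(200))   # fake TEC curve
    flags = np.zeros(200, dtype=int)
    flags[[50, 120]] = 1                                 # two loss-of-lock epochs
    plotTECLossOfLock(t, tec, flags, lli='x', ylabel='TEC [TECu]')
    plotTECLossOfLock(t, tec, flags, lli='line', ylabel='TEC [TECu]')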
def plot2SubplotGPS(t1, t2, y1, y2, l=None, lli1='x', lli2='x', xlabel=None, ylabel1=None,
ylabel2=None, title1=None, title2=None, xlim=None, ms=10,
ylim1=None, ylim2=None, color1='b', colorx='xr', color2='b'):
"""
    Sebastijan Mrak
    Plot two stacked, time-shared GPS subplots with optional loss-of-lock markers.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
ax1 = fig.add_subplot(211)
plt.plot(t1, y1, color1)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if title1 is not None:
plt.title(title1)
if l is not None:
idx=np.where((l%2)==1)[0]
if lli1 == 'x':
plt.plot(t1[idx], y1[idx], colorx, ms=ms)
elif lli1 == 'line':
lli_range = ax1.get_ylim()
for ix in idx:
plt.plot([t1[ix], t1[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
if ylim1 is not None:
ax1.set_ylim(ylim1)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2=fig.add_subplot(212, sharex=ax1)
plt.plot(t2, y2, color2)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if title2 is not None:
plt.title(title2)
if l is not None:
idx=np.where((l%2)==1)[0]
if lli2 == 'x':
plt.plot(t2[idx], y2[idx], colorx, ms=ms)
elif lli2 == 'line':
lli_range = ax2.get_ylim()
for ix in idx:
plt.plot([t2[ix], t2[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
if ylim2 is not None:
        ax2.set_ylim(ylim2)
if xlabel is not None:
ax2.set_xlabel(xlabel)
if xlim is not None:
ax1.set_xlim(xlim)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
plt.show()
#%% XY Plots
def plot2subplotK_xy(x, y, i, t, B, el=None, xlim=None, ylim1=None, ylim2=None,
xlabel=None, ylabel1=None, ylabel2=None, ytick1=None, ytick2=None,
cmap='viridis', pcolorbar=None, cbartick=None, cbartitle=None,
obstimes=None, ipp=None):
"""
    Sebastijan Mrak
    Plot a keogram (pcolormesh) above a line plot on a shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
plt.rc('axes', labelsize=20)
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
ax1=fig.add_subplot(211)
plt.pcolormesh(x, y, np.nan_to_num(i.T), cmap=cmap)
if ipp is not None:
plt.plot(obstimes, ipp, '-r', lw=1)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
if ytick1 is not None:
ax1.set_yticks(ytick1)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = fig.add_subplot(212, sharex=ax1)
plt.plot(t, B, lw=2)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
if xlim is not None:
ax1.set_xlim(xlim)
if xlabel is not None:
plt.xlabel(xlabel)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
#
p1 = ax1.get_position()
pos1 = p1.get_points()
pos1[1][0] = 0.86
p1.set_points(pos1)
ax1.set_position(p1)
# find current position [x,y,width,height]
p1 = ax1.get_position()
p2 = ax2.get_position()
pos1 = p1.get_points()
pos2 = p2.get_points()
# set width of second axes equal to first
pos2[1][0] = pos1[1][0]
p2.set_points(pos2)
ax2.set_position(p2)
if pcolorbar is not None:
if cbartick is not None:
p1 = ax1.get_position()
pos1 = p1.get_points()
cbaxes = fig.add_axes([0.88, pos1[0][1], 0.01, pos1[1][1]-pos1[0][1]])
cbar = plt.colorbar(ticks=cbartick, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle)
else:
plt.colorbar()
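# --- Refactoring sketch (not part of the original module) ---------------------
# The position juggling above (pin the right edge at 0.86, then copy that width
# onto the other axes so a colorbar fits at x=0.88) recurs in several functions
# here; a small helper like this one could replace it. Illustrative only; the
# 0.86/0.88 figure coordinates are this module's own conventions.
def _match_axes_width(ref_ax, *axes, right=0.86):
    p_ref = ref_ax.get_position()
    pts = p_ref.get_points()
    pts[1][0] = right                 # pin the reference axes' right edge
    p_ref.set_points(pts)
    ref_ax.set_position(p_ref)
    for ax in axes:
        p = ax.get_position()
        q = p.get_points()
        q[1][0] = right               # copy the same right edge
        p.set_points(q)
        ax.set_position(p)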
def plot2subplot(t1, t2, y1, y2, t21=None, t22=None, y21=None, y22=None,
color1='b', color2='b', color3='r', color4='g',
xlabel=None, title1=None, title2=None, ylabel1=None,
ylabel2=None, xlim=None, ylim1=None, ylim2=None,
label1=None, label2=None, label3=None, label4=None,
legend1=None, legend2=None, lli1=None, lm='x',
ms=10, colorx1='xr', lli2=None):
"""
    Sebastijan Mrak
    Plot two stacked time-series subplots with optional extra traces, legends and
    loss-of-lock markers.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
ax1 = fig.add_subplot(211)
if legend1 is not None:
plt.plot(t1, y1, color1, label=label1)
plt.legend()
else:
plt.plot(t1, y1, color1)
if lli1 is not None:
idx=np.where((lli1%2)==1)[0]
if lm == 'x':
plt.plot(t1[idx], y1[idx], colorx1, ms=ms)
elif lm == 'line':
lli_range = ax1.get_ylim()
for ix in idx:
plt.plot([t1[ix], t1[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
plt.setp(ax1.get_xticklabels(), visible=False)
if title1 is not None:
plt.title(title1)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
ax2 = fig.add_subplot(212, sharex=ax1)
if legend2 is None:
plt.plot(t2, y2, color2)
if (t21 is not None) and (y21 is not None):
plt.plot(t21, y21, color3)
if (t22 is not None) and (y22 is not None):
plt.plot(t22, y22, color4)
else:
plt.plot(t2, y2, color2, label=label2)
if (t21 is not None) and (y21 is not None):
plt.plot(t21, y21, color3, label=label3)
if (t22 is not None) and (y22 is not None):
plt.plot(t22, y22, color4, label=label4)
ax2.legend(loc=2, bbox_to_anchor=(1.001, 0,0, 1), prop={'size':10},
fancybox=True)
if lli2 is not None:
idx=np.where((lli2%2)==1)[0]
lli_range = ax2.get_ylim()
for ix in idx:
plt.plot([t2[ix], t2[ix]], [lli_range[0], lli_range[1]], 'r')
if title2 is not None:
plt.title(title2)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
if xlim is not None:
ax1.set_xlim(xlim)
if xlabel is not None:
plt.xlabel(xlabel)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
plt.show()
def plot3subplot(t1, t2, t3, y1, y2, y3, el1=None, t31=None, t32=None, y31=None, y32=None,
color1='b', color2='b', color3='r', color4='g',
xlabel=None, title1=None, title2=None, title3=None, ylabel1=None,
ylabel2=None, ylabel3=None, xlim=None, ylim1=None, ylim2=None, ylim3=None,
label1=None, label2=None, label3=None, label4=None, ytick1=[],
legend1=None, legend2=None, lli1=None, lm='x',
ms=10, colorx1='xr', lli2=None, cmap='viridis', pcolorbar=None,
cbartick=None, cbartitle=None):
"""
    Sebastijan Mrak
    Plot a keogram over two time-series subplots on a shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
    fig = plt.figure(figsize=(8, 6), dpi=150)
ax1=fig.add_subplot(311)
plt.pcolormesh(t1, el1, np.nan_to_num(y1.T), cmap=cmap)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
if title1 is not None:
plt.title(title1)
if pcolorbar is not None:
if cbartick is not None:
cbar = plt.colorbar(ticks=cbartick)
cbar.ax.set_ylabel(cbartitle)
else:
plt.colorbar()
if ytick1 is not None:
ax1.set_yticks(ytick1)
ax2 = fig.add_subplot(312, sharex=ax1)
if legend1 is not None:
plt.plot(t2, y2, color1, label=label1)
plt.legend()
else:
plt.plot(t2, y2, color1)
if lli1 is not None:
idx=np.where((lli1%2)==1)[0]
if lm == 'x':
plt.plot(t2[idx], y2[idx], colorx1, ms=ms)
elif lm == 'line':
lli_range = ax2.get_ylim()
for ix in idx:
plt.plot([t2[ix], t2[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
plt.setp(ax1.get_xticklabels(), visible=False)
if title2 is not None:
        plt.title(title2)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
plt.setp(ax2.get_xticklabels(), visible=False)
ax3 = fig.add_subplot(313, sharex=ax1)
if legend2 is None:
plt.plot(t3, y3, color2)
if (t31 is not None) and (y31 is not None):
plt.plot(t31, y31, color3)
if (t32 is not None) and (y32 is not None):
plt.plot(t32, y32, color4)
else:
plt.plot(t3, y3, color2, label=label2)
if (t31 is not None) and (y31 is not None):
plt.plot(t31, y31, color3, label=label3)
if (t32 is not None) and (y32 is not None):
plt.plot(t32, y32, color4, label=label4)
ax3.legend(loc=2, bbox_to_anchor=(1.001, 0,0, 1), prop={'size':10},
fancybox=True)
if lli2 is not None:
idx=np.where((lli2%2)==1)[0]
lli_range = ax3.get_ylim()
for ix in idx:
            plt.plot([t3[ix], t3[ix]], [lli_range[0], lli_range[1]], 'r')
if title3 is not None:
plt.title(title3)
if ylabel3 is not None:
plt.ylabel(ylabel3)
if ylim3 is not None:
        ax3.set_ylim(ylim3)
if xlim is not None:
ax1.set_xlim(xlim)
if xlabel is not None:
plt.xlabel(xlabel)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
plt.show()
# find current position [x,y,width,height]
p1 = ax1.get_position()
p2 = ax2.get_position()
p3 = ax3.get_position()
pos1 = p1.get_points()
pos2 = p2.get_points()
pos3 = p3.get_points()
# set width of second axes equal to first
pos2[1][0] = pos1[1][0]
p2.set_points(pos2)
ax2.set_position(p2)
pos3[1][0] = pos1[1][0]
p3.set_points(pos3)
ax3.set_position(p3)
def plot4subplot(t1, t2, t3, t4, y1, y2, y3,y4, el1=None, t31=None, t32=None,
y31=None, y32=None, color1='b', color2='b', color3='r', color4='g',
color5='b', xlabel=None, title1=None, title2=None, title3=None,
title4=None, ylabel1=None, ylabel2=None, ylabel3=None, ylabel4=None,
xlim=None, ylim1=None, ylim2=None, ylim3=None, ylim4=None,
label1=None, label2=None, label3=None, label4=None, ytick1=[],
legend1=None, legend2=None, lli1=None, lm='x', lli3=None,
ms=10, colorx1='xr', lli2=None, cmap='viridis', pcolorbar=None,
cbartick=None, cbartitle=None, ytick2=None, ytick3=None, ytick4=None,
ipp_elevation=None, lli_keo=None, lw=2):
"""
    Sebastijan Mrak
    Plot a keogram over three time-series subplots on a shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
plt.rc('axes', labelsize=18)
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
ax1=fig.add_subplot(411)
plt.pcolormesh(t1, el1, np.nan_to_num(y1.T), cmap=cmap)
if ipp_elevation is not None:
plt.plot(t2, ipp_elevation, '-r', lw=lw)
if lli_keo is not None:
idx = np.where((lli_keo%2) == 1)[0]
ax1.scatter(t2[idx], ipp_elevation[idx], facecolors='none', edgecolors='m', s=15)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
if title1 is not None:
plt.title(title1)
if ytick1 is not None:
ax1.set_yticks(ytick1)
plt.setp(ax1.get_xticklabels(), visible=False)
############################################################################
ax2 = fig.add_subplot(412, sharex=ax1)
if legend1 is not None:
plt.plot(t2, y2, color1, label=label1)
plt.legend()
else:
plt.plot(t2, y2, color1)
if lli1 is not None:
idx=np.where((lli1%2)==1)[0]
if lm == 'x':
plt.plot(t2[idx], y2[idx], colorx1, ms=ms)
elif lm == 'line':
lli_range = ax2.get_ylim()
for ix in idx:
plt.plot([t2[ix], t2[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
if title2 is not None:
        plt.title(title2)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
if ytick2 is not None:
ax2.set_yticks(ytick2)
plt.setp(ax2.get_xticklabels(), visible=False)
############################################################################
ax3 = fig.add_subplot(413, sharex=ax1)
if legend2 is None:
plt.plot(t3, y3, color2)
if (t31 is not None) and (y31 is not None):
plt.plot(t31, y31, color3)
if (t32 is not None) and (y32 is not None):
plt.plot(t32, y32, color4)
else:
plt.plot(t3, y3, color2, label=label2)
if (t31 is not None) and (y31 is not None):
plt.plot(t31, y31, color3, label=label3)
if (t32 is not None) and (y32 is not None):
plt.plot(t32, y32, color4, label=label4)
ax3.legend(loc=2, bbox_to_anchor=(0.88, 0,0, 1), prop={'size':12},
fancybox=True)
if lli2 is not None:
idx=np.where((lli2%2)==1)[0]
lli_range = ax3.get_ylim()
for ix in idx:
            plt.plot([t3[ix], t3[ix]], [lli_range[0], lli_range[1]], 'r')
if title3 is not None:
plt.title(title3)
if ylabel3 is not None:
plt.ylabel(ylabel3)
if ylim3 is not None:
ax3.set_ylim(ylim3)
if ytick3 is not None:
ax3.set_yticks(ytick3)
plt.setp(ax3.get_xticklabels(), visible=False)
############################################################################
ax4 = fig.add_subplot(414, sharex=ax1)
plt.plot(t4, y4, color5)
if lli3 is not None:
        idx=np.where((lli3%2)==1)[0]
lli_range = ax4.get_ylim()
for ix in idx:
plt.plot([t4[ix], t4[ix]], [lli_range[0], lli_range[1]], 'r')
if title4 is not None:
plt.title(title4)
if ylabel4 is not None:
plt.ylabel(ylabel4)
if ylim4 is not None:
ax4.set_ylim(ylim4)
if ytick4 is not None:
ax4.set_yticks(ytick4)
############################################################################
if xlim is not None:
ax1.set_xlim(xlim)
if xlabel is not None:
plt.xlabel(xlabel)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
#
p1 = ax1.get_position()
pos1 = p1.get_points()
pos1[1][0] = 0.86
p1.set_points(pos1)
ax1.set_position(p1)
# find current position [x,y,width,height]
p1 = ax1.get_position()
p2 = ax2.get_position()
p3 = ax3.get_position()
p4 = ax4.get_position()
pos1 = p1.get_points()
pos2 = p2.get_points()
pos3 = p3.get_points()
pos4 = p4.get_points()
# set width of second axes equal to first
pos2[1][0] = pos1[1][0]
p2.set_points(pos2)
ax2.set_position(p2)
pos3[1][0] = pos1[1][0]
p3.set_points(pos3)
ax3.set_position(p3)
pos4[1][0] = pos1[1][0]
p4.set_points(pos4)
ax4.set_position(p4)
if pcolorbar is not None:
if cbartick is not None:
p1 = ax1.get_position()
pos1 = p1.get_points()
cbaxes = fig.add_axes([0.88, pos1[0][1], 0.01, pos1[1][1]-pos1[0][1]])
cbar = plt.colorbar(ticks=cbartick, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle)
else:
plt.colorbar()
def plot5subplot(t1, t2, t3, t4, y1, y2, y3,y4, el1=None, t31=None, t32=None,
y31=None, y32=None, color1='b', color2='b', color3='r', color4='g',
color5='b', xlabel=None, title1=None, title2=None, title3=None,
title4=None, ylabel1=None, ylabel2=None, ylabel3=None, ylabel4=None,
xlim=None, ylim1=None, ylim2=None, ylim3=None, ylim4=None,
label1=None, label2=None, label3=None, label4=None, ytick1=[],
legend1=None, legend2=None, lli1=None, lm='x', lli3=None,
ms=10, colorx1='xr', lli2=None, cmap='viridis', pcolorbar=None,
cbartick=None, cbartitle=None, ytick2=None, ytick3=None, ytick4=None,
ipp_elevation=None, lli_keo=None, lw=2, Bt=None, Bx=None, title5=None,
cbx=None, ylabel5=None, ylim5=None, ytick5=None):
"""
    Sebastijan Mrak
    Plot a keogram over four time-series subplots on a shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
plt.rc('axes', labelsize=16)
plt.rc('xtick', labelsize=14) # fontsize of the tick labels
plt.rc('ytick', labelsize=14) # fontsize of the tick labels
ax1=fig.add_subplot(511)
plt.pcolormesh(t1, el1, np.nan_to_num(y1.T), cmap=cmap)
if ipp_elevation is not None:
plt.plot(t3, ipp_elevation, '-r', lw=lw)
if lli_keo is not None:
idx = np.where((lli_keo%2) == 1)[0]
ax1.scatter(t3[idx], ipp_elevation[idx], facecolors='none', edgecolors='m', s=15)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
if title1 is not None:
plt.title(title1)
if ytick1 is not None:
ax1.set_yticks(ytick1)
plt.setp(ax1.get_xticklabels(), visible=False)
############################################################################
ax2 = fig.add_subplot(512, sharex=ax1)
if legend1 is None:
plt.plot(t2, y2, color2)
if (t31 is not None) and (y31 is not None):
plt.plot(t31, y31, color3)
if (t32 is not None) and (y32 is not None):
plt.plot(t32, y32, color4)
else:
plt.plot(t2, y2, color2,label=label2)
plt.plot(t2, y2, color2+'.',)
if (t31 is not None) and (y31 is not None):
plt.plot(t31, y31, color3, label=label3)
plt.plot(t31, y31, color3+'.',)
if (t32 is not None) and (y32 is not None):
plt.plot(t32, y32, color4, label=label4)
plt.plot(t32, y32, color4+'.',)
ax2.legend(loc=2, bbox_to_anchor=(0.88, 0,0, 1), prop={'size':12},
fancybox=True)
if lli2 is not None:
idx=np.where((lli2%2)==1)[0]
lli_range = ax2.get_ylim()
for ix in idx:
            plt.plot([t2[ix], t2[ix]], [lli_range[0], lli_range[1]], 'r')
if title2 is not None:
plt.title(title2)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
if ytick2 is not None:
ax2.set_yticks(ytick2)
plt.setp(ax2.get_xticklabels(), visible=False)
############################################################################
ax3 = fig.add_subplot(513, sharex=ax1)
if legend2 is not None:
plt.plot(t3, y3, color1, lw=2, label=label1)
plt.legend()
else:
plt.plot(t3, y3, color1, lw=2)
if lli1 is not None:
idx=np.where((lli1%2)==1)[0]
if lm == 'x':
plt.plot(t3[idx], y3[idx], colorx1, ms=ms)
elif lm == 'line':
lli_range = ax3.get_ylim()
for ix in idx:
plt.plot([t3[ix], t3[ix]], [lli_range[0], lli_range[1]], 'r')
else:
print ("Enter right parameter for loss of lock index presentation")
if title3 is not None:
plt.title(title3)
if ylabel3 is not None:
plt.ylabel(ylabel3)
if ylim3 is not None:
ax3.set_ylim(ylim3)
if ytick3 is not None:
ax3.set_yticks(ytick3)
plt.setp(ax3.get_xticklabels(), visible=False)
############################################################################
ax4 = fig.add_subplot(514, sharex=ax1)
plt.plot(t4, y4, color5, lw=2)
if lli3 is not None:
        idx=np.where((lli3%2)==1)[0]
lli_range = ax4.get_ylim()
for ix in idx:
plt.plot([t4[ix], t4[ix]], [lli_range[0], lli_range[1]], 'r')
if title4 is not None:
plt.title(title4)
if ylabel4 is not None:
plt.ylabel(ylabel4)
if ylim4 is not None:
ax4.set_ylim(ylim4)
if ytick4 is not None:
ax4.set_yticks(ytick4)
plt.setp(ax4.get_xticklabels(), visible=False)
############################################################################
ax5 = fig.add_subplot(515, sharex=ax1)
plt.plot(Bt, Bx, color=cbx, lw=2)
if title5 is not None:
plt.title(title5)
if ylabel5 is not None:
plt.ylabel(ylabel5)
if ylim5 is not None:
ax5.set_ylim(ylim5)
if ytick5 is not None:
ax5.set_yticks(ytick5)
############################################################################
if xlim is not None:
ax1.set_xlim(xlim)
if xlabel is not None:
plt.xlabel(xlabel)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
#
p1 = ax1.get_position()
pos1 = p1.get_points()
pos1[1][0] = 0.86
p1.set_points(pos1)
ax1.set_position(p1)
# find current position [x,y,width,height]
p1 = ax1.get_position()
p2 = ax2.get_position()
p3 = ax3.get_position()
p4 = ax4.get_position()
p5 = ax5.get_position()
pos1 = p1.get_points()
pos2 = p2.get_points()
pos3 = p3.get_points()
pos4 = p4.get_points()
pos5 = p5.get_points()
# set width of second axes equal to first
pos2[1][0] = pos1[1][0]
p2.set_points(pos2)
ax2.set_position(p2)
pos3[1][0] = pos1[1][0]
p3.set_points(pos3)
ax3.set_position(p3)
pos4[1][0] = pos1[1][0]
p4.set_points(pos4)
ax4.set_position(p4)
pos5[1][0] = pos1[1][0]
p5.set_points(pos5)
ax5.set_position(p5)
if pcolorbar is not None:
if cbartick is not None:
p1 = ax1.get_position()
pos1 = p1.get_points()
cbaxes = fig.add_axes([0.88, pos1[0][1], 0.01, pos1[1][1]-pos1[0][1]])
cbar = plt.colorbar(ticks=cbartick, cax = cbaxes)
cbar.ax.set_ylabel(cbartitle)
else:
plt.colorbar()
def plotimf(t, Bx, By, Bz, AE, xlabel=None, ylabel1=None, ylabel2=None, xlim=None,
ylim1=None, ylim2=None, legend=None, lw=1, ytick1=None, ytick2=None,
xtick=None, obstimes=None, centerline=None):
"""
    Sebastijan Mrak
    Plot IMF components (Bx, By, Bz) above the AE index on a shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure(figsize=(15,5))
plt.rc('axes', labelsize=18)
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
ax1=fig.add_subplot(211)
plt.plot(t, Bx, 'b', lw=lw, label='Bx')
plt.plot(t, By, 'm', lw=lw, label='By')
plt.plot(t, Bz, 'r', lw=lw, label='Bz')
if centerline is True:
plt.plot([t[0], t[-1]], [0,0], 'k', lw=lw)
if legend is not None:
ax1.legend(loc=2, bbox_to_anchor=(1.01, 0,0, 1), prop={'size':12},
fancybox=True)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
if ytick1 is not None:
ax1.set_yticks(ytick1)
if obstimes is not None:
yrange = ax1.get_ylim()
plt.plot([obstimes[0],obstimes[0]], [yrange[0], yrange[1]], '--k', lw=1)
plt.plot([obstimes[-1],obstimes[-1]], [yrange[0], yrange[1]], '--k', lw=1)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2=fig.add_subplot(212, sharex=ax1)
plt.plot(t, AE, color='b', lw=lw)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
if ytick2 is not None:
ax2.set_yticks(ytick2)
if obstimes is not None:
yrange = ax2.get_ylim()
plt.plot([obstimes[0],obstimes[0]], [yrange[0], yrange[1]], '--k', lw=1)
plt.plot([obstimes[-1],obstimes[-1]], [yrange[0], yrange[1]], '--k', lw=1)
if xlabel is not None:
plt.xlabel(xlabel)
if xlim is not None:
ax1.set_xlim(xlim)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
def plotimfExtended(t, Bx, By, Bz, AE=None, v=None, xlabel=None, ylabel1=None,
ylabel2=None, ylabel3=None, xlim=None,
ylim1=None, ylim2=None, ylim3=None, legend=None, lw=1, ytick1=None,
ytick2=None, ytick3=None, xtick=None, obstimes=None, centerline=None):
"""
    Sebastijan Mrak
    Plot IMF components (Bx, By, Bz), the AE index and a third series `v` on a
    shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
plt.rc('axes', labelsize=18)
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
ax1=fig.add_subplot(311)
plt.plot(t, Bx, 'b', lw=lw, label='Bx')
plt.plot(t, By, 'm', lw=lw, label='By')
plt.plot(t, Bz, 'r', lw=lw, label='Bz')
if centerline is True:
plt.plot([t[0], t[-1]], [0,0], 'k', lw=lw)
if legend is not None:
ax1.legend(loc=2, bbox_to_anchor=(1.01, 0,0, 1), prop={'size':12},
fancybox=True)
if ylabel1 is not None:
plt.ylabel(ylabel1)
if ylim1 is not None:
ax1.set_ylim(ylim1)
if ytick1 is not None:
ax1.set_yticks(ytick1)
if obstimes is not None:
yrange = ax1.get_ylim()
plt.plot([obstimes[0],obstimes[0]], [yrange[0], yrange[1]], '--k', lw=1)
plt.plot([obstimes[-1],obstimes[-1]], [yrange[0], yrange[1]], '--k', lw=1)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2=fig.add_subplot(312, sharex=ax1)
plt.plot(t, AE, color='b', lw=lw)
if ylabel2 is not None:
plt.ylabel(ylabel2)
if ylim2 is not None:
ax2.set_ylim(ylim2)
if ytick2 is not None:
ax2.set_yticks(ytick2)
if obstimes is not None:
yrange = ax2.get_ylim()
plt.plot([obstimes[0],obstimes[0]], [yrange[0], yrange[1]], '--k', lw=1)
plt.plot([obstimes[-1],obstimes[-1]], [yrange[0], yrange[1]], '--k', lw=1)
plt.setp(ax2.get_xticklabels(), visible=False)
ax3=fig.add_subplot(313, sharex=ax1)
plt.plot(t, v, color='b', lw=lw)
if ylabel3 is not None:
plt.ylabel(ylabel3)
if ylim3 is not None:
ax3.set_ylim(ylim3)
if ytick3 is not None:
ax3.set_yticks(ytick3)
if xlabel is not None:
plt.xlabel(xlabel)
if xlim is not None:
ax1.set_xlim(xlim)
ax1.xaxis.set(major_formatter=formatter)
fig.tight_layout()
fig.subplots_adjust(hspace = .01)
plt.show()
def plotMagnetometer(t, y1, y2, ylabel=None, xlabel='UT', xlim=None, ylim=None,
colorx='b', colory='k', colorz='m', zeroline=False):
"""
Sebastijan Mrak
    Plot two magnetometer components on a shared time axis.
"""
formatter = mdates.DateFormatter('%H:%M')
fig = plt.figure()
plt.rc('axes', labelsize=18)
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
ax1=fig.add_subplot(111)
ax1.plot(t, y1[1][:], colorx)
ax1.plot(t, y2[0][:], colory)
    if ylabel is not None:
        ax1.set_ylabel(ylabel)
    if ylim is not None:
        ax1.set_ylim(ylim)
    if xlim is not None:
        ax1.set_xlim(xlim)
    ax1.xaxis.set(major_formatter=formatter)
    ax1.set_xlabel(xlabel)
    plt.show()
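# --- Usage sketch (not part of the original module) ---------------------------
# plotMagnetometer indexes y1[1][:] and y2[0][:], so it expects 2-D arrays
# (component x time). Data and the nT unit label below are illustrative only.
def _example_magnetometer():
    import datetime
    t = [datetime.datetime(2016, 11, 26) + datetime.timedelta(minutes=m)
         for m in range(120)]
    b1 = np.random.randn(3, 120).cumsum(axis=1)   # synthetic components
    b2 = np.random.randn(3, 120).cumsum(axis=1)
    plotMagnetometer(t, b1, b2, ylabel='B [nT]')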
|
{"hexsha": "4be8c5c59f220f6b74e6f1ec422394d473047da4", "size": 43989, "ext": "py", "lang": "Python", "max_stars_repo_path": "gsit/plotting.py", "max_stars_repo_name": "aldebaran1/gsit", "max_stars_repo_head_hexsha": "d4309799d0d7bc0d670a34e8983c6ac0eb17569b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-09-06T05:07:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T07:35:00.000Z", "max_issues_repo_path": "gsit/plotting.py", "max_issues_repo_name": "aldebaran1/gsit", "max_issues_repo_head_hexsha": "d4309799d0d7bc0d670a34e8983c6ac0eb17569b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-31T18:42:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-31T18:42:36.000Z", "max_forks_repo_path": "gsit/plotting.py", "max_forks_repo_name": "aldebaran1/gsit", "max_forks_repo_head_hexsha": "d4309799d0d7bc0d670a34e8983c6ac0eb17569b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-09-25T16:12:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T07:35:06.000Z", "avg_line_length": 35.7925142392, "max_line_length": 99, "alphanum_fraction": 0.5690740867, "include": true, "reason": "import numpy", "num_tokens": 13244}
|
"""DataFrame-level subtraction operations.
Subtract one set of regions from another, returning the one-way difference.
The functions here operate on pandas DataFrame and Series instances, not
GenomicArray types.
"""
from __future__ import print_function, absolute_import, division
import logging
import numpy as np
import pandas as pd
from .intersect import by_ranges
def subtract(table, other):
    """Remove the regions in `other` from `table` (one-way difference)."""
    if not len(other):
        return table
return pd.DataFrame.from_records(_subtraction(table, other),
columns=table.columns)
def _subtraction(table, other):
for keeper, rows_to_exclude in by_ranges(other, table, 'outer', True):
if len(rows_to_exclude):
logging.debug(" %s:%d-%d : Subtracting %d excluded regions",
keeper.chromosome, keeper.start, keeper.end,
len(rows_to_exclude))
keep_left = (keeper.start < rows_to_exclude.start.iat[0])
keep_right = (keeper.end > rows_to_exclude.end.iat[-1])
if keep_left and keep_right:
# Keep both original edges of the source region
# =========
# -- --
starts = np.r_[keeper.start, rows_to_exclude.end.values]
ends = np.r_[rows_to_exclude.start.values, keeper.end]
elif keep_left:
# Exclusion overlaps only the right side
# =======
# -- ---
starts = np.r_[keeper.start, rows_to_exclude.end.values[:-1]]
ends = rows_to_exclude.start.values
elif keep_right:
# Exclusion overlaps only the left side
# ========
# --- --
starts = rows_to_exclude.end.values
ends = np.r_[rows_to_exclude.start.values[1:], keeper.end]
elif len(rows_to_exclude) > 1:
# Exclusions overlap both edges
# ======
# -- -- ---
starts = rows_to_exclude.end.values[:-1]
ends = rows_to_exclude.start.values[1:]
else:
# Exclusion covers the whole region
continue
for start, end in zip(starts, ends):
if end > start:
yield keeper._replace(start=start, end=end)
else:
logging.debug("Discarding pair: (%d, %d)", start, end)
else:
logging.debug(" %s:%d-%d : No excluded regions",
keeper.chromosome, keeper.start, keeper.end)
yield keeper
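# --- Usage sketch (not part of the original module) ---------------------------
# Hypothetical coordinates: remove two small exclusion regions from one keeper
# region. Given the interval logic above, the expected one-way difference is
# chr1:100-200, chr1:250-350 and chr1:400-500.
def _example_subtract():
    table = pd.DataFrame({'chromosome': ['chr1'], 'start': [100], 'end': [500]})
    other = pd.DataFrame({'chromosome': ['chr1', 'chr1'],
                          'start': [200, 350], 'end': [250, 400]})
    return subtract(table, other)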
|
{"hexsha": "0a14ef3aaf4d301ec1991ad9f63f83fe7f0be726", "size": 2652, "ext": "py", "lang": "Python", "max_stars_repo_path": "skgenome/subtract.py", "max_stars_repo_name": "jeremy9959/cnvkit", "max_stars_repo_head_hexsha": "b839a2b323113a7d318d216f61a0ed6657c70ed4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skgenome/subtract.py", "max_issues_repo_name": "jeremy9959/cnvkit", "max_issues_repo_head_hexsha": "b839a2b323113a7d318d216f61a0ed6657c70ed4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skgenome/subtract.py", "max_forks_repo_name": "jeremy9959/cnvkit", "max_forks_repo_head_hexsha": "b839a2b323113a7d318d216f61a0ed6657c70ed4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3287671233, "max_line_length": 77, "alphanum_fraction": 0.5418552036, "include": true, "reason": "import numpy", "num_tokens": 520}
|
from pathlib import Path
import numpy as np
from skimage.draw import disk
from skimage.io import imsave
from PIL import Image
from tqdm import tqdm
import tables
from skimage.transform import rescale, resize, downscale_local_mean
import sys
sys.path.append("../")
from derive_dataset import get_max_r2
def generate_supertune_sequence(f, nframes=10):
# Nominal framerate
fps = 30
sz = 448
    ds = 4  # Downsampling factor.
# The paper says .1 degrees diameter. However, I measured .4 degrees in a video from
# that era.
    ndots = 400
    lifetime = 6  # In frames.
gridx = f.get_node("/gridx_st")[:]
dx = gridx.max() - gridx.min()
    disk_sz_deg = 0.4 / 2 * (dx / 48)  # dot radius, in degrees
disk_sz = int(sz * disk_sz_deg / dx + 1) # round up
assert disk_sz > 0
Y_st = f.get_node("/Y_st")[:].T
Yall_st = f.get_node("/Yall_st")[:].T
fields = f.get_node("/X_st")[:]
assert Y_st.shape[0] == 216
field_side = int(np.sqrt(fields.shape[0] // 2).item())
Ms = []
    for fi in range(fields.shape[1]):  # avoid shadowing the open file handle `f`
        stim = fields[:, fi].reshape((2, field_side, field_side)).transpose((2, 1, 0))
# Upscale.
xv = np.array(Image.fromarray(stim[:, :, 0]).resize((sz, sz), Image.NEAREST))
yv = -np.array(Image.fromarray(stim[:, :, 1]).resize((sz, sz), Image.NEAREST))
mask = abs(xv) + abs(yv) > 0
xrg = np.where((abs(xv) + abs(yv)).sum(axis=0))[0]
yrg = np.where((abs(xv) + abs(yv)).sum(axis=1))[0]
# print(xrg.min(), yrg.min())
star_pos = np.random.uniform(size=(ndots, 2))
star_pos[:, 0] = xrg.min() + (xrg.max() - xrg.min()) * star_pos[:, 0]
star_pos[:, 1] = yrg.min() + (yrg.max() - yrg.min()) * star_pos[:, 1]
ims = []
for i in range(nframes):
speed_mult = (xv.shape[0] / dx) / fps # pixels / degrees / frame * s
# print(star_pos[:, 0].astype(np.int))
ypos = star_pos[:, 1].copy()
xpos = star_pos[:, 0].copy()
star_pos[:, 0] += (
                speed_mult * xv[ypos.astype(int) % sz, xpos.astype(int) % sz]
)
star_pos[:, 1] += (
                speed_mult * yv[ypos.astype(int) % sz, xpos.astype(int) % sz]
)
# Now render the field.
img = np.zeros((sz, sz), dtype=np.uint8)
rr, cc = disk(
(img.shape[0] // 2, img.shape[1] // 2), disk_sz, shape=img.shape
)
rr = rr - img.shape[0] // 2
cc = cc - img.shape[1] // 2
dotr = (star_pos[:, 1].reshape((-1, 1)) + rr.reshape((1, -1))).ravel()
dotc = (star_pos[:, 0].reshape((-1, 1)) + cc.reshape((1, -1))).ravel()
validx = (
(dotr >= 0)
& (dotr < img.shape[0])
& (dotc >= 0)
& (dotc < img.shape[1])
)
            dotr = dotr[validx].astype(int)
            dotc = dotc[validx].astype(int)
img = np.zeros((sz, sz), dtype=np.uint8)
img[dotr, dotc] = 255
img = img * mask
img = (
img.reshape((sz // ds, ds, sz // ds, ds))
.mean(axis=3)
.mean(axis=1)
.astype(np.uint8)
)
ims.append(img)
# imsave(f"figures/seq_{f:03}_{(i):02}.png", img)
star_pos_ = np.random.uniform(size=(ndots // lifetime, 2))
idx = (
np.arange(i * (ndots // lifetime), (i + 1) * (ndots // lifetime))
% ndots
)
star_pos[idx, 0] = xrg.min() + (xrg.max() - xrg.min()) * star_pos_[:, 0]
star_pos[idx, 1] = yrg.min() + (yrg.max() - yrg.min()) * star_pos_[:, 1]
M = np.stack(ims, axis=0)
Ms.append(M)
M = np.concatenate(Ms, axis=0)
Xidx = nframes * np.arange(216).reshape((-1, 1)) + np.arange(nframes).reshape(
(1, -1)
)
assert len(np.unique(Xidx)) == Xidx.size
assert Xidx.size == M.shape[0]
return M, Xidx, Y_st, Yall_st
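# --- Illustrative sketch (not part of the original pipeline) -------------------
# The per-frame update used in both generators, reduced to one function: dots
# advance by speed_mult * flow (pixels per frame) and 1/lifetime of them respawn
# each frame. Parameter values are assumptions for illustration; the real code
# respawns only inside the stimulus bounding box.
def _advect_dots(star_pos, xv, yv, frame, sz=448, dx=48.0, fps=30,
                 ndots=400, lifetime=6):
    speed_mult = (sz / dx) / fps                  # pixels per (deg/s) per frame
    xi = star_pos[:, 0].astype(int) % sz
    yi = star_pos[:, 1].astype(int) % sz
    star_pos[:, 0] += speed_mult * xv[yi, xi]
    star_pos[:, 1] += speed_mult * yv[yi, xi]
    respawn = np.arange(frame * (ndots // lifetime),
                        (frame + 1) * (ndots // lifetime)) % ndots
    star_pos[respawn] = np.random.uniform(0, sz, size=(len(respawn), 2))
    return star_pos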
def generate_hyperflow_sequence(f):
# Nominal framerate
fps = 30
sz = 448
    ds = 4  # Downsampling factor.
# The paper says .1 degrees diameter. However, I measured .4 degrees in a video from
# that era.
    lifetime = 6  # In frames.
stimidx = f.get_node("/stimidx_hf")[:] - 1
gridx = f.get_node("/gridx_hf")[:]
Y = f.get_node("/Y_hf")[:]
dx = gridx.max() - gridx.min()
    # It's unclear from the documentation whether it was the number of dots or
# the dot size which was changed when the stimulus was made smaller. Assume
# it's a smaller dot.
ndots = 400
    disk_sz_deg = 0.4 / 2 * (dx / 48)  # dot radius, in degrees
disk_sz = int(sz * disk_sz_deg / dx + 1) # round up
assert disk_sz > 0
fields = f.get_node("/stim_hf")[:].squeeze()
field_side = int(np.sqrt(fields.shape[0] // 2).item())
ims = []
for frame in range(fields.shape[-1]):
stim = (
fields[:, frame].reshape((2, field_side, field_side)).transpose((2, 1, 0))
)
# Upscale.
xv = np.array(Image.fromarray(stim[:, :, 0]).resize((sz, sz), Image.BILINEAR))
yv = -np.array(Image.fromarray(stim[:, :, 1]).resize((sz, sz), Image.BILINEAR))
# Apply a mask.
mask = np.array(
Image.fromarray(
(255 * (abs(stim[:, :, 0]) + abs(stim[:, :, 1]) > 0).astype(np.uint8))
).resize((sz, sz), Image.BILINEAR)
)
# Apply a mask. Note the inherent mask smoothing.
mask = mask > 64
# Apply a mask.
vidx, hidx = np.nonzero(mask)
if frame == 0:
# Sample positions with replacement.
if len(vidx) > 0:
dot_idx = np.random.randint(0, len(vidx), ndots)
assert len(dot_idx) == ndots
                star_pos = np.zeros((ndots, 2), dtype=float)
star_pos[:, 0] = hidx[dot_idx]
star_pos[:, 1] = vidx[dot_idx]
else:
# Occasionally, the first frame may be empty.
                star_pos = np.zeros((ndots, 2), dtype=float)
else:
# Replace expired dots.
replace_idx = (
np.arange(
frame * (ndots // lifetime), (frame + 1) * (ndots // lifetime)
)
% ndots
)
if len(vidx) > 0:
# The stimulus can be wholly off-screen.
dot_idx = np.random.randint(0, len(vidx), len(replace_idx))
assert len(dot_idx) == len(replace_idx)
star_pos[replace_idx, 0] = hidx[dot_idx]
star_pos[replace_idx, 1] = vidx[dot_idx]
# Advance the stars.
speed_mult = (xv.shape[0] / dx) / fps # pixels / degrees / frame * s
# xv is in degrees / s
# and star_pos is in pixels
# hence speed_mult is in s / degrees * pixels
ypos = star_pos[:, 1].copy()
xpos = star_pos[:, 0].copy()
star_pos[:, 0] += (
            speed_mult * xv[ypos.astype(int) % sz, xpos.astype(int) % sz]
)
star_pos[:, 1] += (
            speed_mult * yv[ypos.astype(int) % sz, xpos.astype(int) % sz]
)
# Now render the field.
img = np.zeros((sz, sz), dtype=np.uint8)
rr, cc = disk((img.shape[0] // 2, img.shape[1] // 2), disk_sz, shape=img.shape)
rr = rr - img.shape[0] // 2
cc = cc - img.shape[1] // 2
dotr = (star_pos[:, 1].reshape((-1, 1)) + rr.reshape((1, -1))).ravel()
dotc = (star_pos[:, 0].reshape((-1, 1)) + cc.reshape((1, -1))).ravel()
validx = (
(dotr >= 0) & (dotr < img.shape[0]) & (dotc >= 0) & (dotc < img.shape[1])
)
        dotr = dotr[validx].astype(int)
        dotc = dotc[validx].astype(int)
img = np.zeros((sz, sz), dtype=np.uint8)
img[dotr, dotc] = 255
img = img * mask
# ds-fold antialiasing.
img = (
img.reshape((sz // ds, ds, sz // ds, ds))
.mean(axis=3)
.mean(axis=1)
.astype(np.uint8)
)
ims.append(img)
# imsave(f"figures/hf/seq_{(frame):05}.png", img)
M = np.stack(ims, axis=0)
# M = np.stack([M, M, M], axis=1)
assert M.shape == (fields.shape[-1], sz // ds, sz // ds)
return (M, stimidx.T, Y)
def generate_matched_sequence(f, stem):
X_st, Xidx_st, Y_st, Yall_st = generate_supertune_sequence(f)
X_hf, Xidx_hf, Y_hf = generate_hyperflow_sequence(f)
assert Xidx_st.shape[1] == Xidx_hf.shape[1]
assert Xidx_st[0, 1] > Xidx_st[0, 0]
assert Xidx_hf[0, 1] > Xidx_hf[0, 0]
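    # Signal-power estimate across repeats (Yall_st has shape repeats x time):
    # N * var(repeat mean) minus the mean within-repeat variance, scaled by
    # 1/(N-1); cf. the Sahani & Linden style signal-power estimator.
    # corr_multiplier then rescales correlations from total response power to
    # the estimated explainable signal power.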
signal_power = (
1
/ (Yall_st.shape[0] - 1)
* (Yall_st.shape[0] * Yall_st.mean(0).var() - Yall_st.var(1).mean())
)
response_power = Yall_st.mean(0).var()
corr_multiplier = np.sqrt(response_power / signal_power)
fout = tables.open_file(f"/mnt/e/data_derived/packlab-mst/{stem}.h5", "w")
fout.create_array("/", "X_traintune", obj=X_hf)
fout.create_array("/", "Xidx_traintune", obj=Xidx_hf)
fout.create_array("/", "Y_traintune", obj=Y_hf)
fout.create_array("/", "X_report", obj=X_st)
fout.create_array("/", "Xidx_report", obj=Xidx_st)
fout.create_array("/", "Y_report", obj=Y_st)
fout.create_array("/", "Yall_report", obj=Y_st)
fout.create_array("/", "corr_multiplier", obj=corr_multiplier)
fout.close()
def generate_matched_sequences():
files = Path("/mnt/e/data_derived/packlab-mst/").glob("*.mat")
files = sorted(files)
for filename in tqdm(files):
f = tables.open_file(filename)
if f.get_node("/stmatcheshf")[:]:
generate_matched_sequence(f, filename.stem)
f.close()
if __name__ == "__main__":
generate_matched_sequences()
|
{"hexsha": "de8c431c401ba06d4babaeae4d87cee7c7ed5a9f", "size": 9972, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/generate_packlab_mst_hyperflow.py", "max_stars_repo_name": "patrickmineault/brain-scorer", "max_stars_repo_head_hexsha": "5e882bafb323ff58028ade2394d18176e6c02e80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-07-22T02:19:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T15:07:35.000Z", "max_issues_repo_path": "scripts/generate_packlab_mst_hyperflow.py", "max_issues_repo_name": "patrickmineault/your-head-is-there-to-move-you-around", "max_issues_repo_head_hexsha": "5e882bafb323ff58028ade2394d18176e6c02e80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/generate_packlab_mst_hyperflow.py", "max_forks_repo_name": "patrickmineault/your-head-is-there-to-move-you-around", "max_forks_repo_head_hexsha": "5e882bafb323ff58028ade2394d18176e6c02e80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-22T02:27:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T02:08:42.000Z", "avg_line_length": 31.6571428571, "max_line_length": 88, "alphanum_fraction": 0.523465704, "include": true, "reason": "import numpy", "num_tokens": 2951}
|
### Julia OpenStreetMapX Package ###
### MIT License ###
### Copyright 2014 ###
### Default Speed Limits in Kilometers Per Hour ###
const SPEED_ROADS_URBAN = Dict{Int,Float64}(
1 => 100, # Motorway
2 => 90, # Trunk
3 => 90, # Primary
4 => 70, # Secondary
5 => 50, # Tertiary
6 => 40, # Residential/Unclassified
7 => 20, # Service
8 => 10) # Living street
const SPEED_ROADS_RURAL = Dict{Int,Float64}(
    1 => 100,    # Motorway
    2 => 90,     # Trunk
    3 => 90,     # Primary
    4 => 70,     # Secondary
    5 => 50,     # Tertiary
    6 => 40,     # Residential/Unclassified
    7 => 40,     # Service
    8 => 30)     # Living street
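### Usage sketch (illustrative, not part of the original file): look up a
### default speed for a road class, with an assumed 50 km/h fallback.
function default_speed(roadclass::Int; urban::Bool = true)
    table = urban ? SPEED_ROADS_URBAN : SPEED_ROADS_RURAL
    return get(table, roadclass, 50.0)
end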
|
{"hexsha": "37d07d703dab1158f079df5b74d94dd5a7d1d956", "size": 594, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/speeds.jl", "max_stars_repo_name": "arash-dehghan/OpenStreetMapX.jl", "max_stars_repo_head_hexsha": "179251a5cfa4a62c123dbf793674c0374a07f841", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/speeds.jl", "max_issues_repo_name": "arash-dehghan/OpenStreetMapX.jl", "max_issues_repo_head_hexsha": "179251a5cfa4a62c123dbf793674c0374a07f841", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/speeds.jl", "max_forks_repo_name": "arash-dehghan/OpenStreetMapX.jl", "max_forks_repo_head_hexsha": "179251a5cfa4a62c123dbf793674c0374a07f841", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.76, "max_line_length": 51, "alphanum_fraction": 0.4831649832, "num_tokens": 215}
|
# Autogenerated wrapper script for xrootdgo_jll for x86_64-w64-mingw32
export xrootdgo
JLLWrappers.@generate_wrapper_header("xrootdgo")
JLLWrappers.@declare_library_product(xrootdgo, "xrootdgo.dll")
function __init__()
JLLWrappers.@generate_init_header()
JLLWrappers.@init_library_product(
xrootdgo,
"bin\\xrootdgo.dll",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
{"hexsha": "544439dcadd6a5b954e4d802bb811c1910705ee4", "size": 446, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/x86_64-w64-mingw32.jl", "max_stars_repo_name": "JuliaBinaryWrappers/xrootdgo_jll.jl", "max_stars_repo_head_hexsha": "3d6747353e4c39f3dc30d32905fa9b5658bbd26f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/x86_64-w64-mingw32.jl", "max_issues_repo_name": "JuliaBinaryWrappers/xrootdgo_jll.jl", "max_issues_repo_head_hexsha": "3d6747353e4c39f3dc30d32905fa9b5658bbd26f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/x86_64-w64-mingw32.jl", "max_forks_repo_name": "JuliaBinaryWrappers/xrootdgo_jll.jl", "max_forks_repo_head_hexsha": "3d6747353e4c39f3dc30d32905fa9b5658bbd26f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.875, "max_line_length": 70, "alphanum_fraction": 0.7466367713, "num_tokens": 134}
|
import json
import os
import glob
import random
from typing import Union
try:
import xarray as xr
except ModuleNotFoundError:
xr = None
import numpy as np
import pandas as pd
from .datasets import Datasets
from .utils import check_attributes, download, sanity_check
from ai4water.utils.utils import dateandtime_now
try:  # shapely may not be installed, as it may be difficult to install and is only needed for plotting data.
from ai4water.pre_processing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
raise ValueError(f"Dwonlaoad the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
    This class first downloads the CAMELS dataset if it is not already downloaded.
    Then the selected attributes for the selected station ids are fetched and
    provided to the user using the method `fetch`.
Attributes
-----------
    - ds_dir str/path: directory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which kinds of static attributes
are present in this category.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
    - fetch : fetches all attributes (both static and dynamic type) of all
        station/gauge_ids or a specified station. It can also be used to
        fetch all attributes of a number of station ids, either by providing
        their gauge_ids or by just saying that we need data of 20 stations,
        which will then be chosen randomly.
    - fetch_dynamic_features :
        fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
raise NotImplementedError
def fetch_static_features(self, station, features):
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self)->list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will under
datasets directory"""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe:bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
        stations : if string, it is supposed to be a station name/gauge_id.
            If list, it will be a list of station/gauge_ids. If int, it will
            be supposed that the user wants data for this number of
            stations/gauge_ids. If None (default), then attributes of all
            available stations are fetched. If float, it will be supposed
            that the user wants data of this fraction of stations.
        dynamic_features : If not None, then it is the attributes to be
            fetched. If None, then all available attributes are fetched.
        static_features : list of static attributes to be fetched. None
            means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are obtained then it returns a
dictionary whose keys are station/gauge_ids and values are the
attributes and dataframes.
Otherwise either dynamic or static features are returned.
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
raise ModuleNotFoundError("modeule xarray must be installed to use `datasets` module")
return self.fetch_stations_attributes(stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs)
def _maybe_to_netcdf(self, fname:str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe:bool = False,
**kwargs):
"""Reads attributes of more than one stations.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
            as_dataframe : whether to return the data as pandas dataframe; by
                default an xr.Dataset object is returned
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
are by default returned as xr.Dataset unless `as_dataframe` is True, in
such a case, it is a pandas dataframe with multiindex. If xr.Dataset,
it consists of `data_vars` equal to number of stations and for each
station, the `DataArray` is of dimensions (time, dynamic_features).
where `time` is defined by `st` and `en` i.e length of `DataArray`.
            In case, when the returned object is pandas DataFrame, the first index
            is `time` and the second index is `dynamic_features`. Static attributes
            are always returned as pandas DataFrame and have the following shape
            `(stations, static_features)`. If `dynamic_features` is None,
then they are not returned and the returned value only consists of
static features. Same holds true for `static_features`.
If both are not None, then the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
            dyn = xr.load_dataset(self.dyn_fname)  # dataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError
return stns
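    # Illustrative return shapes for the method above (a sketch):
    #
    #   out = dataset.fetch_stations_attributes(stns, 'all', 'all')
    #   out['dynamic']  # xr.Dataset (or pd.DataFrame if as_dataframe=True)
    #   out['static']   # pd.DataFrame of shape (stations, static_features)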
def fetch_dynamic_features(self,
stn_id,
attributes='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches all or selected dynamic attributes of one station."""
assert isinstance(stn_id, str)
station = [stn_id]
return self.fetch_stations_attributes(station,
attributes,
None,
st=st,
en=en,
as_dataframe=as_dataframe)
def fetch_station_attributes(self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs) -> pd.DataFrame:
"""
Fetches attributes for one station.
Arguments:
station : station id/gauge id for which the data is to be fetched.
dynamic_features
static_features
            as_ts : whether static attributes are to be converted into a time
                series or not. If yes then the returned time series will be of
                same length as that of dynamic attributes.
            st : starting point from which the data is to be fetched. By default
                the data will be fetched from where it is available.
            en : end point of data to be fetched. By default the data will be
                fetched till the end of the available data.
Return:
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
"""
st, en = self._check_length(st, en)
station_df = pd.DataFrame()
if dynamic_features:
dynamic = self.fetch_dynamic_features(station, dynamic_features, st=st,
en=en, **kwargs)
station_df = pd.concat([station_df, dynamic])
if static_features is not None:
static = self.fetch_static_features(station, static_features)
if as_ts:
station_df = pd.concat([station_df, static], axis=1)
else:
station_df ={'dynamic': station_df, 'static': static}
elif static_features is not None:
station_df = self.fetch_static_features(station, static_features)
return station_df
class LamaH(Camels):
"""
Large-Sample Data for Hydrology and Environmental Sciences for Central Europe
from url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
paper: https://essd.copernicus.org/preprints/essd-2021-72/
"""
url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
_data_types = ['total_upstrm', 'diff_upstrm_all', 'diff_upstrm_lowimp'
]
time_steps = ['daily', 'hourly'
]
static_attribute_categories = ['']
def __init__(self, *,
time_step: str,
data_type: str,
**kwargs
):
"""
Arguments:
time_step : possible values are `daily` or `hourly`
data_type : possible values are `total_upstrm`, `diff_upstrm_all`
or 'diff_upstrm_lowimp'
"""
assert time_step in self.time_steps, f"invalid time_step {time_step} given"
assert data_type in self._data_types, f"invalid data_type {data_type} given."
self.time_step = time_step
self.data_type = data_type
super().__init__(**kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'lamah_diff_upstrm_lowimp_hourly_dyn.nc')
_data_types = self._data_types if self.time_step == 'daily' else ['total_upstrm']
if not os.path.exists(fpath):
for dt in _data_types:
for ts in self.time_steps:
self.time_step = ts
self.data_type = dt
fname = f"lamah_{dt}_{ts}_dyn"
self._maybe_to_netcdf(fname)
self.time_step = time_step
self.data_type = data_type
self.dyn_fname = os.path.join(self.ds_dir, f'lamah_{data_type}_{time_step}_dyn.nc')
@property
def dynamic_features(self):
station = self.stations()[0]
df = self.read_ts_of_station(station)
return df.columns.to_list()
@property
def static_features(self) -> list:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
return df.columns.to_list()
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def data_type_dir(self):
directory = 'CAMELS_AT'
if self.time_step == 'hourly':
directory = 'CAMELS_AT1' # todo, use it only for hourly, daily is causing errors
# self.ds_dir/CAMELS_AT/data_type_dir
f = [f for f in os.listdir(os.path.join(self.ds_dir, directory)) if self.data_type in f][0]
return os.path.join(self.ds_dir, f'{directory}{SEP}{f}')
def stations(self)->list:
# assuming file_names of the format ID_{stn_id}.csv
_dirs = os.listdir(os.path.join(self.data_type_dir, f'2_timeseries{SEP}{self.time_step}'))
s = [f.split('_')[1].split('.csv')[0] for f in _dirs]
return s
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
"""Reads attributes of one station"""
stations_attributes = {}
for station in stations:
station_df = pd.DataFrame()
if dynamic_features is not None:
dynamic_df = self.read_ts_of_station(station)
station_df = pd.concat([station_df, dynamic_df])
stations_attributes[station] = station_df
return stations_attributes
def fetch_static_features(self,
station:Union[str, list],
features=None
)->pd.DataFrame:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
        static_features = check_attributes(features, self.static_features)
df = df[static_features]
if isinstance(station, list):
stations = [str(i) for i in station]
elif isinstance(station, int):
stations = str(station)
else:
stations = station
df.index = df.index.astype(str)
df = df.loc[stations]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
def read_ts_of_station(self, station) -> pd.DataFrame:
# read a file containing timeseries data for one station
fname = os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}{SEP}ID_{station}.csv')
df = pd.read_csv(fname, sep=';')
if self.time_step == 'daily':
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], freq="D")
df.index = periods.to_timestamp()
else:
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], hour=df["hh"], minute=df["mm"], freq="H")
df.index = periods.to_timestamp()
# remove the cols specifying index
[df.pop(item) for item in ['YYYY', 'MM', 'DD', 'hh', 'mm'] if item in df]
return df
@property
def start(self):
return "19810101"
@property
def end(self):
return "20191231"
class HYSETS(Camels):
"""
database for hydrometeorological modeling of 14,425 North American watersheds
from 1950-2018 following the work of
[Arsenault et al., 2020](https://doi.org/10.1038/s41597-020-00583-2)
The user must manually download the files, unpack them and provide
the `path` where these files are saved.
    This data comes from multiple sources; each source has one or more
    dynamic_features. The following data sources are available.
|sources | dynamic_features |
|---------------|------------------|
    |SNODAS_SWE     | discharge, swe|
|SCDNA | discharge, pr, tasmin, tasmax|
|nonQC_stations | discharge, pr, tasmin, tasmax|
|Livneh | discharge, pr, tasmin, tasmax|
|ERA5 | discharge, pr, tasmax, tasmin|
    |ERA5Land_SWE   | discharge, swe|
|ERA5Land | discharge, pr, tasmax, tasmin|
    All sources contain one or more of the following variables,
    with the following shapes:
|dynamic_features | shape |
|----------------------------|------------|
|time | (25202,) |
|watershedID | (14425,) |
|drainage_area | (14425,) |
|drainage_area_GSIM | (14425,) |
|flag_GSIM_boundaries | (14425,) |
|flag_artificial_boundaries | (14425,) |
|centroid_lat | (14425,) |
|centroid_lon | (14425,) |
|elevation | (14425,) |
|slope | (14425,) |
|discharge | (14425, 25202) |
|pr | (14425, 25202) |
|tasmax | (14425, 25202) |
|tasmin | (14425, 25202) |
"""
doi = "https://doi.org/10.1038/s41597-020-00583-2"
url = "https://osf.io/rpc3w/"
Q_SRC = ['ERA5', 'ERA5Land', 'ERA5Land_SWE', 'Livneh', 'nonQC_stations', 'SCDNA', 'SNODAS_SWE']
SWE_SRC = ['ERA5Land_SWE', 'SNODAS_SWE']
OTHER_SRC = [src for src in Q_SRC if src not in ['ERA5Land_SWE', 'SNODAS_SWE']]
dynamic_features = ['discharge', 'swe', 'tasmin', 'tasmax', 'pr']
def __init__(self,
path:str,
swe_source:str = "SNODAS_SWE",
discharge_source: str = "ERA5",
tasmin_source: str = "ERA5",
tasmax_source: str = "ERA5",
pr_source: str = "ERA5",
**kwargs
):
"""
Arguments:
path : path where all the data files are saved.
swe_source : source of swe data.
discharge_source : source of discharge data
tasmin_source : source of tasmin data
tasmax_source : source of tasmax data
pr_source : source of pr data
kwargs : arguments for `Camels` base class
"""
assert swe_source in self.SWE_SRC, f'source must be one of {self.SWE_SRC}'
assert discharge_source in self.Q_SRC, f'source must be one of {self.Q_SRC}'
assert tasmin_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert tasmax_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert pr_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
self.sources = {
'swe': swe_source,
'discharge': discharge_source,
'tasmin': tasmin_source,
'tasmax': tasmax_source,
'pr': pr_source
}
super().__init__(**kwargs)
self.ds_dir = path
fpath = os.path.join(self.ds_dir, 'hysets_dyn.nc')
if not os.path.exists(fpath):
self._maybe_to_netcdf('hysets_dyn')
def _maybe_to_netcdf(self, fname:str):
# todo saving as one file takes very long time
oneD_vars = []
twoD_vars = []
for src in self.Q_SRC:
xds = xr.open_dataset(os.path.join(self.ds_dir, f'HYSETS_2020_{src}.nc'))
for var in xds.variables:
print(f'getting {var} from source {src} ')
if len(xds[var].data.shape) > 1:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
twoD_vars.append(xar)
else:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
oneD_vars.append(xar)
oneD_xds = xr.merge(oneD_vars)
twoD_xds = xr.merge(twoD_vars)
oneD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_static.nc"))
twoD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_dyn.nc"))
return
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('HYSETS', x)
self._ds_dir = x
@property
def static_features(self):
df = self.read_static_data()
return df.columns.to_list()
def stations(self) -> list:
return self.read_static_data().index.to_list()
@property
def start(self):
return "19500101"
@property
def end(self):
return "20181231"
def fetch_stations_attributes(self,
stations: list,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
st = None,
en = None,
as_dataframe: bool = False,
**kwargs):
stations = check_attributes(stations, self.stations())
stations = [int(stn) for stn in stations]
if dynamic_features is not None:
dyn = self._fetch_dynamic_features(stations=stations,
dynamic_features=dynamic_features,
as_dataframe=as_dataframe,
**kwargs
)
if static_features is not None: # we want both static and dynamic
to_return = {}
static = self._fetch_static_features(station=stations,
static_features=static_features,
**kwargs
)
to_return['static'] = static
to_return['dynamic'] = dyn
else:
to_return = dyn
elif static_features is not None:
# we want only static
to_return = self._fetch_static_features(
station=stations,
static_features=static_features,
**kwargs
)
else:
raise ValueError
return to_return
def fetch_dynamic_features(self,
station,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches dynamic attributes of one station."""
station = [int(station)]
return self._fetch_dynamic_features(stations=station,
dynamic_features=dynamic_features,
st=st,
en=en,
as_dataframe=as_dataframe)
def _fetch_dynamic_features(self,
stations:list,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False,
as_ts=False
):
"""Fetches dynamic attributes of station."""
st, en = self._check_length(st, en)
attrs = check_attributes(dynamic_features, self.dynamic_features)
stations = np.subtract(stations, 1).tolist()
# maybe we don't need to read all variables
sources = {k:v for k,v in self.sources.items() if k in attrs}
# original .nc file contains datasets with dynamic and static features as data_vars
# however, for uniformity of this API and easy usage, we want a Dataset to have
# station names/gauge_ids as data_vars and each data_var has
# dimension (time, dynamic_variables)
# Therefore, first read all data for each station from .nc file
# then rearrange it.
# todo, this operation is slower because of `to_dataframe`
# also doing this removes all the metadata
x = {}
f = os.path.join(self.ds_dir, "hysets_dyn.nc")
xds = xr.open_dataset(f)
for stn in stations:
xds1 = xds[[f'{k}_{v}' for k, v in sources.items()]].sel(watershed=stn, time=slice(st, en))
xds1 = xds1.rename_vars({f'{k}_{v}': k for k, v in sources.items()})
x[stn] = xds1.to_dataframe(['time'])
xds = xr.Dataset(x)
xds = xds.rename_dims({'dim_1': 'dynamic_features'})
xds = xds.rename_vars({'dim_1': 'dynamic_features'})
if as_dataframe:
return xds.to_dataframe(['time', 'dynamic_features'])
return xds
def _fetch_static_features(self,
station,
static_features:Union[str, list]='all',
st=None,
en=None,
as_ts=False):
df = self.read_static_data()
static_features = check_attributes(static_features, self.static_features)
if isinstance(station, str):
station = [station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
return self.to_ts(df.loc[station][static_features], st=st, en=en, as_ts=as_ts)
def fetch_static_features(self,
station,
features='all',
st=None,
en=None,
as_ts=False
)->pd.DataFrame:
return self._fetch_static_features(station, features, st, en, as_ts)
def read_static_data(self):
fname = os.path.join(self.ds_dir, 'HYSETS_watershed_properties.txt')
static_df = pd.read_csv(fname, index_col='Watershed_ID', sep=';')
static_df.index = static_df.index.astype(str)
return static_df
class CAMELS_US(Camels):
"""
Downloads and processes CAMELS dataset of 671 catchments named as CAMELS
from https://ral.ucar.edu/solutions/products/camels
https://doi.org/10.5194/hess-19-209-2015
"""
DATASETS = ['CAMELS_US']
url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip"
catchment_attr_url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip"
folders = {'basin_mean_daymet': f'basin_mean_forcing{SEP}daymet',
'basin_mean_maurer': f'basin_mean_forcing{SEP}maurer',
'basin_mean_nldas': f'basin_mean_forcing{SEP}nldas',
'basin_mean_v1p15_daymet': f'basin_mean_forcing{SEP}v1p15{SEP}daymet',
'basin_mean_v1p15_nldas': f'basin_mean_forcing{SEP}v1p15{SEP}nldas',
'elev_bands': f'elev{SEP}daymet',
'hru': f'hru_forcing{SEP}daymet'}
dynamic_features = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)', 'Flow']
def __init__(self, data_source='basin_mean_daymet'):
        assert data_source in self.folders, f'allowed data sources are {self.folders.keys()}'
self.data_source = data_source
super().__init__("CAMELS_US")
if os.path.exists(self.ds_dir):
print(f"dataset is already downloaded at {self.ds_dir}")
else:
download(self.url, os.path.join(self.camels_dir, f'CAMELS_US{SEP}CAMELS_US.zip'))
download(self.catchment_attr_url, os.path.join(self.camels_dir, f"CAMELS_US{SEP}catchment_attrs.zip"))
self._unzip()
self.attr_dir = os.path.join(self.ds_dir, f'catchment_attrs{SEP}camels_attributes_v2.0')
self.dataset_dir = os.path.join(self.ds_dir, f'CAMELS_US{SEP}basin_dataset_public_v1p2')
self._maybe_to_netcdf('camels_us_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def start(self):
return "19800101"
@property
def end(self):
return "20141231"
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=';', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
def stations(self) -> list:
stns = []
for _dir in os.listdir(os.path.join(self.dataset_dir, 'usgs_streamflow')):
cat = os.path.join(self.dataset_dir, f'usgs_streamflow{SEP}{_dir}')
stns += [fname.split('_')[0] for fname in os.listdir(cat)]
# remove stations for which static values are not available
for stn in ['06775500', '06846500', '09535100']:
stns.remove(stn)
return stns
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
dyn = {}
for station in stations:
# attributes = check_attributes(dynamic_features, self.dynamic_features)
assert isinstance(station, str)
df = None
df1 = None
dir_name = self.folders[self.data_source]
for cat in os.listdir(os.path.join(self.dataset_dir, dir_name)):
cat_dirs = os.listdir(os.path.join(self.dataset_dir, f'{dir_name}{SEP}{cat}'))
stn_file = f'{station}_lump_cida_forcing_leap.txt'
if stn_file in cat_dirs:
df = pd.read_csv(os.path.join(self.dataset_dir,
f'{dir_name}{SEP}{cat}{SEP}{stn_file}'),
sep="\s+|;|:",
skiprows=4,
engine='python',
names=['Year', 'Mnth', 'Day', 'Hr', 'dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)'],
)
df.index = pd.to_datetime(df['Year'].map(str) + '-' + df['Mnth'].map(str) + '-' + df['Day'].map(str))
flow_dir = os.path.join(self.dataset_dir, 'usgs_streamflow')
for cat in os.listdir(flow_dir):
cat_dirs = os.listdir(os.path.join(flow_dir, cat))
stn_file = f'{station}_streamflow_qc.txt'
if stn_file in cat_dirs:
fpath = os.path.join(flow_dir, f'{cat}{SEP}{stn_file}')
                df1 = pd.read_csv(fpath, sep=r"\s+|;|:'",
names=['station', 'Year', 'Month', 'Day', 'Flow', 'Flag'],
engine='python')
df1.index = pd.to_datetime(
df1['Year'].map(str) + '-' + df1['Month'].map(str) + '-' + df1['Day'].map(str))
out_df = pd.concat([df[['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)', 'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)']],
df1['Flow']],
axis=1)
dyn[station] = out_df
return dyn
def fetch_static_features(self, station, features):
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
static_df = pd.DataFrame()
for f in files:
# index should be read as string
idx = pd.read_csv(f, sep=';', usecols=['gauge_id'], dtype=str)
_df = pd.read_csv(f, sep=';', index_col='gauge_id')
_df.index = idx['gauge_id']
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
else: # index should be read as string bcs it has 0s at the start
idx = pd.read_csv(static_fpath, usecols=['gauge_id'], dtype=str)
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = idx['gauge_id']
static_df.index = static_df.index.astype(str)
df = static_df.loc[station][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
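# Illustrative usage (a sketch; the first call downloads the full CAMELS-US
# archive, which is large):
#
#   dataset = CAMELS_US(data_source='basin_mean_daymet')
#   dyn = dataset.fetch_dynamic_features('01013500', 'all')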
class CAMELS_BR(Camels):
"""
Downloads and processes CAMELS dataset of Brazil
"""
url = "https://zenodo.org/record/3964745#.YA6rUxZS-Uk"
folders = {'streamflow_m3s': '02_CAMELS_BR_streamflow_m3s',
'streamflow_mm': '03_CAMELS_BR_streamflow_mm_selected_catchments',
'simulated_streamflow_m3s': '04_CAMELS_BR_streamflow_simulated',
'precipitation_cpc': '07_CAMELS_BR_precipitation_cpc',
'precipitation_mswep': '06_CAMELS_BR_precipitation_mswep',
'precipitation_chirps': '05_CAMELS_BR_precipitation_chirps',
'evapotransp_gleam': '08_CAMELS_BR_evapotransp_gleam',
'evapotransp_mgb': '09_CAMELS_BR_evapotransp_mgb',
'potential_evapotransp_gleam': '10_CAMELS_BR_potential_evapotransp_gleam',
'temperature_min': '11_CAMELS_BR_temperature_min_cpc',
'temperature_mean': '12_CAMELS_BR_temperature_mean_cpc',
'temperature_max': '13_CAMELS_BR_temperature_max_cpc'
}
def __init__(self):
super().__init__("CAMELS-BR")
self._download()
self._maybe_to_netcdf('camels_dyn_br')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def static_dir(self):
path = None
for _dir in self._all_dirs:
if "attributes" in _dir:
                # supposing that 'attributes' exists in only one file/folder in self.ds_dir
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
return path
@property
def static_files(self):
all_files = None
if self.static_dir is not None:
all_files = glob.glob(f"{self.static_dir}/*.txt")
return all_files
@property
def dynamic_features(self) -> list:
return list(CAMELS_BR.folders.keys())
@property
def static_attribute_categories(self):
static_attrs = []
for f in self.static_files:
ff = str(os.path.basename(f).split('.txt')[0])
static_attrs.append('_'.join(ff.split('_')[2:]))
return static_attrs
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes','01_CAMELS_BR_attributes')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
@property
def start(self):
return "19800101"
@property
def end(self):
return "20181231"
def all_stations(self, attribute) -> list:
"""Tells all station ids for which a data of a specific attribute is available."""
all_files = []
for _attr, _dir in self.folders.items():
if attribute in _attr:
all_files = os.listdir(os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}'))
stations = []
for f in all_files:
stations.append(str(f.split('_')[0]))
return stations
def stations(self, to_exclude=None)->list:
"""Returns a list of station ids which are common among all dynamic attributes.
        >>> dataset = CAMELS_BR()
        >>> stations = dataset.stations()
"""
if to_exclude is not None:
if not isinstance(to_exclude, list):
assert isinstance(to_exclude, str)
to_exclude = [to_exclude]
else:
to_exclude = []
stations = {}
for dyn_attr in self.dynamic_features:
if dyn_attr not in to_exclude:
stations[dyn_attr] = self.all_stations(dyn_attr)
stns = list(set.intersection(*map(set, list(stations.values()))))
return stns
def _read_dynamic_from_csv(self,
stations,
attributes:Union[str, list]='all',
st=None,
en=None,
):
"""
        returns the dynamic/time series attributes for the given station ids.
        ```python
        >>> dataset = CAMELS_BR()
        >>> pcp = dataset.fetch_dynamic_features('10500000', 'precipitation_cpc')
        ... # fetch all time series data associated with a station
        >>> x = dataset.fetch_dynamic_features('51560000', dataset.dynamic_features)
```
"""
attributes = check_attributes(attributes, self.dynamic_features)
dyn = {}
for stn_id in stations:
# making one separate dataframe for one station
data = pd.DataFrame()
for attr, _dir in self.folders.items():
if attr in attributes:
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
# supposing that the filename starts with stn_id and has .txt extension.
fname = [f for f in os.listdir(path) if f.startswith(str(stn_id)) and f.endswith('.txt')]
fname = fname[0]
if os.path.exists(os.path.join(path, fname)):
df = pd.read_csv(os.path.join(path, fname), sep=' ')
df.index = pd.to_datetime(df[['year', 'month', 'day']])
df.index.freq = pd.infer_freq(df.index)
df = df[st:en]
# only read one column which matches the attr
# todo, qual_flag maybe important
[df.pop(item) for item in df.columns if item != attr]
data = pd.concat([data, df], axis=1)
else:
raise FileNotFoundError(f"file {fname} not found at {path}")
dyn[stn_id] = data
return dyn
def fetch_static_features(self,
station,
features=None
) -> pd.DataFrame:
"""
        Arguments:
            station int/str/list:
                id(s) of the station(s) whose static attributes are to be fetched.
            features str/list:
                name(s) of the attributes to fetch. Default is None, which will
                return all available static attributes for the given station(s).
        Example:
        -------
        ```python
        >>> dataset = CAMELS_BR()
        >>> df = dataset.fetch_static_features(11500000, 'climate')
```
"""
if isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
elif isinstance(station, str):
station = [station]
else:
raise ValueError
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes','01_CAMELS_BR_attributes')}/*.txt")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
else:
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = static_df.index.astype(str)
return pd.DataFrame(static_df.loc[station][attributes])
class CAMELS_GB(Camels):
"""
This dataset must be manually downloaded by the user.
The path of the downloaded folder must be provided while initiating this class.
"""
dynamic_features = ["precipitation", "pet", "temperature", "discharge_spec",
"discharge_vol", "peti",
"humidity", "shortwave_rad", "longwave_rad", "windspeed"]
def __init__(self, path=None):
super().__init__(name="CAMELS-GB")
self.ds_dir = path
self._maybe_to_netcdf('camels_gb_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('CAMELS-GB', x)
self._ds_dir = x
@property
def static_attribute_categories(self) -> list:
attributes = []
path = os.path.join(self.ds_dir, 'data')
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and f.endswith('csv'):
attributes.append(f.split('_')[2])
return attributes
@property
def start(self):
return "19701001"
@property
def end(self):
return "20150930"
@property
def static_features(self):
files = glob.glob(f"{os.path.join(self.ds_dir, 'data')}/*.csv")
cols = []
for f in files:
if 'static_features.csv' not in f:
df = pd.read_csv(f, nrows=1, index_col='gauge_id')
cols += (list(df.columns))
return cols
def stations(self, to_exclude=None):
# CAMELS_GB_hydromet_timeseries_StationID_number
path = os.path.join(self.ds_dir, f'data{SEP}timeseries')
gauge_ids = []
for f in os.listdir(path):
gauge_ids.append(f.split('_')[4])
return gauge_ids
def _read_dynamic_from_csv(self,
stations,
attributes:Union[str, list]='all',
st=None,
en=None,
):
"""Fetches dynamic attribute/attributes of one station."""
dyn = {}
for stn_id in stations:
# making one separate dataframe for one station
path = os.path.join(self.ds_dir, f"data{SEP}timeseries")
fname = None
for f in os.listdir(path):
if stn_id in f:
fname = f
break
df = pd.read_csv(os.path.join(path, fname), index_col= 'date')
df.index = pd.to_datetime(df.index)
df.index.freq = pd.infer_freq(df.index)
dyn[stn_id] = df
return dyn
def fetch_static_features(self,
station:str,
features='all'
) -> pd.DataFrame:
"""Fetches static attributes of one station for one or more category as dataframe."""
attributes = check_attributes(features, self.static_features)
static_fname = 'static_features.csv'
static_fpath = os.path.join(self.ds_dir, 'data', static_fname)
if os.path.exists(static_fpath):
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
else:
files = glob.glob(f"{os.path.join(self.ds_dir, 'data')}/*.csv")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, index_col='gauge_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath)
if isinstance(station, str):
station =[station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
static_df.index = static_df.index.astype(str)
return static_df.loc[station][attributes]
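# Illustrative usage (a sketch; the path is hypothetical and must point to
# the manually downloaded CAMELS-GB folder):
#
#   dataset = CAMELS_GB(path='/data/CAMELS-GB')
#   static = dataset.fetch_static_features('97002', 'all')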
class CAMELS_AUS(Camels):
"""
Inherits from Camels class. Fetches CAMELS-AUS dataset.
"""
url = 'https://doi.pangaea.de/10.1594/PANGAEA.921850'
urls = {
"01_id_name_metadata.zip": "https://download.pangaea.de/dataset/921850/files/",
"02_location_boundary_area.zip": "https://download.pangaea.de/dataset/921850/files/",
"03_streamflow.zip": "https://download.pangaea.de/dataset/921850/files/",
"04_attributes.zip": "https://download.pangaea.de/dataset/921850/files/",
"05_hydrometeorology.zip": "https://download.pangaea.de/dataset/921850/files/",
"CAMELS_AUS_Attributes-Indices_MasterTable.csv": "https://download.pangaea.de/dataset/921850/files/",
"Units_01_TimeseriesData.pdf": "https://download.pangaea.de/dataset/921850/files/",
"Units_02_AttributeMasterTable.pdf": "https://download.pangaea.de/dataset/921850/files/",
}
folders = {
'streamflow_MLd': f'03_streamflow{SEP}03_streamflow{SEP}streamflow_MLd',
'streamflow_MLd_inclInfilled': f'03_streamflow{SEP}03_streamflow{SEP}streamflow_MLd_inclInfilled',
'streamflow_mmd': f'03_streamflow{SEP}03_streamflow{SEP}streamflow_mmd',
'et_morton_actual_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_morton_actual_SILO',
'et_morton_point_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_morton_point_SILO',
'et_morton_wet_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_morton_wet_SILO',
'et_short_crop_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_short_crop_SILO',
'et_tall_crop_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_tall_crop_SILO',
'evap_morton_lake_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}evap_morton_lake_SILO',
'evap_pan_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}evap_pan_SILO',
'evap_syn_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}evap_syn_SILO',
'precipitation_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}01_precipitation_timeseries{SEP}precipitation_AWAP',
'precipitation_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}01_precipitation_timeseries{SEP}precipitation_SILO',
        'precipitation_var_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}01_precipitation_timeseries{SEP}precipitation_var_AWAP',
'solarrad_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}solarrad_AWAP',
'tmax_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}tmax_AWAP',
'tmin_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}tmin_AWAP',
'vprp_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}vprp_AWAP',
'mslp_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}mslp_SILO',
'radiation_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}radiation_SILO',
'rh_tmax_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}rh_tmax_SILO',
'rh_tmin_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}rh_tmin_SILO',
'tmax_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}tmax_SILO',
'tmin_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}tmin_SILO',
'vp_deficit_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}vp_deficit_SILO',
'vp_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}vp_SILO',
}
def __init__(self, path:str=None):
"""
Arguments:
path: path where the CAMELS-AUS dataset has been downloaded. This path
must contain five zip files and one xlsx file. If None, then the
data will downloaded.
"""
if path is not None:
assert isinstance(path, str), f'path must be string like but it is "{path}" of type {path.__class__.__name__}'
            if not os.path.exists(path) or len(os.listdir(path)) < 2:
                raise FileNotFoundError(f"The path {path} does not exist or does not contain the required files")
self.ds_dir = path
super().__init__()
if not os.path.exists(self.ds_dir):
os.makedirs(self.ds_dir)
for _file, url in self.urls.items():
fpath = os.path.join(self.ds_dir, _file)
if not os.path.exists(fpath):
download(url + _file, fpath)
self._unzip()
self._maybe_to_netcdf('camels_aus_dyn')
@property
def start(self):
return "19570101"
@property
def end(self):
return "20181231"
@property
def location(self):
return "Australia"
def stations(self, as_list=True)->list:
fname = os.path.join(self.ds_dir, f"01_id_name_metadata{SEP}01_id_name_metadata{SEP}id_name_metadata.csv")
df = pd.read_csv(fname)
if as_list:
return df['station_id'].to_list()
else:
return df
@property
def static_attribute_categories(self):
attributes = []
path = os.path.join(self.ds_dir, f'04_attributes{SEP}04_attributes')
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and f.endswith('csv'):
f = str(f.split('.csv')[0])
attributes.append(''.join(f.split('_')[2:]))
return attributes
@property
def static_features(self) -> list:
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '04_attributes', '04_attributes')}/*.csv")
cols = []
for f in files:
_df = pd.read_csv(f, index_col='station_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='station_id', nrows=1)
cols = list(df.columns)
return cols
@property
def dynamic_features(self) -> list:
return list(self.folders.keys())
def _read_static(self, stations, attributes,
st=None, en=None):
attributes = check_attributes(attributes, self.static_features)
static_fname = 'static_features.csv'
static_fpath = os.path.join(self.ds_dir, static_fname)
if os.path.exists(static_fpath):
static_df = pd.read_csv(static_fpath, index_col='station_id')
else:
files = glob.glob(f"{os.path.join(self.ds_dir, '04_attributes', '04_attributes')}/*.csv")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, index_col='station_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath)
static_df.index = static_df.index.astype(str)
df = static_df.loc[stations][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return self.to_ts(df, st, en)
def _read_dynamic_from_csv(self, stations, dynamic_features, **kwargs):
dyn_attrs = {}
dyn = {}
for _attr in dynamic_features:
_path = os.path.join(self.ds_dir, f'{self.folders[_attr]}.csv')
_df = pd.read_csv(_path, na_values=['-99.99'])
_df.index = pd.to_datetime(_df[['year', 'month', 'day']])
[_df.pop(col) for col in ['year', 'month', 'day']]
dyn_attrs[_attr] = _df
# making one separate dataframe for one station
for stn in stations:
stn_df = pd.DataFrame()
for attr, attr_df in dyn_attrs.items():
if attr in dynamic_features:
stn_df[attr] = attr_df[stn]
dyn[stn] = stn_df
return dyn
def fetch_static_features(self,
station,
features='all',
**kwargs) -> pd.DataFrame:
"""Fetches static attribuets of one station as dataframe."""
return self._read_static(station, features)
def plot(self, what, stations=None, **kwargs):
assert what in ['outlets', 'boundaries']
f1 = os.path.join(self.ds_dir, f'02_location_boundary_area{SEP}02_location_boundary_area{SEP}shp{SEP}CAMELS_AUS_BasinOutlets_adopted.shp')
f2 = os.path.join(self.ds_dir, f'02_location_boundary_area{SEP}02_location_boundary_area{SEP}shp{SEP}bonus data{SEP}Australia_boundaries.shp')
if plot_shapefile is not None:
return plot_shapefile(f1, bbox_shp=f2, recs=stations, rec_idx=0, **kwargs)
else:
raise ModuleNotFoundError("Shapely must be installed in order to plot the datasets.")
class CAMELS_CL(Camels):
"""
Downloads and processes CAMELS dataset of Chile
https://doi.org/10.5194/hess-22-5817-2018
"""
urls = {
"1_CAMELScl_attributes.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"2_CAMELScl_streamflow_m3s.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"3_CAMELScl_streamflow_mm.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"4_CAMELScl_precip_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"5_CAMELScl_precip_chirps.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"6_CAMELScl_precip_mswep.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"7_CAMELScl_precip_tmpa.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"8_CAMELScl_tmin_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"9_CAMELScl_tmax_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"10_CAMELScl_tmean_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"11_CAMELScl_pet_8d_modis.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"12_CAMELScl_pet_hargreaves.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"13_CAMELScl_swe.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"14_CAMELScl_catch_hierarchy.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"CAMELScl_catchment_boundaries.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
}
dynamic_features = ['streamflow_m3s', 'streamflow_mm',
'precip_cr2met', 'precip_chirps', 'precip_mswep', 'precip_tmpa',
'tmin_cr2met', 'tmax_cr2met', 'tmean_cr2met',
'pet_8d_modis', 'pet_hargreaves',
'swe'
]
"""
Arguments:
path: path where the CAMELS-AUS dataset has been downloaded. This path must
contain five zip files and one xlsx file.
"""
def __init__(self,
path: str = None
):
self.ds_dir = path
super().__init__()
if not os.path.exists(self.ds_dir):
os.makedirs(self.ds_dir)
for _file, url in self.urls.items():
fpath = os.path.join(self.ds_dir, _file)
if not os.path.exists(fpath):
download(url+_file, fpath)
self._unzip()
self.dyn_fname = os.path.join(self.ds_dir, 'camels_cl_dyn.nc')
self._maybe_to_netcdf('camels_cl_dyn')
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def start(self):
return "19130215"
@property
def end(self):
return "20180309"
@property
def static_features(self) -> list:
path = os.path.join(self.ds_dir, f"1_CAMELScl_attributes{SEP}1_CAMELScl_attributes.txt")
df = pd.read_csv(path, sep='\t', index_col='gauge_id')
return df.index.to_list()
def stations(self) -> list:
"""Tells all station ids for which a data of a specific attribute is available."""
stn_fname = os.path.join(self.ds_dir, 'stations.json')
if not os.path.exists(stn_fname):
_stations = {}
for dyn_attr in self.dynamic_features:
for _dir in self._all_dirs:
if dyn_attr in _dir:
fname = os.path.join(self.ds_dir, f"{_dir}{SEP}{_dir}.txt")
df = pd.read_csv(fname, sep='\t', nrows=2, index_col='gauge_id')
_stations[dyn_attr] = list(df.columns)
stns = list(set.intersection(*map(set, list(_stations.values()))))
with open(stn_fname, 'w') as fp:
json.dump(stns, fp)
else:
with open(stn_fname, 'r') as fp:
stns = json.load(fp)
return stns
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
dyn = {}
st, en = self._check_length(st, en)
assert all(stn in self.stations() for stn in stations)
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
        # reading all dynamic attributes
dyn_attrs = {}
for attr in dynamic_features:
fname = [f for f in self._all_dirs if '_' + attr in f][0]
fname = os.path.join(self.ds_dir, f'{fname}{SEP}{fname}.txt')
_df = pd.read_csv(fname, sep='\t', index_col=['gauge_id'], na_values=" ")
_df.index = pd.to_datetime(_df.index)
dyn_attrs[attr] = _df[st:en]
# making one separate dataframe for one station
for stn in stations:
stn_df = pd.DataFrame()
for attr, attr_df in dyn_attrs.items():
if attr in dynamic_features:
stn_df[attr] = attr_df[stn]
dyn[stn] = stn_df[st:en]
return dyn
def _read_static(self, stations:list, attributes:list)->pd.DataFrame:
# overwritten for speed
stns_df = pd.DataFrame(columns=attributes)
path = os.path.join(self.ds_dir, f"1_CAMELScl_attributes{SEP}1_CAMELScl_attributes.txt")
_df = pd.read_csv(path, sep='\t', index_col='gauge_id')
for stn in stations:
df = pd.DataFrame()
if stn in _df:
df[stn] = _df[stn]
elif ' ' + stn in _df:
df[stn] = _df[' ' + stn]
            stns_df = pd.concat([stns_df, df.transpose()[attributes]])
return stns_df
def fetch_static_features(self,
station,
features=None,
st=None,
en=None
):
attributes = check_attributes(features, self.static_features)
if isinstance(station, str):
station = [station]
return self._read_static(station, attributes)
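# Illustrative usage (a sketch; the path is hypothetical):
#
#   dataset = CAMELS_CL(path='/data/CAMELS_CL')
#   stns = dataset.stations()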
class HYPE(Camels):
"""
Downloads and preprocesses HYPE dataset from https://zenodo.org/record/4029572.
    This is a rainfall-runoff dataset of 564 stations from 1985 to 2019 at
    daily, monthly and yearly time steps.
paper : https://doi.org/10.2166/nh.2010.007
"""
url = [
"https://zenodo.org/record/581435",
"https://zenodo.org/record/4029572"
]
dynamic_features = [
'AET_mm',
'Baseflow_mm',
'Infiltration_mm',
'SM_mm',
'Streamflow_mm',
'Runoff_mm',
'Qsim_m3-s',
'Prec_mm',
'PET_mm'
]
def __init__(self, time_step: str = 'daily', **kwargs):
assert time_step in ['daily', 'month', 'year']
self.time_step = time_step
self.ds_dir = None
super().__init__(**kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'hype_year_dyn.nc')
if not os.path.exists(fpath):
self.time_step = 'daily'
self._maybe_to_netcdf('hype_daily_dyn')
self.time_step = 'month'
self._maybe_to_netcdf('hype_month_dyn')
self.time_step = 'year'
self._maybe_to_netcdf('hype_year_dyn')
self.time_step = time_step
self.dyn_fname = os.path.join(self.ds_dir, f'hype_{time_step}_dyn.nc')
def stations(self)->list:
_stations = np.arange(1, 565).astype(str)
return list(_stations)
@property
def static_features(self):
return []
def _read_dynamic_from_csv(self,
stations:list,
attributes:Union[str, list]='all',
st=None,
en=None,
):
dynamic_features = check_attributes(attributes, self.dynamic_features)
_dynamic_attributes = []
for dyn_attr in dynamic_features:
pref, suff = dyn_attr.split('_')[0], dyn_attr.split('_')[-1]
_dyn_attr = f"{pref}_{self.time_step}_{suff}"
_dynamic_attributes.append(_dyn_attr)
df_attrs = {}
for dyn_attr in _dynamic_attributes:
fname = f"{dyn_attr}.csv"
fpath = os.path.join(self.ds_dir, fname)
index_col_name = 'DATE'
if fname in ['SM_month_mm.csv', 'SM_year_mm.csv']:
index_col_name = 'Date'
_df = pd.read_csv(fpath, index_col=index_col_name)
_df.index = pd.to_datetime(_df.index)
df_attrs[dyn_attr] = _df.loc[self.start:self.end] # todo, some stations have wider range than self.st/self.en
stns_dfs = {}
for st in stations:
stn_dfs = []
cols = []
for dyn_attr, dyn_df in df_attrs.items():
stn_dfs.append(dyn_df[st])
col_name = f"{dyn_attr.split('_')[0]}_{dyn_attr.split('_')[-1]}" # get original name without time_step
cols.append(col_name)
stn_df = pd.concat(stn_dfs, axis=1)
stn_df.columns = cols
stns_dfs[st] = stn_df
return stns_dfs
def fetch_static_features(self, station, features):
raise ValueError(f'No static feature for {self.name}')
@property
def start(self):
return '19850101'
@property
def end(self):
return '20191231'
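# Illustrative usage of HYPE (a sketch; data is downloaded on first use):
#
#   dataset = HYPE(time_step='month')
#   data = dataset.fetch(stations=5, as_dataframe=True)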
|
{"hexsha": "daa7d26d703b2e6fd2855c3dde932b88c4d35034", "size": 69842, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai4water/datasets/camels.py", "max_stars_repo_name": "csiro-hydroinformatics/AI4Water", "max_stars_repo_head_hexsha": "cdb18bd4bf298f77b381f1829045a1e790146985", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-10-13T08:23:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T04:36:21.000Z", "max_issues_repo_path": "ai4water/datasets/camels.py", "max_issues_repo_name": "csiro-hydroinformatics/AI4Water", "max_issues_repo_head_hexsha": "cdb18bd4bf298f77b381f1829045a1e790146985", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-15T02:42:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-15T02:51:07.000Z", "max_forks_repo_path": "ai4water/datasets/camels.py", "max_forks_repo_name": "csiro-hydroinformatics/AI4Water", "max_forks_repo_head_hexsha": "cdb18bd4bf298f77b381f1829045a1e790146985", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-23T04:45:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T10:12:34.000Z", "avg_line_length": 40.0011454754, "max_line_length": 204, "alphanum_fraction": 0.5732796884, "include": true, "reason": "import numpy", "num_tokens": 16116}
|
[STATEMENT]
lemma leadsTo_common:
"[| \<forall>m. F \<in> {m} Co (maxfg m);
\<forall>m \<in> -common. F \<in> {m} LeadsTo (greaterThan m);
n \<in> common |]
==> F \<in> (atMost (LEAST n. n \<in> common)) LeadsTo common"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> F \<in> {..LEAST n. n \<in> common} \<longmapsto>w common
[PROOF STEP]
apply (rule leadsTo_common_lemma)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> \<forall>m. F \<in> {m} Co maxfg m
2. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> \<forall>m\<in>{..<LEAST n. n \<in> common}. F \<in> {m} \<longmapsto>w {m<..}
3. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> (LEAST n. n \<in> common) \<in> common
[PROOF STEP]
apply (simp_all (no_asm_simp))
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> \<forall>m\<in>{..<LEAST n. n \<in> common}. F \<in> {m} \<longmapsto>w {m<..}
2. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> (LEAST n. n \<in> common) \<in> common
[PROOF STEP]
apply (erule_tac [2] LeastI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>m. F \<in> {m} Co maxfg m; \<forall>m\<in>- common. F \<in> {m} \<longmapsto>w {m<..}; n \<in> common\<rbrakk> \<Longrightarrow> \<forall>m\<in>{..<LEAST n. n \<in> common}. F \<in> {m} \<longmapsto>w {m<..}
[PROOF STEP]
apply (blast dest!: not_less_Least)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 930, "file": null, "length": 5}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import functools
import copy
from federatedml.statistic import data_overview
from federatedml.util import LOGGER
def empty_table_detection(data_instances):
num_data = data_instances.count()
if num_data == 0:
raise ValueError(f"Count of data_instance is 0: {data_instances}")
def empty_feature_detection(data_instances):
is_empty_feature = data_overview.is_empty_feature(data_instances)
if is_empty_feature:
raise ValueError(f"Number of features of DTable is 0: {data_instances}")
def column_gathering(iterable):
    # gather the indices of columns that hold at least one non-NaN value;
    # columns absent from the reduced union are the empty ones
    non_empty_columns = set()
    for k, v in iterable:
        features = v.features
        non_empty_columns.update(np.where(~np.isnan(features))[0])
    return non_empty_columns
def merge_column_sets(v1: set, v2: set):
v1_copy = copy.deepcopy(v1)
v2_copy = copy.deepcopy(v2)
v1_copy.update(v2_copy)
return v1_copy
def empty_column_detection(data_instance):
contains_empty_columns = False
lost_feat = []
is_sparse = data_overview.is_sparse_data(data_instance)
if is_sparse:
raise ValueError('sparse format empty column detection is not supported for now')
map_func = functools.partial(column_gathering, )
map_rs = data_instance.applyPartitions(map_func)
reduce_rs = map_rs.reduce(merge_column_sets)
# transform col index to col name
reduce_rs = np.array(data_instance.schema['header'])[list(reduce_rs)]
reduce_rs = set(reduce_rs)
if reduce_rs != set(data_instance.schema['header']):
lost_feat = list(set(data_instance.schema['header']).difference(reduce_rs))
contains_empty_columns = True
if contains_empty_columns:
raise ValueError('column(s) {} contain(s) no values'.format(lost_feat))
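# Illustrative sketch (not part of the FATE API): the gather/merge flow above on
# plain Python partitions, so the map/reduce logic is easy to trace. Each fake
# partition is a list of (key, instance) pairs whose `features` array marks
# missing values with NaN; all `_demo` names are hypothetical.
def _demo_empty_column_detection():
    from types import SimpleNamespace
    partitions = [
        [(0, SimpleNamespace(features=np.array([1.0, np.nan]))),
         (1, SimpleNamespace(features=np.array([2.0, np.nan])))],
        [(2, SimpleNamespace(features=np.array([np.nan, np.nan])))],
    ]
    header = ['x0', 'x1']
    non_empty = functools.reduce(merge_column_sets,
                                 (column_gathering(p) for p in partitions))
    # column index 1 never holds a value, so 'x1' is reported as empty
    return set(header).difference(np.array(header)[sorted(non_empty)])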
def check_legal_schema(schema):
# check for repeated header & illegal/non-printable chars except for space
# allow non-ascii chars
LOGGER.debug(f"schema is {schema}")
if schema is None:
return
header = schema.get("header", None)
LOGGER.debug(f"header is {header}")
if header is not None:
for col_name in header:
if not col_name.isprintable():
raise ValueError(f"non-printable char found in header column {col_name}, please check.")
header_set = set(header)
if len(header_set) != len(header):
raise ValueError(f"data header contains repeated names, please check.")
sid_name = schema.get("sid_name", None)
LOGGER.debug(f"sid_name is {sid_name}")
if sid_name is not None and not sid_name.isprintable():
raise ValueError(f"non-printable char found in sid_name {sid_name}, please check.")
label_name = schema.get("label_name", None)
LOGGER.debug(f"label_name is {label_name}")
if label_name is not None and not label_name.isprintable():
raise ValueError(f"non-printable char found in label_name {label_name}, please check.")
|
{"hexsha": "b966c406b9fa1ab3989b10ea36d5ffcc58734d87", "size": 3579, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/federatedml/util/abnormal_detection.py", "max_stars_repo_name": "hubert-he/FATE", "max_stars_repo_head_hexsha": "6758e150bd7ca7d6f788f9a7a8c8aea7e6500363", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3787, "max_stars_repo_stars_event_min_datetime": "2019-08-30T04:55:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:30:07.000Z", "max_issues_repo_path": "python/federatedml/util/abnormal_detection.py", "max_issues_repo_name": "hubert-he/FATE", "max_issues_repo_head_hexsha": "6758e150bd7ca7d6f788f9a7a8c8aea7e6500363", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1439, "max_issues_repo_issues_event_min_datetime": "2019-08-29T16:35:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:55:31.000Z", "max_forks_repo_path": "python/federatedml/util/abnormal_detection.py", "max_forks_repo_name": "hubert-he/FATE", "max_forks_repo_head_hexsha": "6758e150bd7ca7d6f788f9a7a8c8aea7e6500363", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1179, "max_forks_repo_forks_event_min_datetime": "2019-08-29T16:18:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:55:38.000Z", "avg_line_length": 34.7475728155, "max_line_length": 104, "alphanum_fraction": 0.7127689299, "include": true, "reason": "import numpy", "num_tokens": 813}
|
'''
inventoryanalytics: a Python library for Inventory Analytics
Author: Roberto Rossi
MIT License
Copyright (c) 2018 Roberto Rossi
'''
from typing import List
from inventoryanalytics.utils import memoize as mem
import scipy.stats as sp
import json
class State:
"""
The state of the inventory system.
Returns:
[type] -- state of the inventory system
"""
def __init__(self, t: int, I: float):
self.t, self.I = t, I
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __str__(self):
return str(self.t) + " " + str(self.I)
def __hash__(self):
return hash(str(self))
class StochasticLotSizing:
"""
The nonstationary stochastic lot sizing problem.
Returns:
[type] -- A problem instance
"""
def __init__(self, K: float, v: float, h: float, p: float, d: List[float],
max_inv: float, q: float, initial_order: bool):
"""
Create an instance of StochasticLotSizing.
Arguments:
K {float} -- the fixed ordering cost
v {float} -- the proportional unit ordering cost
h {float} -- the proportional unit inventory holding cost
p {float} -- the proportional unit inventory penalty cost
            d {List[float]} -- the Poisson demand rate (mean) of each
                period; the per-period demand probability mass functions
                are derived from these rates in the constructor.
max_inv {float} -- the maximum inventory level
q {float} -- quantile truncation for the demand
initial_order {bool} -- allow order in the first period
"""
        # truncated demand support: the q-quantile of the Poisson distribution
        max_demand = lambda d: sp.poisson(d).ppf(q).astype(int) # max demand in the support
# initialize instance variables
self.T, self.K, self.v, self.h, self.p, self.d, self.max_inv = len(d)-1, K, v, h, p, d, max_inv
pmf = lambda d, k : sp.poisson(d).pmf(k)/q # poisson pmf
self.pmf = [[[k, pmf(d, k)] for k in range(0, max_demand(d))] for d in self.d]
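        # dividing by q approximately renormalises the truncated pmf: the
        # retained support {0, ..., max_demand-1} carries total mass ~ q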
# lambdas
if initial_order: # action generator
self.ag = lambda s: [x for x in range(0, max_inv-s.I)]
else:
self.ag = lambda s: [x for x in range(0, max_inv-s.I)] if s.t > 0 else [0]
self.st = lambda s, a, d: State(s.t+1, s.I+a-d) # state transition
L = lambda i,a,d : self.h*max(i+a-d, 0) + self.p*max(d-i-a, 0) # immediate holding/penalty cost
self.iv = lambda s, a, d: (self.K if a > 0 else 0) + L(s.I, a, d) # immediate value function
self.cache_actions = {} # cache with optimal state/action pairs
def f(self, level: float) -> float:
"""
Recursively solve the nonstationary stochastic lot sizing problem
for an initial inventory level.
Arguments:
level {float} -- the initial inventory level
Returns:
float -- the cost of an optimal policy
"""
s = State(0,level)
return self._f(s)
def q(self, period: int, level:float) -> float:
"""
Retrieves the optimal order quantity for a given initial inventory level.
Function :func:`f` must have been called before using this method.
Arguments:
period {int} -- the initial period
level {float} -- the initial inventory level
Returns:
float -- the optimal order quantity
"""
s = State(period,level)
return self.cache_actions[str(s)]
def extract_sS_policy(self) -> List[float]:
"""
Extract optimal (s,S) policy parameters
Herbert E. Scarf. Optimality of (s,S) policies in the
dynamic inventory problem. In K. J. Arrow, S. Karlin,
and P. Suppes, editors, Mathematical Methods in the
Social Sciences, pages 196–202. Stanford University
Press, Stanford, CA, 1960.
Returns:
List[float] -- the optimal s,S policy parameters [...,[s_k,S_k],...]
"""
for i in range(-self.max_inv, self.max_inv):
self.f(i)
policy_parameters = []
for t in range(0, len(self.d)):
level = self.max_inv - 1
min_level = -self.max_inv
s = State(t, level)
while self.cache_actions.get(str(s), 0) == 0 and level > min_level:
level, s = level - 1, State(t, level - 1)
policy_parameters.append(
[level, level+self.cache_actions.get(str(s), 0)])
return policy_parameters
@mem.memoize
def _f(self, s: State) -> float:
"""
Dynamic programming forward recursion.
Arguments:
s {State} -- the initial state
Returns:
float -- the cost of an optimal policy
"""
        # Forward recursion
v = min(
[sum([p[1]*(self.iv(s, a, p[0])+ # immediate cost
(self._f(self.st(s, a, p[0])) if s.t < self.T else 0)) # future cost
for p in self.pmf[s.t]]) # demand realisations
for a in self.ag(s)]) # actions
opt_a = lambda a: sum([p[1]*(self.iv(s, a, p[0])+
(self._f(self.st(s, a, p[0])) if s.t < self.T else 0))
for p in self.pmf[s.t]]) == v
q = [k for k in filter(opt_a, self.ag(s))] # retrieve best action list
self.cache_actions[str(s)]=q[0] if bool(q) else None # store an action in dictionary
return v # return expected total cost
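    # The recursion in _f solves the Bellman equation
    #   C_t(I) = min_{a in A(I)} { K*1{a>0} + sum_d P(d)*[L(I,a,d) + C_{t+1}(I+a-d)] }
    # with L(I,a,d) = h*max(I+a-d,0) + p*max(d-I-a,0) and boundary condition C_{T+1} = 0.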
@staticmethod
def run_instance(file_name: str = None):
instance = {"K": 100, "v": 0, "h": 1, "p": 10, "d": [20,40,60,40],
"max_inv": 200, "q": 0.9999, "initial_order": True}
lot_sizing = StochasticLotSizing(**instance)
        t = 0  # initial period
        i = 0  # initial inventory level
print("Optimal policy cost: " + str(lot_sizing.f(i)))
print("Optimal order quantity: " + str(lot_sizing.q(t, i)))
print(lot_sizing.extract_sS_policy())
        try:
            with open(file_name, 'w') as f:
                json.dump(lot_sizing.cache_actions, f)
            # the with-block closes the file; an explicit close is not needed
            print("Policy saved to " + file_name)
        except (TypeError, OSError):
            print("Provide a file name to save the policy to disk.")
@staticmethod
def run_instance_stationary():
instance = {"K": 64, "v": 0, "h": 1, "p": 9, "d": [10,10,10,10,10,10,10],
"max_inv": 200, "q": 0.9999, "initial_order": True}
lot_sizing = StochasticLotSizing(**instance)
print(lot_sizing.extract_sS_policy())
if __name__ == '__main__':
StochasticLotSizing.run_instance()
|
{"hexsha": "9766966c529d8c4784b8b98883201a3c1a262075", "size": 7252, "ext": "py", "lang": "Python", "max_stars_repo_path": "inventoryanalytics/lotsizing/stochastic/nonstationary/sdp.py", "max_stars_repo_name": "vishalbelsare/inventoryanalytics", "max_stars_repo_head_hexsha": "85feff8f1abaf2c29414e066eed096ac3a74973b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-06-17T02:45:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-11T09:13:06.000Z", "max_issues_repo_path": "inventoryanalytics/lotsizing/stochastic/nonstationary/sdp.py", "max_issues_repo_name": "vishalbelsare/inventoryanalytics", "max_issues_repo_head_hexsha": "85feff8f1abaf2c29414e066eed096ac3a74973b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-07T03:33:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-02T21:11:59.000Z", "max_forks_repo_path": "inventoryanalytics/lotsizing/stochastic/nonstationary/sdp.py", "max_forks_repo_name": "vishalbelsare/inventoryanalytics", "max_forks_repo_head_hexsha": "85feff8f1abaf2c29414e066eed096ac3a74973b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-07-14T19:45:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-12T09:45:04.000Z", "avg_line_length": 38.1684210526, "max_line_length": 111, "alphanum_fraction": 0.528130171, "include": true, "reason": "import scipy", "num_tokens": 1778}
|
import argparse
import numpy as np
import subprocess as sp
import os
combiner_bin = "/home/lars/work/combiner/bin/"
def main():
args = parseCmd()
braker2_level = ['species_excluded', 'family_excluded', 'order_excluded']
with open(args.data + '/species.tab', 'r') as file:
species_list = file.read().split('\n')
species_list = [s for s in species_list if s]
for species in species_list:
species_path = "{}/{}".format(args.data, species)
anno = '{}/anno/annot.gtf'.format(species_path)
anno_out = '{}/anno/annot_ucsc.gtf'.format(species_path)
gtf2ucsc(anno, anno_out, 'anno')
braker = "{}/braker1/braker_fixed.gtf".format(species_path)
braker_out = "{}/braker1/braker_fixed_ucsc.gtf".format(species_path)
gtf2ucsc(braker, braker_out, 'braker1')
hints = "{}/braker1/hintsfile.gff".format(species_path)
hints_out = "{}/braker1/hintsfile_ucsc.gff".format(species_path)
gtf2ucsc(hints, hints_out, 'rnaseq', 'prothint')
for level in braker2_level:
braker = "{}/braker2/{}/braker_fixed.gtf".format(species_path, level)
braker_out = "{}/braker2/{}/braker_fixed_ucsc.gtf".format(species_path, level)
if os.path.exists(braker):
gtf2ucsc(braker, braker_out, 'braker2_' + level)
hints = "{}/braker2/{}/hintsfile.gff".format(species_path, level)
hints_out = "{}/braker2/{}/hintsfile_ucsc.gff".format(species_path, level)
gtf2ucsc(hints, hints_out, 'prothint', 'prothint')
def gtf2ucsc(gtf, out, name, mode='augustus'):
color = list(map(str, list(np.random.choice(range(256), size=3))))
cmd = "{}/gtf2ucsc.py --gtf {} --out {} --name {} --mode {} --color {}".\
format(combiner_bin, gtf, out, name , mode, ','.join(color))
print(cmd)
sp.call(cmd, shell=True)
if mode == 'augustus':
cmd = "sed -i -e 's/chrChr/chr/g' " + out
sp.call(cmd, shell=True)
elif mode == 'prothint':
out = '.'.join(out.split('.')[:-1]) + '_'
for i in ['intron', 'start', 'stop', 'cds']:
if os.path.exists("{}{}.gff".format(out, i)):
cmd = "sed -i -e 's/chrChr/chr/g' {}{}.gff".format(out, i)
sp.call(cmd, shell=True)
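# Example of a command emitted by gtf2ucsc (paths and color values hypothetical):
#   /home/lars/work/combiner/bin//gtf2ucsc.py --gtf anno/annot.gtf \
#       --out anno/annot_ucsc.gtf --name anno --mode anno --color 12,200,48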
def parseCmd():
"""Parse command line arguments
Returns:
dictionary: Dictionary with arguments
"""
    parser = argparse.ArgumentParser(description='Convert annotation, BRAKER and hint files to UCSC track format.')
    parser.add_argument('--data', type=str,
                        help='root data directory containing species.tab and the per-species subdirectories')
return parser.parse_args()
if __name__ == '__main__':
main()
|
{"hexsha": "5a1db67b2fafd3d4f3fee8d62ac84ef4085eab03", "size": 2612, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/dev/data2ucsc.py", "max_stars_repo_name": "LarsGab/PrEvCo", "max_stars_repo_head_hexsha": "55461001685b33cbf49d1f8fef93c387ee85b284", "max_stars_repo_licenses": ["ClArtistic"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/dev/data2ucsc.py", "max_issues_repo_name": "LarsGab/PrEvCo", "max_issues_repo_head_hexsha": "55461001685b33cbf49d1f8fef93c387ee85b284", "max_issues_repo_licenses": ["ClArtistic"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/dev/data2ucsc.py", "max_forks_repo_name": "LarsGab/PrEvCo", "max_forks_repo_head_hexsha": "55461001685b33cbf49d1f8fef93c387ee85b284", "max_forks_repo_licenses": ["ClArtistic"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5757575758, "max_line_length": 90, "alphanum_fraction": 0.6014548239, "include": true, "reason": "import numpy", "num_tokens": 745}
|
[STATEMENT]
lemma Sublists_Un [simp]: "Sublists (A \<union> B) = Sublists A \<union> Sublists B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sublists (A \<union> B) = Sublists A \<union> Sublists B
[PROOF STEP]
by (auto simp: Sublists_altdef)
|
{"llama_tokens": 97, "file": "Regular-Sets_Regexp_Constructions", "length": 1}
|
# Utility functions for processing VLSV data.
"""
getcell(meta, location) -> UInt
Return cell ID containing the given spatial `location` in meter, excluding domain
boundaries. Only accept 3D location.
"""
function getcell(meta::MetaVLSV, loc)
(;coordmin, coordmax, dcoord, ncells, cellid, maxamr) = meta
foreach( (i,comp) -> coordmin[i] < loc[i] < coordmax[i] ? nothing :
error("$comp coordinate out of bound!"), 1:3, 'x':'z')
# Get cell indices
indices = @inbounds ntuple(i -> round(UInt, (loc[i] - coordmin[i]) ÷ dcoord[i]), Val(3))
# Get cell id
cid = @inbounds indices[1] + indices[2]*ncells[1] + indices[3]*ncells[1]*ncells[2] + 1
ncells_lowerlevel = UInt(0)
ncell = prod(ncells)
@inbounds for ilevel = 0:maxamr
cid in cellid && break
ncells_lowerlevel += 2^(3*ilevel)*ncell
ratio = 2^(ilevel+1)
indices = ntuple(i -> floor(UInt, (loc[i] - coordmin[i]) / dcoord[i] * ratio), Val(3))
cid = ncells_lowerlevel + indices[1] +
ratio*ncells[1]*indices[2] + ratio^2*ncells[1]*ncells[2]*indices[3] + 1
end
cid
end
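# Usage sketch (file name hypothetical):
#   meta = load("bulk.0000501.vlsv")
#   cid = getcell(meta, (0.0, 0.0, 0.0))  # ID of the cell containing the origin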
"""
getlevel(meta, cid) -> Int
Return the AMR level of a given cell ID `cid`.
!!! warning
This function does not check if the VLSV file of `meta` actually contains `cid`; it may
be shadowed by refined children.
"""
function getlevel(meta::MetaVLSV, cid::Integer)
ncell = prod(meta.ncells)
ilevel = 0
c = Int(cid) - ncell
while c > 0
ilevel += 1
c -= 2^(3*ilevel)*ncell
end
ilevel
end
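# Worked example: with ncells = (4, 4, 1) there are 16 level-0 cells, so IDs
# 1:16 sit on level 0 and the next 16*8 = 128 IDs (17:144) on level 1;
# getlevel(meta, 20) therefore returns 1.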
"""
getparent(meta, cid) -> Int
Return the parent cell ID of given child `cid`.
"""
function getparent(meta::MetaVLSV, cid::Integer)
@inbounds xcell, ycell = meta.ncells[1], meta.ncells[2]
ncell = prod(meta.ncells)
mylvl = getlevel(meta, cid)
parentlvl = mylvl - 1
if parentlvl < 0
throw(ArgumentError("Cell ID $cid has no parent cell!"))
else
# get the first cellid on my level
cid1st = get1stcell(mylvl, ncell) + 1
# get row and column sequence on my level (starting with 0)
xcell <<= mylvl
ycell <<= mylvl
myseq = cid - cid1st
ix = myseq % xcell
iz = myseq ÷ (xcell*ycell)
iy = (myseq - iz*xcell*ycell) ÷ xcell
# indexes on the parent level
ixparent = ix ÷ 2
iyparent = iy ÷ 2
izparent = iz ÷ 2
# get the first cellid on parent level
cid1st -= ncell*8^parentlvl
# get parent cellid (may not exist!!!)
parentid = cid1st + izparent*xcell*ycell÷4 + iyparent*xcell÷2 + ixparent
end
parentid
end
"""
getchildren(meta, cid) -> Vector{Int}
Return direct children of `cid`.
"""
function getchildren(meta::MetaVLSV, cid::Integer)
xcell, ycell, zcell = meta.ncells
ncell = prod(meta.ncells)
mylvl = getlevel(meta, cid)
# get the first cell ID on the my level
cid1st = 1
for i = 0:mylvl-1
cid1st += ncell * 8^i
end
# get my row and column sequence on my level (starting with 0)
xcell <<= mylvl
ycell <<= mylvl
myseq = cid - cid1st
ix = myseq % xcell
iz = myseq ÷ (xcell*ycell)
iy = (myseq - iz*xcell*ycell) ÷ xcell
# get the children sequences on the finer level
ix *= 2
iy *= 2
iz *= 2
nchildren = 2^ndims(meta)
cid = zeros(Int, nchildren)
# get the first cell ID on the finer level
cid1st += ncell*8^mylvl
ix_, iy_ = (ix, ix+1), (iy, iy+1)
iz_ = zcell != 1 ? (iz, iz+1) : iz
for (n,i) in enumerate(Iterators.product(ix_, iy_, iz_))
@inbounds cid[n] = cid1st + i[3]*xcell*ycell*4 + i[2]*xcell*2 + i[1]
end
(cid)
end
"""
getsiblings(meta, cid) -> Vector{Int}
Return sibling cells of a given `cid`, including itself.
"""
function getsiblings(meta::MetaVLSV, cid::Integer)
xcell, ycell, zcell = meta.ncells
ncell = prod(meta.ncells)
mylvl = getlevel(meta, cid)
mylvl == 0 && throw(ArgumentError("CellID $cid is not a child cell!"))
xcell = xcell << mylvl
ycell = ycell << mylvl
# 1st cellid on my level
cid1st = get1stcell(mylvl, ncell) + 1
# xyz sequences on my level (starting with 0)
myseq = cid - cid1st
ix = myseq % xcell
iz = myseq ÷ (xcell*ycell)
iy = (myseq - iz*xcell*ycell) ÷ xcell
ix1 = iseven(ix) ? ix + 1 : ix - 1
iy1 = iseven(iy) ? iy + 1 : iy - 1
iz1 = iseven(iz) ? iz + 1 : iz - 1
# reorder
ix, ix1 = minmax(ix, ix1)
iy, iy1 = minmax(iy, iy1)
iz, iz1 = minmax(iz, iz1)
nsiblings = 2^ndims(meta)
cid = zeros(Int, nsiblings)
ix_, iy_ = (ix, ix1), (iy, iy1)
iz_ = zcell != 1 ? (iz, iz1) : iz
for (n,i) in enumerate(Iterators.product(ix_, iy_, iz_))
@inbounds cid[n] = cid1st + i[3]*xcell*ycell + i[2]*xcell + i[1]
end
(cid)
end
"""
isparent(meta, cid) -> Bool
Check if `cid` is a parent cell.
"""
function isparent(meta::MetaVLSV, cid::Integer)
ncell_accum = get1stcell(meta.maxamr, prod(meta.ncells))
cid ∉ meta.cellid && 0 < cid ≤ ncell_accum
end
"""
getcellcoordinates(meta, cid) -> NTuple{3, Float64}
Return a given cell's spatial coordinates.
"""
function getcellcoordinates(meta::MetaVLSV, cid::Integer)
(;ncells, coordmin, coordmax) = meta
cid -= 1 # for easy divisions
ncells_refmax = collect(ncells)
reflevel = 0
subtraction = prod(ncells) * (2^reflevel)^3
# sizes on the finest level
while cid ≥ subtraction
cid -= subtraction
reflevel += 1
subtraction *= 8
ncells_refmax[1] *= 2
ncells_refmax[2] *= 2
ncells_refmax[3] *= 2
end
indices = @inbounds (
cid % ncells_refmax[1],
cid ÷ ncells_refmax[1] % ncells_refmax[2],
cid ÷ (ncells_refmax[1] * ncells_refmax[2]) )
coords = @inbounds ntuple( i ->
coordmin[i] + (indices[i] + 0.5) * (coordmax[i] - coordmin[i]) / ncells_refmax[i],
Val(3))
coords
end
"""
getvcellcoordinates(meta, vcellids; species="proton")
Return velocity cells' coordinates of `species` and `vcellids`.
"""
function getvcellcoordinates(meta::MetaVLSV, vcellids; species="proton")
(;vblocks, vblock_size, dv, vmin) = meta.meshes[species]
bsize = prod(vblock_size)
blockid = @. vcellids ÷ bsize
# Get block coordinates
blockInd = [(
bid % vblocks[1],
bid ÷ vblocks[1] % vblocks[2],
bid ÷ (vblocks[1] * vblocks[2]) )
for bid in blockid]
blockCoord = [(
bInd[1] * dv[1] * vblock_size[1] + vmin[1],
bInd[2] * dv[2] * vblock_size[2] + vmin[2],
bInd[3] * dv[3] * vblock_size[3] + vmin[3] )
for bInd in blockInd]
# Get cell indices
vcellblockids = @. vcellids % bsize
cellidxyz = [(
cid % vblock_size[1],
cid ÷ vblock_size[1] % vblock_size[2],
cid ÷ (vblock_size[1] * vblock_size[2]) )
for cid in vcellblockids]
# Get cell coordinates
cellCoords = [SVector(0.0f0, 0.0f0, 0.0f0) for _ in vcellblockids]
@inbounds @simd for i in eachindex(vcellblockids)
cellCoords[i] = [blockCoord[i][j] + (cellidxyz[i][j] + 0.5) * dv[j] for j in 1:3]
end
cellCoords
end
"""
getdensity(meta, VDF; species="proton")
getdensity(meta, vcellf; species="proton")
getdensity(vmesh::VMeshInfo, vcellf)
Get density from `VDF` of `species` associated with `meta`, n = ∫ f(r,v) dV. Alternatively,
one can directly pass `vcellids` as original indices of nonzero VDFs and `vcellf` as their
corresponding values.
"""
function getdensity(meta::MetaVLSV, VDF::Array{T};
species="proton") where T <: AbstractFloat
(;dv) = meta.meshes[species]
n = sum(VDF) * convert(T, prod(dv))
end
function getdensity(vmesh::VMeshInfo, vcellf::Vector{T}) where T <: AbstractFloat
n = sum(vcellf) * convert(T, prod(vmesh.dv))
end
getdensity(meta::MetaVLSV, vcellf; species="proton") =
getdensity(meta.meshes[species], vcellf)
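# On the uniform velocity grid the integral reduces to a sum over cells,
#   n ≈ Σ_c f_c ⋅ Δvx⋅Δvy⋅Δvz,
# which is exactly the sum-times-cell-volume computed above.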
"""
getvelocity(meta, VDF; species="proton")
getvelocity(meta, vcellids, vcellf; species="proton")
getvelocity(vmesh::VMeshInfo, vcellids, vcellf)
Get bulk velocity from `VDF` of `species`, u = ∫ v * f(r,v) dV / n. Alternatively, one can
directly pass `vcellids`, `vcellf`, as in [`getdensity`](@ref).
"""
function getvelocity(meta::MetaVLSV, VDF::Array{T};
species="proton") where T <: AbstractFloat
(;dv, vmin) = meta.meshes[species]
u = zeros(T, 3)
@inbounds for k in axes(VDF,3), j in axes(VDF,2), i in axes(VDF,1)
vx = vmin[1] + (i - 0.5f0)*dv[1]
vy = vmin[2] + (j - 0.5f0)*dv[2]
vz = vmin[3] + (k - 0.5f0)*dv[3]
u[1] += vx*VDF[i,j,k]
u[2] += vy*VDF[i,j,k]
u[3] += vz*VDF[i,j,k]
end
n = sum(VDF)
u ./= n
SVector{3}(u)
end
function getvelocity(vmesh::VMeshInfo, vcellids::Vector{UInt32}, vcellf::Vector{T}) where
T <: AbstractFloat
(;vblock_size, vblocks, dv, vmin) = vmesh
vsize = @inbounds ntuple(i -> vblock_size[i] * vblocks[i], Val(3))
slicez = vsize[1]*vsize[2]
blocksize = prod(vblock_size)
sliceBz = vblocks[1]*vblocks[2]
sliceCz = vblock_size[1]*vblock_size[2]
u = zeros(T, 3)
@inbounds @simd for ic in eachindex(vcellids)
id = findindex(vcellids[ic], vblocks, vblock_size, blocksize, vsize, sliceBz, sliceCz)
i = id % vsize[1]
j = id % slicez ÷ vsize[1]
k = id ÷ slicez
vx = vmin[1] + (i + 0.5f0)*dv[1]
vy = vmin[2] + (j + 0.5f0)*dv[2]
vz = vmin[3] + (k + 0.5f0)*dv[3]
u[1] += vx*vcellf[ic]
u[2] += vy*vcellf[ic]
u[3] += vz*vcellf[ic]
end
n = sum(vcellf)
u ./= n
SVector{3}(u)
end
getvelocity(meta::MetaVLSV, vcellids, vcellf; species="proton") =
getvelocity(meta.meshes[species], vcellids, vcellf)
"""
getpressure(meta, VDF; species="proton")
getpressure(meta, vcellids, vcellf; species="proton")
getpressure(vmesh::VMeshInfo, vcellids, vcellf)
Get pressure tensor (6 components) of `species` from `VDF` associated with `meta`,
pᵢⱼ = m/3 * ∫ (v - u)ᵢ(v - u)ⱼ * f(r,v) dV. Alternatively, one can directly pass `vcellids`,
`vcellf`, as in [`getdensity`](@ref).
"""
function getpressure(meta::MetaVLSV, VDF::Array{T};
species="proton") where T <: AbstractFloat
(;dv, vmin) = meta.meshes[species]
p = zeros(T, 6)
u = getvelocity(meta, VDF; species)
@inbounds for k in axes(VDF,3), j in axes(VDF,2), i in axes(VDF,1)
vx = vmin[1] + (i - 0.5f0)*dv[1]
vy = vmin[2] + (j - 0.5f0)*dv[2]
vz = vmin[3] + (k - 0.5f0)*dv[3]
p[1] += (vx - u[1])*(vx - u[1])*VDF[i,j,k]
p[2] += (vy - u[2])*(vy - u[2])*VDF[i,j,k]
p[3] += (vz - u[3])*(vz - u[3])*VDF[i,j,k]
p[4] += (vy - u[2])*(vz - u[3])*VDF[i,j,k]
p[5] += (vx - u[1])*(vz - u[3])*VDF[i,j,k]
p[6] += (vx - u[1])*(vy - u[2])*VDF[i,j,k]
end
factor = mᵢ * convert(T, prod(dv))
p .*= factor
SVector{6}(p)
end
function getpressure(vmesh::VMeshInfo, vcellids::Vector{UInt32}, vcellf::Vector{T}) where
T <: AbstractFloat
(;vblock_size, vblocks, dv, vmin) = vmesh
vsize = @inbounds ntuple(i -> vblock_size[i] * vblocks[i], Val(3))
slicez = vsize[1]*vsize[2]
blocksize = prod(vblock_size)
sliceBz = vblocks[1]*vblocks[2]
sliceCz = vblock_size[1]*vblock_size[2]
u = getvelocity(vmesh, vcellids, vcellf)
p = zeros(T, 6)
@inbounds @simd for ic in eachindex(vcellids)
id = findindex(vcellids[ic], vblocks, vblock_size, blocksize, vsize, sliceBz, sliceCz)
i = id % vsize[1]
j = id % slicez ÷ vsize[1]
k = id ÷ slicez
vx = vmin[1] + (i + 0.5f0)*dv[1]
vy = vmin[2] + (j + 0.5f0)*dv[2]
vz = vmin[3] + (k + 0.5f0)*dv[3]
p[1] += (vx - u[1])*(vx - u[1])*vcellf[ic]
p[2] += (vy - u[2])*(vy - u[2])*vcellf[ic]
p[3] += (vz - u[3])*(vz - u[3])*vcellf[ic]
p[4] += (vy - u[2])*(vz - u[3])*vcellf[ic]
p[5] += (vx - u[1])*(vz - u[3])*vcellf[ic]
p[6] += (vx - u[1])*(vy - u[2])*vcellf[ic]
end
factor = mᵢ * convert(T, prod(dv))
p .*= factor
SVector{6}(p)
end
getpressure(meta::MetaVLSV, vcellids, vcellf; species="proton") =
getpressure(meta.meshes[species], vcellids, vcellf)
"Get the original vcell index without blocks from raw vcell index `i` (0-based)."
@inline function findindex(i, vblocks, vblock_size, blocksize, vsize, sliceBz, sliceCz)
iB = i ÷ blocksize
iBx = iB % vblocks[1]
iBy = iB % sliceBz ÷ vblocks[1]
iBz = iB ÷ sliceBz
iCellInBlock = i % blocksize
iCx = iCellInBlock % vblock_size[1]
iCy = iCellInBlock % sliceCz ÷ vblock_size[1]
iCz = iCellInBlock ÷ sliceCz
iBCx = iBx*vblock_size[1] + iCx
iBCy = iBy*vblock_size[2] + iCy
iBCz = iBz*vblock_size[3] + iCz
iOrigin = iBCz*vsize[1]*vsize[2] + iBCy*vsize[1] + iBCx
end
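# Worked example: with vblock_size = (4,4,4) and vblocks = (2,1,1), raw index 64
# is the first cell of block 1, i.e. block (1,0,0) with in-block cell (0,0,0),
# so its x-->y-->z ordered index is 4 (the fifth cell along vx).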
"""
reorder(vmesh::VMeshInfo, vcellids) -> vcellids_origin
Reorder vblock-organized VDF indexes into x-->y-->z indexes. `vcellids` are raw indices
of nonzero VDFs ordered by blocks.
"""
function reorder(vmesh::VMeshInfo, vcellids)
(;vblock_size, vblocks) = vmesh
blocksize = prod(vblock_size)
sliceBz = vblocks[1]*vblocks[2]
vsize = @inbounds ntuple(i -> vblock_size[i] * vblocks[i], Val(3))
sliceCz = vblock_size[1]*vblock_size[2]
vcellids_origin = similar(vcellids)
# IDs are 0-based
@inbounds @simd for i in eachindex(vcellids)
vcellids_origin[i] = 1 +
findindex(vcellids[i], vblocks, vblock_size, blocksize, vsize, sliceBz, sliceCz)
end
vcellids_origin
end
"""
reconstruct(vmesh::VMeshInfo, vcellids, vcellf)
Reconstruct the full VDFs in 3D. `vcellids` are raw indices of nonzero VDFs ordered by
blocks, and `vcellf` are the corresponding values in each cell.
"""
function reconstruct(vmesh::VMeshInfo, vcellids, vcellf)
(;vblock_size, vblocks) = vmesh
blocksize = prod(vblock_size)
sliceBz = vblocks[1]*vblocks[2]
vsize = @inbounds ntuple(i -> vblock_size[i] * vblocks[i], Val(3))
sliceCz = vblock_size[1]*vblock_size[2]
# Reconstruct the full velocity space
VDF = zeros(Float32, vsize)
# Raw IDs are 0-based
@inbounds @simd for i in eachindex(vcellids)
j = 1 +
findindex(vcellids[i], vblocks, vblock_size, blocksize, vsize, sliceBz, sliceCz)
VDF[j] = vcellf[i]
end
VDF
end
"""
getmaxwellianity(meta, VDF; species="proton")
getmaxwellianity(meta, vcellids, vcellf; species="proton")
Obtain the Maxwellian similarity factor -log(1/(2n) * ∫ |f - g| dv), where `f` is the VDF
from Vlasiator and `g` is the analytical Maxwellian distribution that generates the same
density as `f`. The value ranges from [0, +∞], with 0 meaning not Maxwellian-distributed at
all, and +∞ a perfect Maxwellian distribution.
Alternatively, one can pass original `vcellids` and `vcellf` directly.
"""
function getmaxwellianity(meta, VDF; species="proton")
(;dv, vmin) = meta.meshes[species]
n = getdensity(meta, VDF)
u = getvelocity(meta, VDF)
P = getpressure(meta, VDF)
p = (P[1] + P[2] + P[3]) / 3
   T = p / (n * kB) # temperature from scalar pressure
ϵₘ = zero(eltype(VDF))
vth2Inv = mᵢ / (2kB*T)
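   # Analytic Maxwellian sharing n, u and T with the sampled VDF:
   #   g(v) = n ⋅ (mᵢ/(2π kB T))^(3/2) ⋅ exp(-mᵢ|v-u|²/(2 kB T)), vth2Inv = mᵢ/(2 kB T)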
@inbounds for k in axes(VDF,3), j in axes(VDF,2), i in axes(VDF,1)
vx = vmin[1] + (i - 0.5f0)*dv[1]
vy = vmin[2] + (j - 0.5f0)*dv[2]
vz = vmin[3] + (k - 0.5f0)*dv[3]
dv2 = (vx - u[1])^2 + (vy - u[2])^2 + (vz - u[3])^2
g = n * sqrt(vth2Inv/π) * (vth2Inv / π) * exp(-vth2Inv*dv2)
ϵₘ += abs(VDF[i,j,k] - g)
end
ϵₘ = -log(0.5 / n * convert(eltype(VDF), prod(dv)) * ϵₘ)
end
function getmaxwellianity(meta, vcellids, vcellf; species="proton")
(;vblock_size, vblocks, dv, vmin) = meta.meshes[species]
vsize = @inbounds ntuple(i -> vblock_size[i] * vblocks[i], Val(3))
slicez = vsize[1]*vsize[2]
blocksize = prod(vblock_size)
sliceBz = vblocks[1]*vblocks[2]
sliceCz = vblock_size[1]*vblock_size[2]
n = getdensity(meta, vcellf)
u = getvelocity(meta, vcellids, vcellf)
P = getpressure(meta, vcellids, vcellf)
p = (P[1] + P[2] + P[3]) / 3
   T = p / (n * kB) # temperature from scalar pressure
ϵₘ = zero(eltype(vcellf))
vth2Inv = mᵢ / (2kB*T)
@inbounds @simd for ic in eachindex(vcellids)
id = findindex(vcellids[ic], vblocks, vblock_size, blocksize, vsize, sliceBz, sliceCz)
i = id % vsize[1]
j = id % slicez ÷ vsize[1]
k = id ÷ slicez
vx = vmin[1] + (i + 0.5f0)*dv[1]
vy = vmin[2] + (j + 0.5f0)*dv[2]
vz = vmin[3] + (k + 0.5f0)*dv[3]
dv2 = (vx - u[1])^2 + (vy - u[2])^2 + (vz - u[3])^2
g = n * sqrt(vth2Inv/π) * (vth2Inv / π) * exp(-vth2Inv*dv2)
ϵₘ += abs(vcellf[ic] - g)
end
ϵₘ = -log(0.5 / n * convert(eltype(vcellf), prod(dv)) * ϵₘ)
end
function isInsideDomain(meta::MetaVLSV, point)
   (;coordmin, coordmax) = meta
   all(i -> coordmin[i] < point[i] ≤ coordmax[i], 1:3)
end
"""
getcellinline(meta, point1, point2) -> cellids, distances, coords
Returns cell IDs, distances and coordinates for every cell in a line between two given
points `point1` and `point2`. TODO: preallocation?
"""
function getcellinline(meta::MetaVLSV, point1::Vector{T}, point2::Vector{T}) where T
(;coordmin, coordmax, ncells) = meta
if !isInsideDomain(meta, point1)
throw(DomainError(point1, "point location outside simulation domain!"))
elseif !isInsideDomain(meta, point2)
throw(DomainError(point2, "point location outside simulation domain!"))
end
cell_lengths = @inbounds ntuple(i -> (coordmax[i] - coordmin[i]) / ncells[i], Val(3))
distances = [zero(T)]
cellids = [getcell(meta, point1)]
coords = [SVector{3}(point1)]
ϵ = eps(T)
unit_vector = @. (point2 - point1) / $norm(point2 - point1 + ϵ)
p = coords[1]
coef_min = zeros(T, 3)
coef_max = zeros(T, 3)
@inbounds while true
cid = getcell(meta, p)
amrlvl = getlevel(meta, cid)
# Get the max and min cell boundaries
min_bounds = getcellcoordinates(meta, cid) .- 0.5.*cell_lengths.*0.5.^amrlvl
max_bounds = min_bounds .+ cell_lengths
# Check which face we hit first
@. coef_min = (min_bounds - p) / unit_vector
@. coef_max = (max_bounds - p) / unit_vector
# Negative coefficients indicates the opposite direction
for i = 1:3
if unit_vector[i] == 0.0
coef_min[i] = Inf
coef_max[i] = Inf
end
if coef_min[i] ≤ 0 coef_min[i] = Inf end
if coef_max[i] ≤ 0 coef_max[i] = Inf end
end
# Find the minimum distance from a boundary times a factor
d = min(minimum(coef_min), minimum(coef_max)) * 1.00001
coordnew = SVector(
p[1] + d*unit_vector[1],
p[2] + d*unit_vector[2],
p[3] + d*unit_vector[3])
dot(point2 .- coordnew, unit_vector) ≥ 0 || break
cellidnew = getcell(meta, coordnew)
push!(cellids, cellidnew)
push!(coords, coordnew)
push!(distances, norm(coordnew .- point1))
p = coordnew
end
cellids, distances, coords
end
"""
getslicecell(meta, sliceoffset, idim, minCoord, maxCoord) -> idlist, indexlist
Find the cell IDs `idlist` which are needed to plot a 2d cut through a 3d mesh along
direction `idim` at `sliceoffset`, together with `indexlist`, a mapping from the
original cell order to the cut plane that can be used to select data onto the plane.
"""
function getslicecell(meta::MetaVLSV, sliceoffset, idim, minCoord, maxCoord)
idim ∉ (1,2,3) && @error "Unknown slice direction $idim"
(;ncells, maxamr, cellid, cellindex) = meta
nsize = ncells[idim]
sliceratio = sliceoffset / (maxCoord - minCoord)
0.0 ≤ sliceratio ≤ 1.0 || error("slice plane index out of bound!")
# Find the ids
nlen = 0
ncell = prod(ncells)
# number of cells up to each refinement level
nStart = (vcat(0, accumulate(+, (ncell*8^ilvl for ilvl = 0:maxamr))))
indexlist = Int[]
idlist = UInt[]
cellidsorted = cellid[cellindex]
@inbounds for ilvl = 0:maxamr
nLow, nHigh = nStart[ilvl+1], nStart[ilvl+2]
ids = cellidsorted[nLow .< cellidsorted .≤ nHigh]
ix, iy, iz = getindexes(ilvl, ncells[1], ncells[2], nLow, ids)
coords =
if idim == 1
ix
elseif idim == 2
iy
else # 3
iz
end
# Find the cut plane index for each refinement level (0-based)
depth = floor(Int, sliceratio*nsize*2^ilvl)
# Find the needed elements to create the cut and save the results
elements = coords .== depth
append!(indexlist, (nlen+1:nlen+length(ids))[elements])
append!(idlist, ids[elements])
nlen += length(ids)
end
idlist, indexlist
end
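# Usage sketch (hypothetical values): cell IDs on an x-normal cut placed at x = 0,
#   idlist, indexlist = getslicecell(meta, 0.0 - meta.coordmin[1], 1,
#                                    meta.coordmin[1], meta.coordmax[1])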
"""
refineslice(meta, idlist, data, normal) -> Array
Generate scalar data on the finest refinement level given cellids `idlist` and variable
`data` on the slice perpendicular to `normal`.
"""
function refineslice(meta::MetaVLSV, idlist, data, normal)
(;ncells, maxamr) = meta
dims = let ratio = 2^maxamr
if normal == :x
i1, i2 = 2, 3
elseif normal == :y
i1, i2 = 1, 3
elseif normal == :z
i1, i2 = 1, 2
end
(ncells[i1]*ratio, ncells[i2]*ratio)
end
dpoints = zeros(eltype(data), dims...)
# Create the plot grid
ncell = prod(ncells)
nHigh, nLow = ncell, 0
@inbounds for i = 0:maxamr
ids = idlist[nLow .< idlist .≤ nHigh]
d = data[nLow .< idlist .≤ nHigh]
ix, iy, iz = getindexes(i, ncells[1], ncells[2], nLow, ids)
# Get the correct coordinate values and the widths for the plot
a, b =
if normal == :x
iy, iz
elseif normal == :y
ix, iz
elseif normal == :z
ix, iy
end
# Insert the data values into dpoints
refineRatio = 2^(maxamr - i)
iRange = 0:refineRatio-1
X, Y = ndgrid(iRange, iRange)
coords = [(0, 0) for _ in a, _ in 1:2^(2*(maxamr-i))]
@inbounds for ir = 1:2^(2*(maxamr-i)), ic in eachindex(a, b)
@fastmath coords[ic,ir] = (muladd(a[ic], refineRatio, 1+X[ir]),
muladd(b[ic], refineRatio, 1+Y[ir]) )
end
for ir = 1:2^(2*(maxamr-i)), ic in eachindex(d)
dpoints[ coords[ic,ir]... ] = d[ic]
end
nLow = nHigh
nHigh += ncell*8^(i+1)
end
dpoints
end
"Compute every cell id's x, y and z indexes on the given refinement level (0-based)."
@inline function getindexes(ilevel, xcells, ycells, nCellUptoLowerLvl, ids)
slicesize = xcells*ycells*4^ilevel
iz = @. (ids - nCellUptoLowerLvl - 1) ÷ slicesize
iy = similar(iz)
ix = similar(iz)
@inbounds for i in eachindex(ids, iz)
# number of ids up to the coordinate z in the refinement level ilevel
idUpToZ = muladd(iz[i], slicesize, nCellUptoLowerLvl)
iy[i] = (ids[i] - idUpToZ - 1) ÷ (xcells*2^ilevel)
ix[i] = ids[i] - idUpToZ - iy[i]*xcells*2^ilevel - 1
end
ix, iy, iz
end
@inline function getindexes(ilvl, xcells, ycells, nCellUptoLowerLvl, id::Integer)
slicesize = xcells*ycells*4^ilvl
iz = (id - nCellUptoLowerLvl - 1) ÷ slicesize
idUpToZ = muladd(iz, slicesize, nCellUptoLowerLvl)
iy = (id - idUpToZ - 1) ÷ (xcells*2^ilvl)
ix = id - idUpToZ - iy*xcells*2^ilvl - 1
ix, iy, iz
end
"""
getnearestcellwithvdf(meta, id) -> UInt
Find the nearest spatial cell with VDF saved of a given cell `id` associated with `meta`.
"""
function getnearestcellwithvdf(meta::MetaVLSV, id)
cells = getcellwithvdf(meta)
isempty(cells) && throw(ArgumentError("No distribution saved in $(meta.name)"))
coords = [(0.0f0, 0.0f0, 0.0f0) for _ in cells]
@inbounds for i in eachindex(cells)
coords[i] = getcellcoordinates(meta, cells[i])
end
coords_orig = getcellcoordinates(meta, id)
d2 = [sum((c .- coords_orig).^2) for c in coords]
cells[argmin(d2)]
end
"""
getcellwithvdf(meta) -> cellids
Get all the cell IDs with VDF saved associated with `meta`.
"""
function getcellwithvdf(meta::MetaVLSV)
fid, footer = meta.fid, meta.footer
cellsWithVDF = readmesh(fid, footer, "SpatialGrid", "CELLSWITHBLOCKS")::Vector{UInt}
nblock_C = readmesh(fid, footer, "SpatialGrid", "BLOCKSPERCELL")::Vector{UInt32}
innerBCCells = findall(==(0), nblock_C)
deleteat!(cellsWithVDF, innerBCCells)
cellsWithVDF
end
"Return the first cellid - 1 on `mylevel` given `ncells` on this level."
function get1stcell(mylevel, ncells)
cid1st = 0
for i = 0:mylevel-1
cid1st += ncells*8^i
end
cid1st
end
fillmesh(meta::MetaVLSV, vars::AbstractString) = fillmesh(meta, [vars])
"""
fillmesh(meta::MetaVLSV, vars; verbose=false) -> celldata, vtkGhostType
Fill the DCCRG mesh with quantity of `vars` on all refinement levels.
# Return arguments
- `celldata::Vector{Vector{Array}}`: data for each variable on each AMR level.
- `vtkGhostType::Array{UInt8}`: cell status (to be completed!).
"""
function fillmesh(meta::MetaVLSV, vars; verbose=false)
(;maxamr, fid, footer, ncells, cellid, cellindex) = meta
nvarvg = findall(!startswith("fg_"), vars)
nv = length(vars)
T = Vector{DataType}(undef, nv)
offset = zeros(Int, nv)
arraysize = zeros(Int, nv)
vsize = zeros(Int, nv)
@inbounds for i = 1:nv
T[i], offset[i], arraysize[i], _, vsize[i] =
getObjInfo(footer, vars[i], "VARIABLE", "name")
end
Tout = copy(T)
for i in eachindex(T)
if T[i] == Float64 Tout[i] = Float32 end
end
@inbounds celldata =
[[zeros(Tout[iv], vsize[iv], ncells[1] << i, ncells[2] << i, ncells[3] << i)
for i = 0:maxamr] for iv in 1:nv]
@inbounds vtkGhostType =
[zeros(UInt8, ncells[1] << i, ncells[2] << i, ncells[3] << i) for i = 0:maxamr]
if maxamr == 0
@inbounds for iv = 1:nv
celldata[iv][1][:] = readvariable(meta, vars[iv])
end
return celldata, vtkGhostType
end
# Find the ids
ncell = prod(ncells)
nLow, nHigh = 0, ncell
cellidsorted = cellid[cellindex]
@inbounds for ilvl = 0:maxamr
verbose && @info "scanning AMR level $ilvl..."
idfirst_ = searchsortedfirst(cellidsorted, nLow+1)
idlast_ = searchsortedlast(cellidsorted, nHigh)
ids = cellidsorted[idfirst_:idlast_]
# indicate the condition of non-existing cells
idrefined = setdiff(nLow+1:nHigh, ids)
@simd for id in idrefined
ix, iy, iz = getindexes(ilvl, ncells[1], ncells[2], nLow, id) .+ 1
vtkGhostType[ilvl+1][ix,iy,iz] = 8
end
rOffsetsRaw = indexin(ids, cellid)
if ilvl != maxamr
for iv in nvarvg
verbose && @info "reading variable $(vars[iv])..."
a = mmap(fid, Vector{UInt8}, sizeof(T[iv])*vsize[iv]*arraysize[iv], offset[iv])
dataRaw = reshape(reinterpret(T[iv], a), vsize[iv], arraysize[iv])
data = @view dataRaw[:,rOffsetsRaw]
fillcell!(ilvl, ids, ncells, maxamr, nLow, celldata[iv], data)
end
else # max refinement level
for (iv, var) = enumerate(vars)
verbose && @info "reading variable $var..."
if startswith(var, "fg_")
celldata[iv][end][:] = readvariable(meta, var)
else
a = mmap(fid, Vector{UInt8}, sizeof(T[iv])*vsize[iv]*arraysize[iv],
offset[iv])
dataRaw = reshape(reinterpret(T[iv], a), vsize[iv], arraysize[iv])
data = @view dataRaw[:,rOffsetsRaw]
fillcell!(ids, ncells, maxamr, nLow, celldata[iv][end], data)
end
end
end
nLow = nHigh
nHigh += ncell*8^(ilvl+1)
end
celldata, vtkGhostType
end
function fillcell!(ilvl, ids, ncells, maxamr, nLow, dataout, datain)
@inbounds for ilvlup = ilvl:maxamr
r = 2^(ilvlup-ilvl) # ratio on refined level
for c in eachindex(ids)
ixr, iyr, izr = getindexes(ilvl, ncells[1], ncells[2], nLow, ids[c]) .* r
for k = 1:r, j = 1:r, i = 1:r
_fillcelldata!(dataout[ilvlup+1], datain, ixr+i, iyr+j, izr+k, c)
end
end
end
end
function fillcell!(ids, ncells, maxamr, nLow, dataout, datain)
@inbounds for i in eachindex(ids)
ix, iy, iz = getindexes(maxamr, ncells[1], ncells[2], nLow, ids[i]) .+ 1
_fillcelldata!(dataout, datain, ix, iy, iz, i)
end
end
@inline function _fillcelldata!(dataout, datain, i, j, k, index)
@inbounds @simd for icomp in axes(datain,1)
dataout[icomp,i,j,k] = datain[icomp,index]
end
end
"""
write_vtk(meta::MetaVLSV; kwargs...)
write_vtk(file; kwargs...)
Convert VLSV file to VTK format.
# Keyword arguments
- `vars=[""]`: select which variables to convert.
- `ascii=false`: output stored in ASCII or compressed binary format.
- `maxamronly=false`: generate image files on the highest refinement level only.
- `verbose=false`: display logs during conversion.
"""
function write_vtk(meta::MetaVLSV; vars=[""], ascii=false, maxamronly=false, verbose=false)
(;ncells, maxamr, dcoord, coordmin) = meta
   append = !ascii
filedata = Vector{String}(undef, maxamr+1)
@inbounds for i in 1:maxamr+1
filedata[i] = meta.name[1:end-5]*"_$i.vti"
end
if isempty(vars[1])
vars = meta.variable
cellid_ = findfirst(==("CellID"), vars)
if !isnothing(cellid_) deleteat!(vars, cellid_) end
end
data, vtkGhostType = fillmesh(meta, vars; verbose)
if maxamronly
save_image(meta, meta.name[1:end-4]*"vti", vars, data, vtkGhostType[end], maxamr,
append)
else
# Generate image file on each refinement level
@inbounds for i in eachindex(vtkGhostType, filedata)
fdata, ghost = filedata[i], vtkGhostType[i]
save_image(meta, fdata, vars, data, ghost, i-1, append)
end
# Generate vthb file
filemeta = meta.name[1:end-4]*"vthb"
doc = XMLDocument()
elm = ElementNode("VTKFile")
setroot!(doc, elm)
link!(elm, AttributeNode("type", "vtkOverlappingAMR"))
link!(elm, AttributeNode("version", "1.1"))
link!(elm, AttributeNode("byte_order", "LittleEndian")) # x86
link!(elm, AttributeNode("header_type", "UInt64"))
xamr = addelement!(elm, "vtkOverlappingAMR")
origin = @sprintf "%f %f %f" coordmin[1] coordmin[2] coordmin[3]
link!(xamr, AttributeNode("origin", origin))
link!(xamr, AttributeNode("grid_description", "XYZ"))
@inbounds for i = 0:maxamr
xBlock = addelement!(xamr, "Block")
link!(xBlock, AttributeNode("level", string(i)))
spacing_str = @sprintf "%f %f %f" dcoord[1]/2^i dcoord[2]/2^i dcoord[3]/2^i
link!(xBlock, AttributeNode("spacing", spacing_str))
xDataSet = addelement!(xBlock, "DataSet")
link!(xDataSet, AttributeNode("index", "0"))
amr_box = (0, ncells[1]*2^i-1, 0, ncells[2]*2^i-1, 0, ncells[3]*2^i-1)
box_str = @sprintf("%d %d %d %d %d %d", amr_box[1], amr_box[2], amr_box[3],
amr_box[4], amr_box[5], amr_box[6])
link!(xDataSet, AttributeNode("amr_box", box_str))
link!(xDataSet, AttributeNode("file", filedata[i+1]))
end
write(filemeta, doc)
end
return
end
write_vtk(file; kwargs...) = write_vtk(load(file); kwargs...)
"""
save_image(meta::MetaVLSV, file, vars, data, vtkGhostType, level,
ascii=false, append=true)
Save `data` of name `vars` at AMR `level` into VTK image file of name `file`.
# Arguments
- `file::String`: output file name.
- `vars::Vector{String}`: variable names to be saved.
- `data::Vector{Vector}`: data for all the variables on each refinement level.
- `vtkGhostType::Array{UInt8}`: array for visibility control.
- `level::Int`: refinement level (0-based).
- `ascii=false`: save output in ASCII or binary format.
- `append=true`: determines whether to append data at the end of file or do in-block writing.
"""
function save_image(meta::MetaVLSV, file, vars, data, vtkGhostType, level, ascii=false,
append=true)
(;coordmin, dcoord, ncells) = meta
origin = (coordmin[1], coordmin[2], coordmin[3])
ratio = 2^level
spacing = (dcoord[1] / ratio, dcoord[2] / ratio, dcoord[3] / ratio)
vtk = vtk_grid(file, ncells[1]*ratio+1, ncells[2]*ratio+1, ncells[3]*ratio+1;
origin, spacing, append, ascii)
@inbounds for (iv, var) in enumerate(vars)
vtk[var, VTKCellData()] = data[iv][level+1]
end
vtk["vtkGhostType", VTKCellData()] = vtkGhostType
vtk_save(vtk)
end
"""
write_vlsv(filein, fileout, newvars::Vector{Tuple{Vector, String, VarInfo}};
force=false)
Generate a new VLSV `fileout` based on `filein`, with `newvars` added.
`force=true` overwrites the existing `fileout`.
"""
function write_vlsv(filein::AbstractString, fileout::AbstractString,
newvars::Vector{Tuple{VecOrMat, String, VarInfo}}; force=false)
if isfile(fileout) && !force
error("Output target $fileout exists!")
end
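   # VLSV layout: [8-byte endianness tag][8-byte footer offset][raw data][XML footer]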
fid = open(filein)
   endian_offset = 8 # the first 8 bytes hold the endianness marker
seek(fid, endian_offset)
# Obtain the offset of the XML footer
offset = read(fid, UInt64)
# Store all non-footer part as raw data
raw_data = zeros(UInt8, offset)
seekstart(fid)
readbytes!(fid, raw_data, offset)
# Read input VLSV file footer
doc = read(fid, String) |> parsexml
footer = doc |> root
close(fid)
# Get new variables' offsets
offsets = accumulate(+,
[offset, [sizeof(newvars[i][1]) for i in eachindex(newvars)[1:end-1]]...])
# Create new children for footer
for i in eachindex(newvars, offsets)
elm = addelement!(footer, "VARIABLE", string(offsets[i]))
a1 = AttributeNode("arraysize", string(length(newvars[i][1])))
a2 = AttributeNode("datasize", string(sizeof(eltype(newvars[i][1]))))
a3 =
if eltype(newvars[i][1]) <: Signed
AttributeNode("datatype", "int")
elseif eltype(newvars[i][1]) <: AbstractFloat
AttributeNode("datatype", "float")
elseif eltype(newvars[i][1]) <: Unsigned
AttributeNode("datatype", "uint")
end
a4 = AttributeNode("mesh", "SpatialGrid")
a5 = AttributeNode("name", newvars[i][2])
a6 = AttributeNode("unit", newvars[i][3].unit)
a7 = AttributeNode("unitConversion", newvars[i][3].unitConversion)
a8 = AttributeNode("unitLaTeX", newvars[i][3].unitLaTeX)
a9 = AttributeNode("variableLaTeX", newvars[i][3].variableLaTeX)
a10 =
if ndims(newvars[i][1]) == 1
AttributeNode("vectorsize", "1")
else
AttributeNode("vectorsize", string(size(newvars[i][1], 1)))
end
for attributenode in (a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
link!(elm, attributenode)
end
end
# Write to fileout
open(fileout, "w") do io
write(io, @view raw_data[1:8]) # endianness
# Compute footer offset
totalnewsize = 0
for var in newvars
totalnewsize += sizeof(var[1])
end
write(io, offset+totalnewsize) # record new footer offset
write(io, @view raw_data[17:end]) # copy original data
for var in newvars
write(io, var[1])
end
write(io, string(footer), '\n')
end
return
end
"""
issame(file1, file2, tol=1e-4; verbose=false) -> Bool
Check if two VLSV files `file1` and `file2` are approximately identical, under relative
tolerance `tol`.
"""
function issame(f1, f2, tol::AbstractFloat=1e-4; verbose=false)
# 1st sanity check: minimal filesize difference
if abs(filesize(f1) - filesize(f2)) / filesize(f2) > 1e-2
verbose && println("The sizes of files are already quite different!")
return false
end
meta1 = load(f1)
meta2 = load(f2)
varnames = meta1.variable
strskip = r"CellID|rank|blocks"
deleteat!(varnames, findall(endswith(strskip), varnames))
isIdentical = true
for vname in varnames
v1 = readvariable(meta1, vname)
v2 = readvariable(meta2, vname)
s1, s2 = sum(v1), sum(v2)
if abs(s1 - s2) > tol * abs(s1) && abs(s1 - s2) > tol * abs(s2)
isIdentical = false
verbose && println("$vname is quite different!")
break
end
end
verbose && isIdentical && println("$f1 and $f2 are identical under tolerance $tol.")
close(meta1.fid)
close(meta2.fid)
return isIdentical
end
|
{"hexsha": "306e5fbbd86f9a633ccdfef9e16545254e967861", "size": 36257, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/vlsv/vlsvutility.jl", "max_stars_repo_name": "alhom/Vlasiator.jl", "max_stars_repo_head_hexsha": "615333705b5346522479ab72398f059cb94ab026", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/vlsv/vlsvutility.jl", "max_issues_repo_name": "alhom/Vlasiator.jl", "max_issues_repo_head_hexsha": "615333705b5346522479ab72398f059cb94ab026", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vlsv/vlsvutility.jl", "max_forks_repo_name": "alhom/Vlasiator.jl", "max_forks_repo_head_hexsha": "615333705b5346522479ab72398f059cb94ab026", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9096334186, "max_line_length": 93, "alphanum_fraction": 0.6185288358, "num_tokens": 12369}
|
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import argparse
import os
import numpy as np
import skimage.io as io
import tqdm
from pycocotools.coco import COCO
from skimage.draw import polygon
parser = argparse.ArgumentParser()
parser.add_argument('--annotation_file', type=str, default="./annotations/instances_train2017.json",
                    help="Path to the annotation file. It can be downloaded at http://images.cocodataset.org/annotations/annotations_trainval2017.zip. Should be either instances_train2017.json or instances_val2017.json")
parser.add_argument('--input_label_dir', type=str, default="./train_label/",
help="Path to the directory containing label maps. It can be downloaded at http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip")
parser.add_argument('--output_instance_dir', type=str, default="./train_inst/",
help="Path to the output directory of instance maps")
opt = parser.parse_args()
os.makedirs(opt.output_instance_dir, exist_ok=True)
print("annotation file at {}".format(opt.annotation_file))
print("input label maps at {}".format(opt.input_label_dir))
print("output dir at {}".format(opt.output_instance_dir))
# initialize COCO api for instance annotations
coco = COCO(opt.annotation_file)
# display COCO categories and supercategories
cats = coco.loadCats(coco.getCatIds())
imgIds = coco.getImgIds(catIds=coco.getCatIds(cats))
for ix, id in enumerate(tqdm.tqdm(imgIds)):
# if ix % 50 == 0:
# print("{} / {}".format(ix, len(imgIds)))
img_dict = coco.loadImgs(id)[0]
filename = img_dict["file_name"].replace("jpg", "png")
label_name = os.path.join(opt.input_label_dir, filename)
inst_name = os.path.join(opt.output_instance_dir, filename)
img = io.imread(label_name, as_gray=True)
annIds = coco.getAnnIds(imgIds=id, catIds=[], iscrowd=None)
anns = coco.loadAnns(annIds)
count = 0
for ann in anns:
        # test membership before indexing, otherwise annotations without a
        # "segmentation" field would raise a KeyError
        if "segmentation" in ann and type(ann["segmentation"]) == list:
            for seg in ann["segmentation"]:
                poly = np.array(seg).reshape((int(len(seg) / 2), 2))
                rr, cc = polygon(poly[:, 1] - 1, poly[:, 0] - 1)
                img[rr, cc] = count
            count += 1
io.imsave(inst_name, img)
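# Sketch of the rasterization step above on a toy triangle (values assumed):
#   seg = [10, 10, 40, 10, 25, 35]                    # flat [x1, y1, x2, y2, x3, y3]
#   poly = np.array(seg).reshape(-1, 2)               # [[10, 10], [40, 10], [25, 35]]
#   rr, cc = polygon(poly[:, 1] - 1, poly[:, 0] - 1)  # row/col indices inside it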
|
{"hexsha": "606d9ab53da428894d2ad3f38ca2eddb650052b0", "size": 2476, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/coco_generate_instance_map.py", "max_stars_repo_name": "NguyenHoangAn0511/gan-compression", "max_stars_repo_head_hexsha": "6512c067d4adebc7451635991418b54ab76dd711", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1005, "max_stars_repo_stars_event_min_datetime": "2020-03-20T04:13:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:16:45.000Z", "max_issues_repo_path": "datasets/coco_generate_instance_map.py", "max_issues_repo_name": "shadow2496/gan-compression", "max_issues_repo_head_hexsha": "9f3a2b51bedca040cc7d31c60ca71a77138f2c81", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 94, "max_issues_repo_issues_event_min_datetime": "2020-03-20T08:36:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:20:02.000Z", "max_forks_repo_path": "datasets/coco_generate_instance_map.py", "max_forks_repo_name": "shadow2496/gan-compression", "max_forks_repo_head_hexsha": "9f3a2b51bedca040cc7d31c60ca71a77138f2c81", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 147, "max_forks_repo_forks_event_min_datetime": "2020-03-20T04:49:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T10:44:25.000Z", "avg_line_length": 43.4385964912, "max_line_length": 220, "alphanum_fraction": 0.6865912763, "include": true, "reason": "import numpy", "num_tokens": 608}
|
import itertools
import numpy as np
import pandas as pd
import pytest
from hamcrest import assert_that, none, not_none, calling, raises, close_to
import cifrum as lib
from conftest import decimal_places, delta
from cifrum._portfolio.currency import PortfolioCurrencyFactory
from cifrum.common.enums import Currency
__end_period = pd.Period('2018-12', freq='M')
@pytest.fixture
def pcf():
return lib.obj_graph.provide(PortfolioCurrencyFactory)
@pytest.mark.parametrize('currency, inflation_kind',
itertools.product(Currency, ['values', 'cumulative', 'a_mean', 'g_mean']))
def test__exists_for_all_currencies(pcf: PortfolioCurrencyFactory, currency: Currency, inflation_kind: str):
pc = pcf.new(currency=currency)
infl = pc.inflation(kind=inflation_kind, end_period=__end_period, years_ago=4)
assert_that(infl, not_none())
@pytest.mark.parametrize('currency, inflation_kind',
itertools.product(Currency, ['values', 'cumulative', 'a_mean', 'g_mean']))
def test__should_not_handle_both_start_date_and_years_ago(pcf: PortfolioCurrencyFactory,
currency: Currency, inflation_kind: str):
pc = pcf.new(currency=currency)
foo = calling(pc.inflation).with_args(kind=inflation_kind,
start_period=pd.Period('2011-1', freq='M'),
end_period=__end_period,
years_ago=4)
assert_that(foo, raises(ValueError, 'either `start_period` or `years_ago` should be provided'))
def test__inflation_values(pcf: PortfolioCurrencyFactory):
pc = pcf.new(currency=Currency.USD)
assert_that(pc.inflation(kind='cumulative', end_period=__end_period, years_ago=5).value,
close_to(.0780, delta))
assert_that(pc.inflation(kind='a_mean', end_period=__end_period, years_ago=5).value,
close_to(.0013, delta))
assert_that(pc.inflation(kind='g_mean', end_period=__end_period, years_ago=5).value,
close_to(.0151, delta))
infl_yoy = pc.inflation(kind='yoy', end_period=__end_period, years_ago=5)
assert infl_yoy.start_period == pd.Period('2014-1')
assert infl_yoy.end_period == pd.Period('2018-1')
np.testing.assert_almost_equal(infl_yoy.values, [.0076, .0073, .0207, .0211, .0191], decimal_places)
def test__gmean_inflation_for_less_than_year(pcf: PortfolioCurrencyFactory):
pc = pcf.new(currency=Currency.USD)
assert_that(pc.inflation(kind='g_mean',
start_period=pd.Period('2017-1', freq='M'),
end_period=pd.Period('2018-1', freq='M')),
not_none())
assert_that(pc.inflation(kind='g_mean',
start_period=pd.Period('2017-5', freq='M'),
end_period=pd.Period('2018-1', freq='M')),
none())
|
{"hexsha": "2408579a2686ab51c71de352007aef3a5aaae2e8", "size": 2958, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_inflation.py", "max_stars_repo_name": "31337mbf/yapo", "max_stars_repo_head_hexsha": "b790e112efccfb8f818dc7711989a9174b2c65fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_inflation.py", "max_issues_repo_name": "31337mbf/yapo", "max_issues_repo_head_hexsha": "b790e112efccfb8f818dc7711989a9174b2c65fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_inflation.py", "max_forks_repo_name": "31337mbf/yapo", "max_forks_repo_head_hexsha": "b790e112efccfb8f818dc7711989a9174b2c65fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8695652174, "max_line_length": 108, "alphanum_fraction": 0.6514536849, "include": true, "reason": "import numpy", "num_tokens": 686}
|
from dagster_pandas.data_frame import create_dagster_pandas_dataframe_type
from dagster_pandas.validation import PandasColumn
from numpy import mean, median, ndarray
from pandas import Timestamp
from dagster import DagsterType, EventMetadataEntry, TypeCheck
def compute_trip_dataframe_event_metadata(dataframe):
return [
EventMetadataEntry.text(
str(min(dataframe["start_time"])), "min_start_time", "Date data collection started",
),
EventMetadataEntry.text(
str(max(dataframe["end_time"])), "max_end_time", "Timestamp of last trip"
),
EventMetadataEntry.text(
str(len(dataframe)), "n_rows", "Number of rows seen in the dataframe"
),
EventMetadataEntry.text(
str(dataframe.columns), "columns", "Keys of columns seen in the dataframe"
),
]
TripDataFrameSchema = [
PandasColumn.integer_column("bike_id", min_value=0),
PandasColumn.datetime_column("start_time", min_datetime=Timestamp(year=2017, month=1, day=1),),
PandasColumn.datetime_column("end_time", min_datetime=Timestamp(year=2017, month=1, day=1),),
PandasColumn.string_column("interval_date"),
]
RawTripDataFrame = create_dagster_pandas_dataframe_type(
name="RawTripDataFrame",
columns=[
PandasColumn(column.name)
for column in TripDataFrameSchema
if column.name != "interval_date"
],
)
TripDataFrame = create_dagster_pandas_dataframe_type(
name="TripDataFrame",
columns=TripDataFrameSchema,
event_metadata_fn=compute_trip_dataframe_event_metadata,
)
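# Usage sketch (hypothetical solid, not part of this module): attaching the type
# to a solid output makes dagster run the column checks above and attach the
# event metadata to the materialization.
# from dagster import OutputDefinition, solid
#
# @solid(output_defs=[OutputDefinition(dagster_type=TripDataFrame)])
# def load_trips(_context):
#     ...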
def compute_traffic_dataframe_event_metadata(dataframe):
return [
EventMetadataEntry.text(
str(min(dataframe["peak_traffic_load"])), "min_traffic_load", "Best Peak Load"
),
EventMetadataEntry.text(
str(max(dataframe["peak_traffic_load"])), "max_traffic_load", "Worst Peak Load"
),
EventMetadataEntry.text(
str(mean(dataframe["peak_traffic_load"])), "mean_traffic_load", "Mean peak traffic",
),
EventMetadataEntry.text(
str(median(dataframe["peak_traffic_load"])),
"median_traffic_load",
"Median peak traffic",
),
EventMetadataEntry.text(
str(len(dataframe)), "n_rows", "Number of rows seen in the dataframe"
),
EventMetadataEntry.text(
str(dataframe.columns), "columns", "Keys of columns seen in the dataframe"
),
]
TrafficDataFrame = create_dagster_pandas_dataframe_type(
name="TrafficDataFrame",
columns=[
PandasColumn.string_column("interval_date"),
PandasColumn.integer_column("peak_traffic_load", min_value=0),
],
event_metadata_fn=compute_traffic_dataframe_event_metadata,
)
def compute_weather_dataframe_event_metadata(dataframe):
return [
EventMetadataEntry.text(
str(len(dataframe)), "n_rows", "Number of rows seen in the dataframe"
),
EventMetadataEntry.text(
str(dataframe.columns), "columns", "Keys of columns seen in the dataframe"
),
]
WeatherDataFrameSchema = [
PandasColumn.datetime_column("time", unique=True),
PandasColumn.string_column("summary"),
PandasColumn.categorical_column(
"icon", categories={"clear-day", "cloudy", "fog", "partly-cloudy-day", "rain"}
),
PandasColumn.integer_column("sunriseTime", min_value=0),
PandasColumn.integer_column("sunsetTime", min_value=0),
PandasColumn.float_column("precipIntensity", min_value=0.0, max_value=1.0),
PandasColumn.float_column("precipIntensityMax", min_value=0.0, max_value=1.0),
PandasColumn.float_column("precipProbability", min_value=0.0, max_value=1.0),
PandasColumn.float_column("temperatureHigh", min_value=40.0, max_value=100.0),
PandasColumn.integer_column("temperatureHighTime", min_value=0),
PandasColumn.float_column("temperatureLow", min_value=30.0, max_value=100.0),
PandasColumn.integer_column("temperatureLowTime", min_value=0),
PandasColumn.float_column("dewPoint", min_value=10.0, max_value=70.0),
PandasColumn.float_column("humidity", min_value=0.0, max_value=1.0),
PandasColumn.float_column("pressure", min_value=900.0, max_value=1200.0),
PandasColumn.float_column("windSpeed", min_value=0.0, max_value=100.0),
PandasColumn.float_column("windGust", min_value=0.0, max_value=40.0),
PandasColumn.integer_column("windGustTime", min_value=0),
PandasColumn.integer_column("windBearing", min_value=0),
PandasColumn.float_column("cloudCover", min_value=0.0, max_value=1.0),
PandasColumn.integer_column("uvIndex", min_value=0, max_value=12),
PandasColumn.integer_column("uvIndexTime", min_value=0),
PandasColumn.numeric_column("visibility", min_value=0.0, max_value=10.0),
PandasColumn.float_column("ozone", min_value=200.0, max_value=500.0),
]
WeatherDataFrame = create_dagster_pandas_dataframe_type(
name="WeatherDataFrame",
columns=WeatherDataFrameSchema,
event_metadata_fn=compute_weather_dataframe_event_metadata,
)
def validate_snapshot_timeseries(_, training_set_data):
if not isinstance(training_set_data, tuple):
return TypeCheck(False)
if len(training_set_data) != 2:
return TypeCheck(
success=False,
description="Invalid training set. The tuple must consist of a training set, output vector, and feature_names",
)
# tuple argument types
X, y = training_set_data
if not (isinstance(X, ndarray) and isinstance(y, ndarray)):
return TypeCheck(
success=False,
description="Both input matrix and output vector must be numpy arrays. X: {} | y: {}".format(
type(X), type(y)
),
)
timeseries_length, snapshot_length, num_timeseries = X.shape
output_vector_length = y.shape[0]
if num_timeseries == 0 or output_vector_length == 0:
return TypeCheck(success=False, description="No empty training sets allowed",)
if timeseries_length != output_vector_length:
return TypeCheck(
success=False, description="Every timeseries must have as many snapshots as outputs"
)
return TypeCheck(
success=True,
metadata_entries=[
EventMetadataEntry.text(
str(num_timeseries), "num_ts", "Number of parallel timeseries."
),
EventMetadataEntry.text(
str(timeseries_length), "timeseries_length", "Length of each timeseries."
),
EventMetadataEntry.text(
str(snapshot_length),
"snapshot_length",
"Number of past observations for each input.",
),
],
)
TrainingSet = DagsterType(
name="TrainingSet",
description="Final training set ready for the ml pipeline",
type_check_fn=validate_snapshot_timeseries,
)
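# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged illustration of the type check behind TrainingSet.
# The array shapes below are assumptions chosen to satisfy
# validate_snapshot_timeseries; they are not taken from the pipeline itself.
if __name__ == "__main__":
    import numpy as np

    X = np.zeros((100, 30, 5))  # (timeseries_length, snapshot_length, num_timeseries)
    y = np.zeros(100)           # one output per timeseries snapshot
    check = validate_snapshot_timeseries(None, (X, y))
    assert check.success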
|
{"hexsha": "6ac5ddff0450d15cda4978e23eeef7695838052b", "size": 6979, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/legacy_examples/dagster_examples/bay_bikes/types.py", "max_stars_repo_name": "bitdotioinc/dagster", "max_stars_repo_head_hexsha": "4fe395a37b206b1a48b956fa5dd72bf698104cca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-27T19:49:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T19:49:59.000Z", "max_issues_repo_path": "examples/legacy_examples/dagster_examples/bay_bikes/types.py", "max_issues_repo_name": "bitdotioinc/dagster", "max_issues_repo_head_hexsha": "4fe395a37b206b1a48b956fa5dd72bf698104cca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2022-03-16T06:55:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T07:03:25.000Z", "max_forks_repo_path": "examples/legacy_examples/dagster_examples/bay_bikes/types.py", "max_forks_repo_name": "bitdotioinc/dagster", "max_forks_repo_head_hexsha": "4fe395a37b206b1a48b956fa5dd72bf698104cca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5215053763, "max_line_length": 123, "alphanum_fraction": 0.6857716005, "include": true, "reason": "from numpy", "num_tokens": 1590}
|
import numpy as np
from dataclasses import dataclass
from material import Material
@dataclass
class ConversionMatrices:
S: np.ndarray
S_reduced: np.ndarray
S_bar: np.ndarray
S_bar_reduced: np.ndarray
C: np.ndarray
C_reduced: np.ndarray
Q_bar: np.ndarray
Q_bar_reduced: np.ndarray
def __init__(self, mat: Material):
self.mat = mat
self.S = self.compliance_matrix(mat, theta_rad=0)
self.S_reduced = self._reduced_compliance_matrix()
self.S_bar = self.compliance_matrix(mat, theta_rad=0)
self.S_bar_reduced = self._transformed_compliance_matrix_2D()
self.C = np.linalg.inv(self.S)
self.C_reduced = np.linalg.inv(self.S_reduced)
self.Q_bar = np.linalg.inv(self.S_bar)
self.Q_bar_reduced = np.linalg.inv(self.S_bar_reduced)
self.T_2D = self.transformation_matrix_2D(theta_rad=0)
self.T_3D = self.transformation_matrix_3D(theta_rad=0)
def update_orientation(self, theta_rad: float):
'''
Updates the conversion matrices which rely on the orientation of the lamina.
Args:
theta_rad (float): Orientation in radians.
'''
self.S_bar = self.compliance_matrix(self.mat, theta_rad)
self.S_bar_reduced = self._transformed_compliance_matrix_2D(theta_rad)
self.Q_bar = np.linalg.inv(self.S_bar)
self.Q_bar_reduced = np.linalg.inv(self.S_bar_reduced)
self.T_2D = self.transformation_matrix_2D(theta_rad)
self.T_3D = self.transformation_matrix_3D(theta_rad)
def compliance_matrix(self, mat: Material, theta_rad: float = 0) -> np.ndarray:
'''
Returns the orthotropic compliance matrix.
        Parameters:
            mat (Material): Material supplying the elastic moduli [E1, E2, E3],
                Poisson's ratios [v23, v13, v12] and shear moduli [G23, G13, G12]
            theta_rad (float): Rotation angle in radians (defaults to 0, i.e. no rotation)
Returns:
S (np.ndarray): Compliance matrix describing the material in the 3 principal directions
'''
E, v, G = mat.get_properties()
# Unpack the Poisson's ratio values
_v23, _v13, _v12 = v
# Create the 3x3 linear-elastic stress relationship
_norm = np.ones((3, 3)) * (1 / E)
_n = np.eye(3)
        # Relationships between elastic moduli and Poisson's ratio
_n[0, 1] = -E[1] / E[0] * _v12
_n[1, 0] = -_v12
_n[0, 2] = -E[2] / E[0] * _v13
_n[2, 0] = -_v13
_n[1, 2] = -E[1] / E[2] * _v23
_n[2, 1] = -_v23
# Create the 3x3 shear relationship
_shear = np.eye(3) / G
# Combine all into compliance matrix
_S = np.zeros((6, 6))
_S[:3, :3] = _n * _norm
_S[3:, 3:] = _shear
# Transformation matrix (defaults to identity if no rotation)
T = self.transformation_matrix_3D(theta_rad=theta_rad)
_S = T.T.dot(_S).dot(T)
return _S
def _reduced_compliance_matrix(self) -> np.ndarray:
'''
Returns the planar compliance matrix.
        Derived from the full 3D compliance matrix (self.S) computed at
        construction; takes no parameters.
Returns:
S (np.ndarray): Planar (reduced) compliance matrix
'''
S = self.S
_S_r = np.zeros((3, 3))
_S_r[:2, :2] = S[:2, :2]
_S_r[2, 2] = S[-1, -1]
return _S_r
def _A_matrix(self, q_bar: np.ndarray, z: int):
pass
def transformation_matrix_2D(self, theta_rad: float = 0) -> np.ndarray:
c = np.cos(theta_rad)
s = np.sin(theta_rad)
T = np.array(
[
[c ** 2, s ** 2, 2 * c * s],
[s ** 2, c ** 2, -2 * c * s],
[-c * s, c * s, c ** 2 - s ** 2],
]
)
return T
def transformation_matrix_3D(self, theta_rad: float = 0) -> np.ndarray:
c = np.cos(theta_rad)
s = np.sin(theta_rad)
T = np.array(
[
[c ** 2, s ** 2, 0, 0, 0, 2 * c * s],
[s ** 2, c ** 2, 0, 0, 0, -2 * c * s],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, c, s, 0],
[0, 0, 0, -s, c, 0],
[-c * s, c * s, 0, 0, 0, c ** 2 - s ** 2],
]
)
return T
def _transformed_compliance_matrix_2D(self, theta_rad: float = 0) -> np.ndarray:
'''
Calculates the 2D transformed compliance matrix.
Args:
theta_rad (float, optional): Rotation angle measure in radians. Defaults to 0.
Returns:
np.ndarray: The transformed 2D compliance matrix evaluated at theta_rad
'''
T = self.transformation_matrix_2D(theta_rad)
S = self.S_reduced
S_bar_reduced = T.T.dot(S).dot(T)
return S_bar_reduced
def T_z(theta_rad):
'''Transformation matrix about the z-axis'''
return np.array(
[
[np.cos(theta_rad), np.cos(np.pi / 2 - theta_rad), 0],
[np.cos(theta_rad + np.pi / 2), np.cos(theta_rad), 0],
[0, 0, 1],
]
)
def create_tensor_3D(_11, _22, _33, _23=0, _13=0, _12=0):
"""Create a 3D tensor given the x, y, z, xy, xz, yz values"""
return np.array([[_11, _12, _13], [_12, _22, _23], [_13, _23, _33]])
def tensor_to_vec(tensor):
'''Create a vector from a given 3D tensor'''
return np.array([*np.diag(tensor), tensor[1, 2], tensor[0, 2], tensor[0, 1]])
def to_gamma(strain_tensor) -> np.ndarray:
'''
Converts a given strain tensor into a matrix with shear strain in terms of gamma.
Parameters:
strain_tensor (np.ndarray): Strain tensor in terms of epsilon.
Returns:
gamma_matrix (np.ndarray): Strain matrix in terms of gamma.
'''
_strain_tensor = strain_tensor.copy()
_gamma_matrix = _strain_tensor + (_strain_tensor - _strain_tensor * np.eye(3))
return _gamma_matrix
def to_epsilon(strain_matrix) -> np.ndarray:
'''
Converts a given strain matrix into a strain tensor with shear strain in terms of gamma.
Parameters:
strain_matrix (np.ndarray): Strain matrix in terms of gamma.
Returns:
epsilon_tensor (np.ndarray): Strain tensor in terms of epsilon.
'''
_strain_matrix = strain_matrix.copy()
_epsilon_tensor = _strain_matrix * np.eye(3) + 0.5 * (
_strain_matrix - _strain_matrix * np.eye(3)
)
return _epsilon_tensor
def transformation_3D(tensor, rot_matrix, theta, theta_radians=False):
'''
Return the transformed 3D tensor. Shear outputs are in terms of epsilon.
Parameters:
tensor (numpy.ndarray): Cauchy tensor
rot_matrix (numpy.ndarray): Rotation matrix
theta (float): Angle of rotation
        theta_radians (bool): True if theta is given in radians
Returns:
prime (numpy.ndarray): Transformed matrix
'''
_tensor = tensor.copy()
# Convert to radians and evaluate the rotation matrix
_theta = theta if theta_radians else theta * np.pi / 180
_R = rot_matrix(_theta)
# Transformation equation
_prime = _R.dot(_tensor).dot(_R.T)
return _prime
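if __name__ == "__main__":
    # Usage sketch (editor's addition): rotate a uniaxial stress state of
    # magnitude 100 (arbitrary units) by 45 degrees about the z-axis using
    # the module-level helpers above. For this rotation the normal stress
    # splits evenly between the 11 and 22 directions with a shear of -50.
    sigma = create_tensor_3D(100, 0, 0)
    sigma_prime = transformation_3D(sigma, T_z, 45)
    print(sigma_prime)  # expected: [[50, -50, 0], [-50, 50, 0], [0, 0, 0]]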
|
{"hexsha": "588e4fdc7eeb7c20f68868940842f7fa8f80e690", "size": 7653, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/conversion.py", "max_stars_repo_name": "echaffey/Compysite", "max_stars_repo_head_hexsha": "bf56f8fa641f39c747ce7be1d35dd198ea5a09e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/conversion.py", "max_issues_repo_name": "echaffey/Compysite", "max_issues_repo_head_hexsha": "bf56f8fa641f39c747ce7be1d35dd198ea5a09e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/conversion.py", "max_forks_repo_name": "echaffey/Compysite", "max_forks_repo_head_hexsha": "bf56f8fa641f39c747ce7be1d35dd198ea5a09e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2490118577, "max_line_length": 107, "alphanum_fraction": 0.5838233373, "include": true, "reason": "import numpy", "num_tokens": 2115}
|
import time
import sys, os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
import scipy.stats
import tracemalloc
import umap
import hdbscan
from inspect import Parameter
import weblogo
from weblogo import *
def onehot_enc(row, expected_length=21):
"""Encode the data with one-hot encoding
Parameter:
---------
row: string
DNA or RNA sequence for one-hot encoding.
---------
    Returns:
--------
onehot_array: array
one-hot encoding data
"""
enc = OneHotEncoder(dtype=np.int8)
enc.fit([[i] for i in "ATCGN"])
    seq_str = row["seq"].upper().replace("U", "T")
    # fail loudly on an unexpected length instead of silently encoding an empty sequence
    if len(seq_str) != expected_length:
        raise ValueError("sequence length {} != expected length {}".format(len(seq_str), expected_length))
    seq = [[i] for i in seq_str]
    onehot_array = enc.transform(seq).toarray().reshape(-1)
return onehot_array
def UMAP(onehot_input, df, parameters):
"""Dimensionality reduction with UMAP
Parameter:
---------
onehot_input: array
One-hot encoding data from sequence.
df: pd.DataFrame
        Sequences in pd.DataFrame format. Two columns named "X" and "Y" will be added with the UMAP results.
---------
    Returns:
pd.DataFrame
--------
"""
df = df.copy()
random_state = int(parameters["random_state"])
init = str(parameters["umap_init"])
min_dist = float(parameters["min_dist"])
n_neighbors = int(parameters["n_neighbors"])
densmap = bool(parameters["densmap"])
n_jobs = int(parameters["umap_jobs"])
model = umap.UMAP(init=init, random_state=random_state, n_components=2, min_dist=min_dist, n_neighbors=n_neighbors, verbose=True, densmap=densmap, n_jobs=n_jobs)
umap_output = model.fit_transform(onehot_input)
df["X"] = umap_output[:, 0]
df["Y"] = umap_output[:, 1]
del model
return df
def cluster_HDBSCAN(df, parameters):
"""Clustering UMAP results with HDSCAN
Parameter:
---------
df: pd.DataFrame
        The UMAP embedding ("X" and "Y" columns). After clustering with HDBSCAN, a column named "Cluster" is added to it.
parameters: list
Parameters for HDBSCAN from user.
---------
    Returns:
--------
pd.DataFrame
"""
    # use multi-core here
df = df.copy()
X = np.stack([df["X"], df["Y"]], axis=1)
min_cluster_size = int(parameters["min_cluster_size"])
min_samples = int(parameters["min_samples"])
cluster_selection_method = str(parameters["cluster_selection_method"])
core_dist_n_jobs = int(parameters["hdbscan_jobs"])
cluster_selection_epsilon = float(parameters["cluster_selection_epsilon"])
    prediction_data = bool(parameters["softclustering"])
model = hdbscan.HDBSCAN(min_cluster_size = min_cluster_size, min_samples=min_samples, cluster_selection_method=cluster_selection_method, cluster_selection_epsilon=cluster_selection_epsilon, core_dist_n_jobs=core_dist_n_jobs, prediction_data=prediction_data)
if prediction_data == True:
yhat = model.fit(X)
soft_clusters = hdbscan.all_points_membership_vectors(yhat)
labels = [np.argmax(x) for x in soft_clusters]
else:
labels = model.fit_predict(X)
df["Cluster"] = [i+1 if i > -1 else -1 for i in labels ] # re-number lables to make it human-readable
# check cluster number
# print(df.groupby("Cluster")["Cluster"].count())
return df
def run_cluster(fasta_df, path, parameters):
"""Call two algorithms to cluster and get the result
Parameter:
---------
fasta_df: pd.DataFrame
        Sequences in pd.DataFrame format. After calling the two algorithms, three columns named "X", "Y" and "Cluster" are added to store the results.
path: str
The output directory.
parameters: list
Parameters for HDBSCAN from user input.
---------
    Returns:
--------
pd.DataFrame
"""
onehot_input = []
for idx, row in fasta_df.iterrows():
onehot_input.append(onehot_enc(row, expected_length=int(parameters["exp_len"])))
onehot_input = np.array(onehot_input)
df_UMAP = UMAP(onehot_input, fasta_df, parameters)
df_HDBSCAN = cluster_HDBSCAN(df_UMAP, parameters)
# print(df_HDBSCAN)
df_HDBSCAN.to_csv("{path}/all_clusters.csv".format(path=path),index = None)
base_type = parameters["weblogo_base_type"]
with open("{path}/init.fa".format(path=path), "w") as init_fasta:
for idx, row in df_HDBSCAN.iterrows():
if base_type == "DNA":
seq_out = str(row["seq"]).upper().replace("U", "T")
elif base_type == "RNA":
seq_out = str(row["seq"]).upper().replace("T", "U")
else:
seq_out = str(row["seq"]).upper()
init_fasta.write(">{}\n{}\n".format(idx, seq_out))
return df_HDBSCAN
def draw_logo(infile, parameters):
"""Create sequence logos with Weblogo
Parameters:
---------
infile: string
        A sequence file in FASTA format used to create a sequence logo with WebLogo.
---------
Returns:
    png (bytes): The rendered sequence logo in PNG format.
--------
"""
unit = parameters["weblogo_unit"]
first_index = int(parameters["weblogo_first_index"])
data = open(infile)
seqs = read_seq_data(data)
logodata = LogoData.from_seqs(seqs)
logooptions = LogoOptions(
unit_name = unit, # 'probability',
yaxis_label = unit, # 'probability',
first_index = first_index, # -10,
color_scheme = classic,
resolution = 1000
)
logoformat = LogoFormat(logodata,logooptions)
png = png_print_formatter(logodata,logoformat)
return png
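if __name__ == "__main__":
    # Usage sketch (editor's addition). The keys below mirror the parameter
    # names read by the functions above; the values are illustrative
    # assumptions, not defaults shipped with iMVP. A real run needs many
    # sequences, so the call itself is left commented out.
    demo_df = pd.DataFrame({"seq": ["AUGGCUAGCUAGCUAGCUAGC"]})  # 21-nt sequence
    params = {
        "exp_len": 21, "random_state": 42, "umap_init": "spectral",
        "min_dist": 0.01, "n_neighbors": 15, "densmap": False, "umap_jobs": 1,
        "min_cluster_size": 100, "min_samples": 10,
        "cluster_selection_method": "eom", "cluster_selection_epsilon": 0.0,
        "hdbscan_jobs": 1, "softclustering": True, "weblogo_base_type": "RNA",
    }
    # df_clustered = run_cluster(demo_df, "./output", params)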
|
{"hexsha": "17ca1708ccdc1b617e2cc05b400c863d36c57113", "size": 5779, "ext": "py", "lang": "Python", "max_stars_repo_path": "iMVP_utils/iMVP_utils/interactive_functions.py", "max_stars_repo_name": "jhfoxliu/iMVP", "max_stars_repo_head_hexsha": "741c355fbaae3a610cb31f0e34965734f0cd19a4", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iMVP_utils/iMVP_utils/interactive_functions.py", "max_issues_repo_name": "jhfoxliu/iMVP", "max_issues_repo_head_hexsha": "741c355fbaae3a610cb31f0e34965734f0cd19a4", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "iMVP_utils/iMVP_utils/interactive_functions.py", "max_forks_repo_name": "jhfoxliu/iMVP", "max_forks_repo_head_hexsha": "741c355fbaae3a610cb31f0e34965734f0cd19a4", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T11:37:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:37:49.000Z", "avg_line_length": 32.2849162011, "max_line_length": 261, "alphanum_fraction": 0.6359231701, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1387}
|
import os
import random
from .libenv import CVecEnv
import numpy as np
from .build import build
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ENV_NAMES = [
"bigfish",
"bossfight",
"caveflyer",
"chaser",
"climber",
"coinrun",
"dodgeball",
"fruitbot",
"heist",
"jumper",
"leaper",
"maze",
"miner",
"ninja",
"plunder",
"starpilot",
"dodgeball1",
"dodgeball2",
"dodgeball3",
"dodgeball4",
"dodgeball5",
'coinrun1',
'coinrun2',
'coinrun3',
'coinrun4',
'coinrun5',
'bossfight1',
'bossfight2',
'bossfight3',
'bossfight4',
'bossfight5',
'starpilot1',
'starpilot2',
'starpilot3',
'starpilot4',
'starpilot5',
]
EXPLORATION_LEVEL_SEEDS = {
"coinrun": 1949448038,
"coinrun1": 1949448038,
"coinrun2": 1949448038,
"coinrun3": 1949448038,
"coinrun4": 1949448038,
"coinrun5": 1949448038,
"caveflyer": 1259048185,
"leaper": 1318677581,
"jumper": 1434825276,
"maze": 158988835,
"heist": 876640971,
"climber": 1561126160,
"ninja": 1123500215,
}
# should match DistributionMode in game.h, except for 'exploration' which is handled by Python
DISTRIBUTION_MODE_DICT = {
"easy": 0,
"hard": 1,
"extreme": 2,
"memory": 10,
"exploration": 20,
}
def create_random_seed():
rand_seed = random.SystemRandom().randint(0, 2 ** 31 - 1)
try:
# force MPI processes to definitely choose different random seeds
from mpi4py import MPI
rand_seed = rand_seed - (rand_seed % MPI.COMM_WORLD.size) + MPI.COMM_WORLD.rank
except ModuleNotFoundError:
pass
return rand_seed
class BaseProcgenEnv(CVecEnv):
"""
Base procedurally generated environment
"""
def __init__(
self,
num_envs,
env_name,
options,
debug=False,
rand_seed=None,
num_levels=0,
start_level=0,
use_sequential_levels=False,
debug_mode=0,
resource_root=None,
num_threads=4,
):
if resource_root is None:
resource_root = os.path.join(SCRIPT_DIR, "data", "assets") + os.sep
assert os.path.exists(resource_root)
lib_dir = os.path.join(SCRIPT_DIR, "data", "prebuilt")
if os.path.exists(lib_dir):
assert any([os.path.exists(os.path.join(lib_dir, name)) for name in ["libenv.so", "libenv.dylib", "env.dll"]]), "package is installed, but the prebuilt environment library is missing"
assert not debug, "debug has no effect for pre-compiled library"
else:
# only compile if we don't find a pre-built binary
lib_dir = build(debug=debug)
self.combos = self.get_combos()
if rand_seed is None:
rand_seed = create_random_seed()
options.update(
{
"env_name": env_name,
"num_levels": num_levels,
"start_level": start_level,
"num_actions": len(self.combos),
"use_sequential_levels": bool(use_sequential_levels),
"debug_mode": debug_mode,
"rand_seed": rand_seed,
"num_threads": num_threads,
# these will only be used the first time an environment is created in a process
"resource_root": resource_root,
}
)
self.options = options
super().__init__(
lib_dir=lib_dir, num_envs=num_envs, debug=debug, options=options
)
def get_combos(self):
return [
("LEFT", "DOWN"),
("LEFT",),
("LEFT", "UP"),
("DOWN",),
(),
("UP",),
("RIGHT", "DOWN"),
("RIGHT",),
("RIGHT", "UP"),
("D",),
("A",),
("W",),
("S",),
("Q",),
("E",),
]
def step_async(self, actions):
# tensorflow may return int64 actions (https://github.com/openai/gym/blob/master/gym/spaces/discrete.py#L13)
# so always cast actions to int32
return super().step_async(actions.astype(np.int32))
class ProcgenEnv(BaseProcgenEnv):
def __init__(
self,
num_envs,
env_name,
center_agent=True,
options=None,
use_generated_assets=False,
paint_vel_info=False,
distribution_mode="hard",
**kwargs,
):
if options is None:
options = {}
else:
options = dict(options)
assert (
distribution_mode in DISTRIBUTION_MODE_DICT
), f'"{distribution_mode}" is not a valid distribution mode.'
if distribution_mode == "exploration":
assert env_name in EXPLORATION_LEVEL_SEEDS, f"{env_name} does not support exploration mode"
distribution_mode = DISTRIBUTION_MODE_DICT["hard"]
assert "num_levels" not in kwargs, "exploration mode overrides num_levels"
kwargs["num_levels"] = 1
assert "start_level" not in kwargs, "exploration mode overrides start_level"
kwargs["start_level"] = EXPLORATION_LEVEL_SEEDS[env_name]
else:
distribution_mode = DISTRIBUTION_MODE_DICT[distribution_mode]
options.update(
{
"center_agent": bool(center_agent),
"use_generated_assets": bool(use_generated_assets),
"paint_vel_info": bool(paint_vel_info),
"distribution_mode": distribution_mode,
}
)
super().__init__(num_envs, env_name, options, **kwargs)
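if __name__ == "__main__":
    # Usage sketch (editor's addition): build a small vectorized env and
    # take one random step. This assumes the prebuilt (or locally built)
    # environment library is available and that the gym-style reset()/step()
    # interface is inherited from CVecEnv.
    env = ProcgenEnv(num_envs=2, env_name="coinrun", distribution_mode="easy")
    obs = env.reset()
    obs, rews, dones, infos = env.step(np.random.randint(0, 15, size=2))  # 15 action combos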
|
{"hexsha": "b46544a73ca86cb55021ff2530acc5628a7e3700", "size": 5713, "ext": "py", "lang": "Python", "max_stars_repo_path": "procgen/env.py", "max_stars_repo_name": "KarlXing/procgen", "max_stars_repo_head_hexsha": "937de8c350dff5c7cb0f6b9639a0b0815a8f3689", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "procgen/env.py", "max_issues_repo_name": "KarlXing/procgen", "max_issues_repo_head_hexsha": "937de8c350dff5c7cb0f6b9639a0b0815a8f3689", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "procgen/env.py", "max_forks_repo_name": "KarlXing/procgen", "max_forks_repo_head_hexsha": "937de8c350dff5c7cb0f6b9639a0b0815a8f3689", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2047619048, "max_line_length": 195, "alphanum_fraction": 0.5690530369, "include": true, "reason": "import numpy", "num_tokens": 1422}
|
module SStools
import Distributions.MvNormal
export kalman_filter, fast_state_smoother, simulate
"""
Perform Kalman filtering on the data y.
Conventions are as in Durbin and Koopman (2012).
Relevant dimensions are:
- Nt: number of time points
- Np: dimension of observation space
- Nm: dimension of state space
- Nr: dimension of state noise covariance
Parameters:
- a0: mean of prior on states: (Nm, 1) or (Nm,)
- P0: variance of prior on states: (Nm, Nm)
- Z: either a (Np, Nm, Nt) array or a (Np, Nm) array
- H: either a (Np, Np, Nt) array or a (Np, Np) array
- T: either a (Nm, Nm, Nt) array or a (Nm, Nm) array
- R: either a (Nm, Nr, Nt) array or a (Nm, Nr) array
- Q: either a (Nr, Nr, Nt) array or a (Nr, Nr) array
Returns:
- v: (Np, Nt) vector of residuals at each time
- K: (Nm, Np, Nt) Kalman gain matrix
- Finv: (Np, Np, Nt) inverse of prediction variance matrix
- a: (Nm, Nt) one-step-ahead predicted state means
- P: (Nm, Nm, Nt) one-step-ahead predicted state covariances
"""
function kalman_filter(y, a0, P0, Z, H, T, R, Q)
Np, Nt = size(y)
Nm = size(P0, 1)
# preallocate arrays
a = Array(Float64, Nm, Nt)
P = Array(Float64, Nm, Nm, Nt)
v = Array(Float64, Np, Nt)
Finv = Array(Float64, Np, Np, Nt)
K = Array(Float64, Nm, Np, Nt)
# initialize
a[:, 1] = a0;
P[:, :, 1] = P0;
# iterate
for t in 1:Nt
local a_t = slice(a, :, t)
local P_t = slice(P, :, :, t)
local y_t = slice(y, :, t)
local Z_t = ndims(Z) < 3 ? Z : slice(Z, :, :, t)
local H_t = ndims(H) < 3 ? H : slice(H, :, :, t)
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t)
local R_t = ndims(R) < 3 ? R : slice(R, :, :, t)
local Q_t = ndims(Q) < 3 ? Q : slice(Q, :, :, t)
v_t = y_t - Z_t * a_t
F_t = Z_t * P_t * Z_t' + H_t
Finv_t = inv(F_t)
K_t = T_t * P_t * Z_t' * Finv_t
L_t = T_t - K_t * Z_t
if t < Nt
a[:, t + 1] = T_t * a_t + K_t * v_t
P[:, :, t + 1] = T_t * P_t * L_t' + R_t * Q_t * R_t'
end
v[:, t] = v_t
K[:, :, t] = K_t
Finv[:, :, t] = Finv_t
end
return v, K, Finv, a, P
end
"""
Runs an efficient smoothing algorithm to get posterior mean of state
vector.
Parameters are as in kalman_filter.
"""
function fast_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)
# infer dimensions
Np, Nt = size(v)
Nm = size(P0, 1)
# preallocate
r0 = 0.
r = Array(Float64, Nm, Nt)
r[:, end] = 0
alpha = Array(Float64, Nm, Nt);
# iterate backward
for t in Nt:-1:1
local Finv_t = slice(Finv, :, :, t)
local v_t = slice(v, :, t)
local K_t = slice(K, :, :, t)
local r_t = slice(r, :, t)
local Z_t = ndims(Z) < 3 ? Z : slice(Z, :, :, t)
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t)
local u = Finv_t * v_t - K_t' * r_t
local thisr = Z_t' * u + T_t' * r_t
if t - 1 > 0
r[:, t - 1] = thisr
else
r0 = thisr
end
end
# run model forward
alpha[:, 1] = a0 + P0 * r0
for t in 1:(Nt - 1)
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t)
local R_t = ndims(R) < 3 ? R : slice(R, :, :, t)
local Q_t = ndims(Q) < 3 ? Q : slice(Q, :, :, t)
local RQR = R_t * Q_t * R_t'
alpha[:, t + 1] = T_t * alpha[:, t] + RQR * r[:, t]
end
return alpha
end
"""
Draw a sample of the state trajectory from the smoothed posterior.
"""
function simulate(y, a0, P0, Z, H, T, R, Q; interleaved=true)
# get dimensions
Np, Nt = size(y)
Nm = size(a0, 1)
Nr = size(Q, 1)
# preallocate
alpha_plus = Array(Float64, Nm, Nt)
y_plus = Array(Float64, Np, Nt)
# simulate data
alpha_plus[:, 1] = rand(MvNormal(a0, P0))
for t in 1:Nt
local Z_t = ndims(Z) < 3 ? Z : slice(Z, :, :, t)
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t)
local R_t = ndims(R) < 3 ? R : slice(R, :, :, t)
# random number generators should take copies, not views?
local H_t = ndims(H) < 3 ? H : H[:, :, t]
local Q_t = ndims(Q) < 3 ? Q : Q[:, :, t]
# draw disturbances
local ϵ = rand(MvNormal(full(H_t)))
y_plus[:, t] = Z_t * alpha_plus[:, t] + ϵ
if t < Nt
η = rand(MvNormal(full(Q_t)))
alpha_plus[:, t + 1] = T_t * alpha_plus[:, t] + R_t * η
end
end
# calculate smoothed means:
if interleaved
# actual data
v, K, Finv, a, P = interleaved_kalman_filter(y, a0, P0, Z, H, T, R, Q)
alpha_hat = interleaved_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)
# simulated data
v, K, Finv, a, P = interleaved_kalman_filter(y_plus, a0, P0, Z, H, T, R, Q)
alpha_hat_plus = interleaved_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)
else
# actual data
v, K, Finv, a, P = kalman_filter(y, a0, P0, Z, H, T, R, Q)
alpha_hat = fast_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)
# simulated data
v, K, Finv, a, P = kalman_filter(y_plus, a0, P0, Z, H, T, R, Q)
alpha_hat_plus = fast_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)
end
# combine
alpha_draw = alpha_plus - alpha_hat_plus + alpha_hat
return alpha_draw
end
"""
Like Kalman filter, but works by transforming a vector-valued
observation into a sequence of scalar observations.
"""
function interleaved_kalman_filter(y, a0, P0, Z, H, T, R, Q)
# test for diagonality of H
# H (minus its diagonals) should be the 0 matrix
if ndims(H) == 2
assert(H == diagm(diag(H)))
end
Np, Nt = size(y)
Nm = size(P0, 1)
# preallocate
a = Array(Float64, Nm, Np + 1, Nt)
P = Array(Float64, Nm, Nm, Np + 1, Nt)
v = Array(Float64, Np, Nt)
Finv = Array(Float64, Np, Nt)
K = Array(Float64, Nm, Np, Nt)
# initialize
a[:, 1, 1] = a0
P[:, :, 1, 1] = P0
for t in 1:Nt, i in 1:Np
local Z_t = ndims(Z) < 3 ? Z : slice(Z, :, :, t)
local H_t = ndims(H) < 3 ? H : slice(H, :, :, t)
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t)
local R_t = ndims(R) < 3 ? R : slice(R, :, :, t)
local Q_t = ndims(Q) < 3 ? Q : slice(Q, :, :, t)
z = vec(full(Z_t[i, :])) # now a column vector
v[i, t] = y[i, t] - dot(z, a[:, i, t])
F = dot(z, P[:, :, i, t] * z) + H_t[i, i]
Finv[i, t] = 1. / F
if F != 0
K[:, i, t] = P[:, :, i, t] * z * Finv[i, t]
else
K[:, i, t] = 0
end
a[:, i + 1, t] = a[:, i, t] + K[:, i, t] * v[i, t]
P[:, :, i + 1, t] = P[:, :, i, t] - F * (K[:, i, t] * K[:, i, t]')
if i == Np && t < Nt
a[:, 1, t + 1] = T_t * a[:, Np + 1, t]
P[:, :, 1, t + 1] = T_t * P[:, :, Np + 1, t] * T_t' + R_t * Q_t * R_t'
end
end
return v, K, Finv, a, P
end
function interleaved_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)
# infer dimensions
Np, Nt = size(v)
Nm = size(P0, 1)
# preallocate
r = Array(Float64, Nm, Np + 1, Nt)
# initialize
r[:, end, end] = 0
# iterate backward
for t in Nt:-1:1, i in (Np + 1):-1:2
ii = i - 1 # handles offset between r and Z/K/F indices
local Z_t = ndims(Z) < 3 ? Z : slice(Z, :, :, t)
L = eye(Nm) - K[:, ii, t] * Z_t[ii, :]
z = vec(full(Z_t[ii, :])) # now a column vector
r[:, i - 1, t] = z * (v[ii, t] * Finv[ii, t]) + L' * r[:, i, t]
if i == 2 && t > 1
# NOTE: t - 1 BELOW !!!
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t - 1)
r[:, Np + 1, t - 1] = T_t' * r[:, 1, t]
end
end
# initialize again
α = Array(Float64, Nm, Nt)
α[:, 1] = a0 + P0 * r[:, 1, 1]
for t in 1:(Nt - 1)
local T_t = ndims(T) < 3 ? T : slice(T, :, :, t)
local R_t = ndims(R) < 3 ? R : slice(R, :, :, t)
local Q_t = ndims(Q) < 3 ? Q : slice(Q, :, :, t)
local RQR = R_t * Q_t * R_t'
α[:, t + 1] = T_t * α[:, t] + RQR * r[:, 1, t + 1]
end
return α
end
end # module
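# Usage sketch (editor's addition, kept as comments; syntax matches the
# Julia 0.4-era constructs used in the module above). Filter and smooth a
# scalar local-level model where Z, H, T, R and Q are all 1x1:
#
#   using SStools
#   y = randn(1, 100)                      # Np = 1 observation, Nt = 100
#   a0 = [0.0]; P0 = ones(1, 1)
#   Z = ones(1, 1); H = ones(1, 1)
#   T = ones(1, 1); R = ones(1, 1); Q = ones(1, 1)
#   v, K, Finv, a, P = kalman_filter(y, a0, P0, Z, H, T, R, Q)
#   alpha = fast_state_smoother(v, K, Finv, a0, P0, Z, T, R, Q)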
|
{"hexsha": "e6aba09a9c99aa9e1462282d41664d28991f34af", "size": 8125, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/SStools.jl", "max_stars_repo_name": "jmxpearson/labcr", "max_stars_repo_head_hexsha": "ec9560004d81ecb912500d811b86829135a81782", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-10-08T19:58:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-03T18:39:30.000Z", "max_issues_repo_path": "julia/SStools.jl", "max_issues_repo_name": "jmxpearson/labcr", "max_issues_repo_head_hexsha": "ec9560004d81ecb912500d811b86829135a81782", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/SStools.jl", "max_forks_repo_name": "jmxpearson/labcr", "max_forks_repo_head_hexsha": "ec9560004d81ecb912500d811b86829135a81782", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-02-13T02:33:39.000Z", "max_forks_repo_forks_event_max_datetime": "2017-09-28T13:10:35.000Z", "avg_line_length": 28.5087719298, "max_line_length": 83, "alphanum_fraction": 0.4851692308, "num_tokens": 3073}
|
#! /usr/bin/julia
# Rosetta Code, Find common directory path
function commonpath{T<:String}(ds::Array{T,1}, delim::Char='/')
0 < length(ds) || return convert(T, "")
1 < length(ds) || return ds[1]
p = split(ds[1], delim)
mcnt = length(p)
for d in ds[2:end]
q = split(d, delim)
mcnt = min(mcnt, length(q))
hits = findfirst(p[1:mcnt] .== q[1:mcnt], false)
hits != 0 || continue
mcnt = hits - 1
mcnt != 0 || return convert(T, "")
end
1 < mcnt || p[1] != "" || return convert(T, string(delim))
convert(T, join(p[1:mcnt], delim))
end
test = ["/home/user1/tmp/coverage/test",
"/home/user1/tmp/covert/operator",
"/home/user1/tmp/coven/members"]
println("Comparing")
for s in test
println(" ", s)
end
println("for their common directory path yields:")
println(" ", commonpath(test))
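# Expected output (editor's note): the common directory path of the three
# test strings above is "/home/user1/tmp".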
|
{"hexsha": "10739205e767bfd8a3ca1847d5316b6e1160f1c7", "size": 884, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/completed/find_common_directory_path.jl", "max_stars_repo_name": "MichaeLeroy/rosetta-code", "max_stars_repo_head_hexsha": "cb0f45f79704912967cbd37c0c9bdc1e78c964b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/completed/find_common_directory_path.jl", "max_issues_repo_name": "MichaeLeroy/rosetta-code", "max_issues_repo_head_hexsha": "cb0f45f79704912967cbd37c0c9bdc1e78c964b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/completed/find_common_directory_path.jl", "max_forks_repo_name": "MichaeLeroy/rosetta-code", "max_forks_repo_head_hexsha": "cb0f45f79704912967cbd37c0c9bdc1e78c964b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.625, "max_line_length": 63, "alphanum_fraction": 0.5701357466, "num_tokens": 270}
|
function [x,zo]=overlapadd(f,win,inc)
%OVERLAPADD join overlapping frames together X=(F,WIN,INC)
%
% Inputs: F(NR,NW) contains the frames to be added together, one
% frame per row.
% WIN(NW) contains a window function to multiply each frame.
% WIN may be omitted to use a default rectangular window
% If processing the input in chunks, WIN should be replaced by
% ZI on the second and subsequent calls where ZI is the saved
% output state from the previous call.
% INC gives the time increment (in samples) between
%              successive frames [default = NW].
%
% Outputs: X(N,1) is the output signal. The number of output samples is N=NW+(NR-1)*INC.
% ZO Contains the saved state to allow a long signal
% to be processed in chunks. In this case X will contain only N=NR*INC
% output samples.
%
% Example of frame-based processing:
% INC=20 % set frame increment
% NW=INC*2 % oversample by a factor of 2 (4 is also often used)
% S=cos((0:NW*7)*6*pi/NW); % example input signal
% W=sqrt(hamming(NW+1)); W(end)=[]; % sqrt hamming window of period NW
% F=enframe(S,W,INC); % split into frames
% ... process frames ...
% X=overlapadd(F,W,INC); % reconstitute the time waveform (omit "X=" to plot waveform)
% Copyright (C) Mike Brookes 2009
% Version: $Id: overlapadd.m,v 1.2 2009/06/08 16:21:49 dmb Exp $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
[nr,nf]=size(f); % number of frames and frame length
if nargin<2
win=nf; % default increment
end
if isstruct(win)
w=win.w;
    if numel(w) && length(w)~=nf % error only when a saved non-empty window has the wrong length
error('window length does not match frames size');
end
inc=win.inc;
xx=win.xx;
else
if nargin<3
inc=nf;
end
if numel(win)==1 && win==fix(win) && nargin<3 % win has been omitted
inc=win;
w=[];
else
w=win(:).';
if length(w)~=nf
error('window length does not match frames size');
end
if all(w==1)
w=[];
end
end
xx=[]; % partial output from previous call is null
end
nb=ceil(nf/inc); % number of overlap buffers
no=nf+(nr-1)*inc; % buffer length
z=zeros(no,nb); % space for overlapped output speech
if numel(w)
z(repmat(1:nf,nr,1)+repmat((0:nr-1)'*inc+rem((0:nr-1)',nb)*no,1,nf))=f.*repmat(w,nr,1);
else
z(repmat(1:nf,nr,1)+repmat((0:nr-1)'*inc+rem((0:nr-1)',nb)*no,1,nf))=f;
end
x=sum(z,2);
if ~isempty(xx)
x(1:length(xx))=x(1:length(xx))+xx; % add on leftovers from previous call
end
if nargout>1 % check if we want to preserve the state
mo=inc*nr; % completed output samples
if no<mo
x(mo,1)=0;
zo.xx=[];
else
zo.xx=x(mo+1:end);
zo.w=w;
zo.inc=inc;
x=x(1:mo);
end
elseif ~nargout
if isempty(xx)
k1=nf-inc; % dubious samples at start
else
k1=0;
end
k2=nf-inc; % dubious samples at end
plot(1+(0:nr-1)*inc,x(1+(0:nr-1)*inc),'>r',nf+(0:nr-1)*inc,x(nf+(0:nr-1)*inc),'<r', ...
1:k1+1,x(1:k1+1),':b',k1+1:no-k2,x(k1+1:end-k2),'-b',no-k2:no,x(no-k2:no),':b');
xlabel('Sample Number');
title(sprintf('%d frames of %d samples with %.0f%% overlap = %d samples',nr,nf,100*(1-inc/nf),no));
end
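% Usage sketch (editor's addition): passing the increment in place of WIN
% gives a rectangular window; with INC = NW (no overlap) the frames are
% simply concatenated:
%   F = reshape(1:12, 4, 3)';   % 3 frames of 4 samples each
%   X = overlapadd(F, 4);       % returns (1:12)'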
|
{"author": "decouples", "repo": "Matlab_deep_learning", "sha": "1b823b82686080e32b03e1f1a4648896bd6e3c44", "save_path": "github-repos/MATLAB/decouples-Matlab_deep_learning", "path": "github-repos/MATLAB/decouples-Matlab_deep_learning/Matlab_deep_learning-1b823b82686080e32b03e1f1a4648896bd6e3c44/\u7b2c 19 \u7ae0 \u57fa\u4e8e\u8bed\u97f3\u8bc6\u522b\u7684\u4fe1\u53f7\u706f\u56fe\u50cf\u6a21\u62df\u63a7\u5236\u6280\u672f/voicebox/overlapadd.m"}
|
const QuasiArrayMulArray{p, q, T, V} =
Applied{<:Any, typeof(*), <:Tuple{<:AbstractQuasiArray{T,p}, <:AbstractArray{V,q}}}
const ArrayMulQuasiArray{p, q, T, V} =
Applied{<:Any, typeof(*), <:Tuple{<:AbstractArray{T,p}, <:AbstractQuasiArray{V,q}}}
const QuasiArrayMulQuasiArray{p, q, T, V} =
Applied{<:Any, typeof(*), <:Tuple{<:AbstractQuasiArray{T,p}, <:AbstractQuasiArray{V,q}}}
####
# Matrix * Vector
####
const QuasiMatMulVec{T, V} = QuasiArrayMulArray{2, 1, T, V}
const QuasiMatMulMat{T, V} = QuasiArrayMulArray{2, 2, T, V}
const QuasiMatMulQuasiMat{T, V} = QuasiArrayMulQuasiArray{2, 2, T, V}
function getindex(M::Mul{<:AbstractQuasiArrayApplyStyle}, k::Number)
A,Bs = first(M.args), tail(M.args)
B = _mul(Bs...)
ret = zero(eltype(M))
for j = rowsupport(A, k) ∩ colsupport(B,1)
ret += A[k,j] * B[j]
end
ret
end
getindex(M::Mul{<:AbstractQuasiArrayApplyStyle}, k::Int) =
Base.invoke(getindex, Tuple{Mul{<:AbstractQuasiArrayApplyStyle},Number}, M, k)
function _mul_quasi_getindex(M::Mul, k::Number, j::Number)
A,Bs = first(M.args), tail(M.args)
B = _mul(Bs...)
ret = zero(eltype(M))
@inbounds for ℓ in (rowsupport(A,k) ∩ colsupport(B,j))
ret += A[k,ℓ] * B[ℓ,j]
end
ret
end
getindex(M::Mul{<:AbstractQuasiArrayApplyStyle}, k::Number, j::Number) =
_mul_quasi_getindex(M, k, j)
getindex(M::Mul{<:AbstractQuasiArrayApplyStyle}, k::Integer, j::Integer) =
_mul_quasi_getindex(M, k, j)
function getindex(M::QuasiMatMulVec, k::AbstractArray)
A,B = M.args
ret = zeros(eltype(M),length(k))
@inbounds for j in axes(A,2)
ret .+= view(A,k,j) .* B[j]
end
ret
end
*(A::AbstractQuasiArray, B...) = fullmaterialize(apply(*,A,B...))
*(A::AbstractQuasiArray, B::AbstractQuasiArray, C...) = fullmaterialize(apply(*,A,B,C...))
*(A::AbstractArray, B::AbstractQuasiArray, C...) = fullmaterialize(apply(*,A,B,C...))
for op in (:pinv, :inv)
@eval $op(A::AbstractQuasiArray) = fullmaterialize(apply($op,A))
end
axes(L::Ldiv{<:Any,<:Any,<:Any,<:AbstractQuasiMatrix}) =
(axes(L.A, 2),axes(L.B,2))
axes(L::Ldiv{<:Any,<:Any,<:Any,<:AbstractQuasiVector}) =
(axes(L.A, 2),)
\(A::AbstractQuasiArray, B::AbstractQuasiArray) = apply(\,A,B)
\(A::AbstractQuasiArray, B::AbstractArray) = apply(\,A,B)
\(A::AbstractArray, B::AbstractQuasiArray) = apply(\,A,B)
*(A::AbstractQuasiArray, B::Mul, C...) = apply(*,A, B.args..., C...)
*(A::Mul, B::AbstractQuasiArray, C...) = apply(*,A.args..., B, C...)
####
# MulQuasiArray
#####
const MulQuasiArray{T, N, Args<:Tuple} = ApplyQuasiArray{T, N, typeof(*), Args}
const MulQuasiVector{T, Args<:Tuple} = MulQuasiArray{T, 1, Args}
const MulQuasiMatrix{T, Args<:Tuple} = MulQuasiArray{T, 2, Args}
const Vec = MulQuasiVector
_ApplyArray(F, factors...) = ApplyQuasiArray(F, factors...)
_ApplyArray(F, factors::AbstractArray...) = ApplyArray(F, factors...)
MulQuasiOrArray = Union{MulArray,MulQuasiArray}
_factors(M::MulQuasiOrArray) = M.args
_factors(M) = (M,)
_flatten(A::MulQuasiArray, B...) = _flatten(Applied(A), B...)
flatten(A::MulQuasiArray) = ApplyQuasiArray(flatten(Applied(A)))
flatten(A::SubQuasiArray{<:Any,2,<:MulQuasiArray}) = materialize(flatten(Applied(A)))
function fullmaterialize(M::Applied{<:Any,typeof(*)})
M_mat = materialize(flatten(M))
typeof(M_mat) <: MulQuasiOrArray || return M_mat
typeof(Applied(M_mat)) == typeof(M) || return(fullmaterialize(M_mat))
ABC = M_mat.args
length(ABC) ≤ 2 && return flatten(M_mat)
AB = most(ABC)
Mhead = fullmaterialize(Mul(AB...))
typeof(_factors(Mhead)) == typeof(AB) ||
return fullmaterialize(Mul(_factors(Mhead)..., last(ABC)))
BC = tail(ABC)
Mtail = fullmaterialize(Mul(BC...))
typeof(_factors(Mtail)) == typeof(BC) ||
return fullmaterialize(Mul(first(ABC), _factors(Mtail)...))
apply(*,first(ABC), Mtail.args...)
end
fullmaterialize(M::ApplyQuasiArray) = flatten(fullmaterialize(Applied(M)))
fullmaterialize(M) = flatten(M)
adjoint(A::MulQuasiArray) = ApplyQuasiArray(*, reverse(adjoint.(A.args))...)
transpose(A::MulQuasiArray) = ApplyQuasiArray(*, reverse(transpose.(A.args))...)
function similar(A::MulQuasiArray)
B,a = A.args
B*similar(a)
end
function similar(A::QuasiArrayMulArray)
B,a = A.args
applied(*, B, similar(a))
end
function copy(a::MulQuasiArray)
@_propagate_inbounds_meta
copymutable(a)
end
function copyto!(dest::MulQuasiArray, src::MulQuasiArray)
d = last(dest.args)
s = last(src.args)
copyto!(IndexStyle(d), d, IndexStyle(s), s)
dest
end
_mul_tail_support(j, Z) = maximum(last.(colsupport.(Ref(Z),j)))
_mul_tail_support(j, Z, Y, X...) = _mul_tail_support(OneTo(_mul_tail_support(j,Z)), Y, X...)
function _mul_getindex(k, j, A, B)
M = min(_mul_tail_support(j,B), maximum(last.(rowsupport.(Ref(A),k))))
A[k,1:M]*B[1:M,j]
end
function _mul_getindex(k, j, A, B, C, D...)
N = _mul_tail_support(j, reverse(D)..., C, B)
M = min(maximum(last.(rowsupport.(Ref(A),k))), N)
_mul_getindex(OneTo(N), j, A[k,OneTo(M)]*B[OneTo(M),OneTo(N)], C, D...)
end
getindex(A::MulQuasiMatrix, k::AbstractVector{<:Number}, j::AbstractVector{<:Number}) =
_mul_getindex(k, j, A.args...)
@inline quasimulapplystyle(A...) = combine_mul_styles(A...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, C::Type...) where {A<:AbstractQuasiArray,B<:Union{AbstractArray,AbstractQuasiArray}} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout.(C)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, C::Type...) where {A<:AbstractArray,B<:AbstractQuasiArray} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout.(C)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, D::Type...) where {A<:AbstractQuasiArray,B<:Union{AbstractArray,AbstractQuasiArray},C<:Union{AbstractArray,AbstractQuasiArray}} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout.(D)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, D::Type...) where {A<:AbstractArray,B<:AbstractQuasiArray,C<:Union{AbstractArray,AbstractQuasiArray}} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout.(D)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, D::Type...) where {A<:AbstractArray,B<:AbstractArray,C<:AbstractQuasiArray} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout.(D)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, ::Type{D}, E::Type...) where {A<:AbstractQuasiArray,B<:Union{AbstractArray,AbstractQuasiArray},C<:Union{AbstractArray,AbstractQuasiArray},D<:Union{AbstractArray,AbstractQuasiArray}} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout(D), MemoryLayout.(E)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, ::Type{D}, E::Type...) where {A<:AbstractArray,B<:AbstractQuasiArray,C<:Union{AbstractArray,AbstractQuasiArray},D<:Union{AbstractArray,AbstractQuasiArray}} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout(D), MemoryLayout.(E)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, ::Type{D}, E::Type...) where {A<:AbstractArray,B<:AbstractArray,C<:AbstractQuasiArray,D<:Union{AbstractArray,AbstractQuasiArray}} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout(D), MemoryLayout.(E)...)
ApplyStyle(::typeof(*), ::Type{A}, ::Type{B}, ::Type{C}, ::Type{D}, E::Type...) where {A<:AbstractArray,B<:AbstractArray,C<:AbstractArray,D<:AbstractQuasiArray} =
quasimulapplystyle(MemoryLayout(A), MemoryLayout(B), MemoryLayout(C), MemoryLayout(D), MemoryLayout.(E)...)
struct QuasiArrayLayout <: MemoryLayout end
MemoryLayout(::Type{<:AbstractQuasiArray}) = QuasiArrayLayout()
combine_mul_styles(::QuasiArrayLayout) = QuasiArrayApplyStyle()
result_mul_style(::QuasiArrayApplyStyle, ::QuasiArrayApplyStyle) = QuasiArrayApplyStyle()
result_mul_style(::QuasiArrayApplyStyle, ::IdentityMulStyle) = QuasiArrayApplyStyle()
result_mul_style(::IdentityMulStyle, ::QuasiArrayApplyStyle) = QuasiArrayApplyStyle()
####
# Matrix * Array
####
function _lmaterialize(A::MulQuasiArray, B, C...)
As = A.args
flatten(_ApplyArray(*, reverse(tail(reverse(As)))..., _lmaterialize(last(As), B, C...)))
end
function _rmaterialize(Z::MulQuasiArray, Y, W...)
Zs = Z.args
flatten(_ApplyArray(*, _rmaterialize(first(Zs), Y, W...), tail(Zs)...))
end
####
# Lazy \ ApplyArray. This applies to first arg.
#####
quasimulapplystyle(::InvLayout, _) = LdivApplyStyle()
quasimulapplystyle(::PInvLayout, _) = LdivApplyStyle()
ApplyStyle(::typeof(\), ::Type{<:AbstractQuasiArray}, ::Type{<:Applied}) = LdivApplyStyle()
\(A::AbstractQuasiArray, B::Applied) = apply(\, A, B)
function copy(L::Ldiv{LazyLayout,ApplyLayout{typeof(*)},<:AbstractQuasiMatrix})
args = arguments(L.B)
apply(*, L.A \ first(args), tail(args)...)
end
# copy(A::Applied{QuasiArrayApplyStyle,typeof(*)}) = lmaterialize(A)
import LazyArrays: MulAddStyle, _αAB, scalarone
_αAB(M::Mul{MulAddStyle,<:Tuple{<:AbstractQuasiArray,<:AbstractQuasiArray}}, ::Type{T}) where T = tuple(scalarone(T), M.args...)
_αAB(M::Mul{MulAddStyle,<:Tuple{<:Number,<:AbstractQuasiArray,<:AbstractQuasiArray}}, ::Type{T}) where T = M.args
|
{"hexsha": "a844d9117d93cd2f509344e1c32c57338729b7d9", "size": 9373, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/matmul.jl", "max_stars_repo_name": "UnofficialJuliaMirror/QuasiArrays.jl-c4ea9172-b204-11e9-377d-29865faadc5c", "max_stars_repo_head_hexsha": "db22aeeaa768d5995b9b09028e5422dcda273668", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/matmul.jl", "max_issues_repo_name": "UnofficialJuliaMirror/QuasiArrays.jl-c4ea9172-b204-11e9-377d-29865faadc5c", "max_issues_repo_head_hexsha": "db22aeeaa768d5995b9b09028e5422dcda273668", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/matmul.jl", "max_forks_repo_name": "UnofficialJuliaMirror/QuasiArrays.jl-c4ea9172-b204-11e9-377d-29865faadc5c", "max_forks_repo_head_hexsha": "db22aeeaa768d5995b9b09028e5422dcda273668", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2571428571, "max_line_length": 241, "alphanum_fraction": 0.6766243465, "num_tokens": 3034}
|
"""
Name : c11_16_VaR_sorting_10day.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
from matplotlib.finance import quotes_historical_yahoo_ochl as getData
#
ticker='WMT' # input 1
n_shares=500 # input 2
confidence_level=0.99 # input 3
begdate=(2000,1,1) # input 4
enddate=(2016,12,31) # input 5
nDays=10 # input 6
#
z=norm.ppf(confidence_level)
x=getData(ticker,begdate,enddate,asobject=True,adjusted=True)
logret = np.log(x.aclose[1:]/x.aclose[:-1])
ret = x.aclose[1:]/x.aclose[:-1]-1
position=n_shares*x.close[0]
#
# Method I
mean=np.mean(ret)
std=np.std(ret)
meanNdays=(1+mean)**nDays-1
stdNdays=std*np.sqrt(nDays)
VaR1=position*z*stdNdays
print("Holding=",position, "VaR1=", round(VaR1,0), "in ", nDays, "Days")
#
# method 2: calculate 10 day returns
ddate=[]
d0=x.date
for i in range(0,np.size(logret)):
ddate.append(int(i/nDays))
y=pd.DataFrame(logret,index=ddate,columns=['retNdays'])
logRet=y.groupby(y.index).sum()
retNdays=np.exp(logRet)-1
#
VaR2=position*z*np.std(retNdays)
print("Holding=",position, "VaR2=", round(VaR2,0), "in ", nDays, "Days")
#
# Method III
ret2=np.sort(retNdays)
n=np.size(ret2)
leftTail=int(n*(1-confidence_level))
print(leftTail)
#
VaR3=position*ret2[leftTail]
print("Holding=",position, "VaR=", round(VaR3,0), "in ",nDays, "Days")
|
{"hexsha": "c43db825432cc8038dd1bcb58a2e0b1e069dd260", "size": 1584, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter11/c11_16_VaR_sorting_10days.py", "max_stars_repo_name": "John-ye666/Python-for-Finance-Second-Edition", "max_stars_repo_head_hexsha": "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 236, "max_stars_repo_stars_event_min_datetime": "2017-07-02T03:06:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:15:33.000Z", "max_issues_repo_path": "Chapter11/c11_16_VaR_sorting_10days.py", "max_issues_repo_name": "John-ye666/Python-for-Finance-Second-Edition", "max_issues_repo_head_hexsha": "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter11/c11_16_VaR_sorting_10days.py", "max_forks_repo_name": "John-ye666/Python-for-Finance-Second-Edition", "max_forks_repo_head_hexsha": "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 139, "max_forks_repo_forks_event_min_datetime": "2017-06-30T10:28:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T19:43:34.000Z", "avg_line_length": 24.75, "max_line_length": 72, "alphanum_fraction": 0.6761363636, "include": true, "reason": "import numpy,from scipy", "num_tokens": 547}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Class for managing seismic refraction data and doing inversions"""
from math import pi
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import pygimli as pg
import pygimli.meshtools as mt
from pygimli.viewer.mpl import drawModel, drawMesh, CellBrowser, createColorBar
from pygimli.utils.base import interperc, getSavePath
from pygimli.viewer.mpl.dataview import showVecMatrix
from pygimli.frameworks import MethodManager#, MethodManager0
# the explicit import with full name allow for:
# python ~/src/gimli/gimli/pygimli/physics/traveltime/refraction.py
from pygimli.physics.traveltime.ratools import createGradientModel2D
from pygimli.physics.traveltime.raplot import drawFirstPicks, plotLines
from . raplot import drawTravelTimeData
from . importData import importGTT
from . fatray import FatrayDijkstraModelling
# class Refraction(MethodManager0):
# """Manager for refraction seismics (traveltime tomography)
#     TODO Document main members and use default MethodManager interface
# e.g., self.inv, self.fop, self.paraDomain, self.mesh, self.data
# """
# def __init__(self, data=None, verbose=True, debug=False, fatray=False,
# frequency=1000., **kwargs):
# """Init function with optional data load"""
#         pg.deprecated('Use TravelTimeManager instead')
# super().__init__(verbose=verbose, debug=debug, **kwargs)
# self.figs = {}
# self.axs = {}
# self.doSave = kwargs.pop('doSave', False)
# self.errIsAbsolute = True
# self.method = None
# # should be forwarded so it can be accessed from outside
# self.mesh = None
# self.poly = None
# self.error = None
# self.velocity = None
# self.response = None
# self.__dict__.update(**kwargs)
# # self.start = []
# self.pd = None
# # CR!, check if this should be better a static member TG: no idea
# self.dataToken_ = 't'
# if fatray:
# self.useFatray(True, frequency)
# if isinstance(data, str):
# self.loadData(data)
# elif isinstance(data, pg.DataContainer):
# self.setDataContainer(pg.DataContainer(data))
# self.basename = kwargs.pop('name', 'new')
# # if self.dataContainer is not None:
# # self.createMesh()
# # self.fop = self.createFOP(verbose=self.verbose)
# # self.inv = self.createInv(self.fop,
# # verbose=self.verbose, doSave=self.doSave)
# # def __str__(self): # no need to overwrite with identical content
# # """string representation of the class"""
# # return self.__repr__()
# #
# def __repr__(self): # to be moved to Mesh/Data Method manager
# """String representation of the class for the print function"""
# out = type(self).__name__ + " object"
# if hasattr(self, 'dataContainer'):
# out += "\n" + self.dataContainer.__str__()
# if hasattr(self, 'mesh'):
# out += "\n" + self.mesh.__str__()
# return out
# def paraDomain(self):
# """Return parameter domain mesh."""
# return self.fop.regionManager().paraDomain()
# def getModel(self): # model collided with base method Manager attribute
# """Return velocity vector."""
# # (self.paraDomain.cellMarkers())
# return self.velocity
# def useFMM(self, fmm=True):
# """Define whether to use Fast Marching Method (FMM).
# Note that this method is more accurate but currently a lot slower!
# """
# self.fop = Refraction.createFOP(usefmm=fmm)
# def useFatray(self, fatray=True, frequency=300.):
# """Define whether to use Fatray jacobian computation."""
# self.fop = Refraction.createFOP(fatray=fatray)
# if fatray:
# self.fop.frequency = frequency
# @staticmethod
# def createFOP(verbose=False, usefmm=False, fatray=False):
# """Create default forward operator for Traveltime modelling.
# usefmm forces Fast Marching Method, otherwise Dijkstra is used.
# """
# if usefmm:
# from .FMModelling import TravelTimeFMM
# fop = TravelTimeFMM(verbose=verbose)
# else:
# if fatray:
# fop = FatrayDijkstraModelling(verbose=verbose)
# else:
# fop = pg.core.TravelTimeDijkstraModelling(verbose=verbose)
# return fop
# def createInv(self, fop, verbose=True, doSave=False):
# """Create default inversion instance for Traveltime inversion."""
# self.tD = pg.trans.Trans()
# self.tM = pg.trans.TransLogLU()
# inv = pg.Inversion(verbose, doSave)
# inv.setTransData(self.tD)
# inv.setTransModel(self.tM)
# inv.setForwardOperator(fop)
# return inv
# def createApparentData(self, data):
# """Create apparent slowness for given data."""
# # hackish .. dislike!
# self.setData(data)
# return 1./(self.getOffset(data=data, full=True) / data('t'))
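#     # Worked example (hypothetical numbers): offset = 100 m and t = 0.05 s
#     # give 1 / (100 / 0.05) = 5e-4 s/m apparent slowness, i.e. the
#     # reciprocal of a 2000 m/s apparent velocity.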
# def dataVals(self, data):
# """Return pure data values from a given DataContainer."""
# return data('t')
# def relErrorVals(self, data):
# """Return pure data values from a given DataContainer."""
# return data('err') / data('t')
# def setData(self, data):
# """Set data container (holding s and g indices and t floats)."""
# if issubclass(type(data), pg.DataContainer):
# self.setDataContainer(data)
# else:
# raise BaseException("Implement set data from type:", type(data))
# def setDataContainer(self, data):
# """Set data container from outside."""
# self.dataContainer = data
# self.checkData()
# self.fop.setData(self.dataContainer)
# self.inv.setData(self.dataContainer('t'))
# if self.dataContainer.allNonZero('err'):
# self.error = self.dataContainer('err')
# else:
# self.error = Refraction.estimateError(data)
# def loadData(self, filename):
# """Load data from file."""
# if filename.endswith('.gtt'):
# data = importGTT(filename)
# else:
# data = pg.DataContainer(filename, sensorTokens='s g')
# self.basename = filename[:filename.rfind('.')]
# self.setDataContainer(data)
# def checkData(self):
# """Check data
# w.r.t. shot/geophone identity and zero/negative
# traveltimes, plus check y/z sensor positions
# """
# oldsize = self.dataContainer.size()
# self.dataContainer.markInvalid(pg.abs(self.dataContainer('s') -
# self.dataContainer('g')) < 1)
# self.dataContainer.markInvalid(self.dataContainer('t') <= 0.)
# self.dataContainer.removeInvalid()
# newsize = self.dataContainer.size()
# if newsize < oldsize:
# if self.verbose:
# print('Removed ' + str(oldsize - newsize) + ' values.')
# maxyabs = max(pg.abs(pg.y(self.dataContainer.sensorPositions())))
# maxzabs = max(pg.abs(pg.z(self.dataContainer.sensorPositions())))
# if maxzabs > 0 and maxyabs == 0:
# for i in range(self.dataContainer.sensorCount()):
# pos = self.dataContainer.sensorPosition(i).rotateX(-pi / 2)
# self.dataContainer.setSensorPosition(i, pos)
# if self.verbose:
# print(self.dataContainer)
# def showData(self, data=None, response=None, ax=None, name='data'):
# """Show data as travel time curves (optionally with response)
# Parameters
# ----------
# data : pyGIMLi data Container [self.dataContainer]
# data to show with points
# response : array
# response vector to draw with lines
#         ax : matplotlib axes
# axis to plot into, if not given, a new figure is created
# """
# if data is None:
# data = self.dataContainer
# if response is not None:
# name = 'datafit'
# if ax is None:
# fig, ax = plt.subplots()
# self.figs[name] = fig
# self.axs[name] = ax
# if response is None:
# drawFirstPicks(ax, data)
# else:
# drawFirstPicks(ax, data, marker='+')
# if response is True:
# response = self.response
# drawFirstPicks(ax, data, np.asarray(response), marker='-')
# return ax
# def createMesh(self, depth=None, quality=34.3, paraDX=1, boundary=0,
# paraBoundary=0, secNodes=3, apply=True, **kwargs):
# """Create (inversion) mesh using createParaDomain2D
# Parameters
# ----------
# depth : float, optional
#             maximum depth; None (default) means maximum offset / 3.
# paraDX : float
# relative distance for refinement nodes between two sensors
# e.g., 0 or 1 means no refinement
# e.g., 0.5 means 1 additional node between two neighboring sensors
# e.g., 0.33 means 2 additional equidistant nodes between two sensors
# boundary : float, optional
# boundary width to be appended for domain prolongation in absolute
# para domain width.
# values < 0 force the boundary to be 4 times para domain width.
# paraBoundary : float, optional
#             margin for parameter domain in sensor distances (default 0)
# quality : float, optional
# mesh quality (smallest angle allowed)
# apply : bool, optional
# set mesh property of the underlying forward operator
#         secNodes : int (3)
# Amount of secondary nodes to improve accuracy of the forward
# solution.
# **kwargs: Additional keyword arguments passed to
# pygimli.meshtools.createParaMeshPLC
# See also
# --------
# pygimli.meshtools.createParaMeshPLC
# """
# if self.dataContainer is None:
# raise BaseException('Cannot create mesh without dataContainer.')
# if depth is None:
# depth = self.getDepth()
# self.poly = mt.createParaMeshPLC(self.dataContainer.sensorPositions(),
# paraDepth=depth, paraDX=paraDX,
# paraBoundary=paraBoundary,
# boundary=boundary, **kwargs)
# mesh = mt.createMesh(self.poly, quality=quality, smooth=(1, 10))
# if apply:
# self.setMesh(mesh, secNodes=secNodes)
# return mesh
# def setMesh(self, mesh, refine=False, secNodes=1):
# """Set mesh. To be removed from class once derived from MeshManager.
# Parameters
# ----------
# secNodes : int (1)
# Number of secondary nodes to improve accuracy of the forward
# solution.
# """
# self.mesh = mesh
# self.mesh.createNeighborInfos()
# self.fop.setMesh(self.mesh)
# self.fop.regionManager().setConstraintType(1)
# if refine:
#             pg.warn("argument refine is deprecated .. use secNodes instead")
# secNodes = 1
# mesh = self.fop.regionManager().mesh().createMeshWithSecondaryNodes(
# secNodes)
# self.fop.setMesh(mesh, ignoreRegionManager=True)
# self.inv.setForwardOperator(self.fop)
# def showMesh(self, ax=None, name='mesh'):
# """show mesh in given ax or in a new figure"""
# if ax is None:
# fig, ax = plt.subplots()
# self.figs[name] = fig
# self.axs[name] = ax
# drawMesh(ax, self.mesh)
# # plt.show(block=False)
# ax.set_aspect(1)
# return ax
# @staticmethod
# def estimateError(data=None, absoluteError=0.001, relativeError=0.001):
# """Estimate error composed of an absolute and a relative part
# Parameters
# ----------
# absoluteError : float
# absolute error of traveltimes (usually in s)
# relativeError : float
#             relative error of traveltimes as a fraction (e.g. 0.01 is 1%)
# Returns
# -------
# err : array
# """
# # print(data)
# # if not data.allNonZero('t'):
# # raise BaseException("We need travel time values (t) " +
# # "in the data to estimate a data error.")
# if relativeError >= 0.5: # obviously in %
#             print("relativeError set to a value > 0.5 .. assuming this "
#                   "is a percentage error level, dividing it by 100")
# relativeError /= 100.
# error = absoluteError + data('t') * relativeError
# return error
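#     # Worked example (hypothetical values): absoluteError=0.001 s,
#     # relativeError=0.02 and a pick at t = 0.05 s give
#     # err = 0.001 + 0.05 * 0.02 = 0.002 s, i.e. 4 % of the reading.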
# def invert(self, data=None, t=None, err=None, mesh=None, **kwargs):
# """Run actual inversion.
# Values for result/response are stored in the class members
# velocity/response
# Parameters
# ----------
# useGradient : bool
# Create gradient for starting model from vtop to vbottom.
# vtop, vbottom : float
# starting (gradient) model velocities on top/at bottom of the mesh
# lam : float
# regularization parameter describing the strength of smoothness
# zWeight : float
# relative weight for purely vertical boundaries
# maxIter : int
# Maximum number of iterations
# startModel : array
# Slowness starting model for the inversion
# """
# if 'verbose' in kwargs:
# self.setVerbose(kwargs.pop('verbose'))
# if data is not None:
# # setDataContainer would be better
# if t is not None:
# data.set('t', t)
# self.setDataContainer(data)
# if t is not None:
# self.dataContainer.set('t', t)
# if err is not None:
# self.error = err
# if mesh is not None:
# self.setMesh(mesh)
# if self.mesh is None:
# self.createMesh(**kwargs)
# startModel = kwargs.pop('startModel', None)
# self.pd = self.fop.regionManager().paraDomain()
# if startModel is None:
# useGradient = kwargs.pop('useGradient', True)
# if useGradient:
# startModel = createGradientModel2D(
# self.dataContainer, self.pd,
# kwargs.pop('vtop', 500.), kwargs.pop('vbottom', 5000.))
# else:
# startModel = self.fop.createDefaultStartModel()
# if isinstance(startModel, (float, int)):
# startModel = pg.Vector(self.pd.cellCount(), startModel)
# self.fop.setStartModel(startModel)
# zWeight = kwargs.pop('zWeight', 0.2)
# if 'zweight' in kwargs:
# zWeight = kwargs.pop('zweight', 0.2)
# print("zweight option will be removed soon. "
# "Please use zWeight instead.")
# self.fop.regionManager().setZWeight(zWeight)
# self.inv.setData(self.dataContainer('t'))
# self.inv.setLambda(kwargs.pop('lam', 30.))
# if 'threadCount' in kwargs: # just for backward compatibility
# self.fop.setThreadCount(kwargs.pop('threadCount'))
# if 'max_iter' in kwargs: # just for backward compatibility
# self.inv.setMaxIter(kwargs.pop('max_iter'))
# if 'maxIter' in kwargs: # the better way
# self.inv.setMaxIter(kwargs.pop('maxIter'))
# if 'robustData' in kwargs:
# self.inv.setRobustData(kwargs.pop('robustData'))
# if 'blockyModel' in kwargs:
# self.inv.setBlockyModel(kwargs.pop('blockyModel'))
# if kwargs.pop('referenceModel', False):
# self.inv.setReferenceModel(startModel)
# if not hasattr(self.error, '__iter__'):
# self.error = Refraction.estimateError(
# self.dataContainer, kwargs.pop('error', 0.003)) # abs err in s
# self.inv.setAbsoluteError(self.error)
# self.fop.jacobian().clear()
# slowness = self.inv.run()
# self.velocity = 1. / slowness
# self.response = self.inv.response()
# # use self.model() to access to this self.model = self.velocity
# return self.velocity
# @staticmethod
# def simulate(mesh, slowness, scheme, verbose=False, **kwargs):
# """Simulate a traveltime measurement.
# Perform the forward task for a given mesh, a slowness distribution (per
# cell) and return data (traveltime) for a measurement scheme. This is a
# static method since it does not interfere with the managers inversion
#         static method since it does not interfere with the manager's inversion
# Parameters
# ----------
# mesh : :gimliapi:`GIMLI::Mesh`
# Mesh to calculate for.
# slowness : array(mesh.cellCount()) | array(N, mesh.cellCount())
# slowness distribution for the given mesh cells can be:
#             * a single array of len mesh.cellCount()
#             * a matrix of N slowness distributions of len mesh.cellCount()
#             * a res map as [[marker0, res0], [marker1, res1], ...]
# scheme : :gimliapi:`GIMLI::DataContainer`
# data measurement scheme
# verbose : boolean
# Be verbose.
# Other parameters
# ----------------
#         noiseLevel, noiseAbs : float [0]
#             if either is > 0, add normally distributed noise based on a
#             data error estimate that uses noiseAbs as the absolute error
# Returns
# -------
# t : array(N, data.size()) | DataContainer
# The resulting simulated travel time values.
# Either one column array or matrix in case of slowness matrix.
#             A DataContainer is returned unless returnArray is set to True.
# """
# fop = Refraction.createFOP(verbose=verbose)
# fop.setData(scheme)
# fop.setMesh(mesh, ignoreRegionManager=True)
# if len(slowness) == mesh.cellCount():
# if max(slowness) > 1.:
# print('Warning: slowness values larger than 1 (' +
#                       str(max(slowness)) + ') .. assuming these are velocity '
#                       'values .. taking reciprocals')
# t = fop.response(1./slowness)
# else:
# t = fop.response(slowness)
# else:
# print(mesh)
# print("slowness: ", slowness)
# raise BaseException("Simulate called with wrong slowness array.")
# ret = pg.DataContainer(scheme)
# ret.set('t', t)
# noiseLevel = kwargs.pop('noiseLevel', 0)
# noiseAbs = kwargs.pop('noiseAbs', 0)
# if noiseLevel > 0 or noiseAbs > 0:
# if not ret.allNonZero('err'):
# ret.set('t', t)
# ret.set('err', pg.physics.Refraction.estimateError(
# ret, absoluteError=noiseAbs))
# if verbose:
# print("Data error estimates (min:max) ",
# min(ret('err')), ":", max(ret('err')))
# t += pg.math.randn(ret.size()) * ret('err')
# ret.set('t', t)
# if kwargs.pop('returnArray', False):
# return t
# return ret
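#     # Hedged usage sketch (mesh, slowness and scheme assumed to exist):
#     #   t = Refraction.simulate(mesh, slowness, scheme,
#     #                           noiseLevel=0.01, noiseAbs=1e-4,
#     #                           returnArray=True)
#     # Note: despite the matrix option documented above, the implementation
#     # only handles a single slowness vector of len mesh.cellCount().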
# @staticmethod
# def drawApparentVelocities(ax, data, t=None, **kwargs):
#         """Plot data in form of an apparent velocity image."""
# tt = Refraction()
# tt.setDataContainer(data)
# tt.showVA(ax=ax, t=t, **kwargs)
# @staticmethod
# def drawTravelTimeData(ax, data, t=None):
# """Plot travel time data as lines and points."""
# drawTravelTimeData(ax, data, t)
# def getOffset(self, data=None, full=False):
# """Return vector of offsets (in m) between shot and receiver."""
# if data is None:
# data = self.dataContainer
# if full:
# pos = data.sensorPositions()
# s, g = data('s'), data('g')
# nd = data.size()
# off = [pos[int(s[i])].distance(pos[int(g[i])]) for i in range(nd)]
# return np.absolute(off)
# else:
# px = pg.x(data.sensorPositions())
# gx = np.array([px[int(g)] for g in data("g")])
# sx = np.array([px[int(s)] for s in data("s")])
# return np.absolute(gx - sx)
# def getMidpoint(self, data=None):
#         """Return vector of midpoints (in m) between shot and receiver."""
# if data is None:
# data = self.dataContainer
# px = pg.x(data.sensorPositions())
# gx = np.array([px[int(g)] for g in data("g")])
# sx = np.array([px[int(s)] for s in data("s")])
# return (gx + sx) / 2
# def showVA(self, data=None, t=None, name='va', pseudosection=False,
# squeeze=True, full=True, ax=None, cmap=None, **kwargs):
# """Show apparent velocity as image plot.
# TODO showXXX commands need to return ax and cbar .. if there is one
# """
# if data is None:
# data = self.dataContainer
# if ax is None:
# fig, ax = plt.subplots()
# self.figs[name] = fig
# self.axs[name] = ax
# if t is None:
# t = data('t')
# px = pg.x(data.sensorPositions())
# py = pg.y(data.sensorPositions())
# if len(np.unique(py)) > len(np.unique(px)): # probably crosshole
# px = py
# gx = np.array([px[int(g)] for g in data("g")])
# sx = np.array([px[int(s)] for s in data("s")])
# offset = self.getOffset(data=data, full=full)
# kwargs.setdefault('vals', offset / t)
# if pseudosection:
# midpoint = (gx + sx) / 2
# _, cb = showVecMatrix(midpoint, offset, squeeze=True, ax=ax,
#                                   label='Apparent velocity [m/s]', cmap=cmap,
# **kwargs)
# else:
# _, cb = showVecMatrix(gx, sx, squeeze=squeeze, ax=ax,
# label='Apparent velocity [m/s]', cmap=cmap,
# **kwargs)
# ax.figure.show()
# return ax, cb
# def getDepth(self):
#         """return an (a-priori guessed) depth of investigation"""
# return max(self.getOffset()) / 3.0 # rule of thumb
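#     # E.g. a maximum shot-receiver offset of 300 m yields an assumed
#     # depth of investigation of 100 m.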
# def rayCoverage(self):
# """return ray coverage"""
# one = pg.Vector(self.dataContainer.size(), 1.)
# return self.fop.jacobian().transMult(one)
# def standardizedCoverage(self):
# """return standardized coverage vector (0|1) using neighbor info"""
# coverage = self.rayCoverage()
# C = self.fop.constraintsRef()
# return np.sign(np.absolute(C.transMult(C * coverage)))
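#     # Minimal numpy sketch of the sign/abs trick above (assumes C is the
#     # first-order smoothness constraint matrix, so C.T @ C couples
#     # neighbouring cells; a cell is marked if it or a neighbour carries rays):
#     #   cov = np.array([0., 2., 0., 0.])
#     #   C = np.array([[-1., 1., 0., 0.],
#     #                 [0., -1., 1., 0.],
#     #                 [0., 0., -1., 1.]])
#     #   np.sign(np.abs(C.T @ (C @ cov)))  # -> [1., 1., 1., 0.]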
# def showRayPaths(self, model=None, ax=None, **kwargs):
# """Show ray paths for `model` or last model for which the Jacobian was
# calculated.
# Parameters
# ----------
# model : array
# Velocity model for which to calculate and visualize ray paths (the
# default is model for last Jacobian calculation in self.velocity).
# ax : matplotlib.axes
# Axes for the plot (the default is None).
# **kwargs : type
# Additional arguments passed to LineCollection (alpha, linewidths,
# color, linestyles).
# Returns
# -------
# ax : matplotlib.axes object
# cb : matplotlib.colorbar object (only if model is provided)
# """
# cbar = None
# if model is None and self.velocity is None:
# pg.info("No previous inversion result found and no model given.",
# "Using homogeneous slowness model.")
# self.velocity = pg.Vector(self.mesh.cellCount(), 1.0)
# self.fop.createJacobian(1./self.velocity)
# if model is not None:
# if self.velocity is not None:
# if not np.allclose(model, self.velocity):
# self.fop.createJacobian(1/model)
# ax, cbar = self.showResult(ax=ax, val=model)
# _ = kwargs.setdefault("color", "w")
# _ = kwargs.setdefault("alpha", 0.5)
# _ = kwargs.setdefault("linewidths", 0.8)
# else:
# ax = self.showMesh(ax=ax)
# # Due to different numbering scheme of way matrix
# _, shots = np.unique(self.dataContainer("s"), return_inverse=True)
# _, receivers = np.unique(self.dataContainer("g"), return_inverse=True)
# # Collecting way segments for all shot/receiver combinations
# segs = []
# for s, g in zip(shots, receivers):
# wi = self.fop.way(s, g)
# points = self.fop.mesh().positions(withSecNodes=True)[wi]
# segs.append(np.column_stack((pg.x(points), pg.y(points))))
# line_segments = LineCollection(segs, **kwargs)
# ax.add_collection(line_segments)
# return ax, cbar
# def showCoverage(self, ax=None, name='coverage', **kwargs):
# """shows the ray coverage in logscale"""
# if ax is None:
# fig, ax = plt.subplots()
# self.figs[name] = fig
# self.axs[name] = ax
# cov = self.rayCoverage()
# return pg.show(self.mesh, pg.log10(cov+min(cov[cov > 0])*.5), ax=ax,
# coverage=self.standardizedCoverage(), **kwargs)
# def showModel(self, ax=None, vals=None, **kwargs):
#         """Show the resulting velocity model (alias for showResult)."""
# return self.showResult(ax=ax, val=vals, **kwargs)
# def showResult(self, val=None, ax=None, cMin=None, cMax=None,
# logScale=False, rays=False, name='result', **kwargs):
# """Show resulting velocity vector.
# Parameters
# ----------
# val : result array [self.velocity]
# field to show, usually the velocity vector
# ax : matplotlib.axes
#             axes to plot into; if not given, a new single-axes figure is created
# cMin/cMax : float
# minimum and maximum values for ranging colorbar
# logScale : bool [False]
# use logarithmic scale
# rays : bool [False]
# Show ray paths as well.
# Other parameters
# ----------------
# useCoverage : bool [True]
# use standardized (0 or 1) ray coverage as alpha-shading
# label : str
# label to write on colorbar
#         orientation : str
# color bar orientation
# nLevs : int [7]
# number of level values for color bar
# **kwargs : keyword arguments passed to the show function
# Returns
# -------
#         ax : matplotlib axes
# cb : matplotlib color bar object
# """
# mesh = self.paraDomain()
# if val is None:
# val = self.velocity
# if cMin is None or cMax is None:
# cMin, cMax = interperc(val, 3)
# coverage = 1
# if kwargs.pop('useCoverage', True):
# coverage = self.standardizedCoverage()
# label = kwargs.pop("label", "Velocity (m/s)")
# if ax is None:
# fig, ax = plt.subplots()
# self.figs[name] = fig
# ax, cbar = pg.show(mesh, val, logScale=logScale, ax=ax,
# colorBar=True, cMin=cMin, cMax=cMax,
# coverage=coverage, label=label, hold=True,
# **kwargs)
# self.figs[name] = plt.gcf()
# else:
# gci = drawModel(ax, mesh, val, logScale=logScale,
# colorBar=True, cMin=cMin, cMax=cMax,
# coverage=coverage, **kwargs)
# labels = ['cMin', 'cMax', 'nLevs', 'orientation']
# subkwargs = {key: kwargs[key] for key in labels if key in kwargs}
# cbar = createColorBar(gci, label=label, **subkwargs)
# if rays:
# self.showRayPaths(ax=ax, alpha=0.5, color="w", linewidths=.8)
# browser = CellBrowser(self.mesh, val, ax)
# browser.connect()
# self.axs[name] = ax
# if 'lines' in kwargs:
# plotLines(ax, kwargs['lines'])
# return ax, cbar
# def showResultAndFit(self, name='resultfit', **kwargs):
# """show two vertical subplots with result and data (with response)"""
# fig, axs = plt.subplots(nrows=2)
# self.figs[name] = fig
# self.showResult(ax=axs[0], **kwargs)
# self.showData(ax=axs[1], response=self.response)
# def saveFigures(self, name=None, ext='pdf'):
# """save all existing figures to files"""
# if name is None:
# name = self.basename
# if name is None or not any(name):
# name = 'out'
# for key in self.figs:
# self.figs[key].savefig(name+'-'+key+'.'+ext, bbox_inches='tight')
# def makeJacobianPDF(self, ind=None):
# """Make multipage Jacobian PDF."""
# from matplotlib.backends.backend_pdf import PdfPages
# if ind is None:
# ind = range(self.dataContainer.size())
# with PdfPages(self.basename+'-jacobian.pdf') as pdf:
# fig, ax = pg.plt.subplots()
# for ii in ind:
# jj = self.fop.jacobian().row(ii)
# pg.show(self.mesh, jj, ax=ax, coverage=(jj > 0))
# fig.savefig(pdf, format='pdf')
# ax.cla()
# def saveResult(self, folder=None, size=(16, 10), **kwargs):
# """Save the results in the specified folder.
# Saved items are:
# Inverted profile
# Velocity vector
# Coverage vector
# Standardized coverage vector
# Mesh (bms and vtk with results)
# """
# # TODO: How to extract the chi2 etc. from each iteration???
# subfolder = '/' + self.__class__.__name__
# path = getSavePath(folder, subfolder)
# if self.verbose:
# print('Saving refraction data to: {}'.format(path))
# np.savetxt(path + '/velocity.vector',
# self.velocity)
# np.savetxt(path + '/velocity-cov.vector',
# self.rayCoverage())
# np.savetxt(path + '/velocity-scov.vector',
# self.standardizedCoverage())
# self.mesh.addExportData('Velocity', self.velocity)
# self.mesh.addExportData('Coverage', self.rayCoverage())
# self.mesh.addExportData('S_Coverage', self.standardizedCoverage())
#         self.mesh.exportVTK(path + '/velocity')
#         self.mesh.save(path + '/velocity-mesh')
#         self.pd.save(path + '/velocity-pd')
# fig, ax = plt.subplots()
# self.showResult(ax=ax, cov=self.standardizedCoverage(), **kwargs)
# fig.set_size_inches(size)
# fig.savefig(path + '/velocity.pdf', bbox_inches='tight')
# return path, fig, ax
# def test_Refraction():
#     """Test Refraction manager stability for data/mesh setting and data update."""
# import os
# datafile = os.path.dirname(__file__) + '/example_topo.sgt'
# ra = Refraction(datafile, verbose=False, doSave=False)
# ra.createMesh(depth=80)
# ra.inv.setMaxIter(1)
# ra.invert()
# m1 = ra.model()
# mesh = pg.Mesh(ra.mesh)
# ra.setMesh(mesh)
# ra.invert()
# m2 = ra.model()
# np.testing.assert_array_equal(m1, m2)
# ra.setData(pg.DataContainer(datafile, 's g'))
# m3 = ra.invert()
# np.testing.assert_array_equal(m1, m3)
# class Tomography(Refraction):
# """Traveltime tomography for tomographic (e.g. crosshole) measurements"""
# def __init__(self, data=None, tcorr=0, name='new', **kwargs):
# """Init function with optional data load
# Parameters
# ----------
# data : pg.DataContainer or string
# tcorr : float [0]
# correct travel times by common shift
# name : str [data if being string, otherwise 'new']
# basename for saving Figures, results etc.
# ndig : int [2]
# number of digits to round positions (e.g. 2=cm), alternatively:
# roundto : float [0]
# unit spacing to round positions on
# """
# if isinstance(data, str):
# name = data[:data.rfind('.')]
# if data.lower()[-4:] == '.tom':
# data = readTOMfile(data, **kwargs)
# else:
# data = pg.DataContainer(data, 's g')
# if tcorr != 0:
# data.set('t', data('t') + tcorr)
# super(Tomography, self).__init__(data, name=name, **kwargs)
# def createMesh(self, quality=34.6, maxarea=0.1, addpoints=None):
# """Create (inversion) mesh by circumventing PLC"""
# data = self.dataContainer
# sx = list(pg.x(data.sensorPositions()))
# sz = list(pg.y(data.sensorPositions()))
# if addpoints is not None:
# for po in addpoints:
# sx.append(po[0])
# sz.append(po[1])
# iS = np.argsort(np.arctan2(sx-np.mean(sx), sz-np.mean(sz)))
# plc = pg.Mesh(2)
# nodes = [plc.createNode(sx[i], sz[i], 0) for i in iS]
# for i in range(len(nodes)-1):
# plc.createEdge(nodes[i], nodes[i+1])
# plc.createEdge(nodes[-1], nodes[0])
# tri = pg.core.TriangleWrapper(plc)
# tri.setSwitches("-pzFq"+str(quality)+"a"+str(maxarea))
# self.setMesh(tri.generate())
# def offset(self):
# """return shot-geophone distance"""
# data = self.dataContainer
# return np.array([data.sensorPosition(int(data('g')[i])).distance(
# data.sensorPosition(int(data('s')[i]))) for i in range(data.size())])
# def getVA(self, t=None):
# """return apparent velocity"""
# if t is None:
# t = self.dataContainer('t')
# return self.offset() / t
# def createStartModel(self):
#         """create homogeneous starting model from the mean apparent velocity"""
# va = self.getVA()
# nModel = self.fop.regionManager().parameterCount()
# return pg.Vector(nModel, 1./np.mean(va))
# def showVA(self, t=None, ax=None, usepos=True, name='va', squeeze=True):
# """show apparent velocity as image plot"""
# # va = self.getVA(vals=vals)
# xvec = self.dataContainer('g')
# yvec = self.dataContainer('s')
# if usepos:
# pz = pg.y(self.dataContainer.sensorPositions())
# if squeeze:
# xvec = pz[xvec]
# yvec = pz[yvec]
# else:
# pz = pg.y(self.dataContainer.sensorPositions())
# raise Exception('Implement ME')
# # xvec = px[xvec]*1000 + pz[xvec]
# # xvec = px[yvec]*1000 + pz[yvec]
# plotVecMatrix(xvec, yvec, vals=t, squeeze=squeeze, ax=ax, name=name)
# def showVAold(self, vals=None, ax=None, usepos=True, name='va'):
# """show apparent velocity as image plot (old style)"""
# va = self.getVA(t=vals)
# data = self.dataContainer
# A = np.ones((data.sensorCount(), data.sensorCount())) * np.nan
# for i in range(data.size()):
# A[int(data('s')[i]), int(data('g')[i])] = va[i]
# if ax is None:
# fig, ax = plt.subplots()
# self.figs[name] = fig
# gci = ax.imshow(A, interpolation='nearest')
# ax.grid(True)
# if usepos:
# xt = np.linspace(0, data.sensorCount()-1, 7)
# xt.round()
# px = pg.abs(pg.y(self.dataContainer.sensorPositions()))
# ax.set_xticks(xt)
# ax.set_xticklabels([str(int(px[int(xti)])) for xti in xt])
# ax.set_yticks(xt)
# ax.set_yticklabels([str(int(px[int(xti)])) for xti in xt])
# plt.colorbar(gci, ax=ax)
# return va
# def main():
# """Main"""
# parser = MethodManager.createArgParser(dataSuffix='sgt')
# options = parser.parse_args()
# ra = Refraction(verbose=not options.quiet, debug=pg.debug())
# ra.loadData(options.dataFileName)
# ra.showData()
# ra.showVA()
# ra.createMesh(depth=options.depth)
# ra.showMesh()
# ra.invert(lam=options.lam, max_iter=options.maxIter,
# robustData=options.robustData, blockyModel=options.blockyModel)
# ra.showResult()
# if __name__ == '__main__':
# main()
# pg.wait()
|
{"hexsha": "4c6f26e667291dce8dac6c5a1d554e0b264daf99", "size": 37090, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygimli/physics/traveltime/refraction.py", "max_stars_repo_name": "baender/gimli", "max_stars_repo_head_hexsha": "eb9a2204669cf11209b9577472f61ac70217a191", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-27T18:37:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T18:37:08.000Z", "max_issues_repo_path": "pygimli/physics/traveltime/refraction.py", "max_issues_repo_name": "baender/gimli", "max_issues_repo_head_hexsha": "eb9a2204669cf11209b9577472f61ac70217a191", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pygimli/physics/traveltime/refraction.py", "max_forks_repo_name": "baender/gimli", "max_forks_repo_head_hexsha": "eb9a2204669cf11209b9577472f61ac70217a191", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1643286573, "max_line_length": 81, "alphanum_fraction": 0.5522243192, "include": true, "reason": "import numpy", "num_tokens": 8855}
|
import cocos.device
import cocos.numerics as cn
import numpy as np
import pytest
test_data = [np.array([[1, -1],
[0, 1]],
dtype=np.int32),
np.array([[0.2, 1.0, 0.5],
[0.4, 0.5, 0.6],
[0.7, 0.2, 0.25]],
dtype=np.float32)]
@pytest.mark.parametrize("A_numpy", test_data)
def test_trigonometric(A_numpy):
cocos.device.init()
A_cocos = cn.array(A_numpy)
all_positive = np.all(A_numpy > 0)
all_finite = cn.all(cn.isfinite(A_cocos))
assert np.allclose(np.isfinite(A_numpy), cn.isfinite(A_cocos))
if all_finite:
assert np.allclose(np.sinh(A_numpy), cn.sinh(A_cocos))
assert np.allclose(np.cosh(A_numpy), cn.cosh(A_cocos))
assert np.allclose(np.tanh(A_numpy), cn.tanh(A_cocos))
assert np.allclose(np.sin(A_numpy), cn.sin(A_cocos))
assert np.allclose(np.cos(A_numpy), cn.cos(A_cocos))
assert np.allclose(np.tan(A_numpy), cn.tan(A_cocos))
assert np.allclose(np.arcsin(np.sin(A_numpy)),
cn.arcsin(cn.sin(A_cocos)))
assert np.allclose(np.arccos(np.cos(A_numpy)),
cn.arccos(cn.cos(A_cocos)))
assert np.allclose(np.arctan(np.tan(A_numpy)),
cn.arctan(cn.tan(A_cocos)))
assert np.allclose(np.arcsinh(np.sinh(A_numpy)),
cn.arcsinh(cn.sinh(A_cocos)))
assert np.allclose(np.arccosh(np.cosh(A_numpy)),
cn.arccosh(cn.cosh(A_cocos)))
assert np.allclose(np.arctanh(np.tanh(A_numpy)),
cn.arctanh(cn.tanh(A_cocos)))
if cn.isfloating(A_cocos):
assert np.allclose(A_cocos, cn.arcsin(cn.sin(A_cocos)))
assert np.allclose(A_cocos, cn.arccos(cn.cos(A_cocos)))
assert np.allclose(A_cocos, cn.arctan(cn.tan(A_cocos)))
assert np.allclose(A_cocos, cn.arcsinh(cn.sinh(A_cocos)))
assert np.allclose(A_cocos, cn.arccosh(cn.cosh(A_cocos)))
assert np.allclose(A_cocos, cn.arctanh(cn.tanh(A_cocos)))
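# Note on the inverse checks above: arccos(cos(x)) == x only holds on the
# principal branch x in [0, pi] (arcsin/arctan analogously on [-pi/2, pi/2]),
# which the positive float32 test matrix satisfies. A hedged illustration:
#     np.arccos(np.cos(4.0))  # ~2.283 (= 2*pi - 4), not 4.0
#     np.arccos(np.cos(0.5))  # 0.5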
|
{"hexsha": "25cb8e9f5e83856291e1f8e6f37c07367c1132e0", "size": 2161, "ext": "py", "lang": "Python", "max_stars_repo_path": "cocos/tests/test_numerics/test_arith/test_trigonometric.py", "max_stars_repo_name": "michaelnowotny/cocos", "max_stars_repo_head_hexsha": "3c34940d7d9eb8592a97788a5df84b8d472f2928", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 101, "max_stars_repo_stars_event_min_datetime": "2019-03-30T05:23:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-27T09:09:40.000Z", "max_issues_repo_path": "cocos/tests/test_numerics/test_arith/test_trigonometric.py", "max_issues_repo_name": "michaelnowotny/cocos", "max_issues_repo_head_hexsha": "3c34940d7d9eb8592a97788a5df84b8d472f2928", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-04-17T06:04:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-14T17:36:01.000Z", "max_forks_repo_path": "cocos/tests/test_numerics/test_arith/test_trigonometric.py", "max_forks_repo_name": "michaelnowotny/cocos", "max_forks_repo_head_hexsha": "3c34940d7d9eb8592a97788a5df84b8d472f2928", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-02-07T14:29:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-09T17:54:07.000Z", "avg_line_length": 36.6271186441, "max_line_length": 69, "alphanum_fraction": 0.5728829246, "include": true, "reason": "import numpy", "num_tokens": 574}
|
import numpy as np
import miepy
nm = 1e-9
v = 10
u = 0
n = 10
m = 0
ftype = 'electric'
N,M = miepy.vsh.VSH(n, m)
if ftype == 'magnetic':
func = M
elif ftype == 'electric':
func = N
k = 2*np.pi/(600*nm)
r = 600*nm
origin_1 = np.array([0,0,0])
THETA, PHI = miepy.coordinates.sphere_mesh(800)
E = func(r, THETA, PHI, k)
p = miepy.vsh.integral_project_fields_onto(E, r, k, ftype, n, m, spherical=True)
# origin_2 = np.array([100*nm, 0, 0])  # unused: superseded by the spherical definition below
origin_2 = miepy.coordinates.sph_to_cart(2/k, 0.5, 0.5)
x,y,z = miepy.coordinates.sph_to_cart(r, THETA, PHI, origin=origin_2)
R_p, THETA_p, PHI_p = miepy.coordinates.cart_to_sph(x, y, z)
E_prime = func(R_p, THETA_p, PHI_p, k)
E_prime = miepy.coordinates.vec_sph_to_cart(E_prime, THETA_p, PHI_p)
E_prime = miepy.coordinates.vec_cart_to_sph(E_prime, THETA, PHI)
p_prime = miepy.vsh.integral_project_fields_onto(E_prime, r, k, ftype, v, u, spherical=True, mode=miepy.vsh.vsh_mode.incident)
A_numeric = p_prime/p  # translation coefficient recovered from the projected fields
print(A_numeric)
r_ij, theta_ij, phi_ij = miepy.coordinates.cart_to_sph(*(origin_2 - origin_1))
A = miepy.vsh.A_translation(m, n, u, v, r_ij, theta_ij, phi_ij, k)
# A = miepy.vsh.A_translation(-2, 6, -2, 10, r_ij, theta_ij, phi_ij, k)
# A = miepy.vsh.A_translation(0, 10, 0, 10, r_ij, theta_ij, phi_ij, k)
print(A)
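# Hedged consistency check (assumes both coefficients are scalars): the
# numerically projected ratio and the analytic translation should agree.
print('relative difference:', abs(A - A_numeric) / abs(A))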
|
{"hexsha": "bc0389fd122789ac4dda72bc5ed118ba46512c22", "size": 1258, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/tests/vsh_translation.py", "max_stars_repo_name": "johnaparker/MiePy", "max_stars_repo_head_hexsha": "5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-05-30T06:45:29.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-30T19:58:56.000Z", "max_issues_repo_path": "examples/tests/vsh_translation.py", "max_issues_repo_name": "johnaparker/MiePy", "max_issues_repo_head_hexsha": "5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/tests/vsh_translation.py", "max_forks_repo_name": "johnaparker/MiePy", "max_forks_repo_head_hexsha": "5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-12-13T02:05:31.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-23T07:11:30.000Z", "avg_line_length": 27.9555555556, "max_line_length": 126, "alphanum_fraction": 0.6971383148, "include": true, "reason": "import numpy", "num_tokens": 478}
|
#ipython --pylab
import scipy
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pandas
import pylab
np.set_printoptions(threshold=np.inf)  # threshold=np.nan is rejected by newer NumPy
plt.rc('font', family='serif', serif='Times New Roman')
left1 = 0.025
bottom1 = 0.15
width1 = 0.45
height1 = 0.8
fig = plt.figure(1, figsize=(5,3))
axg1 = plt.axes([left1,bottom1,width1,height1])
file_hist = 'results/WOA/Thunnus_maccoyii/p50depth/woa.p50depthav.Thunnus_maccoyii.nc'
file2_hist = 'data/IUCN/csv_5deg/IUCN_5deg_Thunnus_maccoyii.csv'
nc = Dataset(file_hist,'r')
lats = nc.variables['LAT'][:]
lons = nc.variables['LON'][:]
depth = nc.variables['P50DEPTHAV'][:]
depth = depth.squeeze()
agree = pandas.read_csv(file2_hist, names=['lons', 'lats'])
agree['lons2'] = np.where(agree['lons'] <= 20 , agree['lons'] + 360, agree['lons'])
agreelons = agree['lons2']
agreelats = agree['lats']
m = Basemap(lat_1=-40.,lat_2=0.,llcrnrlon=70,llcrnrlat=-36,urcrnrlon=140, urcrnrlat=0, projection='lcc',lon_0=140)
x, y = m(*np.meshgrid(lons, lats))
a, b = m(agreelons.to_numpy(), agreelats.to_numpy())  # DataFrame.as_matrix was removed in pandas 1.0
m.drawmapboundary(fill_color='#cccccc') #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
levels=[0,100,200,300,400,500,600,700,800,900,1000]
im1 = m.contourf(x,y,depth,levels, cmap='plasma_r',extend='max')
im2 = m.scatter(a,b,s=5, marker='o', facecolor='0', lw=0)
plt.title("Present-day (WOA Data)", fontsize=12)
left2 = 0.525
bottom2 = 0.15
width2 = 0.45
height2 = 0.8
axg2 = plt.axes([left2,bottom2,width2,height2])
file_future = 'results/modelmean/modelmean.p50depth.Thunnus_maccoyii.nc'
file2_future = 'data/IUCN/csv_5deg/IUCN_5deg_Thunnus_maccoyii.csv'
nc = Dataset(file_future,'r')
lats = nc.variables['LAT'][:]
lons = nc.variables['LON'][:]
depth = nc.variables['MODELMEAN'][:]
depth = depth.squeeze()
agree = pandas.read_csv(file2_future, names=['lons', 'lats'])
agree['lons2'] = np.where(agree['lons'] <= 20 , agree['lons'] + 360, agree['lons'])
agreelons = agree['lons2']
agreelats = agree['lats']
m = Basemap(lat_1=-40.,lat_2=0.,llcrnrlon=70,llcrnrlat=-36,urcrnrlon=140, urcrnrlat=0, projection='lcc',lon_0=140)
x, y = m(*np.meshgrid(lons, lats))
a, b = m(agreelons.to_numpy(), agreelats.to_numpy())  # DataFrame.as_matrix was removed in pandas 1.0
m.drawmapboundary(fill_color='#cccccc') #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
levels=[0,100,200,300,400,500,600,700,800,900,1000]
im1 = m.contourf(x,y,depth,levels, cmap='plasma_r',extend='max')
im2 = m.scatter(a,b,s=5, marker='o', facecolor='0', lw=0)
plt.title("Future (RCP 8.5)", fontsize=12)
cax = fig.add_axes([0.29, 0.08, 0.42, 0.05])
cb=fig.colorbar(im1, cax=cax, ticks=levels, orientation='horizontal')
cb.set_ticklabels([0,'',200,'',400,'',600,'',800,'',1000])
pylab.text(0.28, 1.4, 'P$_{50}$ depth (m)', fontsize = 12)
outfig = 'graphs/southernbluefin_spawning.ps'
plt.savefig(outfig, dpi=300, bbox_inches=0)
|
{"hexsha": "f5888fdf0fd798a48801004d0bbf449e4b81907f", "size": 3056, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyCode/IUCN_southernbluefin_spawning.py", "max_stars_repo_name": "kallisons/CMIP5_p50", "max_stars_repo_head_hexsha": "ee8e078720d1a009cfb9355a9cadb07455b674ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyCode/IUCN_southernbluefin_spawning.py", "max_issues_repo_name": "kallisons/CMIP5_p50", "max_issues_repo_head_hexsha": "ee8e078720d1a009cfb9355a9cadb07455b674ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyCode/IUCN_southernbluefin_spawning.py", "max_forks_repo_name": "kallisons/CMIP5_p50", "max_forks_repo_head_hexsha": "ee8e078720d1a009cfb9355a9cadb07455b674ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-07T00:42:45.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-27T21:44:36.000Z", "avg_line_length": 39.1794871795, "max_line_length": 114, "alphanum_fraction": 0.7185863874, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1106}
|
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import model
import data
from utils import tfmri
import utils.logging
# Data dimensions
tf.app.flags.DEFINE_integer('shape_y', 320, 'Image shape in Y')
tf.app.flags.DEFINE_integer('shape_z', 256, 'Image shape in Z')
tf.app.flags.DEFINE_integer('shape_calib', 20, 'Shape of calibration region')
tf.app.flags.DEFINE_integer('num_channels', 8,
'Number of channels for input datasets.')
tf.app.flags.DEFINE_integer(
'num_maps', 1, 'Number of eigen maps for input sensitivity maps.')
# For logging
tf.app.flags.DEFINE_string('model_dir', 'summary/model',
'Directory for checkpoints and event logs.')
tf.app.flags.DEFINE_string('warm_start_dir', None,
'Directory for warm starting model.')
tf.app.flags.DEFINE_integer('num_summary_image', 4,
'Number of images for summary output')
tf.app.flags.DEFINE_integer('log_step_count_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer('save_summary_steps', 100,
'The frequency with which summaries are saved')
tf.app.flags.DEFINE_integer('save_checkpoints_secs', 60,
'The frequency with which the model is saved [s]')
tf.app.flags.DEFINE_integer('random_seed', 1000,
'Seed to initialize random number generators.')
# For model
tf.app.flags.DEFINE_integer('unrolled_steps', 4,
'Number of grad steps for unrolled algorithms')
tf.app.flags.DEFINE_integer('unrolled_num_features', 128,
'Number of feature maps in each ResBlock')
tf.app.flags.DEFINE_integer('unrolled_num_resblocks', 3,
'Number of ResBlocks per iteration')
tf.app.flags.DEFINE_boolean('unrolled_share', False,
'Share weights between iterations')
tf.app.flags.DEFINE_boolean('hard_projection', False,
'Turn on/off hard data projection at the end')
# Optimization Flags
tf.app.flags.DEFINE_string('device', '0', 'GPU device to use.')
tf.app.flags.DEFINE_integer('batch_size', 4,
'The number of samples in each batch.')
tf.app.flags.DEFINE_float('loss_l1', 1, 'L1 loss')
tf.app.flags.DEFINE_float('loss_l2', 0, 'L2 loss')
tf.app.flags.DEFINE_float('loss_adv', 0, 'Adversarial loss')
tf.app.flags.DEFINE_integer(
'adv_steps', 5,
'Steps to train adversarial loss for each recon train step')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1e-8,
'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_integer('max_steps', None,
'The maximum number of training steps.')
# Dataset Flags
tf.app.flags.DEFINE_string(
'dir_validate', 'data/tfrecord/validate',
'Directory for validation data (None turns off validation)')
tf.app.flags.DEFINE_string('dir_masks', 'data/masks',
'Directory where masks are located.')
tf.app.flags.DEFINE_string('dir_train', 'data/tfrecord/train',
'Directory where training data are located.')
FLAGS = tf.app.flags.FLAGS
logger = utils.logging.logger
class RunTrainOpHooks(tf.train.SessionRunHook):
"""Based on tf.contrib.gan training."""
def __init__(self, train_op, train_steps):
self.train_op = train_op
self.train_steps = train_steps
def before_run(self, run_context):
for _ in range(self.train_steps):
run_context.session.run(self.train_op)
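# Hedged usage sketch: in model_fn below, the alternating GAN-style updates
# are driven entirely by RunTrainOpHooks, which is why the EstimatorSpec's
# train_op is left as tf.no_op():
#   adv_hook = RunTrainOpHooks(train_adv_op, params['adv_steps'])  # N critic steps
#   recon_hook = RunTrainOpHooks(train_recon_op, 1)                # one recon step
#   tf.estimator.EstimatorSpec(mode, loss=loss_total, train_op=tf.no_op(),
#                              training_hooks=[adv_hook, recon_hook])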
def model_fn(features, labels, mode, params):
"""Main model function to setup training/testing."""
training = (mode == tf.estimator.ModeKeys.TRAIN)
adv_scope = 'Adversarial'
recon_scope = params['recon_scope']
ks_example = features['ks_input']
sensemap = features['sensemap']
with tf.name_scope('FindMask'):
mask_example = tfmri.kspace_mask(ks_example, dtype=tf.complex64)
image_out, kspace_out, iter_out = model.unrolled_prox(
ks_example,
sensemap,
num_grad_steps=params['unrolled_steps'],
resblock_num_features=params['unrolled_num_features'],
resblock_num_blocks=params['unrolled_num_resblocks'],
resblock_share=params['unrolled_share'],
training=training,
hard_projection=params['hard_projection'],
mask=mask_example,
scope=recon_scope)
predictions = {'results': image_out}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
ks_truth = labels
with tf.name_scope('ModelTranspose'):
if training:
# If data was acquired with corner cutting, mask out corners
mask_recon = features['mask_recon']
else:
mask_recon = 1
image_truth = tfmri.model_transpose(ks_truth * mask_recon, sensemap)
image_example = tfmri.model_transpose(ks_example * mask_recon,
sensemap)
with tf.name_scope('loss'):
loss_total = 0
loss_l1 = tf.reduce_mean(
tf.abs(image_out - image_truth), name='loss-l1')
loss_l2 = tf.reduce_mean(
tf.square(tf.abs(image_out - image_truth)), name='loss-l2')
if params['loss_l1'] > 0:
logger.info('Loss: adding l1 loss {}...'.format(params['loss_l1']))
loss_total += params['loss_l1'] * loss_l1
if params['loss_l2'] > 0:
logger.info('Loss: adding l2 loss {}...'.format(params['loss_l2']))
loss_total += params['loss_l2'] * loss_l2
tf.summary.scalar('l1', loss_l1)
tf.summary.scalar('l2', loss_l2)
if params['loss_adv'] > 0:
logger.info('Loss: adding adversarial loss {}...'.format(
params['loss_adv']))
adv_truth = model.adversarial(
image_truth, training=training, scope=adv_scope)
adv_recon = model.adversarial(
image_out, training=training, scope=adv_scope)
adv_mse = tf.reduce_mean(tf.square(tf.abs(adv_truth - adv_recon)))
loss_adv_d = -adv_mse # train as "discriminator"
loss_adv_g = adv_mse # train as "generator"
loss_total += params['loss_adv'] * loss_adv_g
tf.summary.scalar('adv-l2', adv_mse)
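            # Note: minimizing loss_adv_d = -adv_mse trains the 'Adversarial'
            # network to separate truth and reconstruction features, while
            # loss_adv_g = +adv_mse pulls the reconstruction toward the truth
            # features; a least-squares feature-matching objective.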
metric_mse = tf.metrics.mean_squared_error(image_truth, image_out)
metrics = {'mse': metric_mse}
num_summary_image = params.get('num_summary_image', 0)
with tf.name_scope('mask'):
summary_mask = tfmri.sumofsq(mask_example, keepdims=True)
tf.summary.image('mask', summary_mask, max_outputs=num_summary_image)
with tf.name_scope('sensemap'):
summary_truth = tf.transpose(sensemap, [0, 3, 1, 4, 2])
summary_truth = tf.reshape(summary_truth, [
tf.shape(summary_truth)[0],
tf.reduce_prod(tf.shape(summary_truth)[1:3]),
tf.reduce_prod(tf.shape(summary_truth)[3:]), 1
])
tf.summary.image(
'mag', tf.abs(summary_truth), max_outputs=num_summary_image)
tf.summary.image(
'phase', tf.angle(summary_truth), max_outputs=num_summary_image)
image_summary = {
'input': image_example,
'output': image_out,
'truth': image_truth
}
kspace_summary = {
'input': features['ks_input'],
'output': kspace_out,
'truth': ks_truth
}
with tf.name_scope('max'):
for key in kspace_summary.keys():
tf.summary.scalar('kspace/' + key,
tf.reduce_max(tf.abs(kspace_summary[key])))
for key in image_summary.keys():
tf.summary.scalar(key, tf.reduce_max(tf.abs(image_summary[key])))
tf.summary.scalar('sensemap', tf.reduce_max(tf.abs(sensemap)))
with tf.name_scope('kspace'):
summary_kspace = None
for key in sorted(kspace_summary.keys()):
summary_tmp = tfmri.sumofsq(kspace_summary[key], keepdims=True)
if summary_kspace is None:
summary_kspace = summary_tmp
else:
summary_kspace = tf.concat((summary_kspace, summary_tmp),
axis=2)
summary_kspace = tf.log(summary_kspace + 1e-6)
tf.summary.image(
'-'.join(sorted(kspace_summary.keys())),
summary_kspace,
max_outputs=num_summary_image)
with tf.name_scope('image'):
summary_image = None
for key in sorted(image_summary.keys()):
summary_tmp = tfmri.sumofsq(image_summary[key], keepdims=True)
if summary_image is None:
summary_image = summary_tmp
else:
summary_image = tf.concat((summary_image, summary_tmp), axis=2)
tf.summary.image(
'-'.join(sorted(image_summary.keys())),
summary_image,
max_outputs=num_summary_image)
with tf.name_scope('recon'):
summary_iter = None
for i in range(params['unrolled_steps']):
iter_name = 'iter_%02d' % i
tmp = tfmri.sumofsq(iter_out[iter_name], keepdims=True)
if summary_iter is None:
summary_iter = tmp
else:
summary_iter = tf.concat((summary_iter, tmp), axis=2)
tf.summary.scalar('max/' + iter_name, tf.reduce_max(tmp))
if summary_iter is not None:
tf.summary.image(
'iter/image',
summary_iter,
max_outputs=params['num_summary_image'])
if mode == tf.estimator.ModeKeys.EVAL:
eval_hook = tf.train.SummarySaverHook(
save_steps=1,
output_dir=params['dir_validate_results'],
summary_op=tf.summary.merge_all())
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss_total,
predictions=predictions,
evaluation_hooks=[eval_hook],
eval_metric_ops=metrics)
train_op = tf.no_op()
training_hooks = []
update_recon_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope=recon_scope)
var_recon = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=recon_scope)
opt_recon = tf.train.AdamOptimizer(
params['learning_rate'],
beta1=params['adam_beta1'],
beta2=params['adam_beta2'],
epsilon=params['adam_epsilon'])
with tf.control_dependencies(update_recon_ops):
train_recon_op = opt_recon.minimize(
loss=loss_total,
global_step=tf.train.get_global_step(),
var_list=var_recon)
recon_hook = RunTrainOpHooks(train_recon_op, 1)
training_hooks.insert(0, recon_hook)
if params['loss_adv'] > 0:
update_adv_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope=adv_scope)
var_adv = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=adv_scope)
opt_adv = tf.train.AdamOptimizer(
params['learning_rate'],
beta1=params['adam_beta1'],
beta2=params['adam_beta2'],
epsilon=params['adam_epsilon'])
with tf.control_dependencies(update_adv_ops):
train_adv_op = opt_adv.minimize(
loss=loss_adv_d,
global_step=tf.train.get_global_step(),
var_list=var_adv)
logger.info('Training Adversarial loss: {} for every 1 step'.format(
params['adv_steps']))
adv_hook = RunTrainOpHooks(train_adv_op, params['adv_steps'])
training_hooks.insert(0, adv_hook)
    logger.info('Number of variables:')
num_var_recon = np.sum(
[np.prod(v.get_shape().as_list()) for v in var_recon])
logger.info(' {}: {}'.format(recon_scope, num_var_recon))
if params['loss_adv'] > 0:
num_var_adv = np.sum(
[np.prod(v.get_shape().as_list()) for v in var_adv])
logger.info(' {}: {}'.format(adv_scope, num_var_adv))
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss_total,
train_op=train_op,
training_hooks=training_hooks,
eval_metric_ops=metrics)
def main(_):
"""Execute main function."""
tf.logging.set_verbosity(tf.logging.INFO)
logger.setLevel(utils.logging.logging.INFO)
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.device
logger.info('Using GPU device {}...'.format(FLAGS.device))
if FLAGS.random_seed >= 0:
logger.info('Using random seed of {}...'.format(FLAGS.random_seed))
out_shape = [FLAGS.shape_z, FLAGS.shape_y]
dataset_train = data.create_dataset(
FLAGS.dir_train,
FLAGS.dir_masks,
batch_size=FLAGS.batch_size,
out_shape=out_shape,
shape_calib=FLAGS.shape_calib,
num_channels=FLAGS.num_channels,
num_maps=FLAGS.num_maps,
random_seed=FLAGS.random_seed)
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True # pylint: disable=E1101
session_config.allow_soft_placement = True
dir_val_results = os.path.join(FLAGS.model_dir, 'validate')
config = tf.estimator.RunConfig(
log_step_count_steps=FLAGS.log_step_count_steps,
save_summary_steps=FLAGS.save_summary_steps,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
model_dir=FLAGS.model_dir,
tf_random_seed=FLAGS.random_seed,
session_config=session_config)
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
recon_scope = 'ReconNetwork'
model_params = {
'learning_rate': FLAGS.learning_rate,
'adam_beta1': FLAGS.adam_beta1,
'adam_beta2': FLAGS.adam_beta2,
'adam_epsilon': FLAGS.opt_epsilon,
'loss_l1': FLAGS.loss_l1,
'loss_l2': FLAGS.loss_l2,
'loss_adv': FLAGS.loss_adv,
'adv_steps': FLAGS.adv_steps,
'unrolled_steps': FLAGS.unrolled_steps,
'unrolled_num_features': FLAGS.unrolled_num_features,
'unrolled_num_resblocks': FLAGS.unrolled_num_resblocks,
'unrolled_share': FLAGS.unrolled_share,
'hard_projection': FLAGS.hard_projection,
'num_summary_image': FLAGS.num_summary_image,
'dir_validate_results': dir_val_results,
'recon_scope': recon_scope
}
model.save_params(FLAGS.model_dir, model_params)
warm_start = None
if FLAGS.warm_start_dir is not None:
warm_start = tf.estimator.WarmStartSettings(
FLAGS.warm_start_dir, vars_to_warm_start=recon_scope + '*')
estimator = tf.estimator.Estimator(
model_fn=model_fn,
params=model_params,
config=config,
warm_start_from=warm_start)
def _prep_data(dataset):
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def train_input_fn():
return _prep_data(dataset_train)
if FLAGS.dir_validate:
dataset_validate = data.create_dataset(
FLAGS.dir_validate,
FLAGS.dir_masks,
num_channels=FLAGS.num_channels,
num_maps=FLAGS.num_maps,
batch_size=FLAGS.batch_size,
out_shape=out_shape)
def validate_input_fn():
return _prep_data(dataset_validate)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=FLAGS.max_steps)
eval_spec = tf.estimator.EvalSpec(
input_fn=validate_input_fn,
steps=1,
start_delay_secs=10 * 60,
throttle_secs=10 * 60)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
else:
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.max_steps)
if __name__ == '__main__':
tf.app.run()
|
{"hexsha": "22a9e7ed0653fb0dde4b3d67be604203ce1ab6d4", "size": 16427, "ext": "py", "lang": "Python", "max_stars_repo_path": "recon_train.py", "max_stars_repo_name": "MRSRL/dl-cs", "max_stars_repo_head_hexsha": "df8b541b8797ffceb65cdbc06c93c377a22d777a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2019-03-20T12:16:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T15:14:45.000Z", "max_issues_repo_path": "recon_train.py", "max_issues_repo_name": "MRSRL/dl-cs", "max_issues_repo_head_hexsha": "df8b541b8797ffceb65cdbc06c93c377a22d777a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-09-27T20:39:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-22T23:30:18.000Z", "max_forks_repo_path": "recon_train.py", "max_forks_repo_name": "MRSRL/dl-cs", "max_forks_repo_head_hexsha": "df8b541b8797ffceb65cdbc06c93c377a22d777a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-29T01:40:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-25T15:22:11.000Z", "avg_line_length": 38.8345153664, "max_line_length": 79, "alphanum_fraction": 0.6366956839, "include": true, "reason": "import numpy", "num_tokens": 3659}
|
/*
@copyright Louis Dionne 2014
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#include <boost/hana/detail/assert.hpp>
#include <boost/hana/ext/std/integral_constant.hpp>
#include <boost/hana/functional.hpp>
#include <boost/hana/list/instance.hpp>
#include <boost/hana/maybe.hpp>
#include <boost/hana/type.hpp>
#include <boost/hana/type_list.hpp>
#include <type_traits>
using namespace boost::hana;
int main() {
//! [main]
BOOST_HANA_CONSTEXPR_ASSERT(find(list(1.0, 2, '3'), trait_<std::is_integral>) == just(2));
BOOST_HANA_CONSTANT_ASSERT(find(list(1.0, 2, '3'), trait_<std::is_class>) == nothing);
constexpr auto types = type_list<char, int, unsigned, long, unsigned long>;
BOOST_HANA_CONSTANT_ASSERT(find(types, _ == type<unsigned>) == just(type<unsigned>));
BOOST_HANA_CONSTANT_ASSERT(find(types, _ == type<void>) == nothing);
//! [main]
}
|
{"hexsha": "474a3a83d7b36729b2bfb5a4f88a3205e081e3dd", "size": 973, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "example/searchable/find.cpp", "max_stars_repo_name": "rbock/hana", "max_stars_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-05-07T14:29:13.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-04T10:59:46.000Z", "max_issues_repo_path": "example/searchable/find.cpp", "max_issues_repo_name": "rbock/hana", "max_issues_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/searchable/find.cpp", "max_forks_repo_name": "rbock/hana", "max_forks_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5517241379, "max_line_length": 94, "alphanum_fraction": 0.7163412127, "num_tokens": 253}
|
"""
`x, y = noisy_function(fun, x; noise = 0.01, f_rand = randn)`
Generates a noisy response `y` for the given function `fun`
by adding `noise .* f_rand.()` (one draw per element) to `fun.(x)`.
"""
function noisy_function(fun::Function, x::AbstractVector{T}; noise::Real = 0.01, f_rand::Function = randn) where T<:Real
x_vec = collect(x)
y = fun.(x_vec) .+ noise .* f_rand.()
x_vec, y
end
"""
`x, y = noisy_sin(n, start, stop; noise = 0.3, f_rand = randn)`
Generates `n` noisy, equally spaced samples of a sine from `start` to `stop`
by adding `noise .* f_rand.()` to `sin.(x)`.
"""
function noisy_sin(n::Int = 50, start::Real = 0, stop::Real = 2π; noise::Real = 0.3, f_rand::Function = randn)
noisy_function(sin, range(start, stop=stop, length=n); noise = noise, f_rand = f_rand)
end
"""
`x, y = noisy_poly(coef, x; noise = 0.01, f_rand = randn)`
Generates a noisy response for a polynomial of degree `length(coef) - 1`
using the vector `x` as input and adding `noise .* f_rand.()` to the result.
The vector `coef` contains the coefficients for the terms of the polynomial.
The first element of `coef` denotes the coefficient for the term with
the highest degree, while the last element of `coef` denotes the intercept.
"""
function noisy_poly(coef::AbstractVector{R}, x::AbstractVector{T}; noise::Real = 0.1, f_rand::Function = randn) where {T<:Real,R<:Real}
n = length(x)
m = length(coef)
x_vec = collect(x)
y = zeros(n)
@inbounds for i = 1:n
for k = 1:m
y[i] += coef[k] * x_vec[i]^(m-k)
end
end
y .+= noise .* f_rand.()
x_vec, y
end
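# E.g. `noisy_poly([1, 0, -2], x)` samples y = x^2 - 2 plus noise: the
# leading coefficient multiplies x^2 and the trailing entry is the intercept.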
"""
`x, y = noisy_spiral(n, a, theta, b; noise = 0.01, f_rand = randn)`
Generates `2n` noisy points (`n` per label) on a two-armed spiral. Uses the radius,
angle and scaling arguments to space the points in 2D and adds `noise .* f_rand.()`
to each coordinate.
"""
function noisy_spiral(n::Int = 97, a::Real = 6.5, theta::Real = 16.0, b::Real=104.0; noise::Real = 0.1, f_rand::Function = randn)
x = zeros(Float64, (2, 2*n))
y = zeros(Int, 2*n)
index = 0:1.0:(n-1)
for i = 1:n
_angle = index[i]*pi/theta
_radius = a * (b-index[i]) / b
x_coord = _radius * sin(_angle)
y_coord = _radius * cos(_angle)
x[1, i] = x_coord
x[2, i] = y_coord
x[1, n+i] = -(x_coord)
x[2, n+i] = -(y_coord)
y[i] = 1
y[n+i] = 0
end
x[1, :] .+= noise .* f_rand.()
x[2, :] .+= noise .* f_rand.()
x, y
end
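# Hedged smoke test of the generators above (sizes follow directly from the
# definitions; uses only Base):
#     x, y = noisy_sin(100)
#     @assert length(x) == length(y) == 100
#     X, labels = noisy_spiral(50)
#     @assert size(X) == (2, 100) && length(labels) == 100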
|
{"hexsha": "9ae879dadeb5f16f7a3800be73de630a8d08df64", "size": 2512, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/noisy_function.jl", "max_stars_repo_name": "UnofficialJuliaMirror/MLDataUtils.jl-cc2ba9b6-d476-5e6d-8eaf-a92d5412d41d", "max_stars_repo_head_hexsha": "2f845c6482e821d56f75353318a8c3bd507f5b1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 107, "max_stars_repo_stars_event_min_datetime": "2016-03-14T19:43:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T22:24:41.000Z", "max_issues_repo_path": "src/noisy_function.jl", "max_issues_repo_name": "UnofficialJuliaMirror/MLDataUtils.jl-cc2ba9b6-d476-5e6d-8eaf-a92d5412d41d", "max_issues_repo_head_hexsha": "2f845c6482e821d56f75353318a8c3bd507f5b1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2016-02-12T22:06:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-03T08:46:23.000Z", "max_forks_repo_path": "src/noisy_function.jl", "max_forks_repo_name": "UnofficialJuliaMirror/MLDataUtils.jl-cc2ba9b6-d476-5e6d-8eaf-a92d5412d41d", "max_forks_repo_head_hexsha": "2f845c6482e821d56f75353318a8c3bd507f5b1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2016-03-14T19:43:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T21:39:17.000Z", "avg_line_length": 33.9459459459, "max_line_length": 135, "alphanum_fraction": 0.6150477707, "num_tokens": 845}
|
import unittest
import numpy as np
import openjij as oj
class UtilsTest(unittest.TestCase):
def test_benchmark(self):
h = {0: 1}
J = {(0, 1):-1.0, (1,2): -1.0}
sa_samp = oj.SASampler()
def solver(time_param, iteration):
sa_samp.step_num = time_param
sa_samp.iteration = iteration
return sa_samp.sample_ising(h, J)
ground_state = [-1, -1, -1]
ground_energy = oj.BinaryQuadraticModel(h, J).calc_energy(ground_state)
        step_num_list = np.linspace(10, 50, 5, dtype=int)
bm_res = oj.benchmark([ground_state], ground_energy, solver, time_param_list=step_num_list)
self.assertTrue(set(bm_res) >= {'time', 'error', 'e_res', 'tts', 'tts_threshold_prob'})
        self.assertEqual(len(bm_res), len(step_num_list))
class ModelTest(unittest.TestCase):
def test_bqm(self):
h = {}
J = {(0,1): -1.0, (1,2): -3.0}
bqm = oj.BinaryQuadraticModel(h=h, J=J)
self.assertEqual(type(bqm.ising_interactions()), np.ndarray)
        correct_mat = np.array([[0, -1, 0], [-1, 0, -3], [0, -3, 0]])
        np.testing.assert_array_equal(bqm.ising_interactions(), correct_mat.astype(float))
class SamplerOptimizeTest(unittest.TestCase):
def setUp(self):
self.h = {0: -1, 1: -1}
self.J = {(0,1): -1.0, (1,2): -1.0}
self.Q = {(i,i): hi for i, hi in self.h.items()}
self.Q.update(self.J)
def test_sa(self):
response = oj.SASampler().sample_ising(self.h, self.J)
self.assertEqual(len(response.states), 1)
self.assertListEqual(response.states[0], [1,1,1])
response = oj.SASampler().sample_qubo(self.Q)
self.assertEqual(len(response.states), 1)
self.assertListEqual(response.states[0], [1,1,1])
def test_sqa(self):
response = oj.SQASampler().sample_ising(self.h, self.J)
self.assertEqual(len(response.states), 1)
self.assertListEqual(response.states[0], [1,1,1])
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "395c57e79a176f6a707cbb4e06e110a999769817", "size": 2062, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test.py", "max_stars_repo_name": "y-yu/OpenJij", "max_stars_repo_head_hexsha": "ed08460b7c9f8e553d4d33e08977d465472e9c44", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test.py", "max_issues_repo_name": "y-yu/OpenJij", "max_issues_repo_head_hexsha": "ed08460b7c9f8e553d4d33e08977d465472e9c44", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test.py", "max_forks_repo_name": "y-yu/OpenJij", "max_forks_repo_head_hexsha": "ed08460b7c9f8e553d4d33e08977d465472e9c44", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.776119403, "max_line_length": 99, "alphanum_fraction": 0.6086323957, "include": true, "reason": "import numpy", "num_tokens": 592}
|
import numpy as np
import matplotlib.pyplot as plt
import subprocess
import h5py
# from tensorflow.python.keras.models import Sequential
# from tensorflow.python.keras.layers import BatchNormalization, Dense, Flatten, Input, LeakyReLU, Reshape
from os.path import abspath
from keras.models import Sequential, Model
from keras.layers import BatchNormalization, Dense, Flatten, Input, LeakyReLU, Reshape
from keras.optimizers import Adam
from keras.datasets import mnist
class GAN():
def __init__(self):
self.image_rows = 28
self.image_cols = 28
self.channels = 1
self.image_shape = (self.image_rows, self.image_cols, self.channels)
optimizer = Adam(0.0002, 0.5)
self.discriminator = self.building_discriminator()
self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
self.generator = self.building_generator()
self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
        noise_input = Input(shape=(100, ))
        image = self.generator(noise_input)
        self.discriminator.trainable = False  # freeze D while training G through the combined model
        validate = self.discriminator(image)
        self.combined = Model(noise_input, validate)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def building_generator(self):
        noise_shape = (100, )
        model = Sequential()
        model.add(Dense(256, input_shape=noise_shape))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.image_shape), activation='tanh'))
model.add(Reshape(self.image_shape))
model.summary()
        noise = Input(shape=noise_shape)
image = model(noise)
return Model(noise, image)
def building_discriminator(self):
image_shape = (self.image_rows, self.image_cols, self.channels)
model = Sequential()
model.add(Flatten(input_shape=image_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
image = Input(shape=image_shape)
validity = model(image)
return Model(image, validity)
def training(self, epochs, batch_size=128, save_interval=50):
(train_x, _), (_, _) = mnist.load_data()
        train_x = (train_x.astype(np.float32) - 127.5) / 127.5  # scale to [-1, 1] to match the tanh output
train_x = np.expand_dims(train_x, axis=3)
half_batch = int(batch_size/2)
for epoch in range(epochs):
index = np.random.randint(0, train_x.shape[0], half_batch)
images = train_x[index]
noise = np.random.normal(0, 1, (half_batch, 100))
            generated_images = self.generator.predict(noise)
            discriminator_loss_real = self.discriminator.train_on_batch(images, np.ones((half_batch, 1)))
            discriminator_loss_imaginary = self.discriminator.train_on_batch(generated_images, np.zeros((half_batch, 1)))
discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_imaginary)
noise = np.random.normal(0, 1, (batch_size, 100))
validate_y = np.array([1] * batch_size)
generator_loss = self.combined.train_on_batch(noise, validate_y)
print("%d [Discriminator loss: %0.4f, accuracy: %0.4f] [Generator loss: %0.4f]" % (epoch, discriminator_loss[0], 100 * discriminator_loss[1], generator_loss))
if epoch % save_interval == 0:
self.save_the_image(epoch)
        return self.generator  # the generator is the image-generation model saved to disk
def load_gan_model(self):
pass
def save_and_train_gan_model(self):
model=self.training(epochs=120000, batch_size=128, save_interval=800)
print("\n Saving image generation model to disk... \n")
model.save(abspath('model/artsy.h5'))
def save_the_image(self, epoch):
p, q = 5, 5
noise = np.random.normal(0, 1, (p * q, 100))
generate_images = self.generator.predict(noise)
generate_images = 0.5 * generate_images + 0.5
figure, axis = plt.subplots(p, q)
counts = 0
for i in range(p):
for j in range(q):
axis[i, j].imshow(generate_images[counts, :, :, 0], cmap='gray')
axis[i, j].axis('off')
counts += 1
try:
figure.savefig(abspath('gan_image/mnist_%d.png') % epoch)
        except Exception:
# TODO Fix the image generation
pass
plt.close()
if __name__ == '__main__':
gan = GAN()
gan.save_and_train_gan_model()
# try:
# model = load_model('model/artsy.h5')
# print("\n Loaded Image Generation Module... \n")
# except:
# print("\n Fitting Image Generation Model... \n")
# gan.training(120000, batch_size=128, save_interval=800)
# print("\n Saving image generation model to disk... \n")
|
{"hexsha": "43c5811a2893638b2a514e447b00d0f08c5746fd", "size": 5646, "ext": "py", "lang": "Python", "max_stars_repo_path": "poke-dicts/artsy.py", "max_stars_repo_name": "iheartbenzene/musical-funicular", "max_stars_repo_head_hexsha": "5fc83504874243d13aeedc97bdb955d01d64844a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-20T18:13:56.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-20T18:13:56.000Z", "max_issues_repo_path": "poke-dicts/artsy.py", "max_issues_repo_name": "iheartbenzene/musical-funicular", "max_issues_repo_head_hexsha": "5fc83504874243d13aeedc97bdb955d01d64844a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "poke-dicts/artsy.py", "max_forks_repo_name": "iheartbenzene/musical-funicular", "max_forks_repo_head_hexsha": "5fc83504874243d13aeedc97bdb955d01d64844a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9617834395, "max_line_length": 170, "alphanum_fraction": 0.5926319518, "include": true, "reason": "import numpy", "num_tokens": 1294}
|
*DECK MPMAXR
SUBROUTINE MPMAXR (X)
C***BEGIN PROLOGUE MPMAXR
C***SUBSIDIARY
C***PURPOSE Subsidiary to DQDOTA and DQDOTI
C***LIBRARY SLATEC
C***TYPE ALL (MPMAXR-A)
C***AUTHOR (UNKNOWN)
C***DESCRIPTION
C
C Sets X to the largest possible positive 'mp' number.
C
C     The argument X(*) is an INTEGER array of size 30.  See the comments
C in the routine MPBLAS for the reason for this choice.
C
C***SEE ALSO DQDOTA, DQDOTI, MPBLAS
C***ROUTINES CALLED MPCHK
C***COMMON BLOCKS MPCOM
C***REVISION HISTORY (YYMMDD)
C 791001 DATE WRITTEN
C ?????? Modified for use with BLAS. Blank COMMON changed to named
C COMMON. R given dimension 12.
C 891214 Prologue converted to Version 4.0 format. (BAB)
C 900402 Added TYPE section. (WRB)
C 930124 Increased Array size in MPCON for SUN -r8. (RWC)
C***END PROLOGUE MPMAXR
COMMON /MPCOM/ B, T, M, LUN, MXR, R(30)
INTEGER B, T, R, X(*)
C***FIRST EXECUTABLE STATEMENT MPMAXR
CALL MPCHK (1, 4)
IT = B - 1
C SET FRACTION DIGITS TO B-1
DO 10 I = 1, T
10 X(I+2) = IT
C SET SIGN AND EXPONENT
X(1) = 1
X(2) = M
RETURN
END
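C
C Example (a minimal sketch; assumes MPBLAS has initialized the /MPCOM/
C common block beforehand):
C
C     INTEGER XMAX(30)
C     CALL MPMAXR (XMAX)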
|
{"hexsha": "9eaba4fecfe88a4468060363038d118f3ce01ac6", "size": 1154, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "slatec/src/mpmaxr.f", "max_stars_repo_name": "andremirt/v_cond", "max_stars_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slatec/src/mpmaxr.f", "max_issues_repo_name": "andremirt/v_cond", "max_issues_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slatec/src/mpmaxr.f", "max_forks_repo_name": "andremirt/v_cond", "max_forks_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.85, "max_line_length": 71, "alphanum_fraction": 0.6464471404, "num_tokens": 436}
|
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
# Gaussian blur kernel
def get_gaussian_kernel(device="cpu"):
kernel = np.array([
[1, 4, 6, 4, 1],
[4, 16, 24, 16, 4],
[6, 24, 36, 24, 6],
[4, 16, 24, 16, 4],
[1, 4, 6, 4, 1]], np.float32) / 256.0
gaussian_k = torch.as_tensor(kernel.reshape(1, 1, 5, 5)).to(device)
return gaussian_k
def pyramid_down(image, device="cpu"):
gaussian_k = get_gaussian_kernel(device=device)
# channel-wise conv(important)
multiband = [F.conv2d(image[:, i:i + 1,:,:], gaussian_k, padding=2, stride=2) for i in range(3)]
down_image = torch.cat(multiband, dim=1)
return down_image
def pyramid_up(image, device="cpu"):
gaussian_k = get_gaussian_kernel(device=device)
upsample = F.interpolate(image, scale_factor=2)
multiband = [F.conv2d(upsample[:, i:i + 1,:,:], gaussian_k, padding=2) for i in range(3)]
up_image = torch.cat(multiband, dim=1)
return up_image
def gaussian_pyramid(original, n_pyramids, device="cpu"):
x = original
# pyramid down
pyramids = [original]
for i in range(n_pyramids):
x = pyramid_down(x, device=device)
pyramids.append(x)
return pyramids
def laplacian_pyramid(original, n_pyramids, device="cpu"):
# create gaussian pyramid
pyramids = gaussian_pyramid(original, n_pyramids, device=device)
# pyramid up - diff
laplacian = []
for i in range(len(pyramids) - 1):
diff = pyramids[i] - pyramid_up(pyramids[i + 1], device=device)
laplacian.append(diff)
# Add last gaussian pyramid
laplacian.append(pyramids[len(pyramids) - 1])
return laplacian
def minibatch_laplacian_pyramid(image, n_pyramids, batch_size, device="cpu"):
    n = image.size(0) // batch_size + np.sign(image.size(0) % batch_size)  # ceil(n_images / batch_size)
pyramids = []
for i in range(n):
x = image[i * batch_size:(i + 1) * batch_size]
p = laplacian_pyramid(x.to(device), n_pyramids, device=device)
p = [x.cpu() for x in p]
pyramids.append(p)
del x
result = []
for i in range(n_pyramids + 1):
x = []
for j in range(n):
x.append(pyramids[j][i])
result.append(torch.cat(x, dim=0))
return result
def extract_patches(pyramid_layer, slice_indices,
slice_size=7, unfold_batch_size=128, device="cpu"):
assert pyramid_layer.ndim == 4
n = pyramid_layer.size(0) // unfold_batch_size + np.sign(pyramid_layer.size(0) % unfold_batch_size)
# random slice 7x7
p_slice = []
for i in range(n):
# [unfold_batch_size, ch, n_slices, slice_size, slice_size]
ind_start = i * unfold_batch_size
ind_end = min((i + 1) * unfold_batch_size, pyramid_layer.size(0))
x = pyramid_layer[ind_start:ind_end].unfold(
2, slice_size, 1).unfold(3, slice_size, 1).reshape(
ind_end - ind_start, pyramid_layer.size(1), -1, slice_size, slice_size)
# [unfold_batch_size, ch, n_descriptors, slice_size, slice_size]
x = x[:,:, slice_indices,:,:]
# [unfold_batch_size, n_descriptors, ch, slice_size, slice_size]
p_slice.append(x.permute([0, 2, 1, 3, 4]))
# sliced tensor per layer [batch, n_descriptors, ch, slice_size, slice_size]
x = torch.cat(p_slice, dim=0)
# normalize along ch
std, mean = torch.std_mean(x, dim=(0, 1, 3, 4), keepdim=True)
x = (x - mean) / (std + 1e-8)
# reshape to 2rank
x = x.reshape(-1, 3 * slice_size * slice_size)
return x
def swd(image1, image2,
n_pyramids=None, slice_size=7, n_descriptors=128,
n_repeat_projection=128, proj_per_repeat=4, device="cpu", return_by_resolution=False,
pyramid_batchsize=128):
    # n_repeat_projection * proj_per_repeat = 512
    # Please change these values according to memory usage.
    # original: n_repeat_projection=4, proj_per_repeat=128
assert image1.size() == image2.size()
assert image1.ndim == 4 and image2.ndim == 4
if n_pyramids is None:
n_pyramids = int(np.rint(np.log2(image1.size(2) // 16)))
with torch.no_grad():
# minibatch laplacian pyramid for cuda memory reasons
pyramid1 = minibatch_laplacian_pyramid(image1, n_pyramids, pyramid_batchsize, device=device)
pyramid2 = minibatch_laplacian_pyramid(image2, n_pyramids, pyramid_batchsize, device=device)
result = []
for i_pyramid in range(n_pyramids + 1):
# indices
n = (pyramid1[i_pyramid].size(2) - 6) * (pyramid1[i_pyramid].size(3) - 6)
indices = torch.randperm(n)[:n_descriptors]
# extract patches on CPU
# patch : 2rank (n_image*n_descriptors, slice_size**2*ch)
p1 = extract_patches(pyramid1[i_pyramid], indices,
slice_size=slice_size, device="cpu")
p2 = extract_patches(pyramid2[i_pyramid], indices,
slice_size=slice_size, device="cpu")
p1, p2 = p1.to(device), p2.to(device)
distances = []
for j in range(n_repeat_projection):
                # random projection directions
                rand = torch.randn(p1.size(1), proj_per_repeat).to(device)  # (slice_size**2*ch)
                rand = rand / torch.std(rand, dim=0, keepdim=True)  # normalize
# projection
proj1 = torch.matmul(p1, rand)
proj2 = torch.matmul(p2, rand)
proj1, _ = torch.sort(proj1, dim=0)
proj2, _ = torch.sort(proj2, dim=0)
d = torch.abs(proj1 - proj2)
distances.append(torch.mean(d))
# swd
result.append(torch.mean(torch.stack(distances)))
# average over resolution
result = torch.stack(result) * 1e3
if return_by_resolution:
return result.cpu()
else:
return torch.mean(result).cpu()
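# Example usage (a minimal sketch; inputs are NCHW tensors of equal shape):
#
# x1 = torch.rand(8, 3, 128, 128)
# x2 = torch.rand(8, 3, 128, 128)
# score = swd(x1, x2, device="cpu")  # scalar tensor; lower means more similar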
|
{"hexsha": "7ffdd93cf681cfdc3ce86fb6089158272a6f0000", "size": 6007, "ext": "py", "lang": "Python", "max_stars_repo_path": "swd.py", "max_stars_repo_name": "WestCityInstitute/swd-pytorch", "max_stars_repo_head_hexsha": "2b0c224fa4e43ab081a40380689d6a334959eb65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 75, "max_stars_repo_stars_event_min_datetime": "2019-10-17T08:41:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T09:05:50.000Z", "max_issues_repo_path": "swd.py", "max_issues_repo_name": "WestCityInstitute/swd-pytorch", "max_issues_repo_head_hexsha": "2b0c224fa4e43ab081a40380689d6a334959eb65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-03-02T11:09:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-07T03:16:56.000Z", "max_forks_repo_path": "swd.py", "max_forks_repo_name": "WestCityInstitute/swd-pytorch", "max_forks_repo_head_hexsha": "2b0c224fa4e43ab081a40380689d6a334959eb65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-12-26T06:15:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T07:34:56.000Z", "avg_line_length": 39.7814569536, "max_line_length": 103, "alphanum_fraction": 0.6134509739, "include": true, "reason": "import numpy", "num_tokens": 1644}
|
"""
To run k-means, just call the function:
kmeans(k, data, min_vals, max_vals, max_iter=50)
with parameters:
k - number of clusters
d - data set, should be ndarray of shape (n, d) where n is the number
of data entries and d is the number of dimensions of each data entry
min_vals, max_vals - array like lists that store min and max values
for each dimension
max_iter - maximal number of iterations.
kmeans(...) returns ndarray of shape (n, d) containing labels for
every data entry.
"""
from random import randint
import numpy as np
def kmeans(k, data, min_vals, max_vals, max_iter=50):
"""
Perform k-means algorithm. k is the number of expected clusters.
Expects data to be ndarray of shape (n, d), where n is the number
of data points in the set and d is a number of dimensions.
min_vals and max_vals are lists of min and max values for each
dimension, therefore they should be array like of size d.
Returns ndarray of shape (n, d) containing labels for each data point.
"""
dimensions = data.shape[1]
min_vals = list(min_vals)
max_vals = list(max_vals)
centroids = get_centroids(k, dimensions, min_vals, max_vals)
old_centroids = np.zeros(centroids.shape)
it = 0
while not end_condition_true(centroids, old_centroids, it, max_iter):
# find nearest centroid for each data point
labels = get_labels(data, centroids)
old_centroids = centroids
# update centroids
centroids = update_centroids(data, labels, centroids)
it += 1
return labels
def end_condition_true(centroids, old_centroids, it, max_iter):
"""
Check whether the end condition for the k-means algorithm is met.
That is, whether centroids don't change or the maximum number of
iterations has been reached.
"""
return np.array_equal(centroids, old_centroids) or it >= max_iter
def get_centroids(k, dimensions, min_vals, max_vals):
"""
Returns ndarray of shape (k, d), where k is the number of randomly
placed centroids and d is the number of dimensions.
"""
centroids = []
# randomly place k centroids in the data space
while len(centroids) < k:
centroid = tuple(randint(min_vals[dim], max_vals[dim])
for dim in range(dimensions))
if centroid not in centroids:
centroids.append(centroid)
return np.array(centroids)
def get_labels(data, centroids):
"""
Find nearest centroid to each data point. Returns ndarray of shape (n, d)
containing labels (nearest centroid) of each data point.
"""
# i-th row of the distances matrix represents distances from each data
# point to the i-th centroid.
distances = np.empty(shape=(centroids.shape[0], data.shape[0]))
for i, c in enumerate(centroids):
x = data - c
distances[i] = np.linalg.norm(x, axis=1)
# i-th element in the nearest_idc matrix represents the index number
# of the nearest centroid.
nearest_idc = np.argmin(distances, axis=0)
return np.array([centroids[i] for i in nearest_idc])
# TODO make this function faster
def update_centroids(data, labels, centroids):
"""
Update current centroids with respect to the points' labels.
Returns ndarray of shape (k, d), where k is the number of centroids
and d is the number of dimensions.
"""
dimensions = data.shape[1]
sums = np.zeros(shape=(centroids.shape[0], dimensions), dtype=int)
labels_occ = np.zeros(shape=(centroids.shape[0], 1))
for d, l in zip(data, labels):
for dim in range(dimensions):
c_idx = get_centroid_idx(centroids, l)
sums[c_idx, dim] += d[dim]
labels_occ[c_idx] += 1
# if there was a centroid with no data points labeled with it,
# don't update it
for i, occ in enumerate(labels_occ):
if occ == 0:
sums[i] = centroids[i]
else:
for dim in range(dimensions):
sums[i, dim] = np.round(sums[i, dim]/occ)
return sums
def get_centroid_idx(centroids, c):
"""
Returns the index of a given centroid c. Assumes that centroids
is the ndarray of shape (k, d) where k is a number of centroids
    and d is a number of dimensions.
"""
return centroids.tolist().index(c.tolist())
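# Example usage (a minimal sketch on a tiny 2-D data set):
#
# data = np.array([[1, 1], [2, 1], [9, 9], [10, 8]])
# labels = kmeans(2, data, min_vals=[0, 0], max_vals=[10, 10])
# # labels[i] is the coordinate pair of the centroid assigned to data[i]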
|
{"hexsha": "a7feb4c7a83d9b312eb377d75e269d8920ef9a99", "size": 4496, "ext": "py", "lang": "Python", "max_stars_repo_path": "kmeans.py", "max_stars_repo_name": "adapiekarska/kmeans", "max_stars_repo_head_hexsha": "1b9e77646fadd6d9ab73c4b9feaacfe215d39744", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kmeans.py", "max_issues_repo_name": "adapiekarska/kmeans", "max_issues_repo_head_hexsha": "1b9e77646fadd6d9ab73c4b9feaacfe215d39744", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kmeans.py", "max_forks_repo_name": "adapiekarska/kmeans", "max_forks_repo_head_hexsha": "1b9e77646fadd6d9ab73c4b9feaacfe215d39744", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1142857143, "max_line_length": 78, "alphanum_fraction": 0.6483540925, "include": true, "reason": "import numpy", "num_tokens": 1054}
|
###############################################################################
#
# pttableau.py - Object to represent protein tableaux and functions to
# parse output of TableauCreator program into tableau object.
#
#
# File: pttableau.py
# Author: Alex Stivala
# Created: October 2007
#
#
# $Id: pttableau.py 2703 2009-07-27 06:01:05Z astivala $
#
###############################################################################
"""
Ths module contains routines to generate protein tableaux using the
axes fitted to SSEs by functions in the ptnode.py module, and the relative
angle calculate in that module.
This module also contains functions to parse the output of
Arun Konargurthu's TableauCreator program, which was used before
tableaux were re-implemented internally to this code.
NOTE: not yet published or available (as of October 2007).
IMPORTANT: this requires my modified version of TableauCreator,
and also a patched version of Bio.PDB file PDBIO.py :
context diff for patching it is Bio.PDB.PDBIO.py.diff
This patch was made relative to BioPython release 1.43.
Tableaux are described by Kamat and Lesk 2007
'Contact Patterns Between Helices and Strands of Sheet Define Protein
Folding Patterns' Proteins 66:869-876
and Lesk 2003 'From Electrons to Proteins and Back Again'
Int. J. Quant. Chem. 95:678-682
and Lesk 1995 'Systematic representation of folding patterns'
J. Mol. Graph. 13:159-164.
Two classes are provided, PTTableau and PTTableauPacked. The latter is
a more compact format based on LAPACK style symmetric matrix packed
array storage, useful for holding whole databse of tableaux in memory
(see buildtableauxdb.py) and dumping/loading it. They can be used
interchangeably when it comes to getting and setting with []
(__getitem__ and __setitem__). (They should probably both inherit from
some tableau base class to make this explicit, but it doesn't really
matter with Python 'duck typing', of which this is a use (abuse?).)
"""
import os,sys
from math import pi
import numpy.oldnumeric as Numeric
from Bio.PDB import *
from ptnode import *
from ptdomain import PTDomain
from ptsecstruct import pdb_chainid_to_stride_chainid
from ptutils import cleanup_tmpdir
#-----------------------------------------------------------------------------
#
# Module globals
#
#-----------------------------------------------------------------------------
# constants
TABLEAU_MIN_HELIX_LEN = 4 # min length of a helix in TableauCreator is 4
TABLEAU_MIN_STRAND_LEN = 2 # min length of a strand in TableauCreator is 2
# global variables
verbose = False
#-----------------------------------------------------------------------------
#
# Class definitions
#
#-----------------------------------------------------------------------------
class PTTableau:
"""
The PTTableau class is a protein tableau, as per Kamat and Lesk 2007
'Contact Patterns Between Helices and Strands of Sheet Define Protein
Folding Patterns' Proteins 66:869-876.
The tableau is a 2 dimensional symmetric matrix indexed by SSEs
in the protein where each entry
is a two character code representing the angle between those SSEs.
(See paper(s) for details).
We implement it as a mapping container, i.e. using __getitem__ and
    __setitem__ so that elements can be get/set with dictionary/array
type syntax e.g. tableau[(helix1,strand2)]. (index is a tuple of SSEs
represented by PTNode objects)- NB must have in parens to ensure tuple.
This is implemented with a standard dictionary object,
and since it is symmetric, only one copy is stored, the one where
i < j in (i,j) index; however either can be get/set, they are swapped
internally if necessary. Accessing (i,i) returns
'xa', 'xi', 'xg' for respectively alpha,pi,310 helices and
'e ' for strand.
"""
def __init__(self, nodelist):
"""
Intialize a PTTableau with no tableau entries set yet.
Parameters:
            nodelist - list of nodes that will be in the tableau, in
residue sequence number order.
"""
        self.tabdict = {}   # { (sse1, sse2) : code }; see class documentation
self.nodelist = nodelist # ptnodes with those not in tableau removed
def __str__(self):
"""
Return string representation of the tableau; we will write a full matrix
just like TableauCreator does.
"""
s = ""
for sse1 in self.nodelist:
for sse2 in self.nodelist:
try:
s += self[(sse1, sse2)] + ' '
except KeyError:
s += "?? "
s += '\n'
return s
#
# methods defined to implement container type
#
def __len__(self):
"""
Return number of SSEs in the tableau
Parameters: None
Return value: Number of SSEs in nodelist for building tableau
"""
return len(self.nodelist)
def __getitem__(self, ssepair):
"""
Return the entry in the tableau for the pair of SSEs
(sse1, sse2) where sse1 and sse2 are PTNode objects;
or if ssepair is (i,j) where i,j are integers, the corresponding
tableau entry for (nodelist[i], nodelist[j]).
Parameters:
ssepair - tuple (sse1,sse2) (PTNode objects) or
tuple (i,j) (integers) to look up tableau entry for
Return value:
two character tableau string e.g. 'RD' or 'HH', or ' ' (2 spaces).
          On the main diagonal (self-orientation), since this has no meaning,
we return a (two-char) encoding of the SSE type instead:
'xa', 'xi', 'xg' for respectively alpha,pi,310 helices and
'e ' for strand.
Raises Exceptions:
TypeError if ssepair is not PTNode pair or int pair.
"""
ssespec1 = ssepair[0]
ssespec2 = ssepair[1]
if isinstance(ssespec1, PTNode) and isinstance(ssespec2, PTNode):
sse1 = ssespec1
sse2 = ssespec2
elif isinstance(ssespec1, int) and isinstance(ssespec2, int):
sse1 = self.nodelist[ssespec1]
sse2 = self.nodelist[ssespec2]
else:
raise TypeError("bad tuple type in PTTableau getitem")
if sse1 == sse2:
if isinstance(sse1, PTNodeHelix):
if sse1.get_type() == "ALPHA":
return "xa"
elif sse1.get_type() == "PI":
return "xi"
elif sse1.get_type() == "310":
return "xg"
else:
return "??" # should not happen
elif isinstance(sse1, PTNodeStrand):
return "e "
else:
return "??" # should not happen
elif sse1 < sse2:
ssepair = (sse1,sse2)
else:
ssepair = (sse2,sse1)
return self.tabdict[ssepair]
def __setitem__(self, ssepair, tabcode):
"""
Set the entry in the tableau for the pair of SSEs (sse1,sse2)
specified as the key (ssepair) parameter to the tabcode value.
Parameters:
ssepair - tuple (sse1,sse2) to set.
            tabcode - two character tableau string e.g. 'RD' or 'HH', or ' '.
Return value: None
Raises exceptions:
TypeError if tabcode is not a valid 2 char uppercase string or ' '
"""
        if len(tabcode) != 2 or (not tabcode.isupper() and not tabcode.isspace()):
raise TypeError("bad tableau code '" + tabcode + "'\n")
if (tabcode[0] not in ['L','R','P','O'] or \
tabcode[1] not in ['E','D','S','T']) and \
tabcode != 'HH' and tabcode != 'KK':
raise TypeError("bad tableau code '" + tabcode + "'\n")
sse1 = ssepair[0]
sse2 = ssepair[1]
if sse1 == sse2:
return
elif sse1 < sse2:
ssepair = (sse1, sse2)
else:
ssepair = (sse2, sse1)
self.tabdict[ssepair] = tabcode
# have not implemented: __delitem__, __iter__, __contains__
# TODO: work out how to implement things like tab[2:] to get row 2,
    # just like Numeric.array etc.
def getrow(self, i):
"""
Return a row of the tableau as a list of tableau codes.
Parameters:
i - row to get 0 <= i < len(self)
Return value:
list of two-character tableau codes for row i.
"""
return [self[(i,j)] for j in xrange(len(self))]
class PTTableauPacked:
"""
The PTTableauPacked class is a compact representation
of a protein tableau, as per Kamat and Lesk 2007
'Contact Patterns Between Helices and Strands of Sheet Define Protein
Folding Patterns' Proteins 66:869-876.
The tableau is a 2 dimensional symmetric matrix indexed by SSEs
in the protein where each entry
is a two character code representing the angle between those SSEs.
(See paper(s) for details).
We implement it as a mapping container, i.e. using __getitem__ and
    __setitem__ so that elements can be get/set with dictionary/array
type syntax e.g. tableau[(1,2)]. (index is a pair of sequential
SSE numbers, from 0 to n-1 where n is the order of tableau ie number
of SSEs) - NB must have in parens to ensure tuple.
This is the compact respresentation, storing tableau as simply
a linear string of two-character tableau codes, in the same as as the
LAPACK 'packed' format for triangular/symmetric arrays. i.e.
each column of the matrix is stored in sequence.
We could save even more space by using only 4 bits for each tableau
code (since there are only 16 possible codes), but in Python
it doesn't really make sense to try to be so efficient - but
we are trying to save space to some degree so that the entire ASTRAL
PDB non-redundant set or similar can be loaded as tableaux in
memory.
As it happens, strings in python don't even support item assignemnt,
so we have to store it as a list anyway i.e ['xa','OT',...]
instead of 'xaOT...'
Unlike PTTableau, this format contains just the tableau codes
and diagonal SSE type entries, i.e. just character data. there
are no PTNode object references or anything, so it is simple and
quick to dump/load with Python pickle module (or similar) with
no need to build all sorts of other objects (PTNode, Bio.PDB.Structure,
etc.).
Accessing (i,i) returns 'xa', 'xi', 'xg' for respectively
alpha,pi,310 helices and 'e ' for strand.
"""
def __init__(self, tableau):
"""
Intialize a PTTableauPacked given an already built tableaux in
the full PTTableau format.
Parameters:
tableau - an already built PTTableau object
"""
#self.n = 3
#self.uplist = ['xa','OS','e ','OT','PE','xa']
self.n = len(tableau) # order of tableau (number of SSEs)
self.uplist = [] # packed format of matrix upper triangle
# NB COLUMN-MAJOR (LAPACK style)
for j in range(self.n):
for i in range(j+1):
try:
tabcode = tableau[(i,j)]
except:
tabcode = '??'
self.uplist.append(tabcode)
assert(len(self.uplist) == self.n * (self.n + 1) / 2)
def __str__(self):
"""
Return string representation of the tableau; we will write a full matrix
just like TableauCreator does.
"""
s = ""
for i in range(self.n):
for j in range(self.n):
s += self[(i,j)] + ' '
s += '\n'
return s
#
# methods defined to implement container type
#
def __len__(self):
"""
        Return number of SSEs represented in the tableau
Parameters: None
Return value: order of tableau
"""
return self.n
def __getitem__(self, ssepair):
"""
Return the entry in the tableau for the pair of SSEs
(i,j) where i,j are integers, 0 <= i,j < n.
Parameters:
ssepair - tuple (i,j) (integers) to look up tableau entry for
Return value:
two character tableau string e.g. 'RD' or 'HH', or ' ' (2 spaces).
          On the main diagonal (self-orientation), since this has no meaning,
we return a (two-char) encoding of the SSE type instead:
'xa', 'xi', 'xg' for respectively alpha,pi,310 helices and
'e ' for strand.
Raises Exceptions:
            TypeError if ssepair is not a pair of integers.
"""
i = ssepair[0] + 1
j = ssepair[1] + 1 # more convenient to have 1 < i,j <= n internally
if j < i:
tmp = i
i = j
j = tmp
r = i + j*(j-1)/2
r -= 1 # back to zero-based for list indexing
return self.uplist[r]
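    # Example of the packed indexing above (a minimal sketch): for order
    # n = 3 the upper triangle is stored column-major as the entries
    # (0,0),(0,1),(1,1),(0,2),(1,2),(2,2), so entry (i,j) with i <= j
    # lives at offset i + j*(j+1)/2 in self.uplist.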
def __setitem__(self, ssepair, tabcode):
"""
Set the entry in the tableau for the pair of SSEs (sse1,sse2)
specified as the key (ssepair) parameter to the tabcode value.
Parameters:
ssepair - tuple (i,j) to set, 0 <= i,j < n.
            tabcode - two character tableau string e.g. 'RD' or 'HH',
or 'xa','e ' etc. if i==j for SSE type on diagonal.
Return value: None
Raises exceptions:
TypeError if tabcode is not a valid 2 char uppercase string
or lowercase type code for diagonal.
"""
i = ssepair[0]
j = ssepair[1]
if i == j:
if tabcode not in ['xa','xi','xg','e ']:
raise TypeError('bad tableau sse type code ' + tabcode + '\n')
else:
if len(tabcode) != 2 or not tabcode.isupper():
raise TypeError("bad tableau code '" + tabcode + "'\n")
if ( (tabcode[0] not in ['L','R','P','O'] or
tabcode[1] not in ['E','D','S','T']) and
tabcode != 'HH' and tabcode != 'KK' ):
raise TypeError("bad tableau code '" + tabcode + "'\n")
if j < i:
tmp = i
i = j
j = tmp
i += 1
j += 1 # more convenient to have 1 < i,j <= n internally
r = i + j*(j-1)/2 # location in packed rep assuming each entry len 1
r -= 1 # back to zero-based for list indexing
self.uplist[r] = tabcode
# have not implemented: __delitem__, __iter__, __contains__
# TODO: work out how to implement things like tab[2:] to get row 2,
    # just like Numeric.array etc.
def getrow(self, i):
"""
Return a row of the tableau as a list of tableau codes.
Parameters:
i - row to get 0 <= i < len(self)
Return value:
list of two-character tableau codes for row i.
"""
# TODO: we could do this more efficiently for packed tableau
return [self[(i,j)] for j in xrange(len(self))]
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def angle_to_tabcode(omega):
"""
Convert an angle (radians in (-pi, pi]) to a two-character tableau
code (double quadrant encoding) as described in the papers cited
at top of module.
    Parameters:
omega - relative angle to encode
Return value:
        two-character tableau code (OS, PD, etc.)
Raises exceptions:
ValueError if angle is out of range
"""
if omega > -pi/4 and omega <= pi/4:
tabcode = 'P' # parallel
elif omega > pi/4 and omega <= 3*pi/4:
tabcode = 'R' # crossing-right
elif (omega > 3*pi/4 and omega <= pi) or omega > -pi and omega <= -3*pi/4:
tabcode = 'O' # antiparallel (opposite)
elif omega > -3*pi/4 and omega <= -pi/4:
tabcode = 'L' # crossing-left
else:
raise ValueError('bad omega value ' + str(omega) + '\n')
if omega > 0 and omega <= pi/2:
tabcode += 'D' # dinner
elif omega > pi/2 and omega <= pi:
tabcode += 'T' # tea
elif omega > -pi and omega <= -pi/2:
tabcode += 'S' # supper
elif omega > -pi/2 and omega <= 0:
tabcode += 'E' # elevenses
else:
raise ValueError('bad omega value ' + str(omega) + '\n')
return tabcode
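# Worked example (a minimal sketch): omega = pi/3 lies in (pi/4, 3*pi/4],
# giving first character 'R' (crossing-right), and in (0, pi/2], giving
# second character 'D', so angle_to_tabcode(pi/3) == 'RD'.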
def compute_tableau(ptnode_list, pdb_structure, use_hk=True):
"""
Build a PTTableau object for the tableau by computing relative angles
between all SSEs in the ptnode_list.
Parameters:
ptnode_list - list of PTNode objects (ie iterable of PTNode)
representing the SSEs (helices,strands) the
                       tableau is for.
pdb_structure - parsed Bio.PDB structure
use_hk - If True, use the HH and KK codes for respectively
antiparallel and parallel strands of the same sheet.
Default True.
Return value:
PTTableau object with entry for each pair of SSEs.
"""
tableau = PTTableau(ptnode_list)
for i in range(len(ptnode_list)):
for j in range(i+1, len(ptnode_list)):
omega = ptnode_list[i].relative_angle(ptnode_list[j], pdb_structure)
if omega != None:
try:
tabcode = angle_to_tabcode(omega)
except ValueError:
                    sys.stderr.write('WARNING: caught bad tableau angle, setting Parallel (%d,%d)\n' % (i,j))
tabcode = "PE" # NaN -> 0.0 -> parallel: should not happen but does e.g. d7pcka_ -35
# set tabcode to HH for antiparallel strands and
# KK for parallel strands
if (use_hk and
isinstance(ptnode_list[i], PTNodeStrand) and
isinstance(ptnode_list[j], PTNodeStrand) and
(ptnode_list[i].get_sheet_id() != None and
ptnode_list[i].get_sheet_id() ==
ptnode_list[j].get_sheet_id())):
if tabcode[0] == 'O':
tableau[(ptnode_list[i], ptnode_list[j])] = 'HH'
elif tabcode[0] == 'P':
tableau[(ptnode_list[i], ptnode_list[j])] = 'KK'
else:
tableau[(ptnode_list[i], ptnode_list[j])] = tabcode
else:
tableau[(ptnode_list[i], ptnode_list[j])] = tabcode
if verbose:
sys.stderr.write(str(tableau))
return tableau
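# Example usage (a minimal sketch; 'nodes' is a list of helix/strand PTNodes
# and 'structure' a parsed Bio.PDB structure for the same domain):
#
# tab = compute_tableau(nodes, structure)
# print tab[(0, 1)]   # two-character code for the first pair of SSEs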
def compute_omega_matrix(ptnode_list, pdb_structure):
"""
Return the omega (relative angle, in radians) matrix as a 2D Numeric.array
by computing relative angles between all SSEs in the ptnode_list
Parameters:
ptnode_list - list of PTNode objects (ie iterable of PTNode)
representing the SSEs (helices,strands) the
                       tableau is for.
pdb_structure - parsed Bio.PDB structure
Return value:
Numeric.array square symmetric (order length of ptnode_list) where
each entry is relative angle between SSEs in radians.
Main diagonal entries set to 0.
"""
n = len(ptnode_list)
omega_array = Numeric.zeros((n, n), Numeric.Float)
for i in range(n):
for j in range(i+1, n):
omega = ptnode_list[i].relative_angle(ptnode_list[j], pdb_structure)
if omega == None:
omega_array[i, j] = float('NaN')
else:
omega_array[i, j] = omega
omega_array[j, i] = omega_array[i, j]
# set the diagonal as follows:
# 0.00 for strand
# 1.00 for alpha helix
# 2.00 for pi helix
# 3.00 for 3_10 helix
for i in range(n):
if isinstance(ptnode_list[i], PTNodeHelix):
if ptnode_list[i].get_type() == "ALPHA":
v = 1.00
elif ptnode_list[i].get_type() == "PI":
v = 2.00
elif ptnode_list[i].get_type() == "310":
v = 3.00
else:
pass # should not happen
elif isinstance(ptnode_list[i], PTNodeStrand):
v = 0.00
omega_array[i,i] = v
return omega_array
#-----------------------------------------------------------------------------
#
# Classes and functions for running external TableauCreator
#
#-----------------------------------------------------------------------------
# Inherit from the PDBIO.Select class for writing only parts of PDB to file
# See the Bio.PDB documentation: biopython-1.43/Doc/biopdb_faq.pdf
class DomainSelect(Select):
"""
The DomainSelect class inherits from the PDBIO.Select class
and overrides function to select only certain residues for writing
ATOM records in the domain we are interested in to the
simplified PDB file for TableauCreator.
See the Bio.PDB documentation by Thomas Hamelryck:
biopython-1.43/Doc/biopdb_faq.pdf
"""
def __init__(self, domain):
"""
Constructor for the DomainSelect class, sets the domain member
used to accept only residues in that domain.
Parameters:
domain - ptdomain object of domain to select residues from
"""
self.domain = domain
def __repr__(self):
"""
Overrides the base __repr__ to write out the domain we have
"""
return "<DomainSelect: " + str(self.domain) + ">"
def accept_residue(self, residue):
"""
overrides the base accept_residue() function to accept only
residues in our domain. Also reject HETATMS.
Paramteters:
residue - Bio.PDB Residue object of residue to test
Return value:
1 to accept residue, 0 to reject.
"""
chain = residue.get_parent()
chainid = pdb_chainid_to_stride_chainid(chain.get_id())
# id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
# so we choose those where chain and residue PDB number
# is in the domain.
resnum = residue.get_id()[1]
if (self.domain.is_in_domain(chainid, resnum) and
residue.get_id()[0] == ' '):
return 1
else:
return 0
def get_tableau_from_pdbstruct(pdbid, domain,
pdb_structure, ptnode_list):
"""
Build a PTTableau object for the tableau by first creating a
simple PDB file with only the ATOM records for residues in the
domain we are processing, and also a .SSEsInfo file containing the
secnodary structure assignments we already have, then running
TableauCreator on it (using our simple PDB file and SSEsInfo) and
parsing the output.
Parameters:
        pdbid - PDB identifier of the structure
domain - The PTDomain object for our current domain
pdb_structure - parsed Bio.PDB structure
ptnode_list - list of PTNode objects (ie iterable of PTNode)
representing the SSEs (helices,strands) the
tabelau is for.
Return value:
PTTableau object built from TableauCreator output
"""
TMPDIR = os.tempnam(None, "pttabin")
os.mkdir(TMPDIR)
try:
filename = pdbid
if domain.domainid != None:
filename += '-' + domain.domainid
filename += '.pdb'
domain_pdb_filename = os.path.join(TMPDIR, filename)
io = PDBIO()
io.set_structure(pdb_structure)
io.save(domain_pdb_filename, DomainSelect(domain))
ssesinfo_filename = os.path.join(TMPDIR, filename + ".input-SSEsInfo")
write_ssesinfo(ssesinfo_filename, ptnode_list)
tableau = read_tableau_from_tableaucreator(domain_pdb_filename,
ptnode_list,
ssesinfo_filename)
os.unlink(domain_pdb_filename)
os.unlink(ssesinfo_filename)
finally:
cleanup_tmpdir(TMPDIR)
return tableau
def read_tableau_from_tableaucreator(pdb_filename, ptnode_list,
ssesinfo_filename):
"""
Run Arun's TableauCreator program on the supplied pdb_filename
using SSEsInfo file.
Parameters:
pdb_filename - PDB file to run TableauCreator on
ptnode_list - list of PTNode objects (ie iterable of PTNode)
representing the SSEs (helices,strands) the
                       tableau is for.
ssesinfo_filename - filename of the .SSEsInfo file that was written
to define SSEs for TableauCreator.
Return value:
PTTableau object built from TableauCreator output
NB: TableauCreator is not yet published or available (October 2007)
and I am using a private version which Arun sent me, which I modified
to add the -s option to use STRIDE rather than DSSP
and to have the -i option to parse .SSEsInfo files.
"""
# TableauCreator needs an output directory where it writes all its
# intermediate/output files, only puts progress information/errors
# to stdout/stderr.
tmpdir = os.tempnam(None, "pttab")
os.mkdir(tmpdir)
command = "TableauCreator "
command += "-i " + ssesinfo_filename + " "
command += pdb_filename + " " + tmpdir
command += " >/dev/null"
if verbose:
sys.stderr.write("running '" + command + "'...")
os.system(command)
if verbose:
sys.stderr.write("done\n")
# output files are:
# <pdbfilename>.angles
# <pdbfilename>.SSEsInfo
# <pdbfilename>.stride or <pdbfilename>.dssp
# <pdbfilename>.tableau
outfile_prefix = os.path.join(tmpdir, os.path.basename(pdb_filename))
if not os.path.isfile(os.path.join(tmpdir, "TABCREATE_OK")):
sys.stderr.write("ERROR: TableauCreator failed\n")
cleanup_tmpdir(tmpdir)
return None
# Now the tricky thing is TableauCreator indexes its matrix just with
# purely sequential numbers from 0 (as conventional)
# assuming all SSEs in one domain and in fact one chain
# (so we handle this by creating our own simple PDB file with only
# ATOM records for our current domain, and only one TER record on
# end so chains concatenated effectively).
# And also (as in comments above functions) we have the dodginess of
# doing the same thing in different ways in multiple places (DSSP/STRIDE
# parsing, PDB parsing, etc.).
# So let's check that the TableauCreator SSE info lines up with ours
# (otherwise we can't use the tableau data).
# parse the SSEsInfo file and check lines up with ptnodes,
# returns list of ptnodes corresponding to Tableau entries (may be shorter
    # than our input node list; some removed as no equivalent in tableau).
nodelist = parse_tableaucreator_ssesinfo(outfile_prefix + '.SSEsInfo',
ptnode_list)
if nodelist != None:
tableau_filename = outfile_prefix + ".tableau"
tableau = parse_tableaucreator_output(tableau_filename, nodelist)
if tableau != None:
if verbose:
sys.stderr.write(str(tableau))
else:
sys.stderr.write('WARNING: problem parsing TableauCreator output;\n'
' tableau information will not be used\n')
else:
sys.stderr.write('WARNING: problem with TableauCreator output;\n'
' tableau information will not be used\n')
tableau = None
cleanup_tmpdir(tmpdir)
return tableau
def parse_tableaucreator_ssesinfo(filename, nodelist):
"""
Parse the .SSEsInfo file created by TableauCreator and check that
it lines up with our SSE info in the form of the list of helix/strand
PTNodes.
Parameters:
filename - filename of the .SSEsInfo file
nodelist - list of PTNodes, in order of residue sequence number
Return value:
nodelist where nodes with no tableau entry removed (too short) if
OK (they line up) else None (different number of nodes,
residue sequence numbers/types don't match, etc).
"""
# first remove all nodes with len < TABLEAU_MIN_SSE_LEN, since Tableau
# Creator won't have entries for them; we will have to set them
# ptnodelist = [ node for node in nodelist \
# if ( (isinstance(node, PTNodeHelix) and
# node.get_span() >= TABLEAU_MIN_HELIX_LEN) or
# (isinstance(node, PTNodeStrand) and
# node.get_span() >= TABLEAU_MIN_STRAND_LEN) )
# ]
ptnodelist = nodelist
# FIXME: no longer need this filtering of too short SSEs,
# now that .SSEsInfo input is being used
if len(ptnodelist) != len(nodelist):
sys.stderr.write('WARNING: no tableau entry for '
+ str(len(nodelist)-len(ptnodelist)) +
                         ' nodes due to length too small'
+ '\n')
fh = open(filename)
# first line is number of SSEs, subsequent lines are
# type start_resnum end_resnum
# where type is E or H (DSSP code) and resnums are PDB residue numbers
#
numlines = int(fh.readline())
if numlines < 2:
sys.stderr.write('ERROR: bad SSEsInfo data\n')
fh.close()
return None
linenum = 1
sseinfo = [] # tuple (type, start, end)
line = fh.readline()
while line != "":
fields = line.split()
sseinfo.append((fields[0], int(fields[1]), int(fields[2])))
linenum += 1
line = fh.readline()
fh.close()
if len(sseinfo) != numlines:
sys.stderr.write('ERROR: TableauCreator SSEsInfo file specified ' \
+ str(numlines) + ' entries but ' \
+ str(len(sseinfo)) + ' read\n')
return None
if len(sseinfo) != len(ptnodelist):
sys.stderr.write('ERROR: TableauCreator SSEsInfo file has ' \
+ str(numlines) + ' entries but ' \
+ 'we have ' + str(len(ptnodelist)) + ' SSEs\n')
return None
for i in range(len(sseinfo)):
sse = sseinfo[i]
ptnode = ptnodelist[i]
if sse[0] == 'H' and not isinstance(ptnode, PTNodeHelix) or \
sse[0] == 'E' and not isinstance(ptnode, PTNodeStrand) or \
sse[1] != ptnode.get_start_res_seq() or \
sse[2] != ptnode.get_end_res_seq():
sys.stderr.write('ERROR: TableauCreator SSEInfo entry ' + \
str(sse) + ' does not match node ' +
str(ptnode) + '\n')
return None
return ptnodelist
def parse_tableaucreator_output(filename, nodelist):
"""
Parse the .tableau file created by TableauCreator
Parameters:
filename - filename of the .tableau file
nodelist - list of PTNodes, in order of residue sequence number
Return value:
PTTableau for the tableau parsed
"""
tableau = PTTableau(nodelist)
# First line of file is number of SSEs (order of square matrix)
# The whole matrix is in the file (it is symmetric), diagonal elements
# and other unset (ie non-contact) elements are set to '--', or ' '
# (two spaces) when cannot be calculated.
fh = open(filename)
numlines = int(fh.readline())
if numlines < 2:
sys.stderr.write('ERROR: bad tableau data\n')
fh.close()
return None
linenum = 1
line = fh.readline()
i = 0
while line != "":
# we will use the fact that fields are fixed length rather than
# splitting on space separator as fields may be set to ' ' (2 spaces)
# when error calculating (helices too short etc.) in TableauCreator.
# fields are two chars with two spaces between each
node_i = nodelist[i]
if len(line) < len(nodelist) * 4:
sys.stderr.write('ERROR: bad line in tableau; line too short:\n')
sys.stderr.write(line)
fh.close()
return None
for j in range(i+1, len(nodelist)): # no need to store both i,j and j,i
node_j = nodelist[j]
col = 4*j # each field as 2 char tabcode then 2 spaces
tabcode = line[col:col+2]
if tabcode != ' ' and tabcode != '--':
tableau[(node_i, node_j)] = line[col:col+2]
i += 1
linenum += 1
line = fh.readline()
fh.close()
return tableau
def write_ssesinfo(filename, nodelist):
"""
Write a TableauCreator .SSEsInfo file describing the SSE asignments
we have to the specified filename. This is so that we avoid having
TableauCreator re-run DSSP or STRIDE for the assignments, which is
inefficient and leads to inconsistencies. TableauCreator has been
modified to be able to read this .SSEsInfo file instead, allowing
the same assignments we have to be re-used by TableauCreator.
WARNING: file is overwritten if it exists
Parameters:
        filename - filename to write SSEsInfo to
nodelist - list of PTNodes defining the SSEs
Return value: None
"""
# The format of the .SSEsInfo file is (see writeTableauAnglesSSEinfo())
# that the first line has number of records and each subsequent
# line (record)
# is whitespace-separated:
# dssp-code start end chainid
# e.g.
# H 10 21 A
# Only H and E codes are used.
# blank chainid is not allowed, '-' used instead.
fh = open(filename, 'w')
fh.write(str(len(nodelist)) + "\n")
for node in nodelist:
if isinstance(node, PTNodeHelix):
typecode = 'H'
elif isinstance(node, PTNodeStrand):
typecode = 'E'
else:
assert(False)
fh.write(typecode + " " + str(node.get_start_res_seq()) + " " +
str(node.get_end_res_seq()) + " " + node.get_chainid() + "\n")
fh.close()
def pttableau_set_verbose(verb):
"""
set the module global verbose flag in this module to supplied value
Parameters: verb - True (for verbose output) or False
Return value: None
Uses globals: verbose (in this module)
"""
global verbose
verbose = verb
|
{"hexsha": "961c07c58a25bb9f557107c4b25f4e0b6bb866a2", "size": 34863, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/pttableau.py", "max_stars_repo_name": "stivalaa/cuda_satabsearch", "max_stars_repo_head_hexsha": "b947fb711f8b138e5a50c81e7331727c372eb87d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/pttableau.py", "max_issues_repo_name": "stivalaa/cuda_satabsearch", "max_issues_repo_head_hexsha": "b947fb711f8b138e5a50c81e7331727c372eb87d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/pttableau.py", "max_forks_repo_name": "stivalaa/cuda_satabsearch", "max_forks_repo_head_hexsha": "b947fb711f8b138e5a50c81e7331727c372eb87d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.286631016, "max_line_length": 106, "alphanum_fraction": 0.5833118206, "include": true, "reason": "import numpy", "num_tokens": 8437}
|
import glob
import os
from typing import Dict, Optional, Tuple, Union
import numpy as np
import rasterio
import torch
import torch.nn.functional as F
def get_paths(img_dir: str, label_dir: str) -> Tuple[list, list]:
    os.chdir(label_dir)
    label_paths, img_paths = [], []
    for filepath in glob.glob("*_SR.tif"):
        label_paths.append(label_dir + filepath)
        print(filepath, "appended to list.")
        # Search img_dir for an image file sharing the label file's name.
        for root, dirs, files in os.walk(img_dir):
            img_path = filepath  # image shares the label file's name
            if img_path in files:
                print("File exists. Adding to img_paths...")
                img_paths.append(root + '/' + img_path)
    return img_paths, label_paths
def make_filepaths_dict(img_dir: str, label_dirs: list) -> dict:
filepaths = {}
for label_dir in label_dirs:
img_paths, label_paths = get_paths(img_dir, label_dir)
for img_path, label_path in zip(img_paths, label_paths):
if img_path in filepaths.keys():
filepaths[img_path] += [label_path]
else:
filepaths[img_path] = [label_path]
return filepaths
def features_to_patches(features: torch.Tensor,
size: int,
) -> torch.Tensor:
W = features.shape[1] * size
H = features.shape[2] * size
    # assumes each feature vector reshapes to 4 channels x 5x5 (hard-coded below)
    patches = features.flatten(0, end_dim=-2).unflatten(
        0, (int(W / size), int(H / size))).unflatten(
            -1, (4, 5, 5)).swapaxes(0,2)[None, :, :, :, :, :]
return patches
def patches_to_array(patches: torch.Tensor,
size: int,
stride: int,
B: int = 1,
C: int = 4) -> torch.Tensor:
H = patches.shape[2] * patches.shape[-2]
W = patches.shape[3] * patches.shape[-1]
patches = patches.contiguous().view(B, C, -1, size*size)
patches = patches.permute(0, 1, 3, 2)
patches = patches.contiguous().view(B, C*size*size, -1)
output = F.fold(patches.to(torch.float), output_size=(H, W),
kernel_size=size, stride=stride)
output = output.to(torch.int)
return output
def unflatten_preds(y: Union[np.ndarray, torch.Tensor],
H: int,
W: int,
size: int,
stride: int) -> torch.Tensor:
if size != stride:
raise NotImplementedError
if type(y) != torch.Tensor:
y = torch.tensor(y)
    assert len(y.shape) == 1, 'y must have a single axis.'
y = y[None, None, :, None]
y = y.unflatten(2, (W, H)).expand(
-1, -1, -1, -1, size * size).unflatten(-1, (size, size))
return y
def save_as_raster(raster_mask: Union[np.ndarray, torch.Tensor],
like: str,
save_path: str
) -> None:
if type(raster_mask) == torch.Tensor:
raster_mask = raster_mask.numpy()
with rasterio.open(like, 'r') as ds:
# Register GDAL format drivers and configuration options with a
# context manager.
with rasterio.Env():
# Write an array as a raster band to a new 8-bit file. For
# the new file's profile, we start with the profile of the source
profile = ds.profile
# And then change the band count to 1, set the
# dtype to uint8, and specify LZW compression.
profile.update(
dtype=rasterio.uint8,
count=1,
compress='lzw')
with rasterio.open(save_path, 'w', **profile) as dst:
dst.write(raster_mask.astype(rasterio.uint8), 1)
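# Example usage (a minimal sketch; file names are hypothetical and the mask
# dimensions are assumed to match the reference raster):
#
# mask = torch.zeros(512, 512, dtype=torch.uint8)
# save_as_raster(mask, like='scene_SR.tif', save_path='scene_mask.tif')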
def make_training_data_filepaths_dict(feature_dir: str, label_dir: str) -> dict:
if feature_dir != label_dir:
raise NotImplementedError
filepaths = {}
os.chdir(feature_dir)
for filepath in glob.glob("*.pt"):
path_split = filepath.split('_')
if 'features.pt' in path_split:
idx = path_split.index('features.pt')
new_path_elems = filepath.split('/')[-1].split('_')[:idx] + \
['labels.pt'] + filepath.split('/')[-1].split('_')[idx+1:]
label_path = '/'.join(filepath.split('/')[:-1]) + \
'_'.join(new_path_elems)
filepaths[feature_dir + filepath] = [feature_dir + label_path]
return filepaths
def make_x_y(features: torch.Tensor,
labels: torch.Tensor,
only_non_negative: bool,
sample_size: Optional[int] = None
             ) -> Tuple[np.ndarray, np.ndarray]:
if not only_non_negative:
return features.numpy(), labels.numpy()
else:
        non_null_rows = features.any(dim=-1)
        pl_non_null = labels[non_null_rows]
        features_non_null = features[non_null_rows, :]
if sample_size is not None:
indices = torch.randperm(pl_non_null.shape[0])[:sample_size]
else:
raise NotImplementedError
charcoal_sample_rows = torch.where(pl_non_null)[0]
non_null_sample_indices = torch.cat((indices, charcoal_sample_rows))
X = features_non_null[non_null_sample_indices]
y = pl_non_null[non_null_sample_indices]
X = X.numpy()
y = y.numpy()
return X, y
def make_data(filepaths: Union[Dict[str, list], list],
only_non_negative: Optional[bool] = False,
num_null_samples_per_file: Optional[int] = None,
target_class: int = 1,
) -> dict:
data_dict = {} # Stores dimensional data for future processing
if type(filepaths) == list:
for feature_path in filepaths:
features = torch.load(feature_path)
# Get dimensional data
B, W, H, F_dim = features.shape
assert B == 1
# Flatten
features = features.flatten(end_dim=-2)
X = features.numpy()
data_dict[feature_path] = {'X': X, 'y': None, 'W': W, 'H': H}
else:
for feature_path, label_paths in filepaths.items():
if len(label_paths) > 1:
raise NotImplementedError
for label_path in label_paths:
print('Loading %s ...' % feature_path)
labels = torch.load(label_path)
features = torch.load(feature_path)
# Get dimensional data
B, W, H, F_dim = features.shape
assert B == 1
# Flatten
features = features.flatten(end_dim=-2)
labels = torch.any(labels == target_class, dim=-1).flatten()
assert labels.shape[0] == features.shape[0]
X, y = make_x_y(features, labels, only_non_negative,
num_null_samples_per_file)
data_dict[feature_path] = {'X': X, 'y': y, 'W': W, 'H': H}
return data_dict
def make_training_arrays(data_dict: dict,
                         no_labels: bool = False
                         ) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
if no_labels:
X = []
for feature_name, data in data_dict.items():
X += [data['X']]
X = np.vstack(X)
return X
else:
X, y = [], []
for feature_name, data in data_dict.items():
X += [data['X']]
y += [data['y'][:, None]]
X = np.vstack(X)
y = np.vstack(y)
return X, y
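# Typical pipeline (hypothetical paths): build the feature->label path mapping,
# load and flatten the tensors, then stack everything into model-ready arrays.
#   paths = make_training_data_filepaths_dict('/data/tiles/', '/data/tiles/')
#   X, y = make_training_arrays(make_data(paths, only_non_negative=True,
#                                         num_null_samples_per_file=1000))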
|
{"hexsha": "450bf54cf4bfb9c7fbc3139b1a1a137435afc791", "size": 7508, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/raster_tiles/raster_tile_io.py", "max_stars_repo_name": "rcorrero/charcoal", "max_stars_repo_head_hexsha": "bd91a6a25960acdfafa1fd6a3be0839357a9e7ee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/raster_tiles/raster_tile_io.py", "max_issues_repo_name": "rcorrero/charcoal", "max_issues_repo_head_hexsha": "bd91a6a25960acdfafa1fd6a3be0839357a9e7ee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/raster_tiles/raster_tile_io.py", "max_forks_repo_name": "rcorrero/charcoal", "max_forks_repo_head_hexsha": "bd91a6a25960acdfafa1fd6a3be0839357a9e7ee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5829383886, "max_line_length": 80, "alphanum_fraction": 0.5528769313, "include": true, "reason": "import numpy", "num_tokens": 1769}
|
[STATEMENT]
lemma map_map_rexp:
"map_rexp f (map_rexp g r) = map_rexp (\<lambda>r. f (g r)) r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_rexp f (map_rexp g r) = map_rexp (\<lambda>r. f (g r)) r
[PROOF STEP]
unfolding rexp.map_comp o_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_rexp (\<lambda>x. f (g x)) r = map_rexp (\<lambda>r. f (g r)) r
[PROOF STEP]
..
|
{"llama_tokens": 185, "file": "Regular-Sets_Regular_Exp", "length": 2}
|
import numpy as np
class Activation:
def __call__(self, incoming):
raise NotImplementedError
def delta(self, incoming, outgoing, above):
"""
Compute the derivative of the cost with respect to the input of this
activation function. Outgoing is what this function returned in the
forward pass and above is the derivative of the cost with respect to
the outgoing activation.
"""
raise NotImplementedError
class Identity(Activation):
def __call__(self, incoming):
return incoming
def delta(self, incoming, outgoing, above):
delta = np.ones(incoming.shape).astype(float)
return delta * above
class Sigmoid(Activation):
def __call__(self, incoming):
return 1 / (1 + np.exp(-incoming))
def delta(self, incoming, outgoing, above):
delta = outgoing * (1 - outgoing)
return delta * above
class Relu(Activation):
def __call__(self, incoming):
return np.maximum(incoming, 0)
def delta(self, incoming, outgoing, above):
delta = np.greater(incoming, 0).astype(float)
return delta * above
class Softmax(Activation):
def __call__(self, incoming):
# The constant doesn't change the expression but prevents overflows.
constant = np.max(incoming)
exps = np.exp(incoming - constant)
return exps / exps.sum()
def delta(self, incoming, outgoing, above):
delta = outgoing * above
sum_ = delta.sum(axis=delta.ndim - 1, keepdims=True)
delta -= outgoing * sum_
return delta
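# Sketch (not part of the original module): Softmax.delta computes J @ above,
# where J = diag(s) - s s^T is the (symmetric) softmax Jacobian; the check
# below compares the two for a random input.
def _check_softmax_delta():
    act = Softmax()
    x = np.random.randn(5)
    s = act(x)
    above = np.random.randn(5)
    jacobian = np.diag(s) - np.outer(s, s)
    assert np.allclose(act.delta(x, s, above), jacobian @ above)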
class SparseField(Activation):
def __init__(self, inhibition=0.05, leaking=0.0):
self.inhibition = inhibition
self.leaking = leaking
def __call__(self, incoming):
count = len(incoming)
length = int(np.sqrt(count))
assert length ** 2 == count, 'layer size must be a square'
field = incoming.copy().reshape((length, length))
radius = int(np.sqrt(self.inhibition * count)) // 2
assert radius, 'no inhibition due to small factor'
outgoing = np.zeros(field.shape)
while True:
x, y = np.unravel_index(field.argmax(), field.shape)
if field[x, y] <= 0:
break
outgoing[x, y] = 1
surrounding = np.s_[
max(x - radius, 0):min(x + radius + 1, length),
max(y - radius, 0):min(y + radius + 1, length)]
field[surrounding] = 0
assert field[x, y] == 0
outgoing = outgoing.reshape(count)
outgoing = np.maximum(outgoing, self.leaking * incoming)
return outgoing
def delta(self, incoming, outgoing, above):
delta = np.greater(outgoing, 0).astype(float)
return delta * above
class SparseRange(Activation):
"""
E%-Max Winner-Take-All.
Binary activation. First, the activation function is applied. Then all
neurons within the specified range below the strongest neuron are set to
one. All others are set to zero. The gradient is the one of the activation
function for active neurons and zero otherwise.
See: A Second Function of Gamma Frequency Oscillations: An E%-Max
Winner-Take-All Mechanism Selects Which Cells Fire. (2009)
"""
def __init__(self, range_=0.3, function=Sigmoid()):
assert 0 < range_ < 1
self._range = range_
self._function = function
def __call__(self, incoming):
incoming = self._function(incoming)
threshold = self._threshold(incoming)
active = (incoming >= threshold)
outgoing = np.zeros(incoming.shape)
outgoing[active] = 1
# width = active.sum() * 80 / 1000
# print('|', '#' * width, ' ' * (80 - width), '|')
return outgoing
def delta(self, incoming, outgoing, above):
# return self._function.delta(incoming, outgoing, outgoing * above)
return outgoing * self._function.delta(incoming, outgoing, above)
def _threshold(self, incoming):
min_, max_ = incoming.min(), incoming.max()
threshold = min_ + (max_ - min_) * (1 - self._range)
return threshold
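# Usage sketch (hypothetical numbers): with range_=0.3, every unit whose
# sigmoid activation lies within the top 30% of the activation span fires.
#   incoming = np.array([0.1, 2.0, 1.9, -1.0])
#   SparseRange(range_=0.3)(incoming)  # -> array([0., 1., 1., 0.])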
|
{"hexsha": "40d209b7ffe80afc0137d110a08a6857a7c21560", "size": 4202, "ext": "py", "lang": "Python", "max_stars_repo_path": "layered/activation.py", "max_stars_repo_name": "danijar/ffnn", "max_stars_repo_head_hexsha": "c1c09d95f90057a91ae24c80b74f415680b97338", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 265, "max_stars_repo_stars_event_min_datetime": "2015-12-13T15:03:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T11:54:37.000Z", "max_issues_repo_path": "layered/activation.py", "max_issues_repo_name": "danijar/ffnn", "max_issues_repo_head_hexsha": "c1c09d95f90057a91ae24c80b74f415680b97338", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2015-11-04T01:18:37.000Z", "max_issues_repo_issues_event_max_datetime": "2017-01-06T11:02:02.000Z", "max_forks_repo_path": "layered/activation.py", "max_forks_repo_name": "danijar/ffnn", "max_forks_repo_head_hexsha": "c1c09d95f90057a91ae24c80b74f415680b97338", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2015-11-03T16:40:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T17:46:30.000Z", "avg_line_length": 31.5939849624, "max_line_length": 78, "alphanum_fraction": 0.6206568301, "include": true, "reason": "import numpy", "num_tokens": 980}
|
#' This script reshapes data on the Labour Income Share from the European Commission's AMECO economic database, https://ec.europa.eu/info/business-economy-euro/indicators-statistics/economic-databases/macro-economic-database-ameco_en
#'
#' The following link gives access to the subset of the database pertaining to labour costs:
#' https://ec.europa.eu/info/files/gross-domestic-product-income-approach-labour-costs_en
#' There a zip file is available for download; the zip file is ameco7_1.zip, and
#' the extracted file is AMECO7.TXT.
#' This data is used to get the relation between the country code and the country name used by AMECO.
#'
#' The following link is the one used to get the actual values of the data:
#' http://ec.europa.eu/economy_finance/ameco/Include/Query.cfm?serie=ALCD0&trn=1&agg=0&unite=0&ref=0&lstCntry=&lstYear=&code_serie=%27ALCD0%27&selection=0
#'
#' To obtain this link follow the instructions:
#' 0) USE MOZILLA FIREFOX!
#' 1) Go to the AMECO database main page (first link above), and click the AMECO online database link
#' 2) Select item 7 (GDP - Income Approach, Labour Costs), then select item 7.6 Adjusted Wage Share
#' 3) Of the two indicators choose ALCD0 ([... indicator name ...] at current market prices)
#' 4) Click on WebQuery(CurrentView); it opens an xlsx file with a long link in there, which after clean-up boils down to the link above
#'
#' @param work_directory inside the DATA repo, default is 'REP_EUAMECO'
#' @param check for automation, return a warning file at the root, default is TRUE
#' @return csv files compare with ilostat database
#'
#' @examples
#' ## Not run:
#' REP_EUAMECO.ANNUAL_LAP_input()
#' ## End(**Not run**)
#' @export
#' @rdname Micro_process
# SETUP;
### redundant library calls for reference only
work_directory = "REP_EUAMECO/ANNUAL_LAP"
# work_directory <- "J:/COMMON/STATISTICS/DPAU/DATA/REP_EUROSTAT/AMECO"
setwd(paste0(ilo:::path$data, work_directory))
require(tidyverse)
require(stringr)
require(ilo)
init_ilo()
# STEP 0; Obtain a mapping from Ameco country name to country code
### note: the filters match on text in the hope that this is more robust to updates than relying on the position of the variable of interest
countrymap<- read_delim('input/AMECO7.TXT', delim = ';') %>%
select(-X65) %>%
separate(CODE, 'ref_area', sep = '\\.', extra = 'drop') %>%
rename(SUBCHAPTER = `SUB-CHAPTER`) %>%
filter(SUBCHAPTER=='06 Adjusted wage share',
str_detect(TITLE, "market prices")) %>%
select(ref_area,COUNTRY) %>%
rename(ref_area.label=COUNTRY)
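### Illustrative result (hypothetical row): countrymap links the 3-letter AMECO
### country code to the country name used in the html table, e.g.
### # A tibble: ref_area  ref_area.label
### #           AUT       Austria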
# STEP 1 Loading from server
Sys.setenv(http_proxy="http://proxyos.ilo.org:8080")
path <- 'http://ec.europa.eu/economy_finance/ameco/Include/Query.cfm?serie=ALCD0&trn=1&agg=0&unite=0&ref=0&lstCntry=&lstYear=&code_serie=%27ALCD0%27&selection=0'
X <-xml2:::read_html(httr:::GET(path)) %>%
rvest:::html_table(header = TRUE) %>% # get dataset from html table
as.data.frame %>% # fixed list issue
as.tbl %>%
rename(ref_area.label = Country)
# STEP 2; Correspondence to ILOSTAT countries
cou <- ilo$code$cl_country
X <- X %>%
	 inner_join(countrymap, by = "ref_area.label")
X <- X %>%
left_join(
cou %>% select(ref_area = code, label_en) %>%
mutate(test = 1),
by = "ref_area") %>%
filter( test %in% 1) %>%
select(-test,-Unit,-Label, -ref_area.label, -label_en)
#STEP 3; reshape to long format
X <-X %>%
gather(time, obs_value, -ref_area, na.rm = TRUE) %>%
as.tbl %>%
mutate(time = str_sub(time,2,-1) %>% as.numeric) %>%
filter(time < as.numeric(str_sub(Sys.time(),1,4)))
# STEP 4; Generate non-correspondence missing variables (variables that in this case are not required to be matched to any other ILOSTAT subset)
### Note that for note_indicator the reference is taken from existing ILOSTAT data on wage share estimated from GDP, at market prices, and with employment income components = compensation of employees
### for note_source: since this is annual data, set to annual; from AMECO's metadata it follows that the concept falls under Definition (ESA 95), which is often referred to as the System of National Accounts (SNA)
# mapping of the source
X <- X %>% left_join(
ilo$code$cl_survey %>%
filter(str_detect(label_en,'AMECO'), str_sub(code,1,2) %in% 'XX') %>%
mutate(ref_area = str_sub(label_en,1,3)) %>%
select(source = code, ref_area),
by = "ref_area")
X <- X %>%
mutate(collection="STI",
obs_value = round(obs_value, 5),
indicator="LAP_DGVA_NOC_RT",
classif1="NOC_VALUE",
note_indicator="T25:205_T26:1426_T27:213",
note_source="R1:3903_R1:3904") %>% # add tag Bulk and IMF
mutate( classif2 = as.character(NA),
obs_status = as.character(NA),
sex = as.character(NA),
note_classif = as.character(NA),
freq_code = 'm')
### rearrange
Y <- X %>% select_(.dots = c("collection", "ref_area", "indicator", "source", "sex", "classif1", "classif2", "time", "obs_value", "obs_status", "note_classif", "note_indicator", "note_source", "freq_code"))
rm(X)
REF <- levels(as.factor(Y$ref_area))
# split and save by country
for (i in 1:length(REF)){
X <- Y %>% filter(ref_area%in%REF[i])
save(X,file = paste("./output/REP_EUAMECO_",REF[i],".Rdata",sep=""))
print(REF[i])
}
REF <- cbind(PATH = paste(getwd(), "/output/REP_EUAMECO_",REF,".Rdata",sep=""),ID = NA, Types ="NSO_ilostat", REF = NA)
write.csv(REF,"FileToLoad.csv",row.names = FALSE,na="")
rm(list=ls(all=TRUE))
q(save = "no", status = 0, runLast = FALSE)
|
{"hexsha": "6face3b32cd6f553d3f1b5c2df1014367751bfcf", "size": 5714, "ext": "r", "lang": "R", "max_stars_repo_path": "inst/doc/do/REP_EUAMECO.ANNUAL_LAP.r", "max_stars_repo_name": "dbescond/iloData", "max_stars_repo_head_hexsha": "c4060433fd0b7025e82ca3b0a213bf00c62b2325", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inst/doc/do/REP_EUAMECO.ANNUAL_LAP.r", "max_issues_repo_name": "dbescond/iloData", "max_issues_repo_head_hexsha": "c4060433fd0b7025e82ca3b0a213bf00c62b2325", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inst/doc/do/REP_EUAMECO.ANNUAL_LAP.r", "max_forks_repo_name": "dbescond/iloData", "max_forks_repo_head_hexsha": "c4060433fd0b7025e82ca3b0a213bf00c62b2325", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6081081081, "max_line_length": 221, "alphanum_fraction": 0.6688834442, "num_tokens": 1715}
|
#import tensorflow as tf
import numpy as np
import torch
import torch.nn.functional as F
#
""" Includes helper functions that are used in admm.py and model.py
Last updated: 2/22/2019
Overview:
* Padding and cropping functions
* FFT shifting functions
* Forward Model (H, Hadj)
* Soft thresholding functions
* TV forward/adjoint operators
"""
###### Complex operations ##########
def complex_multiplication(t1, t2):
real1, imag1 = torch.unbind(t1, dim=-1)
real2, imag2 = torch.unbind(t2, dim=-1)
return torch.stack([real1 * real2 - imag1 * imag2, real1 * imag2 + imag1 * real2], dim = -1)
def complex_abs(t1):
    # Note: unbinds along dim=2 (the trailing axis only for 3-D inputs), unlike
    # complex_multiplication, which uses dim=-1.
    real1, imag1 = torch.unbind(t1, dim=2)
    return torch.sqrt(real1**2 + imag1**2)
def make_real(c):
out_r, _ = torch.unbind(c,-1)
return out_r
def make_complex(r, i = 0):
    # Scalar sentinel check: comparing a tensor to 0 with `==` would be
    # elementwise and break the `if`.
    if isinstance(i, int) and i == 0:
        i = torch.zeros_like(r, dtype=torch.float32)
    return torch.stack((r, i), -1)
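# Sketch (not part of the original module): complex_multiplication should agree
# with numpy's native complex product when the trailing axis holds (real, imag).
def _check_complex_multiplication():
    a = np.random.randn(4) + 1j * np.random.randn(4)
    b = np.random.randn(4) + 1j * np.random.randn(4)
    ta = make_complex(torch.tensor(a.real), torch.tensor(a.imag))
    tb = make_complex(torch.tensor(b.real), torch.tensor(b.imag))
    out = complex_multiplication(ta, tb)
    assert np.allclose(out[..., 0].numpy(), (a * b).real)
    assert np.allclose(out[..., 1].numpy(), (a * b).imag)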
####### Padding and cropping functions #####
def pad_zeros_torch(model, x):
PADDING = (model.PAD_SIZE1, model.PAD_SIZE1, model.PAD_SIZE0, model.PAD_SIZE0)
return F.pad(x, PADDING, 'constant', 0)
def crop(model, x):
C01 = model.PAD_SIZE0; C02 = model.PAD_SIZE0 + model.DIMS0 # Crop indices
C11 = model.PAD_SIZE1; C12 = model.PAD_SIZE1 + model.DIMS1 # Crop indices
return x[:, :, C01:C02, C11:C12]
####### FFT Shifting #####
def roll_n(X, axis, n):
f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def batch_fftshift2d(x):
real, imag = torch.unbind(x, -1)
for dim in range(1, len(real.size())):
n_shift = real.size(dim)//2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = roll_n(real, axis=dim, n=n_shift)
imag = roll_n(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def batch_ifftshift2d(x):
real, imag = torch.unbind(x, -1)
for dim in range(len(real.size()) - 1, 0, -1):
real = roll_n(real, axis=dim, n=real.size(dim)//2)
imag = roll_n(imag, axis=dim, n=imag.size(dim)//2)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
####### Forward Model #####
def Hfor(model, x):
xc = torch.stack((x, torch.zeros_like(x, dtype=torch.float32)), -1)
#X = torch.fft(batch_ifftshift2d(xc),2)
X = torch.fft(xc,2)
HX = complex_multiplication(model.H,X)
out = torch.ifft(HX,2)
out_r, _ = torch.unbind(out,-1)
return out_r
def Hadj(model, x):
xc = torch.stack((x, torch.zeros_like(x, dtype=torch.float32)), -1)
#X = torch.fft(batch_ifftshift2d(xc),2)
X = torch.fft(xc,2)
HX = complex_multiplication(model.Hconj,X)
#out = batch_ifftshift2d(torch.ifft(HX,2))
out = torch.ifft(HX,2)
out_r, _ = torch.unbind(out,-1)
return out_r
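# Adjoint sketch (not part of the original module; assumes the legacy
# torch.fft(tensor, signal_ndim) API used throughout this file and a mock
# model object): with Hconj the elementwise conjugate of H, the pair satisfies
# <Hfor(x), y> == <x, Hadj(y)> for real x, y.
def _check_adjoint(shape=(1, 8, 8)):
    import types
    h = torch.randn(*shape, 2)
    hconj = torch.stack((h[..., 0], -h[..., 1]), -1)
    model = types.SimpleNamespace(H=h, Hconj=hconj)
    x, y = torch.randn(*shape), torch.randn(*shape)
    lhs = (Hfor(model, x) * y).sum()
    rhs = (x * Hadj(model, y)).sum()
    assert torch.allclose(lhs, rhs, atol=1e-3)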
####### Soft Thresholding Functions #####
def soft_2d_gradient2_rgb(model, v,h,tau):
z0 = torch.tensor(0, dtype = torch.float32, device=model.cuda_device)
z1 = torch.zeros(model.batch_size, 3, 1, model.DIMS1*2, dtype = torch.float32, device=model.cuda_device)
z2 = torch.zeros(model.batch_size, 3, model.DIMS0*2, 1, dtype= torch.float32, device=model.cuda_device)
vv = torch.cat([v, z1] , 2)
hh = torch.cat([h, z2] , 3)
mag = torch.sqrt(vv*vv + hh*hh)
magt = torch.max(mag - tau, z0, out=None)
mag = torch.max(mag - tau, z0, out=None) + tau
#smax = torch.nn.Softmax()
#magt = smax(mag - tau, torch.zeros_like(mag, dtype = torch.float32))
#mag = smax(mag - tau, torch.zeros_like(mag, dtype = torch.float32)) + tau
    mmult = magt/(mag)#+1e-5)
    # Debug checks: x != x is true only for NaN entries.
    if torch.any(mmult != mmult):
        print('NaN detected in shrinkage multiplier')
    if torch.any(v != v):
        print('NaN detected in vertical gradient')
return v*mmult[:,:, :-1,:], h*mmult[:,:, :,:-1]
def soft_2d(v,tau):
out = torch.nn.functional.relu(v-tau)
return out
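# e.g. soft_2d(torch.tensor([-1., 0.5, 2.]), 1.0) -> tensor([0., 0., 1.])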
######## normalize image #########
def normalize_image(image):
    # Scale each batch element so its maximum value is 1: build a diagonal
    # matrix of per-image reciprocal maxima and apply it to the flattened batch.
    out_shape = image.shape
image_flat = image.reshape((out_shape[0],out_shape[1]*out_shape[2]*out_shape[3]))
image_max,_ = torch.max(image_flat,1)
image_max_eye = torch.eye(out_shape[0], dtype = torch.float32, device=image.device)*1/image_max
image_normalized = torch.reshape(torch.matmul(image_max_eye, image_flat), (out_shape[0],out_shape[1],out_shape[2],out_shape[3]))
return image_normalized
####### Add Noise #####
def gaussian_noise_layer(input_layer, std):
    # Ported to torch: the tensorflow import at the top of this file is
    # commented out, so tf.random_normal is unavailable here.
    noise = torch.randn_like(input_layer) * std
    return input_layer + noise
######## ADMM Parameter Update #########
def param_update_previous(mu, res_tol, mu_inc, mu_dec, r, s):
    # Mirrors the commented tf.cond logic below.
    if r > res_tol * s:
        mu_up = mu * mu_inc
    else:
        mu_up = mu
    if s > res_tol * r:
        mu_up = mu_up / mu_dec
    #mu_up = tf.cond(tf.greater(r, res_tol * s), lambda: (mu * mu_inc), lambda: mu)
    #mu_up = tf.cond(tf.greater(s, res_tol * r), lambda: (mu_up/mu_dec), lambda: mu_up)
    return mu_up
######## ADMM Parameter Update #########
def param_update2(mu, res_tol, mu_inc, mu_dec, r, s):
if r > res_tol * s:
mu_up = mu*mu_inc
else:
mu_up = mu
if s > res_tol*r:
mu_up = mu_up/mu_dec
else:
mu_up = mu_up
#mu_up = tf.cond(tf.greater(r, res_tol * s), lambda: (mu * mu_inc), lambda: mu)
#mu_up = tf.cond(tf.greater(s, res_tol * r), lambda: (mu_up/mu_dec), lambda: mu_up)
return mu_up
###### Things I saw on TV ###########
def make_laplacian(model):
lapl = np.zeros([model.DIMS0*2,model.DIMS1*2])
    lapl[0,0] = 4.
    lapl[0,1] = -1.; lapl[1,0] = -1.
    lapl[0,-1] = -1.; lapl[-1,0] = -1.
LTL = np.abs(np.fft.fft2(lapl))
return LTL
#def DT(dx, dy): # Use convolution instead?
# with tf.device("/cpu:0"):
# out = (tf.manip.roll(dx, 1, axis = 1) - dx) + (tf.manip.roll(dy, 1, axis = 2) - dy)
# return out
#def D(x):
# with tf.device("/cpu:0"):
# xroll = tf.manip.roll(x, -1, axis = 1)
# yroll = tf.manip.roll(x, -1, axis = 2)
# return (xroll - x), (yroll - x)
def L_tf(a): # Not using
xdiff = a[:,:, 1:, :]-a[:,:, :-1, :]
ydiff = a[:,:, :, 1:]-a[:,:, :, :-1]
return -xdiff, -ydiff
def Ltv_tf(a, b): # Not using
return torch.cat([a[:,:, 0:1,:], a[:,:, 1:, :]-a[:,:, :-1, :], -a[:,:,-1:,:]],
2) + torch.cat([b[:,:,:,0:1], b[:, :, :, 1:]-b[:, :, :, :-1], -b[:,:, :,-1:]],3)
#return tf.concat([a[:,0:1,:], a[:, 1:, :]-a[:, :-1, :], -a[:,-1:,:]], axis = 1) + tf.concat([b[:,:,0:1], b[:, :, 1:]-b[:, :, :-1], -b[:,:,-1:]], axis = 2)
def TVnorm_tf(x):
x_diff, y_diff = L_tf(x)
result = torch.sum(torch.abs(x_diff)) + torch.sum(torch.abs(y_diff))
return result
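# Sketch (not part of the original module): Ltv_tf is the adjoint of L_tf,
# i.e. <L_tf(a), (p, q)> == <a, Ltv_tf(p, q)> for matching shapes.
def _check_tv_adjoint():
    a = torch.randn(1, 3, 8, 8)
    p = torch.randn(1, 3, 7, 8)
    q = torch.randn(1, 3, 8, 7)
    ax, ay = L_tf(a)
    lhs = (ax * p).sum() + (ay * q).sum()
    rhs = (a * Ltv_tf(p, q)).sum()
    assert torch.allclose(lhs, rhs, atol=1e-4)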
|
{"hexsha": "48f7a859a4e7b53e0e3f02642934a0a2d80f4b1a", "size": 7031, "ext": "py", "lang": "Python", "max_stars_repo_path": "admm_helper_functions_torch.py", "max_stars_repo_name": "sangeetsu/LenslessLearning", "max_stars_repo_head_hexsha": "751efc614eff5616a229972620192478af2c39c1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2019-05-31T15:42:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T22:29:05.000Z", "max_issues_repo_path": "admm_helper_functions_torch.py", "max_issues_repo_name": "sangeetsu/LenslessLearning", "max_issues_repo_head_hexsha": "751efc614eff5616a229972620192478af2c39c1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-05-27T06:21:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-09T22:57:11.000Z", "max_forks_repo_path": "admm_helper_functions_torch.py", "max_forks_repo_name": "sangeetsu/LenslessLearning", "max_forks_repo_head_hexsha": "751efc614eff5616a229972620192478af2c39c1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-09-05T18:06:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-07T09:19:09.000Z", "avg_line_length": 32.1050228311, "max_line_length": 160, "alphanum_fraction": 0.5818517992, "include": true, "reason": "import numpy", "num_tokens": 2256}
|
import torch
import torchvision.transforms as T
import numpy as np
from nn_analysis.datasets import datasets as ds
from nn_analysis.datasets import transforms
def get_custom_dataset(base_dataset_name, seed, transform_names=[], subset_indices=None, outer_dims=None):
transforms_map = {
'crop': transforms.RandomResizedCrop(224,scale=(0.2,1.),ratio=(1.0,1.0)),
'color': transforms.ColorJitter(0.4, 0.4, 0.4, 0.1),
}
transforms_list = [transforms_map[transform_name] for transform_name in transform_names]
transforms_list += [
T.Resize(224),
T.ToTensor(),
transforms.Normalize('imagenet'),
]
base_dataset = ds.get_dataset(base_dataset_name)
if subset_indices is not None:
subset_indices = tuple([slice(*nth_idx) if isinstance(nth_idx, list) else nth_idx for nth_idx in subset_indices])
if isinstance(base_dataset, ds.TensorDataset):
base_dataset = base_dataset[subset_indices]
elif isinstance(base_dataset, ds.ListDataset):
assert len(subset_indices) == 1 # 1 dimensional dataset
base_dataset = ds.ListDataset(
[base_dataset.images[idx] for idx in np.arange(base_dataset.shape[0])[subset_indices]],
[base_dataset.targets[idx] for idx in np.arange(base_dataset.shape[0])[subset_indices]]
)
else:
raise NotImplementedError("The configuration 'subset_indices' is not implemented for a base_dataset that is neither a TensorDataset nor a ListDataset. Set 'subset_indices' to null if you don't want this functionality.")
if isinstance(base_dataset, ds.TensorDataset):
base_dataset.is_tensor = False
if outer_dims is not None:
base_dataset = ds.OuterDataset(base_dataset, outer_dims)
dataset = ds.RandomTransformDataset(
base_dataset,
transforms.Compose(transforms_list),
target_transform=lambda target: torch.cat(target),
identity=True,
return_index=True,
)
return dataset
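# Illustrative usage (hypothetical dataset name and indices):
#   dataset = get_custom_dataset('imagenet_val', seed=0,
#                                transform_names=['crop', 'color'],
#                                subset_indices=[[0, 100]])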
|
{"hexsha": "dec7baa13fa1db24864ee70cb0337826bb847b74", "size": 2063, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn_analysis/datasets/custom_dataset.py", "max_stars_repo_name": "hchau630/nn-analysis", "max_stars_repo_head_hexsha": "0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nn_analysis/datasets/custom_dataset.py", "max_issues_repo_name": "hchau630/nn-analysis", "max_issues_repo_head_hexsha": "0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn_analysis/datasets/custom_dataset.py", "max_forks_repo_name": "hchau630/nn-analysis", "max_forks_repo_head_hexsha": "0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1020408163, "max_line_length": 231, "alphanum_fraction": 0.6883179835, "include": true, "reason": "import numpy", "num_tokens": 439}
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os
import sys
from GlimpseSensor import *
from Globals import *
from CoreRNN import *
batchSize = constants['batchSize']
trainingIters = 1000000 # in terms of sample size
displayStep = 1 # how often to print details
step = 0
logPath = "logs"
modelPath = "model/"
def setupSummaries():
with tf.variable_scope('monitor') as scope:
loss = tf.Variable(0.0)
tf.summary.scalar("Loss", loss)
trainAcc = tf.Variable(0.0)
tf.summary.scalar("Train Accuracy", trainAcc)
testAcc = tf.Variable(0.0)
tf.summary.scalar("Test Accuracy", testAcc)
summaryVars = [loss, trainAcc, testAcc]
summaryPlaceholders = [
tf.placeholder("float") for i in range(
len(summaryVars))]
updateOps = [
summaryVars[i].assign(
summaryPlaceholders[i]) for i in range(
len(summaryVars))]
return summaryPlaceholders, updateOps
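# Usage pattern (as in the training loop below): run the assign ops returned by
# setupSummaries with the latest scalar values, then evaluate the merged
# summary op and write it to the TensorBoard log directory.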
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
monitorPh, monitorOps = setupSummaries()
saver = tf.train.Saver()
summaryOps = tf.summary.merge_all()
# glimpseSensor = GlimpseSensor()
coreRnn = CoreRNN()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(logPath, sess.graph)
if not os.path.exists(modelPath):
os.makedirs(modelPath)
checkpoint = tf.train.get_checkpoint_state(modelPath)
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print ("successfully loaded checkpoint")
while step * batchSize < trainingIters:
batch = mnist.train.next_batch(batchSize)
coreRnn.forward(batch[0], batch[1])
print ("finished forward...")
sess.run([coreRnn.optimizer], feed_dict={
coreRnn.imgsPlaceholder: batch[0], coreRnn.labelsPlaceholder: batch[1]})
print ("running optimizer...")
if step % displayStep == 0:
# Calculate training loss and accuracy
loss, trainAcc = sess.run([coreRnn.cost, coreRnn.accuracy], feed_dict={
coreRnn.imgsPlaceholder: batch[0],
coreRnn.labelsPlaceholder: batch[1]})
# calculate test accuracy
tbatch = mnist.test.next_batch(batchSize)
coreRnn.forward(tbatch[0], tbatch[1])
            testAcc = sess.run(
                coreRnn.accuracy,
                feed_dict={
                    coreRnn.imgsPlaceholder: tbatch[0],
                    coreRnn.labelsPlaceholder: tbatch[1]})
sess.run([monitorOps[0], monitorOps[1], monitorOps[2]], feed_dict={
monitorPh[0]: float(loss), monitorPh[1]: trainAcc, monitorPh[2]: testAcc})
print("Iter " +
str(step *
batchSize) +
", Minibatch Loss= " +
"{:.6f}".format(loss) +
", Training Accuracy= " +
"{:.5f}".format(trainAcc) +
", Test Accuracy= " +
"{:.5f}".format(testAcc))
savePath = saver.save(sess, modelPath + "dram.ckpt")
print("Model saved in file: %s" % savePath)
summaryStr = sess.run(
summaryOps,
feed_dict={
coreRnn.imgsPlaceholder: batch[0],
coreRnn.labelsPlaceholder: batch[1]})
writer.add_summary(summaryStr, step)
step += 1
|
{"hexsha": "fd89448b1cdf6040596e03d66c5aeea8b8046416", "size": 3603, "ext": "py", "lang": "Python", "max_stars_repo_path": "MNIST/ram2/Main.py", "max_stars_repo_name": "mimikaan/Attention-Model", "max_stars_repo_head_hexsha": "079cc1b42c83f6e3e77a92aa54c1a8f9ad0d8b93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2017-07-21T02:39:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T09:45:43.000Z", "max_issues_repo_path": "MNIST/ram2/Main.py", "max_issues_repo_name": "Pandinosaurus/Visual-Attention-Model", "max_issues_repo_head_hexsha": "079cc1b42c83f6e3e77a92aa54c1a8f9ad0d8b93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MNIST/ram2/Main.py", "max_forks_repo_name": "Pandinosaurus/Visual-Attention-Model", "max_forks_repo_head_hexsha": "079cc1b42c83f6e3e77a92aa54c1a8f9ad0d8b93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2017-07-31T14:45:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T10:34:41.000Z", "avg_line_length": 37.53125, "max_line_length": 95, "alphanum_fraction": 0.594782126, "include": true, "reason": "import numpy", "num_tokens": 805}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2014-2016 The FFF Core developers
// Original code was distributed under the MIT software license.
// Copyright (c) 2014-2019 Coin Sciences Ltd
// FFF_Core code distributed under the GPLv3 license, see COPYING file.
#include "rpc/rpcutils.h"
#include "filters/FFF_Corefilter.h"
#include "filters/filter.h"
#include "community/community.h"
#include "utils/util.h"
#include "json/json_spirit_ubjson.h"
#include <boost/assign/list_of.hpp>
using namespace std;
using namespace json_spirit;
void ParseFilterRestrictionsForField(Value param,mc_Script *lpDetailsScript,uint32_t filter_type);
string ParseFilterOptionsLibraryField(Value param,mc_Script *lpDetailsScript, bool for_test);
uint32_t ParseRawDataParamType(Value *param,mc_EntityDetails *given_entity,mc_EntityDetails *entity,uint32_t *data_format,int *errorCode,string *strError)
{
uint32_t param_type=MC_DATA_API_PARAM_TYPE_NONE;
uint32_t this_param_type;
bool missing_data=true;
bool txfilter=false;
*data_format=MC_SCR_DATA_FORMAT_UNKNOWN;
entity->Zero();
if(param->type() == obj_type)
{
BOOST_FOREACH(const Pair& d, param->get_obj())
{
this_param_type=MC_DATA_API_PARAM_TYPE_NONE;
if(d.name_ == "inputcache")
{
this_param_type=MC_DATA_API_PARAM_TYPE_CIS;
}
if(d.name_ == "create")
{
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
if(d.value_.get_str() == "stream")
{
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_STREAM;
}
if(d.value_.get_str() == "asset")
{
this_param_type=MC_DATA_API_PARAM_TYPE_ISSUE;
}
if(d.value_.get_str() == "upgrade")
{
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_UPGRADE;
}
if(d.value_.get_str() == "txfilter")
{
if( mc_gState->m_Features->Filters() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Tx filters are not supported by this protocol version");
goto exitlbl;
}
if( (param_type == MC_DATA_API_PARAM_TYPE_PUBLISH) ||
(param_type == MC_DATA_API_PARAM_TYPE_APPROVAL) )
{
*strError=string("'create' field should preceed 'for'");
goto exitlbl;
}
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_FILTER;
txfilter=true;
}
if(d.value_.get_str() == "streamfilter")
{
if( mc_gState->m_Features->StreamFilters() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Stream filters are not supported by this protocol version");
goto exitlbl;
}
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_FILTER;
}
if(d.value_.get_str() == "variable")
{
if( mc_gState->m_Features->Variables() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Variables are not supported by this protocol version");
goto exitlbl;
}
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_VAR;
}
if(d.value_.get_str() == "library")
{
if( mc_gState->m_Features->Libraries() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Libraries are not supported by this protocol version");
goto exitlbl;
}
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_LIB;
}
}
if(this_param_type == MC_DATA_API_PARAM_TYPE_NONE)
{
*strError=string("Invalid new entity type");
goto exitlbl;
}
}
if(d.name_ == "update")
{
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
ParseEntityIdentifier(d.value_,entity, MC_ENT_TYPE_ANY);
if(entity->GetEntityType() == MC_ENT_TYPE_ASSET)
{
this_param_type=MC_DATA_API_PARAM_TYPE_FOLLOWON;
}
if(entity->GetEntityType() == MC_ENT_TYPE_VARIABLE)
{
this_param_type=MC_DATA_API_PARAM_TYPE_UPDATE_VAR;
}
if(entity->GetEntityType() == MC_ENT_TYPE_LIBRARY)
{
this_param_type=MC_DATA_API_PARAM_TYPE_UPDATE_LIB;
}
}
if(this_param_type == MC_DATA_API_PARAM_TYPE_NONE)
{
*strError=string("Asset or variable with this identifier not found");
*errorCode=RPC_ENTITY_NOT_FOUND;
goto exitlbl;
}
/*
if(entity->GetEntityType() != MC_ENT_TYPE_ASSET)
{
*strError=string("Asset with this identifier not found");
*errorCode=RPC_ENTITY_NOT_FOUND;
goto exitlbl;
}
this_param_type=MC_DATA_API_PARAM_TYPE_FOLLOWON;
*/
}
if(d.name_ == "for")
{
if(txfilter)
{
param_type=MC_DATA_API_PARAM_TYPE_NONE;
this_param_type=MC_DATA_API_PARAM_TYPE_CREATE_FILTER;
}
else
{
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
ParseEntityIdentifier(d.value_,entity, MC_ENT_TYPE_ANY);
if(entity->GetEntityType() == MC_ENT_TYPE_STREAM)
{
this_param_type=MC_DATA_API_PARAM_TYPE_PUBLISH;
}
if(entity->GetEntityType() == MC_ENT_TYPE_UPGRADE)
{
this_param_type=MC_DATA_API_PARAM_TYPE_APPROVAL;
}
}
if(this_param_type == MC_DATA_API_PARAM_TYPE_NONE)
{
*strError=string("Entity with this identifier not found");
*errorCode=RPC_ENTITY_NOT_FOUND;
goto exitlbl;
}
}
}
if( (d.name_ == "text") || (d.name_ == "json") || (d.name_ == "cache") )
{
if( mc_gState->m_Features->FormattedData() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Formatted data is not supported by this protocol version");
goto exitlbl;
}
if(!missing_data)
{
*strError=string("data field can appear only once in the object");
goto exitlbl;
}
missing_data=false;
}
if(this_param_type != MC_DATA_API_PARAM_TYPE_NONE)
{
if(param_type != MC_DATA_API_PARAM_TYPE_NONE)
{
*strError=string("Only one of the following keywords can appear in the object: create, update, for, json, text");
goto exitlbl;
}
}
if(this_param_type != MC_DATA_API_PARAM_TYPE_NONE)
{
param_type=this_param_type;
}
}
if(param_type == MC_DATA_API_PARAM_TYPE_NONE)
{
// if(*data_format != MC_SCR_DATA_FORMAT_UNKNOWN)
if(!missing_data)
{
param_type=MC_DATA_API_PARAM_TYPE_FORMATTED;
}
}
/*
if(param_type == MC_DATA_API_PARAM_TYPE_NONE)
{
if(given_entity && given_entity->GetEntityType())
{
memcpy(entity,given_entity,sizeof(mc_EntityDetails));
param_type=MC_DATA_API_PARAM_TYPE_FOLLOWON;
}
else
{
param_type=MC_DATA_API_PARAM_TYPE_ISSUE;
}
}
*/
/*
if(param_type == MC_DATA_API_PARAM_TYPE_FOLLOWON)
{
if(entity->AllowedFollowOns() == 0)
{
*errorCode=RPC_NOT_ALLOWED;
*strError=string("Issuing more units not allowed for this asset");
goto exitlbl;
}
}
*/
}
else
{
if(param->type() == str_type)
{
if(param->get_str().size())
{
param_type=MC_DATA_API_PARAM_TYPE_RAW;
}
else
{
param_type=MC_DATA_API_PARAM_TYPE_EMPTY_RAW;
}
}
else
{
*strError="Invalid parameter type, should be object or string";
goto exitlbl;
}
}
exitlbl:
return param_type;
}
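/* Illustrative payload-to-type mapping (hypothetical JSON, not from the tree):
 *   {"create":"stream","name":"s1"}   -> MC_DATA_API_PARAM_TYPE_CREATE_STREAM
 *   {"for":"stream1","text":"hi"}     -> MC_DATA_API_PARAM_TYPE_PUBLISH
 *   {"text":"hello"}                  -> MC_DATA_API_PARAM_TYPE_FORMATTED
 *   "48656c6c6f" (hex string)         -> MC_DATA_API_PARAM_TYPE_RAW
 */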
CScript RawDataScriptRawHex(Value *param,int *errorCode,string *strError)
{
bool fIsHex;
CScript scriptOpReturn=CScript();
vector<unsigned char> dataData(ParseHex(param->get_str().c_str(),fIsHex));
if(!fIsHex)
{
*strError="data should be hexadecimal string";
if(mc_gState->m_Features->FormattedData())
{
*strError+=" or recognized object";
}
}
scriptOpReturn << OP_RETURN << dataData;
return scriptOpReturn;
}
vector<unsigned char> ParseRawFormattedData(const Value *value,uint32_t *data_format,mc_Script *lpDetailsScript,uint32_t in_options,uint32_t* out_options,int *errorCode,string *strError)
{
if(out_options)
{
*out_options=MC_RFD_OPTION_NONE;
}
vector<unsigned char> vValue;
if(value->type() == str_type)
{
bool fIsHex;
vValue=ParseHex(value->get_str().c_str(),fIsHex);
if(!fIsHex)
{
*strError=string("data should be hexadecimal string");
if(mc_gState->m_Features->FormattedData())
{
*strError+=" or recognized object";
}
}
*data_format=MC_SCR_DATA_FORMAT_UNKNOWN;
}
else
{
if( (in_options & MC_RFD_OPTION_INLINE) ||
(mc_gState->m_Features->FormattedData() != 0) ||
(mc_gState->m_Features->OffChainData() != 0) )
{
if(value->type() == obj_type)
{
if(value->get_obj().size() != 1)
{
*strError=string("data should be object with single element");
}
else
{
BOOST_FOREACH(const Pair& d, value->get_obj())
{
if(d.name_ == "text")
{
if(d.value_.type() == str_type)
{
vValue=vector<unsigned char> (d.value_.get_str().begin(),d.value_.get_str().end());
}
else
{
*strError=string("value in data object should be string");
}
*data_format=MC_SCR_DATA_FORMAT_UTF8;
}
if(d.name_ == "json")
{
size_t bytes;
int err;
const unsigned char *script;
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
if((err = ubjson_write(d.value_,lpDetailsScript,MAX_FORMATTED_DATA_DEPTH)) != MC_ERR_NOERROR)
{
*strError=string("Couldn't transfer JSON object to internal UBJSON format");
}
script = lpDetailsScript->GetData(0,&bytes);
vValue=vector<unsigned char> (script,script+bytes);
*data_format=MC_SCR_DATA_FORMAT_UBJSON;
}
if(d.name_ == "cache")
{
if(d.value_.type() == str_type)
{
vValue=vector<unsigned char> (d.value_.get_str().begin(),d.value_.get_str().end());
vValue.push_back(0);
if(in_options & MC_RFD_OPTION_OFFCHAIN)
{
if(out_options)
{
*out_options |= MC_RFD_OPTION_CACHE;
}
}
else
{
int fHan=mc_BinaryCacheFile((char*)&vValue[0],0);
if(fHan <= 0)
{
*strError="Binary cache item with this identifier not found";
}
int64_t total_size=0;
if(strError->size() == 0)
{
total_size=lseek64(fHan,0,SEEK_END);
if(lseek64(fHan,0,SEEK_SET) != 0)
{
*strError="Cannot read binary cache item";
*errorCode=RPC_INTERNAL_ERROR;
}
}
if(strError->size() == 0)
{
if(total_size > MAX_OP_RETURN_RELAY)
{
*strError="Binary cache item too big";
*errorCode=RPC_NOT_SUPPORTED;
}
}
if(strError->size() == 0)
{
if(total_size)
{
mc_gState->m_TmpBuffers->m_RpcChunkScript1->Clear();
mc_gState->m_TmpBuffers->m_RpcChunkScript1->Resize(total_size,1);
unsigned char* ptr=mc_gState->m_TmpBuffers->m_RpcChunkScript1->m_lpData;
if(read(fHan,ptr,total_size) != total_size)
{
*errorCode=RPC_INTERNAL_ERROR;
*strError="Cannot read binary cache item";
}
vValue=vector<unsigned char> (ptr,ptr+total_size);
}
else
{
vValue.clear();
}
}
mc_CloseBinaryCache(fHan);
}
}
else
{
*strError=string("cache identifier in data object should be string");
}
*data_format=MC_SCR_DATA_FORMAT_UNKNOWN;
}
else
{
if(d.name_ == "chunks")
{
if(mc_gState->m_Features->OffChainData())
{
if(d.value_.type() == array_type)
{
Array arr=d.value_.get_array();
for(int i=0;i<(int)arr.size();i++)
{
if(strError->size() == 0)
{
if(arr[i].type() == str_type)
{
vector<unsigned char> vHash;
bool fIsHex;
vHash=ParseHex(arr[i].get_str().c_str(),fIsHex);
if(!fIsHex)
{
*strError=string("Chunk hash should be hexadecimal string");
}
else
{
if(vHash.size() != MC_CDB_CHUNK_HASH_SIZE)
{
*strError=strprintf("Chunk hash should be %d bytes long",MC_CDB_CHUNK_HASH_SIZE);
}
else
{
uint256 hash;
hash.SetHex(arr[i].get_str());
vValue.insert(vValue.end(),(unsigned char*)&hash,(unsigned char*)&hash+MC_CDB_CHUNK_HASH_SIZE);
}
}
}
}
}
}
else
{
*strError=string("value in data object should be array");
}
if(out_options)
{
*out_options |= MC_RFD_OPTION_OFFCHAIN;
}
*data_format=MC_SCR_DATA_FORMAT_UNKNOWN;
}
else
{
*errorCode=RPC_NOT_SUPPORTED;
*strError="Unsupported item data type: " + d.name_;
}
}
else
{
if(*data_format == MC_SCR_DATA_FORMAT_UNKNOWN)
{
*errorCode=RPC_NOT_SUPPORTED;
*strError="Unsupported item data type: " + d.name_;
}
}
}
}
}
}
else
{
*strError=string("data should be hexadecimal string or recognized object");
}
}
else
{
*strError=string("data should be hexadecimal string");
if(mc_gState->m_Features->FormattedData() == 0)
{
*strError+=" for this protocol version";
}
}
}
return vValue;
}
void ParseRawDetails(const Value *value,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
if(value->type() == obj_type)
{
size_t bytes;
int err;
const unsigned char *script;
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
if((err = ubjson_write(*value,lpDetailsScript,MAX_FORMATTED_DATA_DEPTH)) != MC_ERR_NOERROR)
{
*strError=string("Couldn't transfer details JSON object to internal UBJSON format");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_JSON_DETAILS,script,bytes);
}
/*
BOOST_FOREACH(const Pair& p, value->get_obj())
{
if(p.value_.type() == str_type)
{
lpDetails->SetParamValue(p.name_.c_str(),p.name_.size(),(unsigned char*)p.value_.get_str().c_str(),p.value_.get_str().size());
}
else
{
*strError=string("Invalid details value, should be string");
}
}
*/
}
else
{
*strError=string("Invalid details");
}
}
void ParseRawValue(const Value *value,mc_Script *lpDetails,mc_Script *lpDetailsScript,size_t *max_size,int *errorCode,string *strError)
{
size_t bytes;
int err;
const unsigned char *script;
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
if((err = ubjson_write(*value,lpDetailsScript,MAX_FORMATTED_DATA_DEPTH)) != MC_ERR_NOERROR)
{
*strError=string("Couldn't transfer value JSON to internal UBJSON format");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
if(max_size)
{
if(bytes > *max_size)
{
*max_size=bytes;
return;
}
else
{
if(bytes > MAX_SCRIPT_ELEMENT_SIZE-128)
{
*max_size += 1;
return;
}
}
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_JSON_VALUE,script,bytes);
}
}
CScript RawDataScriptFormatted(Value *param,uint32_t *data_format,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
vector<unsigned char> vValue;
size_t bytes;
const unsigned char *script;
bool field_parsed;
bool missing_data=true;
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if( (d.name_ == "text") || (d.name_ == "json") || (d.name_ == "cache") )
{
if(!missing_data)
{
*strError=string("data object should have single key - json or text");
}
vValue=ParseRawFormattedData(param,data_format,lpDetailsScript,MC_RFD_OPTION_NONE,NULL,errorCode,strError);
field_parsed=true;
missing_data=false;
}
// if(d.name_ == "format")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());;
}
}
if(missing_data)
{
*strError=string("Missing json or text field");
}
if(strError->size() == 0)
{
lpDetailsScript->Clear();
lpDetailsScript->SetDataFormat(*data_format);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
scriptOpReturn << OP_RETURN << vValue;
}
return scriptOpReturn;
}
CScript RawDataScriptIssue(Value *param,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
int err;
const unsigned char *script;
string entity_name;
int multiple=1;
int is_open=0;
int is_anyone_can_issuemore=0;
uint32_t permissions=0;
bool missing_name=true;
bool missing_multiple=true;
bool missing_open=true;
bool missing_anyone_can_issuemore=true;
bool missing_details=true;
lpDetails->Clear();
lpDetails->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "name")
{
if(!missing_name)
{
*strError=string("name field can appear only once in the object");
}
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
entity_name=d.value_.get_str();
if(entity_name == "*")
{
*strError=string("Invalid asset name");
}
if(entity_name.size())
{
if(entity_name.size() > MC_ENT_MAX_NAME_SIZE)
{
*strError=string("Invalid asset name - too long");
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_NAME,(const unsigned char*)(entity_name.c_str()),entity_name.size());
}
}
else
{
*strError=string("Invalid name");
}
missing_name=false;
field_parsed=true;
}
if(d.name_ == "multiple")
{
if(!missing_multiple)
{
*strError=string("multiple field can appear only once in the object");
}
if(d.value_.type() == int_type)
{
multiple=d.value_.get_int();
if(multiple <= 0)
{
*strError=string("Invalid multiple - should be positive");
}
else
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_ASSET_MULTIPLE,(unsigned char*)&multiple,4);
}
}
else
{
*strError=string("Invalid multiple");
}
missing_multiple=false;
field_parsed=true;
}
if(d.name_ == "open")
{
if(!missing_open)
{
*strError=string("open field can appear only once in the object");
}
if(d.value_.type() == bool_type)
{
is_open=d.value_.get_bool();
}
else
{
*strError=string("Invalid open");
}
// lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FOLLOW_ONS,(unsigned char*)&is_open,1);
missing_open=false;
field_parsed=true;
}
if(d.name_ == "unrestrict")
{
if(!missing_anyone_can_issuemore)
{
*strError=string("unrestrict field can appear only once in the object");
}
if(mc_gState->m_Features->AnyoneCanIssueMore())
{
if( (d.value_.type() == str_type) && (d.value_.get_str() == "issue") )
{
is_anyone_can_issuemore=true;
}
else
{
*strError=string("Invalid unrestrict field");
}
}
else
{
throw JSONRPCError(RPC_NOT_SUPPORTED, "unrestrict field is not supported in this protocol version");
}
missing_anyone_can_issuemore=false;
field_parsed=true;
}
if(d.name_ == "restrict")
{
if(mc_gState->m_Features->PerAssetPermissions() == 0)
{
throw JSONRPCError(RPC_NOT_SUPPORTED, "Per-asset permissions not supported for this protocol version");
}
if(permissions == 0)
{
if(d.value_.type() == str_type)
{
permissions=mc_gState->m_Permissions->GetPermissionType(d.value_.get_str().c_str(),MC_PTP_SEND | MC_PTP_RECEIVE);
if(permissions == 0)
{
*strError=string("Invalid restrict");
}
}
else
{
*strError=string("Invalid restrict");
}
}
else
{
*strError=string("restrict field can appear only once in the object");
}
if(permissions)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_PERMISSIONS,(unsigned char*)&permissions,1);
}
field_parsed=true;
}
if(d.name_ == "details")
{
if(!missing_details)
{
*strError=string("details field can appear only once in the object");
}
ParseRawDetails(&(d.value_),lpDetails,lpDetailsScript,errorCode,strError);
missing_details=false;
field_parsed=true;
}
if(d.name_ == "create")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(is_open)
{
if(is_anyone_can_issuemore)
{
is_open |= 0x02;
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FOLLOW_ONS,(unsigned char*)&is_open,1);
}
else
{
if(is_anyone_can_issuemore)
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Asset cannot have unrestricted issue permission if follow-ons are not allowed");
}
}
if(strError->size() == 0)
{
lpDetailsScript->Clear();
script=lpDetails->GetData(0,&bytes);
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_ASSET,0,script,bytes);
if(err)
{
*strError=string("Invalid custom fields, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
}
}
return scriptOpReturn;
}
CScript RawDataScriptFollowOn(Value *param,mc_EntityDetails *entity,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
size_t bytes;
int err;
const unsigned char *script;
bool field_parsed;
bool missing_details=true;
lpDetails->Clear();
lpDetails->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "details")
{
if(!missing_details)
{
*strError=string("details field can appear only once in the object");
}
ParseRawDetails(&(d.value_),lpDetails,lpDetailsScript,errorCode,strError);
missing_details=false;
field_parsed=true;
}
if(d.name_ == "update")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());;
}
}
lpDetailsScript->Clear();
lpDetailsScript->SetEntity(entity->GetTxID()+MC_AST_SHORT_TXID_OFFSET);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
lpDetailsScript->Clear();
script=lpDetails->GetData(0,&bytes);
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_ASSET,1,script,bytes);
if(err)
{
*strError=string("Invalid custom fields, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
}
return scriptOpReturn;
}
bool RawDataParseRestrictParameter(const Value& param,uint32_t *restrict,uint32_t *permissions,int *errorCode,string *strError)
{
*restrict=0;
*permissions=0;
uint32_t match;
char* ptr;
char* start;
char* ptrEnd;
char c;
if(param.type() != str_type)
{
*strError="Invalid restrict field, should be string";
return false;
}
ptr=(char*)param.get_str().c_str();
ptrEnd=ptr+strlen(ptr);
start=ptr;
while(ptr<=ptrEnd)
{
c=*ptr;
if( (c == ',') || (c ==0x00))
{
if(ptr > start)
{
match=0;
if(( (ptr-start) == 5) && (memcmp(start,"write", ptr-start) == 0) ){match = 1; *permissions |= MC_PTP_WRITE ;}
if(( (ptr-start) == 4) && (memcmp(start,"read", ptr-start) == 0) )if(mc_gState->m_Features->ReadPermissions()){match = 1; *permissions |= MC_PTP_READ ;}
if(( (ptr-start) == 7) && (memcmp(start,"onchain", ptr-start) == 0) ){match = 1; *restrict |= MC_ENT_ENTITY_RESTRICTION_ONCHAIN;}
if(( (ptr-start) == 8) && (memcmp(start,"offchain", ptr-start) == 0) ){match = 1; *restrict |= MC_ENT_ENTITY_RESTRICTION_OFFCHAIN;}
if(match == 0)
{
*strError="Unsupported restriction";
*errorCode=RPC_NOT_SUPPORTED;
return false;
}
start=ptr+1;
}
}
ptr++;
}
return true;
}
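/* Illustrative inputs (hypothetical): "write,read" sets *permissions to
 * MC_PTP_WRITE | MC_PTP_READ (read only when the protocol supports read
 * permissions); "offchain" sets *restrict to
 * MC_ENT_ENTITY_RESTRICTION_OFFCHAIN. */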
CScript RawDataScriptCreateStream(Value *param,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
int err;
const unsigned char *script;
string entity_name;
int is_open=0;
int is_salted=0;
uint32_t restrict=0;
uint32_t permissions=MC_PTP_WRITE;
bool missing_name=true;
bool missing_open=true;
bool missing_details=true;
bool missing_salted=true;
lpDetails->Clear();
lpDetails->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "name")
{
if(!missing_name)
{
*strError=string("name field can appear only once in the object");
}
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
entity_name=d.value_.get_str();
if(entity_name.size())
{
if(entity_name.size() > MC_ENT_MAX_NAME_SIZE)
{
*strError=string("Invalid stream name - too long");
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_NAME,(const unsigned char*)(entity_name.c_str()),entity_name.size());
}
}
else
{
*strError=string("Invalid name");
}
missing_name=false;
field_parsed=true;
}
if(d.name_ == "open")
{
if(!missing_open)
{
*strError=string("open/restrict field can appear only once in the object");
}
if(d.value_.type() == bool_type)
{
is_open=d.value_.get_bool();
}
else
{
*strError=string("Invalid open");
}
if(mc_gState->m_Features->OffChainData() == 0)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_ANYONE_CAN_WRITE,(unsigned char*)&is_open,1);
}
else
{
permissions=is_open ? MC_PTP_NONE : MC_PTP_WRITE;
}
missing_open=false;
field_parsed=true;
}
if(d.name_ == "salted")
{
if(mc_gState->m_Features->SaltedChunks() == 0)
{
*strError=string("Salted chunks not supported for this protocol version");
*errorCode=RPC_NOT_SUPPORTED;
}
else
{
if(!missing_salted)
{
*strError=string("salted field can appear only once in the object");
}
if(d.value_.type() == bool_type)
{
is_salted=d.value_.get_bool();
}
else
{
*strError=string("Invalid salted");
}
missing_salted=false;
field_parsed=true;
}
}
if(d.name_ == "restrict")
{
if(mc_gState->m_Features->OffChainData() == 0)
{
*strError=string("Per-stream restrictions not supported for this protocol version");
*errorCode=RPC_NOT_SUPPORTED;
}
else
{
if(!missing_open)
{
*strError=string("open/restrict field can appear only once in the object");
}
RawDataParseRestrictParameter(d.value_,&restrict,&permissions,errorCode,strError);
/*
if(RawDataParseRestrictParameter(d.value_,&restrict,&permissions,errorCode,strError))
{
if(restrict & MC_ENT_ENTITY_RESTRICTION_OFFCHAIN)
{
if(restrict & MC_ENT_ENTITY_RESTRICTION_ONCHAIN)
{
*strError=string("Stream cannot be restricted from both onchain and offchain items");
*errorCode=RPC_NOT_SUPPORTED;
}
}
}
*/
missing_open=false;
field_parsed=true;
}
}
if(d.name_ == "details")
{
if(!missing_details)
{
*strError=string("details field can appear only once in the object");
}
ParseRawDetails(&(d.value_),lpDetails,lpDetailsScript,errorCode,strError);
missing_details=false;
field_parsed=true;
}
if(d.name_ == "create")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(missing_salted)
{
if(permissions & MC_PTP_READ)
{
is_salted=true;
}
}
if(mc_gState->m_Features->OffChainData())
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_PERMISSIONS,(unsigned char*)&permissions,1);
}
if(is_salted)
{
restrict |= MC_ENT_ENTITY_RESTRICTION_NEED_SALTED;
}
if(strError->size() == 0)
{
if(permissions & MC_PTP_READ)
{
restrict |= MC_ENT_ENTITY_RESTRICTION_ONCHAIN;
/*
if( (restrict & MC_ENT_ENTITY_RESTRICTION_ONCHAIN ) == 0 )
{
*strError="onchain restriction should be set for read-permissioned streams";
*errorCode=RPC_NOT_ALLOWED;
}
*/
}
}
if(restrict & MC_ENT_ENTITY_RESTRICTION_OFFCHAIN)
{
if(restrict & MC_ENT_ENTITY_RESTRICTION_ONCHAIN)
{
*strError=string("Stream cannot be restricted from both onchain and offchain items");
*errorCode=RPC_NOT_SUPPORTED;
}
}
if( restrict != 0 )
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_RESTRICTIONS,(unsigned char*)&restrict,1);
}
if(strError->size() == 0)
{
lpDetailsScript->Clear();
script=lpDetails->GetData(0,&bytes);
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_STREAM,0,script,bytes);
if(err)
{
*strError=string("Invalid custom fields, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
}
}
return scriptOpReturn;
}
bool AddParamNameValueToScript(const string param_name,const Value param_value,mc_Script *lpDetailsScript,int version,int *errorCode,string *strError)
{
int64_t value;
string name=param_name;
name.erase(std::remove(name.begin(), name.end(), '-'), name.end());
const mc_OneFFF_CoreParam *param=mc_gState->m_NetworkParams->FindParam(name.c_str());
if(param == NULL)
{
*errorCode=RPC_INVALID_PARAMETER;
*strError=string("Invalid parameter name");
return false;
}
int size;
unsigned char zero=0;
switch(param->m_Type & MC_PRM_DATA_TYPE_MASK)
{
case MC_PRM_BOOLEAN:
if(param_value.type() == bool_type)
{
value=param_value.get_bool() ? 1 : 0;
}
else
{
*errorCode=RPC_INVALID_PARAMETER;
*strError=string("Invalid parameter type, should be boolean");
return false;
}
break;
case MC_PRM_INT32:
case MC_PRM_INT64:
case MC_PRM_UINT32:
if(param->m_Type & MC_PRM_DECIMAL)
{
if(param_value.type() == real_type)
{
value=mc_gState->m_NetworkParams->DecimalToInt64(param_value.get_real());
}
else
{
*errorCode=RPC_INVALID_PARAMETER;
*strError=string("Invalid parameter type, should be numeric");
return false;
}
}
else
{
if(param_value.type() == int_type)
{
value=param_value.get_int64();
}
else
{
*errorCode=RPC_INVALID_PARAMETER;
*strError=string("Invalid parameter type, should be integer");
return false;
}
}
break;
default:
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("One of parameters cannot be upgraded by this protocol version");
return false;
}
size=mc_gState->m_NetworkParams->CanBeUpgradedByVersion(name.c_str(),version,0);
if(size < 0)
{
*errorCode=RPC_INVALID_PARAMETER;
*strError=string("Invalid parameter name");
return false;
}
if(size == 0)
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("One of parameters cannot be upgraded by this protocol version");
return false;
}
lpDetailsScript->SetData((unsigned char*)name.c_str(),name.size());
lpDetailsScript->SetData((unsigned char*)&zero,1);
lpDetailsScript->SetData((unsigned char*)&size,MC_PRM_PARAM_SIZE_BYTES);
lpDetailsScript->SetData((unsigned char*)&value,size);
return true;
}
CScript RawDataScriptCreateUpgrade(Value *param,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
const unsigned char *script;
string entity_name;
int protocol_version;
uint32_t startblock;
bool missing_name=true;
bool missing_startblock=true;
bool missing_details=true;
lpDetails->Clear();
lpDetails->AddElement();
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
protocol_version=-1;
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "name")
{
if(!missing_name)
{
*strError=string("name field can appear only once in the object");
}
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
entity_name=d.value_.get_str();
if(entity_name.size())
{
if(entity_name.size() > MC_ENT_MAX_NAME_SIZE)
{
*strError=string("Invalid upgrade name - too long");
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_NAME,(const unsigned char*)(entity_name.c_str()),entity_name.size());
}
}
else
{
*strError=string("Invalid name");
}
missing_name=false;
field_parsed=true;
}
if(d.name_ == "startblock")
{
if(!missing_startblock)
{
*strError=string("startblock field can appear only once in the object");
}
if(d.value_.type() == int_type)
{
if( (d.value_.get_int64() >= 0) && (d.value_.get_int64() <= 0xFFFFFFFF) )
{
startblock=(uint32_t)(d.value_.get_int64());
if(startblock > 0)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_UPGRADE_START_BLOCK,(unsigned char*)&startblock,4);
}
}
else
{
*strError=string("Invalid startblock");
}
}
else
{
*strError=string("Invalid startblock");
}
missing_startblock=false;
field_parsed=true;
}
if(d.name_ == "details")
{
if(!missing_details)
{
*strError=string("details field can appear only once in the object");
}
if(d.value_.type() == obj_type)
{
protocol_version=-1;
BOOST_FOREACH(const Pair& p, d.value_.get_obj())
{
if(p.name_ == "protocol-version")
{
if( (p.value_.type() == int_type) && (p.value_.get_int() > 0) )
{
if(protocol_version < 0)
{
protocol_version=p.value_.get_int();
}
}
else
{
*strError=string("Invalid protocol-version");
}
}
else
{
if(mc_gState->m_Features->ParameterUpgrades())
{
AddParamNameValueToScript(p.name_,p.value_,lpDetailsScript,0,errorCode,strError);
}
else
{
*strError=string("Invalid details");
}
}
}
script = lpDetailsScript->GetData(0,&bytes);
if(strError->size() == 0)
{
if( (protocol_version <= 0) && (bytes == 0) )
{
*strError=string("Missing protocol-version");
}
}
if(strError->size() == 0)
{
if(protocol_version > 0)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_UPGRADE_PROTOCOL_VERSION,(unsigned char*)&protocol_version,4);
}
if(bytes)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_UPGRADE_CHAIN_PARAMS,script,bytes);
}
}
}
missing_details=false;
field_parsed=true;
}
if(d.name_ == "create")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(strError->size() == 0)
{
if(missing_details)
{
*strError=string("Missing details");
}
}
if(strError->size() == 0)
{
int err;
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_UPGRADE,0,script,bytes);
if(err)
{
*strError=string("Invalid custom fields, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
}
}
return scriptOpReturn;
}
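// Returns true when JavaScript code of the given size is too large to inline in
// the entity details and the protocol supports carrying it in an extended-details element.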
bool mc_JSInExtendedScript(size_t size)
{
if( (size > 32768) || (size > MAX_SCRIPT_ELEMENT_SIZE-128) )
{
if(mc_gState->m_Features->ExtendedEntityDetails())
{
return true;
}
}
return false;
}
CScript RawDataScriptCreateFilter(Value *param,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
const unsigned char *script;
string entity_name,filter_code,filter_main_name;
uint32_t filter_type=MC_FLT_TYPE_TX;
string js;
string library_code="";
bool js_extended=false;
bool missing_name=true;
bool missing_code=true;
bool missing_for=true;
bool missing_libraries=true;
lpDetails->Clear();
lpDetails->AddElement();
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "name")
{
if(!missing_name)
{
*strError=string("name field can appear only once in the object");
}
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
entity_name=d.value_.get_str();
if(entity_name.size())
{
if(entity_name.size() > MC_ENT_MAX_NAME_SIZE)
{
*strError=string("Invalid filter name - too long");
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_NAME,(const unsigned char*)(entity_name.c_str()),entity_name.size());
}
}
else
{
*strError=string("Invalid name");
}
missing_name=false;
field_parsed=true;
}
if(d.name_ == "for")
{
if(!missing_for)
{
*strError=string("for field can appear only once in the object");
}
ParseFilterRestrictionsForField(d.value_,lpDetailsScript,MC_FLT_TYPE_TX);
script = lpDetailsScript->GetData(0,&bytes);
if(bytes)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_RESTRICTIONS,script,bytes);
}
missing_for=false;
field_parsed=true;
}
if(d.name_ == "libraries")
{
if(mc_gState->m_Features->Libraries())
{
if(!missing_libraries)
{
*strError=string("libraries field can appear only once in the object");
}
library_code=ParseFilterOptionsLibraryField(d.value_,lpDetailsScript,false);
script = lpDetailsScript->GetData(0,&bytes);
if(bytes)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_LIBRARIES,script,bytes);
}
missing_libraries=false;
field_parsed=true;
}
}
if(d.name_ == "code")
{
if(!missing_code)
{
*strError=string("code field can appear only once in the object");
}
if(d.value_.type() == str_type)
{
js_extended=mc_JSInExtendedScript(d.value_.get_str().size());
if(!js_extended)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_CODE,(unsigned char*)d.value_.get_str().c_str(),d.value_.get_str().size());
}
else
{
js=d.value_.get_str();
}
}
else
{
*strError=string("Invalid code field type");
}
filter_code=d.value_.get_str();
missing_code=false;
field_parsed=true;
}
if(d.name_ == "create")
{
if (strcmp(d.value_.get_str().c_str(),"streamfilter") == 0)
{
filter_type=MC_FLT_TYPE_STREAM;
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_TYPE,(unsigned char*)&filter_type,4);
field_parsed=true;
}
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
filter_main_name=MC_FLT_MAIN_NAME_TX;
if(strError->size() == 0)
{
if(filter_type != MC_FLT_TYPE_TX)
{
filter_main_name=MC_FLT_MAIN_NAME_STREAM;
if(!missing_for)
{
*strError=string("for field is allowed only for tx filters");
*errorCode=RPC_NOT_ALLOWED;
}
}
}
if(strError->size() == 0)
{
if(missing_code)
{
*strError=string("Missing code");
}
else
{
mc_Filter *worker=new mc_Filter;
string strFilterError;
string test_code=filter_code;
if(library_code.size())
{
test_code=library_code + MC_FLT_LIBRARY_GLUE + filter_code;
}
int err=pFilterEngine->CreateFilter(test_code.c_str(),filter_main_name,pFFF_CoreFilterEngine->m_CallbackNames[filter_type],worker,strFilterError);
delete worker;
if(err)
{
*strError=string("Couldn't create filter");
*errorCode=RPC_INTERNAL_ERROR;
}
else
{
if(strFilterError.size())
{
*strError=strprintf("Couldn't compile filter code: %s",strFilterError.c_str());
}
}
}
}
if(strError->size() == 0)
{
int err;
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_FILTER,0,script,bytes);
if(err)
{
*strError=string("Invalid code, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
if(js_extended)
{
lpDetails->Clear();
lpDetails->AddElement();
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_CODE,(unsigned char*)js.c_str(),js.size());
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
lpDetailsScript->SetExtendedDetails(script,bytes);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes);
//
// scriptOpReturn << vector<unsigned char>((unsigned char*)js.c_str(), (unsigned char*)js.c_str() + js.size());
}
}
}
return scriptOpReturn;
}
CScript RawDataScriptCreateVariable(Value *param,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
const unsigned char *script;
string entity_name;
bool js_extended=false;
Value varvalue=Value::null;
size_t elem_size;
const unsigned char *elem;
bool missing_name=true;
bool missing_value=true;
lpDetails->Clear();
lpDetails->AddElement();
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
unsigned char b=1;
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FOLLOW_ONS,&b,1);
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "name")
{
if(!missing_name)
{
*strError=string("name field can appear only once in the object");
}
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
entity_name=d.value_.get_str();
if(entity_name.size())
{
if(entity_name.size() > MC_ENT_MAX_NAME_SIZE)
{
*strError=string("Invalid variable name - too long");
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_NAME,(const unsigned char*)(entity_name.c_str()),entity_name.size());
}
}
else
{
*strError=string("Invalid name");
}
missing_name=false;
field_parsed=true;
}
if(d.name_ == "value")
{
if(!missing_value)
{
*strError=string("value field can appear only once in the object");
}
size_t max_size=MC_AST_MAX_NOT_EXTENDED_VARIABLE_SIZE;
lpDetailsScript->Clear();
ParseRawValue(&(d.value_),lpDetails,lpDetailsScript,&max_size,errorCode,strError);
if(max_size > MC_AST_MAX_NOT_EXTENDED_VARIABLE_SIZE)
{
js_extended=true;
varvalue=d.value_;
}
lpDetailsScript->Clear();
missing_value=false;
field_parsed=true;
}
if(d.name_ == "create")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(strError->size() == 0)
{
int err;
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_VARIABLE,0,script,bytes);
if(err)
{
*strError=string("Invalid value, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
if(js_extended)
{
lpDetails->Clear();
lpDetails->AddElement();
ParseRawValue(&(varvalue),lpDetails,lpDetailsScript,NULL,errorCode,strError);
elem=lpDetails->GetData(0,&elem_size);
lpDetailsScript->Clear();
lpDetailsScript->SetExtendedDetails(elem,elem_size);
elem = lpDetailsScript->GetData(0,&elem_size);
scriptOpReturn << vector<unsigned char>(elem, elem + elem_size);
}
}
}
return scriptOpReturn;
}
CScript RawDataScriptUpdateVariable(Value *param,mc_EntityDetails *entity,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
const unsigned char *script;
bool js_extended=false;
Value varvalue=Value::null;
size_t elem_size;
const unsigned char *elem;
int err;
bool missing_value=true;
lpDetails->Clear();
lpDetails->AddElement();
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "value")
{
if(!missing_value)
{
*strError=string("value field can appear only once in the object");
}
size_t max_size=MC_AST_MAX_NOT_EXTENDED_VARIABLE_SIZE;
lpDetailsScript->Clear();
ParseRawValue(&(d.value_),lpDetails,lpDetailsScript,&max_size,errorCode,strError);
if(max_size > MC_AST_MAX_NOT_EXTENDED_VARIABLE_SIZE)
{
js_extended=true;
varvalue=d.value_;
}
lpDetailsScript->Clear();
missing_value=false;
field_parsed=true;
}
if(d.name_ == "update")field_parsed=true;
if(!field_parsed)
{
            *strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
lpDetailsScript->Clear();
lpDetailsScript->SetEntity(entity->GetTxID()+MC_AST_SHORT_TXID_OFFSET);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
lpDetailsScript->Clear();
script=lpDetails->GetData(0,&bytes);
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_VARIABLE,1,script,bytes);
if(err)
{
*strError=string("Invalid custom fields, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
if(js_extended)
{
lpDetails->Clear();
lpDetails->AddElement();
ParseRawValue(&(varvalue),lpDetails,lpDetailsScript,NULL,errorCode,strError);
elem=lpDetails->GetData(0,&elem_size);
lpDetailsScript->Clear();
lpDetailsScript->SetExtendedDetails(elem,elem_size);
elem = lpDetailsScript->GetData(0,&elem_size);
scriptOpReturn << vector<unsigned char>(elem, elem + elem_size);
}
}
return scriptOpReturn;
}
CScript RawDataScriptCreateLibrary(Value *param,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
const unsigned char *script;
string entity_name;
bool js_extended=false;
string filter_code;
string js;
bool missing_name=true;
bool missing_code=true;
bool missing_updatemode=true;
lpDetails->Clear();
lpDetails->AddElement();
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "name")
{
if(!missing_name)
{
*strError=string("name field can appear only once in the object");
}
if(d.value_.type() != null_type && !d.value_.get_str().empty())
{
entity_name=d.value_.get_str();
if(entity_name.size())
{
if(entity_name.size() > MC_ENT_MAX_NAME_SIZE)
{
*strError=string("Invalid library name - too long");
}
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_NAME,(const unsigned char*)(entity_name.c_str()),entity_name.size());
}
}
else
{
*strError=string("Invalid name");
}
missing_name=false;
field_parsed=true;
}
if(d.name_ == "updatemode")
{
if(!missing_updatemode)
{
*strError=string("updatemode field can appear only once in the object");
}
if(d.value_.type() == str_type)
{
unsigned char b=255;
if(d.value_.get_str() == "none")b=0x00;
if(d.value_.get_str() == "instant")b=0x01;
if(d.value_.get_str() == "approve")b=0x04;
entity_name=d.value_.get_str();
if(b == 255)
{
*strError=string("Invalid updatemode field");
}
else
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FOLLOW_ONS,&b,1);
}
}
else
{
*strError=string("Invalid updatemode field");
}
missing_updatemode=false;
field_parsed=true;
}
if(d.name_ == "code")
{
if(!missing_code)
{
*strError=string("code field can appear only once in the object");
}
if(d.value_.type() == str_type)
{
js_extended=mc_JSInExtendedScript(d.value_.get_str().size());
if(!js_extended)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_CODE,(unsigned char*)d.value_.get_str().c_str(),d.value_.get_str().size());
}
else
{
js=d.value_.get_str();
}
}
else
{
*strError=string("Invalid code field type");
}
filter_code=d.value_.get_str();
missing_code=false;
field_parsed=true;
}
if(d.name_ == "create")field_parsed=true;
if(!field_parsed)
{
*strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(strError->size() == 0)
{
if(missing_updatemode)
{
*strError=string("Missing updatemode field");
}
}
if(strError->size() == 0)
{
if(missing_code)
{
*strError=string("Missing code");
}
else
{
std::vector <std::string> callback_names;
int err;
string dummy_main_function=MC_FLT_MAIN_NAME_TEST;
string test_code=filter_code+MC_FLT_LIBRARY_GLUE+"function "+dummy_main_function+"(){} ";
mc_Filter *worker=new mc_Filter;
string strFilterError;
err=pFilterEngine->CreateFilter(test_code,dummy_main_function,callback_names,worker,strFilterError);
delete worker;
if(err)
{
*strError=string("Couldn't create filter");
*errorCode=RPC_INTERNAL_ERROR;
}
else
{
if(strFilterError.size())
{
*strError=strprintf("Couldn't compile filter code: %s",strFilterError.c_str());
}
}
}
}
if(strError->size() == 0)
{
int err;
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_LIBRARY,0,script,bytes);
if(err)
{
*strError=string("Invalid code, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
if(js_extended)
{
lpDetails->Clear();
lpDetails->AddElement();
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_CODE,(unsigned char*)js.c_str(),js.size());
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
lpDetailsScript->SetExtendedDetails(script,bytes);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes);
}
}
}
return scriptOpReturn;
}
CScript RawDataScriptUpdateLibrary(Value *param,mc_EntityDetails *entity,mc_Script *lpDetails,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
bool field_parsed;
size_t bytes;
const unsigned char *script;
bool js_extended=false;
string update_name;
int err;
string filter_code;
string js;
bool missing_code=true;
bool missing_updatename=true;
lpDetails->Clear();
lpDetails->AddElement();
lpDetailsScript->Clear();
lpDetailsScript->AddElement();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "updatename")
{
if(!missing_updatename)
{
*strError=string("updatename field can appear only once in the object");
}
if(d.value_.type() == str_type)
{
update_name=d.value_.get_str();
if(update_name.size())
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_UPDATE_NAME,(unsigned char*)(update_name.c_str()),update_name.size());
}
else
{
*strError=string("updatename cannot be empty");
}
}
else
{
*strError=string("Invalid updatename");
}
missing_updatename=false;
field_parsed=true;
}
if(d.name_ == "code")
{
if(!missing_code)
{
*strError=string("code field can appear only once in the object");
}
if(d.value_.type() == str_type)
{
js_extended=mc_JSInExtendedScript(d.value_.get_str().size());
if(!js_extended)
{
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_CODE,(unsigned char*)d.value_.get_str().c_str(),d.value_.get_str().size());
}
else
{
js=d.value_.get_str();
}
}
else
{
*strError=string("Invalid code field type");
}
filter_code=d.value_.get_str();
missing_code=false;
field_parsed=true;
}
if(d.name_ == "update")field_parsed=true;
if(!field_parsed)
{
            *strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(strError->size() == 0)
{
if(missing_updatename)
{
*strError=string("Missing updatename field");
}
}
if(strError->size() == 0)
{
if(missing_code)
{
*strError=string("Missing code");
}
else
{
std::vector <std::string> callback_names;
int err;
string dummy_main_function=MC_FLT_MAIN_NAME_TEST;
string test_code=filter_code+MC_FLT_LIBRARY_GLUE+"function "+dummy_main_function+"(){} ";
mc_Filter *worker=new mc_Filter;
string strFilterError;
err=pFilterEngine->CreateFilter(test_code,dummy_main_function,callback_names,worker,strFilterError);
delete worker;
if(err)
{
*strError=string("Couldn't create filter");
*errorCode=RPC_INTERNAL_ERROR;
}
else
{
if(strFilterError.size())
{
*strError=strprintf("Couldn't compile filter code: %s",strFilterError.c_str());
}
}
}
}
lpDetailsScript->Clear();
lpDetailsScript->SetEntity(entity->GetTxID()+MC_AST_SHORT_TXID_OFFSET);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
lpDetailsScript->Clear();
script=lpDetails->GetData(0,&bytes);
err=lpDetailsScript->SetNewEntityType(MC_ENT_TYPE_LIBRARY,1,script,bytes);
if(err)
{
*strError=string("Invalid custom fields, too long");
}
else
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
if(js_extended)
{
lpDetails->Clear();
lpDetails->AddElement();
lpDetails->SetSpecialParamValue(MC_ENT_SPRM_FILTER_CODE,(unsigned char*)js.c_str(),js.size());
script=lpDetails->GetData(0,&bytes);
lpDetailsScript->Clear();
lpDetailsScript->SetExtendedDetails(script,bytes);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes);
}
}
return scriptOpReturn;
}
CScript RawDataScriptPublish(Value *param,mc_EntityDetails *entity,uint32_t *data_format,mc_Script *lpDetailsScript,vector<uint256>* vChunkHashes,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
vector<unsigned char> vValue;
vector<unsigned char> vKey;
Array vKeys;
size_t bytes;
const unsigned char *script;
bool field_parsed;
bool missing_data=true;
bool missing_key=true;
uint32_t in_options,out_options;
in_options=MC_RFD_OPTION_NONE;
out_options=MC_RFD_OPTION_NONE;
vKeys.clear();
BOOST_FOREACH(const Pair& d, param->get_obj())
{
if(d.name_ == "options")
{
if( mc_gState->m_Features->OffChainData() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Format options are not supported by this protocol version");
goto exitlbl;
}
if(d.value_.type() != null_type && (d.value_.type()==str_type))
{
if(d.value_.get_str() == "offchain")
{
in_options |= MC_RFD_OPTION_OFFCHAIN;
}
else
{
if(d.value_.get_str().size())
{
*strError=string("Stream item options must be offchain or empty");
}
}
}
else
{
*strError=string("Stream item options must be offchain or empty");
}
field_parsed=true;
}
}
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "key")
{
if(!missing_key)
{
*strError=string("only one of the key fields can appear in the object");
}
if(d.value_.type() != null_type && (d.value_.type()==str_type))
{
vKeys.push_back(d.value_);
}
else
{
*strError=string("Invalid key");
}
field_parsed=true;
missing_key=false;
}
if(d.name_ == "keys")
{
if( mc_gState->m_Features->MultipleStreamKeys() == 0 )
{
*errorCode=RPC_NOT_SUPPORTED;
*strError=string("Multiple keys are not supported by this protocol version");
goto exitlbl;
}
if(!missing_key)
{
*strError=string("only one of the key fields can appear in the object");
}
if(d.value_.type() == array_type)
{
vKeys=d.value_.get_array();
if(vKeys.size() == 0)
{
*strError=string("Invalid keys - should be non-empty array");
}
}
else
{
*strError=string("Invalid keys - should be array");
}
field_parsed=true;
missing_key=false;
}
if(d.name_ == "data")
{
if(!missing_data)
{
*strError=string("data field can appear only once in the object");
}
vValue=ParseRawFormattedData(&(d.value_),data_format,lpDetailsScript,in_options,&out_options,errorCode,strError);
field_parsed=true;
missing_data=false;
}
if(d.name_ == "options")
{
field_parsed=true;
}
if(d.name_ == "for")field_parsed=true;
// if(d.name_ == "format")field_parsed=true;
if(!field_parsed)
{
            *strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(missing_data)
{
*strError=string("Missing data field");
}
if(missing_key)
{
*strError=string("Missing key field");
if(mc_gState->m_Features->MultipleStreamKeys())
{
*strError=string("Missing keys field");
}
}
if(strError->size() == 0)
{
lpDetailsScript->Clear();
lpDetailsScript->SetEntity(entity->GetTxID()+MC_AST_SHORT_TXID_OFFSET);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
for(int i=0;i<(int)vKeys.size();i++)
{
lpDetailsScript->Clear();
if(vKeys[i].type() != null_type && (vKeys[i].type()==str_type))
{
vKey=vector<unsigned char>(vKeys[i].get_str().begin(), vKeys[i].get_str().end());
if(vKey.size() > MC_ENT_MAX_ITEM_KEY_SIZE)
{
                    throw JSONRPCError(RPC_INVALID_PARAMETER, "Item key is too long");
}
}
else
{
*strError=string("key should be string");
goto exitlbl;
}
if(lpDetailsScript->SetItemKey(&vKey[0],vKey.size()) == MC_ERR_NOERROR)
{
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
}
}
if(entity->AnyoneCanRead() == 0)
{
pEF->LIC_RPCVerifyFeature(MC_EFT_STREAM_READ_RESTRICTED_WRITE,"Publishing to read-restricted stream");
}
if(entity->Restrictions() & MC_ENT_ENTITY_RESTRICTION_NEED_SALTED)
{
out_options |= MC_RFD_OPTION_SALTED;
}
if(in_options & MC_RFD_OPTION_OFFCHAIN)
{
AppendOffChainFormatData(*data_format,out_options,lpDetailsScript,vValue,vChunkHashes,errorCode,strError);
if(strError->size())
{
goto exitlbl;
}
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
scriptOpReturn << OP_RETURN;
}
else
{
if(out_options & MC_RFD_OPTION_OFFCHAIN)
{
*strError=string("chunks data type is not allowed with missing options field");
*errorCode=RPC_NOT_ALLOWED;
goto exitlbl;
}
if(*data_format != MC_SCR_DATA_FORMAT_UNKNOWN)
{
lpDetailsScript->Clear();
lpDetailsScript->SetDataFormat(*data_format);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
}
if(vValue.size())
{
scriptOpReturn << OP_RETURN << vValue;
}
else
{
scriptOpReturn << OP_RETURN;
}
}
}
exitlbl:
return scriptOpReturn;
}
CScript RawDataScriptApprove(Value *param,mc_EntityDetails *entity,mc_Script *lpDetailsScript,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
vector<unsigned char> vValue;
vector<unsigned char> vKey;
size_t bytes;
const unsigned char *script;
bool field_parsed;
    bool is_approve=true;
bool missing_approve=true;
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "approve")
{
if(!missing_approve)
{
*strError=string("approve field can appear only once in the object");
}
if(d.value_.type() == bool_type)
{
is_approve=d.value_.get_bool();
}
else
{
*strError=string("Invalid approve");
}
field_parsed=true;
missing_approve=false;
}
if(d.name_ == "for")field_parsed=true;
if(!field_parsed)
{
            *strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(missing_approve)
{
*strError=string("Missing approve field");
}
if(strError->size() == 0)
{
lpDetailsScript->Clear();
lpDetailsScript->SetEntity(entity->GetTxID()+MC_AST_SHORT_TXID_OFFSET);
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
lpDetailsScript->Clear();
lpDetailsScript->SetApproval(is_approve, mc_TimeNowAsUInt());
script = lpDetailsScript->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP;
scriptOpReturn << OP_RETURN;
}
return scriptOpReturn;
}
CScript RawDataScriptInputCache(Value *param,mc_Script *lpDetails,int *errorCode,string *strError)
{
CScript scriptOpReturn=CScript();
size_t bytes;
const unsigned char *script;
bool field_parsed;
BOOST_FOREACH(const Pair& d, param->get_obj())
{
field_parsed=false;
if(d.name_ == "inputcache")
{
if(d.value_.type() != array_type)
{
*strError=string("Array should be specified for inputcache");
}
else
{
int cs_offset,cs_vin,cs_size;
string cs_script="";
Array csa=d.value_.get_array();
lpDetails->Clear();
lpDetails->SetCachedScript(0,&cs_offset,-1,NULL,-1);
for(int csi=0;csi<(int)csa.size();csi++)
{
if(strError->size() == 0)
{
if(csa[csi].type() != obj_type)
{
*strError=string("Elements of inputcache should be objects");
}
cs_vin=-1;
cs_size=-1;
BOOST_FOREACH(const Pair& csf, csa[csi].get_obj())
{
bool cs_parsed=false;
if(csf.name_ == "vin")
{
cs_parsed=true;
if(csf.value_.type() != int_type)
{
*strError=string("vin should be integer");
}
else
{
cs_vin=csf.value_.get_int();
}
}
if(csf.name_ == "scriptPubKey")
{
cs_parsed=true;
if(csf.value_.type() != str_type)
{
*strError=string("scriptPubKey should be string");
}
else
{
cs_script=csf.value_.get_str();
cs_size=cs_script.size()/2;
}
}
if(!cs_parsed)
{
*strError=string("Invalid field: ") + csf.name_;
}
}
if(strError->size() == 0)
{
if(cs_vin<0)
{
*strError=string("Missing vin field");
}
}
if(strError->size() == 0)
{
if(cs_size<0)
{
*strError=string("Missing scriptPubKey field");
}
}
if(strError->size() == 0)
{
bool fIsHex;
vector<unsigned char> dataData(ParseHex(cs_script.c_str(),fIsHex));
if(!fIsHex)
{
*strError=string("scriptPubKey should be hexadecimal string");
}
else
{
lpDetails->SetCachedScript(cs_offset,&cs_offset,cs_vin,&dataData[0],cs_size);
}
}
}
}
}
field_parsed=true;
}
if(!field_parsed)
{
            *strError=strprintf("Invalid field: %s",d.name_.c_str());
}
}
if(strError->size() == 0)
{
script=lpDetails->GetData(0,&bytes);
scriptOpReturn << vector<unsigned char>(script, script + bytes) << OP_DROP << OP_RETURN;
}
return scriptOpReturn;
}
CScript ParseRawMetadata(Value param,uint32_t allowed_objects,mc_EntityDetails *given_entity,mc_EntityDetails *found_entity)
{
vector<uint256> vChunkHashes;
string strError="";
int errorCode=RPC_INVALID_PARAMETER;
uint32_t data_format;
mc_EntityDetails entity;
CScript scriptOpReturn=CScript();
mc_Script *lpDetailsScript=mc_gState->m_TmpBuffers->m_RpcScript1;
lpDetailsScript->Clear();
mc_Script *lpDetails=mc_gState->m_TmpBuffers->m_RpcScript2;
lpDetails->Clear();
uint32_t param_type=ParseRawDataParamType(¶m,given_entity,&entity,&data_format,&errorCode,&strError);
if(strError.size())
{
goto exitlbl;
}
if(param_type == MC_DATA_API_PARAM_TYPE_NONE)
{
strError=string("Unrecognized parameter format");
goto exitlbl;
}
if( (param_type & allowed_objects) == 0 )
{
if(param_type != MC_DATA_API_PARAM_TYPE_EMPTY_RAW)
{
strError=string("Keyword not allowed in this API");
errorCode=RPC_NOT_ALLOWED;
}
goto exitlbl;
}
if(found_entity)
{
memcpy(found_entity,&entity,sizeof(mc_EntityDetails));
}
switch(param_type)
{
case MC_DATA_API_PARAM_TYPE_EMPTY_RAW:
case MC_DATA_API_PARAM_TYPE_RAW:
scriptOpReturn=RawDataScriptRawHex(¶m,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_FORMATTED:
scriptOpReturn=RawDataScriptFormatted(¶m,&data_format,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_ISSUE:
scriptOpReturn=RawDataScriptIssue(¶m,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_FOLLOWON:
scriptOpReturn=RawDataScriptFollowOn(¶m,&entity,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_CREATE_STREAM:
scriptOpReturn=RawDataScriptCreateStream(¶m,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_PUBLISH:
scriptOpReturn=RawDataScriptPublish(¶m,&entity,&data_format,lpDetailsScript,&vChunkHashes,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_CREATE_UPGRADE:
scriptOpReturn=RawDataScriptCreateUpgrade(¶m,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_CREATE_FILTER:
scriptOpReturn=RawDataScriptCreateFilter(¶m,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_CREATE_VAR:
scriptOpReturn=RawDataScriptCreateVariable(¶m,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_UPDATE_VAR:
scriptOpReturn=RawDataScriptUpdateVariable(¶m,&entity,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_CREATE_LIB:
scriptOpReturn=RawDataScriptCreateLibrary(¶m,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_UPDATE_LIB:
scriptOpReturn=RawDataScriptUpdateLibrary(¶m,&entity,lpDetails,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_APPROVAL:
scriptOpReturn=RawDataScriptApprove(¶m,&entity,lpDetailsScript,&errorCode,&strError);
break;
case MC_DATA_API_PARAM_TYPE_CIS:
scriptOpReturn=RawDataScriptInputCache(¶m,lpDetailsScript,&errorCode,&strError);
break;
}
exitlbl:
if(strError.size())
{
throw JSONRPCError(errorCode, strError);
}
return scriptOpReturn;
}
|
{"hexsha": "92674691fbd557b12ea1534586bfc7d7e6df1224", "size": 103465, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/rpc/rpcrawdata.cpp", "max_stars_repo_name": "fffnerwork/FFF_Protocol_Core", "max_stars_repo_head_hexsha": "94d75cc6b3a94e06fe6dde75967e665db26a7649", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 67.0, "max_stars_repo_stars_event_min_datetime": "2021-10-05T05:53:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T06:23:41.000Z", "max_issues_repo_path": "src/rpc/rpcrawdata.cpp", "max_issues_repo_name": "jichengbin/FFF_Protocol_Core", "max_issues_repo_head_hexsha": "94d75cc6b3a94e06fe6dde75967e665db26a7649", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2021-10-05T07:50:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-09T09:40:30.000Z", "max_forks_repo_path": "src/rpc/rpcrawdata.cpp", "max_forks_repo_name": "jichengbin/FFF_Protocol_Core", "max_forks_repo_head_hexsha": "94d75cc6b3a94e06fe6dde75967e665db26a7649", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24.0, "max_forks_repo_forks_event_min_datetime": "2021-10-05T06:34:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T09:14:36.000Z", "avg_line_length": 39.028668427, "max_line_length": 225, "alphanum_fraction": 0.4265113807, "num_tokens": 18768}
|
"""
Integrate a `TaylorNModel` with respect to variable number `which`.
Optionally add `x0` (the constant of integration) to the result.
"""
function integrate(f::TaylorNModel, which=1, x0=0)
    p = integrate(f.p, which)  # not necessary if f.p is already a complete Taylor series, in which case p == f.p
Δ = integral_bound(f, which)
g = TaylorNModel(f.n, f.x0, f.I, p, Δ)
g.p[0] += x0 # constant term
# for k in 0:(x0.order)
# TaylorSeries.add!(g.p, g.p, x0, k)
# end
return g
end
doc"""
Bound the integral of a `TaylorNModel` `f` with respect to the variable `which`.
"""
function integral_bound(f::TaylorNModel, which)
high_order_term = f.p[end] # a HomogeneousPolynomial
Δ = ( bound(high_order_term, f.x0, f.I) + f.Δ ) * diam(f.I[which])
return Δ
end
bound(f::HomogeneousPolynomial, x0, I) = f( [(I - x0)...] )
# applies the hom poly to the bounds
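
# Hedged usage sketch (not part of the source; arguments follow the
# TaylorNModel(n, x0, I, p, Δ) signature used above, with hypothetical values):
#
#     f = TaylorNModel(3, x0, I, p, Δ)   # a degree-3 model on the box I around x0
#     g = integrate(f, 1, 0.0)           # ∫ f dx₁ with constant of integration 0.0
#     δ = integral_bound(f, 1)           # remainder bound contributed by the integral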
|
{"hexsha": "7bd4b35c436b21f5cd4d8009d309bd6683b167bf", "size": 880, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TaylorN/integrate.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/TaylorModels.jl-314ce334-5f6e-57ae-acf6-00b6e903104a", "max_stars_repo_head_hexsha": "330b5b79fceef074979e100d14a56fd319c24a72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2018-05-07T02:41:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T05:20:38.000Z", "max_issues_repo_path": "src/TaylorN/integrate.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/TaylorModels.jl-314ce334-5f6e-57ae-acf6-00b6e903104a", "max_issues_repo_head_hexsha": "330b5b79fceef074979e100d14a56fd319c24a72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 115, "max_issues_repo_issues_event_min_datetime": "2018-04-16T17:33:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T20:38:34.000Z", "max_forks_repo_path": "src/TaylorN/integrate.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/TaylorModels.jl-314ce334-5f6e-57ae-acf6-00b6e903104a", "max_forks_repo_head_hexsha": "330b5b79fceef074979e100d14a56fd319c24a72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2019-03-05T19:24:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T10:39:28.000Z", "avg_line_length": 23.1578947368, "max_line_length": 108, "alphanum_fraction": 0.6409090909, "num_tokens": 277}
|
# coding: utf-8
# # Influence of parameter choice on the phase diagram
# # To study to what extent the phase diagram depends on the cost of infection $c_{\rm inf}$ and on the trade-off shapes $c_{\rm def}(c_{\rm con}), c_{\rm uptake}(p_{\rm uptake})$, we plot the phase diagram for a number of different choices in the following.
# Import packages.
# In[6]:
from cycler import cycler
import sys
sys.path.append('../lib')
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
from matplotlib import transforms, gridspec, ticker
import palettable
import shapely.ops
import plotting
import evolimmune
import misc
import analysis
plt.style.use(['paper'])
plt.rc('lines', linewidth=1.0)
plt.rc('axes', labelpad=1.0)
eps = 1e-8
# Read in and summarize data
# In[7]:
df = analysis.loadnpz('data/phases.npz')
analysis.intelligent_describe(df, nunique=10)
dfg = df.groupby(['lambda_', 'mus', 'cup'])
nparams = len(dfg)
# define colors used in plot and phasenames
# In[8]:
black = matplotlib.rcParams['text.color']
colors = np.asarray(palettable.colorbrewer.qualitative.Set3_6.mpl_colors)[[4, 0, 2, 3, 5, 1]]
strategies_s = ['a', 'p', 'o', 'i', 'm', 'c']
color_dict = dict(zip(strategies_s, colors))
linecolors = palettable.colorbrewer.qualitative.Dark2_6.mpl_colors
plt.rc('axes', prop_cycle=cycler('color', linecolors))
phasenames = misc.DefaultIdentityDict(o='$i$', i='$ib$')
# Define plotting functions
# In[9]:
def plotmus(ax, musstr, alpha=1.0, label=True):
epsilon = np.linspace(0.0, 1.0, 100)
mus = evolimmune.mus_from_str(musstr)
mu1, mu2 = mus(epsilon)
if label:
ax.plot(mu1, mu2, c=linecolors[1], alpha=alpha, label='defense')
else:
ax.plot(mu1, mu2, c=linecolors[1], alpha=alpha)
ax.plot(mu1[0], mu2[0], 'o', markeredgecolor='none', markersize=3, c=linecolors[1], alpha=alpha)
def plotstatecosts(ax, musstr, musstrref=None, lambda_=None):
if lambda_:
ax.text(1, 1, '${0}={1}$'.format(r'c_{\rm inf}', lambda_),
transform=ax.transAxes, ha='right', va='top')
if musstrref is not None:
plotmus(ax, musstrref, alpha=0.25, label=False)
plotmus(ax, musstr)
ax.set_xlabel(evolimmune.varname_to_tex['cconstitutive'])
ax.set_ylabel(evolimmune.varname_to_tex['cdefense'])
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 2.7)
ax.locator_params(nbins=3)
def plotcup(ax, cupstr, cupstrref=None):
pup = np.linspace(0.0, 0.2, 100)
if cupstrref is not None:
cup = evolimmune.cup_from_str(cupstrref)
ax.plot(pup, cup(pup), c=linecolors[2], alpha=.25)
cup = evolimmune.cup_from_str(cupstr)
ax.plot(pup, cup(pup), c=linecolors[2])
ax.set_xlabel(evolimmune.varname_to_tex['pup'])
ax.set_ylabel(evolimmune.varname_to_tex['cup'])
ax.set_ylim(0, 0.1)
ax.locator_params(nbins=1)
# Putting it all together into one figure
# In[10]:
fig = plt.figure(figsize=(6, 7))
nrow = 4
nsubrow = 3
height_ratios = [1, 10, 10]
gsglobal = gridspec.GridSpec(4, 2)
import param1
lambdaref, musref, cupref = param1.lambda_, param1.mus, param1.cup
label_axes = []
for i in range(1, 9):
p = __import__('param{}'.format(i))
lambda_ = p.lambda_
mus = p.mus
cup = p.cup
dfg = df[(df.mus==mus)&(df.cup==cup)&(df.lambda_==lambda_)]
    print(lambda_, mus, cup)
gs = gridspec.GridSpecFromSubplotSpec(3, 2, subplot_spec=gsglobal[(i-1)%nrow, (i-1)//nrow],
width_ratios=[1, 2], height_ratios=[1, 30, 20],
hspace=1.5, wspace=0.6)
axlambda = fig.add_subplot(gs[0, 0])
axlambda.text(0.5, -3.0, '${0}={1}$'.format(r'c_{\rm inf}', lambda_),
transform=axlambda.transAxes, ha='center', va='top')
axlambda.axis('off')
axmu = fig.add_subplot(gs[1, 0])
plotstatecosts(axmu, mus, musref)
axcup = fig.add_subplot(gs[2, 0])
plotcup(axcup, cup, cupref)
for ax in [axmu, axcup]:
plotting.despine(ax)
axm = fig.add_subplot(gs[:, 1])
try:
polygons = evolimmune.polygons_from_boundaries(dfg, yconv=evolimmune.to_tau)
phases = evolimmune.phases_from_polygons(polygons)
except:
pass
else:
        for phasename, phase in phases.items():
try:
axm.add_patch(analysis.shapely_to_mpl(phase, ec='None',
fc=color_dict[phasename],
lw=1.0))
phaset = shapely.ops.transform(lambda x, y, z=None: (x, np.log(y+eps)), phase)
axm.text(phaset.centroid.x, np.exp(phaset.centroid.y),
r'$\mathbf{%s}$'%phasenames[phasename][1:-1],
ha='center', va='center')
except:
pass
axm.set_ylim(evolimmune.to_tau(df.aenv.min()), evolimmune.to_tau(df.aenv.max()))
axm.set_yscale('log')
axm.yaxis.set_major_formatter(ticker.ScalarFormatter())
axm.set_xlabel('$\pi_{env}$')
axm.set_ylabel(r'$\tau_{env}$')
axm.grid(which='major', alpha=0.75)
axm.grid(which='minor', lw=0.4, alpha=0.5)
axm.set_axisbelow(False)
plotting.despine(axm, spines='all')
label_axes.append((i, axlambda))
label_axes = [ax for i, ax in sorted(label_axes)]
plotting.label_axes(label_axes, xy=(-0.6, 1.0), fontsize='large', va='top')
gsglobal.tight_layout(fig, h_pad=1.0, w_pad=2.0)
fig.savefig('SIaltphases.pdf')
fig.savefig('SIaltphases.svg')
# Fig.S2: **Influence of parameter choice on the phase diagram presented in Fig. 2.**
# For every panel the parameter choices are shown on the left and the phase boundaries between **p**roto-adaptive, **i**nnate, **i**nnate **b**et hedging, **m**ixed and **C**RISPR-like strategies are shown on the right. As a reference, lines in lighter color show trade-off and uptake cost for parameter set used in Fig. 2.
# **(A)** Phase diagram for parameters used in Fig. 2.
# **(B)** More expensive active acquisition ($c_{\rm uptake}$ multiplied by a factor of two).
# **(C)** Different functional form for cost of active acquisition: $c_{\rm uptake} = 0.05 \times p_{\rm uptake} + 2 \times p_{\rm uptake}^2$.
# **(D)** More permissive state-dependent costs (costs multiplied by a factor of 0.5).
# **(E)** Less permissive state-dependent costs (costs multiplied by a factor of 1.5).
# **(F)** Higher cost of infection.
# **(G)** Higher cost of immune protection.
# **(H)** Different functional form for cost trade-off, $c_{\rm defense} = 1.4-0.6\times c_{\rm constitutive}+0.2 \times c_{\rm constitutive}^2$
# In[ ]:
|
{"hexsha": "b3dd75cbe1a974bd6145d872528497320d3fe776", "size": 6607, "ext": "py", "lang": "Python", "max_stars_repo_path": "figSIaltphases/figure-SIaltphases.py", "max_stars_repo_name": "andim/evolimmune", "max_stars_repo_head_hexsha": "6ffcc19e8725d343e9b10fa9c4dd77a9a485398a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-05-18T19:43:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-20T01:40:32.000Z", "max_issues_repo_path": "figSIaltphases/figure-SIaltphases.py", "max_issues_repo_name": "andim/evolimmune", "max_issues_repo_head_hexsha": "6ffcc19e8725d343e9b10fa9c4dd77a9a485398a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-10T14:51:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-10T14:55:17.000Z", "max_forks_repo_path": "figSIaltphases/figure-SIaltphases.py", "max_forks_repo_name": "andim/evolimmune", "max_forks_repo_head_hexsha": "6ffcc19e8725d343e9b10fa9c4dd77a9a485398a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-20T14:48:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T14:48:02.000Z", "avg_line_length": 37.1179775281, "max_line_length": 323, "alphanum_fraction": 0.6452247616, "include": true, "reason": "import numpy", "num_tokens": 2006}
|
#!/usr/bin/env python3
import numpy as np
from pyqubo import Array, Constraint, Placeholder
def make_energy(type_matrix, weak_matrix, resist_matrix, enemy, skill):
# set the number of enemies
num_enemies = len(enemy)
# set the number of my pokemon
num_my_team = num_enemies
# set the number of types
num_types = len(type_matrix)
# set the number of skills
num_skills = 4
# set placeholder
lambda_a = Placeholder('h_a')
lambda_b = Placeholder('h_b')
lambda_c = Placeholder('h_c')
lambda_d = Placeholder('h_d')
# set binary variables
x = Array.create('x', shape=(num_my_team, num_types), vartype='BINARY')
y = Array.create('y', shape=(num_my_team, num_skills, num_types), vartype='BINARY')
s = Array.create('s', shape=(num_enemies, num_skills, 2), vartype='BINARY')
t = Array.create('t', shape=(num_enemies, num_skills, 2*num_my_team-1), vartype='BINARY')
# convert to numpy array
x = np.array(x)
y = np.array(y)
s = np.array(s)
t = np.array(t)
z = np.array(enemy)
w = np.array(skill)
# set one-hot encoding constraint for pokemon type
h_a = 0
for i in range(num_my_team):
tmp = sum([x[i][j] for j in range(num_types)])
h_a += (tmp-1) * (tmp-2)
h_a = Constraint(h_a, label='h_a')
# set one-hot encoding constraint for skill type
h_b = 0
for i in range(num_my_team):
for k in range(num_skills):
tmp = sum([y[i][k][l] for l in range(num_types)])
h_b += (tmp-1) ** 2
h_b = Constraint(h_b, label='h_b')
    # set a constraint that the number of weak pokemon is less than 2
h_c = 0
h_c2 = 0
tmp_sum = 0
for i in range(num_enemies):
for j in range(num_skills):
for l in range(num_my_team):
tmp = np.dot(w[i][j], np.dot(weak_matrix, x[l]))
h_c2 += tmp * (tmp-1)
tmp_sum += tmp
h_c += (2-sum(s[i][j])-tmp_sum) ** 2
h_c = Constraint(h_c+h_c2, label='h_c')
    # set a constraint that the number of resistant pokemon is greater than 1
h_d = 0
for i in range(num_enemies):
for j in range(num_skills):
tmp = sum([np.dot(w[i][j], np.dot(resist_matrix, x[l])) for l in range(num_my_team)])
h_d += (2*num_my_team-sum(t[i][j])-tmp) ** 2
h_d = Constraint(h_d, label='h_d')
# set cost function
atk_damage = 0
for i in range(num_my_team):
for j in range(num_skills):
for l in range(num_enemies):
tmp = np.dot(y[i][j], np.dot(type_matrix, z[l]))
atk_damage += tmp
def_damage = 0
for i in range(num_enemies):
for j in range(num_skills):
for l in range(num_my_team):
tmp = np.dot(w[i][j], np.dot(type_matrix, x[l]))
def_damage += tmp
obj = - atk_damage + def_damage
# compute total energy
energy = obj + lambda_a * h_a + lambda_b * h_b + lambda_c * h_c + lambda_d * h_d
# compile
model = energy.compile()
return model
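
# A minimal usage sketch (not part of the original module; the toy matrices, team
# encodings and penalty weights below are hypothetical and chosen only for illustration).
if __name__ == '__main__':
    type_matrix = np.array([[1.0, 2.0], [0.5, 1.0]])    # attack effectiveness table
    weak_matrix = (type_matrix >= 2.0).astype(int)      # "weak against" indicator
    resist_matrix = (type_matrix <= 0.5).astype(int)    # "resists" indicator
    enemy = [[1, 0], [0, 1]]                            # one-hot enemy types
    skill = [[[1, 0]] * 4, [[0, 1]] * 4]                # 4 one-hot skill types per enemy
    model = make_energy(type_matrix, weak_matrix, resist_matrix, enemy, skill)
    # bind the penalty placeholders and extract the QUBO coefficients
    qubo, offset = model.to_qubo(feed_dict={'h_a': 5.0, 'h_b': 5.0,
                                            'h_c': 5.0, 'h_d': 5.0})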
|
{"hexsha": "ca872bd8c7181e149aabf089f67a25eefe097e3f", "size": 3038, "ext": "py", "lang": "Python", "max_stars_repo_path": "make_energy.py", "max_stars_repo_name": "github-nakasho/Pokemon_opt", "max_stars_repo_head_hexsha": "abf1522fc1bf315d2018599b94d839084b421420", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "make_energy.py", "max_issues_repo_name": "github-nakasho/Pokemon_opt", "max_issues_repo_head_hexsha": "abf1522fc1bf315d2018599b94d839084b421420", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "make_energy.py", "max_forks_repo_name": "github-nakasho/Pokemon_opt", "max_forks_repo_head_hexsha": "abf1522fc1bf315d2018599b94d839084b421420", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6024096386, "max_line_length": 97, "alphanum_fraction": 0.5915075708, "include": true, "reason": "import numpy", "num_tokens": 880}
|
#include <Eigen/StdVector>
#include <iostream>
#include <thread>
#include <srrg_system_utils/shell_colors.h>
#include <srrg_system_utils/parse_command_line.h>
#include <srrg_messages/message_handlers/message_file_source.h>
#include <srrg_messages/message_handlers/message_sorted_source.h>
#include <srrg_messages/message_handlers/message_synchronized_source.h>
#include <srrg_messages/instances.h>
#include "srrg_messages_ros/instances.h"
#include "srrg_data_structures/platform.h"
#include <signal.h>
const std::string exe_prefix = "test_tf_from_sync|";
#define LOG std::cerr << exe_prefix
using namespace srrg2_core;
using namespace srrg2_core_ros;
const char* banner[] = {
"test to read tf from a sync source",
"usage: <exe> [options] path_to_bag.bag",
"BAG MUST CONTAIN: laser, odometry and tf",
0
};
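
// Example invocation (hypothetical paths; flag syntax follows the short names
// registered with ParseCommandLine in main() below):
//   ./test_tf_from_sync -tl /scan -to /odom -tt /tf -step my_recording.bag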
bool run = true;
void sigIntHandler(int signum_) {
if (signum_ == SIGINT) {
LOG << "shutting down...PRESS ENTER\n";
run = false;
}
}
using Isometry3fAlignedVector = std::vector<Isometry3f, Eigen::aligned_allocator<Isometry3f> >;
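
// Helper reproducing an Eigen alignment scenario: a Platform member next to an
// aligned std::vector of Isometry3f (hence EIGEN_MAKE_ALIGNED_OPERATOR_NEW).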
class Evil {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
Evil() {}
~Evil() {}
inline Platform& getPlatform() {return _platform;}
void addEvil(PropertyContainerIdentifiablePtr& events_ptr) {
std::cerr << "Evil::addEvil|adding event vector\n";
_platform.add(events_ptr);
if (!_platform.isWellFormed()) {
_platform.setup();
}
std::cerr << "Evil::addEvil|pushing back dummy stuff in the vector\n";
std::cerr << "Evil::addEvil|size PRE = " << _poses.size();
_poses.push_back(Isometry3f::Identity());
std::cerr << "\tsize POST = " << _poses.size() << std::endl;
}
protected:
Platform _platform;
Isometry3fAlignedVector _poses;
};
int main(int argc, char **argv) {
signal(SIGINT, sigIntHandler);
messages_registerTypes();
messages_ros_registerTypes();
ParseCommandLine cmd_line(argv, banner);
ArgumentString topic_laser(&cmd_line, "tl", "topic-laser", "laser topic", "/scan");
ArgumentString topic_odom(&cmd_line, "to", "topic-odom", "odometry topic", "/odom");
ArgumentString topic_tf(&cmd_line, "tt", "topic-tf", "tf topic", "/tf");
ArgumentFlag stepwise_parsing(&cmd_line, "step", "stepwise-processing", "process the bag step by step (press ENTER to go forward)");
cmd_line.parse();
if (cmd_line.lastParsedArgs().empty())
throw std::runtime_error(exe_prefix+"please set valid bag");
MessageROSBagSourcePtr source = MessageROSBagSourcePtr(new MessageROSBagSource);
MessageSortedSourcePtr sorter = MessageSortedSourcePtr(new MessageSortedSource);
sorter->param_time_interval.setValue(100.0);
//ia if I comment this block and I use the synched source (so this is not relevant)
//ia then everything goes nuts iff Platform is a pointer.
//ia otherwise (when Platform is on the stack) everything is ok.
source->param_topics.value().push_back(topic_laser.value());
source->param_topics.value().push_back(topic_odom.value());
source->param_topics.value().push_back(topic_tf.value());
sorter->param_source.setValue(source);
LOG << "opening file " << FG_YELLOW(cmd_line.lastParsedArgs()[0]) << std::endl;
source->open(cmd_line.lastParsedArgs()[0]);
Platform platform;
BaseSensorMessagePtr msg = nullptr;
size_t m = 0;
std::cerr << std::setprecision(20) << std::endl;
Isometry3f T;
while (run) {
msg = sorter->getMessage();
if (!msg) {
LOG << "file ended, stop" << std::endl;
break;
}
LOG << "message #" << FG_CYAN(m++) << std::endl;
double timestamp = 0;
if (!platform.add(msg)) {
LOG << "failed to add the message to the platform" << std::endl;
continue;
}
timestamp = msg->timestamp.value();
//ia print
LOG << "tf message timestamp: " << timestamp << std::endl;
LOG << "platform size = " << FG_YELLOW(platform.size()) << std::endl;
/* if(platform.getTransform(T,"laser_frame", "base_link", timestamp)) */
/* LOG << "transform laser_frame base_link = \n" << T.matrix() << std::endl; */
if(platform.getTransform(T,"base_link","odom",timestamp))
LOG << "transform base_link in odom = \n" << T.matrix() << std::endl;
LOG << platform << std::endl;
if (stepwise_parsing.isSet()) {
LOG << "press ENTER to continue" << std::endl;
std::cin.clear();
std::cin.get();
}
std::cerr << "==================================================================================================\n\n";
}
// delete platform;
}
|
{"hexsha": "d2cc1a3deb3b4aaded18209ef308f2f7846374ee", "size": 4521, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "catkin_ws/src/srrg2_core/srrg2_core_ros/src/tests/test_tf_from_source.cpp", "max_stars_repo_name": "laaners/progetto-labiagi_pick_e_delivery", "max_stars_repo_head_hexsha": "3453bfbc1dd7562c78ba06c0f79b069b0a952c0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2020-03-11T14:36:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T09:01:15.000Z", "max_issues_repo_path": "catkin_ws/src/srrg2_core/srrg2_core_ros/src/tests/test_tf_from_source.cpp", "max_issues_repo_name": "laaners/progetto-labiagi_pick_e_delivery", "max_issues_repo_head_hexsha": "3453bfbc1dd7562c78ba06c0f79b069b0a952c0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-06-07T17:25:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-15T07:36:10.000Z", "max_forks_repo_path": "catkin_ws/src/srrg2_core/srrg2_core_ros/src/tests/test_tf_from_source.cpp", "max_forks_repo_name": "laaners/progetto-labiagi_pick_e_delivery", "max_forks_repo_head_hexsha": "3453bfbc1dd7562c78ba06c0f79b069b0a952c0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-11-30T08:17:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-19T05:07:07.000Z", "avg_line_length": 31.1793103448, "max_line_length": 134, "alphanum_fraction": 0.6682149967, "num_tokens": 1174}
|
import json
from scipy import interpolate
import copy
from ._emulator import Emulator
def transform_ES_elastance(emulator_data, factor):
"""
Transform an emulator by applying a multiplicative factor to end systolic elastance.
Parameters
----------
emulator_data : str, dict or Emulator
Emulator to be transformed.
factor : float
Multiplicative factor for end systolic elastance.
Returns
-------
emulator : Emulator
Transformed emulator.
"""
if isinstance(emulator_data, str):
with open(emulator_data) as json_file:
emulator_data = json.load(json_file)
if isinstance(emulator_data, Emulator):
emulator_data = emulator_data.data
emulator_data_new = copy.deepcopy(emulator_data)
emulator_data_new['ESPV']['E'] *= factor
return Emulator(emulator_data_new)
def transform_time_shift(emulator_data, time_lag):
"""
Transform an emulator by shifting in time the activation pattern.
Parameters
----------
emulator_data : str, dict or Emulator
Emulator to be transformed.
time_lag : float [s]
Time lag.
Returns
-------
emulator : Emulator
Transformed emulator.
"""
if isinstance(emulator_data, str):
with open(emulator_data) as json_file:
emulator_data = json.load(json_file)
if isinstance(emulator_data, Emulator):
emulator_data = emulator_data.data
activation_base = interpolate.interp1d(emulator_data['activation']['t'], emulator_data['activation']['v'])
activation = lambda t: activation_base(t % emulator_data['period'])
emulator_data_new = copy.deepcopy(emulator_data)
emulator_data_new['activation']['v'] = activation(emulator_data['activation']['t'] - time_lag)
return Emulator(emulator_data_new)
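
# Hedged usage sketch (not part of the module; the file path, factor and time lag
# below are illustrative only):
#
#     emulator = transform_ES_elastance('emulator.json', factor=1.2)
#     emulator = transform_time_shift(emulator, time_lag=0.05)  # shift by 50 ms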
|
{"hexsha": "db65cb05936715038febb441a6aade1c21f59b50", "size": 1840, "ext": "py", "lang": "Python", "max_stars_repo_path": "cardioemulator/_transform_emulator.py", "max_stars_repo_name": "michelebucelli/cardioemulator", "max_stars_repo_head_hexsha": "0ce8d5fce017a7251865ab01fdf3d0653490b60f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cardioemulator/_transform_emulator.py", "max_issues_repo_name": "michelebucelli/cardioemulator", "max_issues_repo_head_hexsha": "0ce8d5fce017a7251865ab01fdf3d0653490b60f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cardioemulator/_transform_emulator.py", "max_forks_repo_name": "michelebucelli/cardioemulator", "max_forks_repo_head_hexsha": "0ce8d5fce017a7251865ab01fdf3d0653490b60f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3076923077, "max_line_length": 110, "alphanum_fraction": 0.6831521739, "include": true, "reason": "from scipy", "num_tokens": 397}
|
#pragma once
#include <Core/Containers/AlignedStdVector.hpp>
#include <Core/Containers/VectorArray.hpp>
#include <Eigen/Core>
#include <iostream>
namespace Ra {
namespace Core {
using ParentList = AlignedStdVector<int>;
using LevelList = AlignedStdVector<uint8_t>;
using ChildrenList = AlignedStdVector<uint8_t>;
using Adjacency = AlignedStdVector<ChildrenList>;
/**
 * The AdjacencyList stores the adjacency structure as a vector of children index
 * lists together with a vector containing, for each node, the index of its parent.
 */
class RA_CORE_API AdjacencyList
{
public:
enum class ConsistencyStatus {
Valid,
IncompatibleChildrenAndParentList,
WrongParentOrdering,
WrongParentIndex,
InconsistentParentIndex,
NonLeafNodeWithoutChild
};
//////////////////////////////////////////////////////////////////////////////
// CONSTRUCTOR
//////////////////////////////////////////////////////////////////////////////
AdjacencyList();
explicit AdjacencyList( const uint n );
AdjacencyList( const AdjacencyList& adj );
//////////////////////////////////////////////////////////////////////////////
// DESTRUCTOR
//////////////////////////////////////////////////////////////////////////////
~AdjacencyList();
//////////////////////////////////////////////////////////////////////////////
// NODE
//////////////////////////////////////////////////////////////////////////////
/// Return the index of the added root.
uint addRoot();
/// Return the index of the added leaf.
uint addNode( const uint parent );
/// Prune the leaves of the graph and returns the changes.
void pruneLeaves( std::vector<uint>& pruned, std::vector<bool>& delete_flag );
/// Prune the leaves of the graph.
void pruneLeaves();
/*!
* Return the edge list built from the given adjacency list.
* If include_leaf is true, the list will contain the pairs:
* ...
* { i, i }
* ...
* where i is the index of a leaf node.
*/
VectorArray<Eigen::Matrix<uint, 2, 1>> extractEdgeList( bool include_leaf = false ) const;
//////////////////////////////////////////////////////////////////////////////
// SIZE
//////////////////////////////////////////////////////////////////////////////
/// Return the number of nodes in the graph
inline uint size() const;
/// Clear the vectors
inline void clear();
//////////////////////////////////////////////////////////////////////////////
// QUERY
//////////////////////////////////////////////////////////////////////////////
/// Return true if the graph is consistent
ConsistencyStatus computeConsistencyStatus() const;
/// Return true if the graph is empty.
inline bool isEmpty() const;
/// Return true if a node is a root node.
inline bool isRoot( const uint i ) const;
/// Return true if the node is a leaf node.
inline bool isLeaf( const uint i ) const;
/// Return true if the node is a branch node. ( |child| > 1 )
inline bool isBranch( const uint i ) const;
/// Return true if the node is a joint node. ( |child| == 1 )
inline bool isJoint( const uint i ) const;
/// Return true if the edge { i, j } exists.
inline bool isEdge( const uint i, const uint j ) const;
inline const Adjacency& children() const;
inline const ParentList& parents() const;
//////////////////////////////////////////////////////////////////////////////
// VARIABLE
//////////////////////////////////////////////////////////////////////////////
private:
/// Adjacency matrix
Adjacency m_child;
/// Parents ids vector
ParentList m_parent;
LevelList m_level;
};
RA_CORE_API std::ofstream& operator<<( std::ofstream& ofs, const AdjacencyList& p );
} // namespace Core
} // namespace Ra
#include <Core/Containers/AdjacencyList.inl>
|
{"hexsha": "f892438ebd449a2af4df9760dd1208965f4e7731", "size": 3963, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/Core/Containers/AdjacencyList.hpp", "max_stars_repo_name": "grandch/Radium-Engine", "max_stars_repo_head_hexsha": "9d8c5b34c191ab3a31acff2f12cf3b0d66f613db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-02-03T17:47:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T17:47:04.000Z", "max_issues_repo_path": "src/Core/Containers/AdjacencyList.hpp", "max_issues_repo_name": "david49az/Radium-Engine", "max_issues_repo_head_hexsha": "2600039e5c0658057b8b35f79222a332feceb026", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Core/Containers/AdjacencyList.hpp", "max_forks_repo_name": "david49az/Radium-Engine", "max_forks_repo_head_hexsha": "2600039e5c0658057b8b35f79222a332feceb026", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7027027027, "max_line_length": 94, "alphanum_fraction": 0.4978551602, "num_tokens": 766}
|
import numpy as np
import sys
import math
def read_inputs() :
feature_vec_train=np.load('train_feature.npy')
#print(feature_vec_train.shape)
train_label=np.load('train_label.npy')
#print(train_label.shape)
feature_vec_test=np.load('test_feature.npy')
#print(feature_vec_test.shape)
test_label=np.load('test_label.npy')
#print(test_label.shape)
return feature_vec_train,train_label,feature_vec_test,test_label
def regularised_regression(feature_vec_train,train_label,feature_vec_test,test_label) :
lamda=0.5
K=np.vstack((feature_vec_train,lamda*np.identity(18)))
K=K.astype(float)
zeros=np.zeros((18,1), dtype=int, order='C')
d=(np.vstack((train_label,zeros))).astype(float)
x_1=np.dot(np.transpose(K),K)
x_1=np.linalg.inv(x_1)
x_2=np.dot(np.transpose(K),d)
x=np.dot(x_1,x_2)
prediction_train=np.matrix.round(np.dot(feature_vec_train.astype(float),x))
prediction_train[prediction_train>=1] = 1
prediction_train[prediction_train<=0] = 0
return x, prediction_train
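# Note on regularised_regression above: stacking lamda*I below the feature
# matrix (and zeros below the labels) makes the ordinary least-squares
# solution of the augmented system equal to the ridge estimate
#     x = (F'F + lamda^2 * I)^{-1} F'd,
# i.e. regularised regression with penalty lamda^2.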
def exponential_regression(feature_vec_train,train_label,feature_vec_test,test_label) :
exponent = 1
if(exponent!=1):
feature_vec_train = np.array(feature_vec_train, dtype=np.float128)
feature_vec_test = np.array(feature_vec_test, dtype=np.float128)
for x in np.nditer(feature_vec_train, op_flags=['readwrite']):
x[...] = exponent ** x
for y in np.nditer(feature_vec_test, op_flags=['readwrite']):
y[...] = exponent ** y
regression(feature_vec_train,train_label,feature_vec_test,test_label)
def logarithmic_regression(feature_vec_train,train_label,feature_vec_test,test_label) :
base = 2
feature_vec_train =np.array(feature_vec_train, dtype=np.float64)
feature_vec_test = np.array(feature_vec_test, dtype=np.float64)
for x in np.nditer(feature_vec_train, op_flags=['readwrite']):
if(x>0):
x[...] = math.log(x,base)
for y in np.nditer(feature_vec_test, op_flags=['readwrite']):
if(y>0):
y[...] = math.log(y,base)
x_1=np.linalg.pinv(feature_vec_train)
x=np.dot(x_1,np.array(train_label,dtype=np.float64))
prediction_train=np.matrix.round(np.dot(feature_vec_train,x))
train_accuracy(prediction_train,train_label)
test(feature_vec_test,test_label,x)
def regression(feature_vec_train,train_label,feature_vec_test,test_label) :
feature_vec_train=feature_vec_train.astype(float)
train_label=train_label.astype(float)
feature_vec_test=feature_vec_test.astype(float)
test_label=test_label.astype(float)
x_1=np.dot(np.transpose(feature_vec_train),feature_vec_train)
x_1=np.linalg.inv(x_1)
x_2=np.dot(np.transpose(feature_vec_train),train_label)
x=np.dot(x_1,x_2)
prediction_train=np.matrix.round(np.dot(feature_vec_train,x))
train_accuracy(prediction_train,train_label)
test(feature_vec_test,test_label,x)
def train_accuracy(train_predictions,train_label) :
count=0.0
train_label=train_label.astype(float)
train_predictions=train_predictions.astype(float)
train_predictions[train_predictions>=1] = 1.0
train_predictions[train_predictions<=0] = 0.0
for i in range (train_label.size):
if train_predictions[i,0]==train_label[i,0] :
count+=1.0
accuracy=(count/train_label.size)
print("Train Accuracy for Regression is: "+str(accuracy))
def test(feature_vec_test,test_label,x) :
prediction_test=np.dot(feature_vec_test.astype(float),x.astype(float))
count=0.0
test_label=test_label.astype(float)
prediction_test=np.matrix.round(prediction_test.astype(float))
np.seterr(invalid='ignore')
prediction_test[prediction_test>=1] = 1.0
prediction_test[prediction_test<=0] = 0.0
for i in range (0,test_label.size):
if prediction_test[i,0]==test_label[i,0] :
count+=1.0
accuracy=(count/test_label.size)
print("Test Accuracy for Regression is: "+str(accuracy))
def polynomial_regression(feature_vec_train,train_label,feature_vec_test,test_label) :
power=5
for i in range (2,power+1):
train_matrix= np.power(np.array(feature_vec_train,dtype=np.float64),i)
train_matrix=np.hstack((np.array(feature_vec_train,dtype=np.float64),train_matrix))
feature_vec_train=train_matrix
test_matrix=np.power(np.array(feature_vec_test,dtype=np.float64),i)
test_matrix=np.hstack((np.array(feature_vec_test,dtype=np.float64),test_matrix))
feature_vec_test=test_matrix
x_1=np.linalg.pinv(train_matrix)
x=np.dot(x_1,np.array(train_label,dtype=np.float64))
prediction_train=np.matrix.round(np.dot(train_matrix,x))
train_accuracy(prediction_train,train_label)
test(test_matrix,test_label,x)
if __name__ == "__main__":
    feature_vec_train,train_label,feature_vec_test,test_label = read_inputs()
# print('\033[1m'+"\nRegression "+'\033[0m')
# regression(feature_vec_train,train_label,feature_vec_test,test_label)
# print('\033[1m'+"\nRegularised Regression"+'\033[0m')
# weights, trainPredictions = regularised_regression(feature_vec_train,train_label,feature_vec_test,test_label)
# train_accuracy(trainPredictions,train_label)
# test(feature_vec_test,test_label,weights)
# print('\033[1m'+"\nPolynomial Regression "+'\033[0m')
# polynomial_regression(feature_vec_train,train_label,feature_vec_test,test_label)
# print('\033[1m'+"\nExponential Regression "+'\033[0m')
# exponential_regression(feature_vec_train,train_label,feature_vec_test,test_label)
print('\033[1m'+"\nLogarithmic Regression "+'\033[0m')
logarithmic_regression(feature_vec_train,train_label,feature_vec_test,test_label)
|
{"hexsha": "092389a4ebb90504c27bf69d5dd10a92e0b6d152", "size": 5346, "ext": "py", "lang": "Python", "max_stars_repo_path": "regression.py", "max_stars_repo_name": "samiragarwala/diabetic_retinopathy_detection", "max_stars_repo_head_hexsha": "c0a134a65339098d338a998109fcab367bb00a32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "regression.py", "max_issues_repo_name": "samiragarwala/diabetic_retinopathy_detection", "max_issues_repo_head_hexsha": "c0a134a65339098d338a998109fcab367bb00a32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "regression.py", "max_forks_repo_name": "samiragarwala/diabetic_retinopathy_detection", "max_forks_repo_head_hexsha": "c0a134a65339098d338a998109fcab367bb00a32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5, "max_line_length": 112, "alphanum_fraction": 0.7878787879, "include": true, "reason": "import numpy", "num_tokens": 1394}
|
from collections.abc import Iterable
from contextlib import contextmanager, nullcontext
import emcee as mc
import numpy as np
import scipy.stats as st
import sklearn
from scipy.linalg import cho_solve, cholesky, solve_triangular
from sklearn.utils import check_random_state
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.gpr import _param_for_white_kernel_in_Sum
from skopt.learning.gaussian_process.kernels import WhiteKernel
from .utils import geometric_median, guess_priors, validate_zeroone
__all__ = ["BayesGPR"]
class BayesGPR(GaussianProcessRegressor):
""" Gaussian process regressor of which the kernel hyperparameters are inferred in a
fully Bayesian framework.
The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine
Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API, GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior);
* provides an additional method sample_y(X), which evaluates samples drawn from
the GPR (prior or posterior or hyper-posterior) at given inputs;
* exposes a method log_marginal_likelihood(theta), which can be used externally
for other ways of selecting hyperparameters,
e.g., via Markov chain Monte Carlo.
* allows setting the kernel hyperparameters while correctly recalculating the
required matrices
* exposes a method noise_set_to_zero() which can be used as a context manager to
temporarily set the prediction noise to zero.
This is useful for evaluating acquisition functions for Bayesian optimization
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are set to the geometric median of the
Markov chain Monte Carlo samples of the posterior.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations.
This can also prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
If an array is passed, it must have the same number of entries as the
data used for fitting and is used as datapoint-dependent noise level.
Note that this is equivalent to adding a WhiteKernel with c=alpha.
Allowing to specify the noise level directly as a parameter is mainly
for convenience and for consistency with Ridge.
Also note, that this class adds a WhiteKernel automatically if noise
is set.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be minimized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
Note, that the kernel hyperparameters obtained are only used as the
initial position of the Markov chain and will be discarded afterwards.
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle.
warp_inputs : boolean, optional (default: False)
If True, each input dimension will be warped (internally) using the cumulative
distribution function of a beta distribution [1]_. The parameters of each beta
distribution will be inferred from the data. The input data needs to be
in [0, 1].
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
noise : string, optional (default: "gaussian")
If set to "gaussian", then it is assumed that `y` is a noisy
estimate of `f(x)` where the noise is gaussian.
A WhiteKernel will be added to the provided kernel.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
noise_ : float
Estimate of the gaussian noise. Useful only when noise is set to
"gaussian".
chain_ : array-like, shape = (n_desired_samples, n_hyperparameters)
Samples from the posterior distribution of the hyperparameters.
pos_ : array-like, shape = (n_walkers, n_hyperparameters)
Last position of the Markov chain. Useful for continuing sampling when new
datapoints arrive. fit(X, y) internally uses an existing pos_ to resume
sampling, if no other position is provided.
References
----------
.. [1] Snoek, Jasper, Kevin Swersky, Richard Zemel, and Ryan P. Adams. “Input
Warping for Bayesian Optimization of Non-Stationary Functions.”
In Proceedings of the 31st International Conference on International
Conference on Machine Learning - Volume 32, II–1674–II–1682.
ICML’14. Beijing, China: JMLR.org, 2014.
"""
def __init__(
self,
kernel=None,
alpha=1e-10,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
normalize_y=False,
warp_inputs=False,
copy_X_train=True,
random_state=None,
noise="gaussian",
):
if kernel is None:
self._kernel = None
else:
self._kernel = kernel.clone_with_theta(kernel.theta)
random_state = check_random_state(random_state)
super().__init__(
kernel,
alpha,
optimizer,
n_restarts_optimizer,
normalize_y,
copy_X_train,
random_state,
noise,
)
self._alpha = self.alpha
self.warp_inputs = warp_inputs
self._sampler = None
self.chain_ = None
self.pos_ = None
self.kernel_ = None
@property
def theta(self):
"""The current geometric median of the kernel hyperparameter distribution.
The returned values are located in log space. Call `BayesGPR.kernel_` to obtain
        the values in their original space.
Returns
-------
ndarray
Array containing the kernel hyperparameters in log space.
"""
if self.kernel_ is not None:
with np.errstate(divide="ignore"):
return np.copy(self.kernel_.theta)
return None
@theta.setter
def theta(self, theta):
self.kernel_.theta = theta
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True)
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
self.K_inv_ = L_inv.dot(L_inv.T)
except np.linalg.LinAlgError as exc:
exc.args = (
"The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator." % self.kernel_,
) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_)
@property
def X_train_(self):
""" The training data which was used to train the Gaussian process.
If input warping is used, it will return the warped instances.
Returns
-------
array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction).
If `warp_inputs=True`, will contain the warped inputs in [0, 1].
"""
if hasattr(self, "_X_train_orig_"):
if self.warp_inputs:
return self._X_train_warped_
return self._X_train_orig_
return None
@X_train_.setter
def X_train_(self, X_train):
self._X_train_orig_ = np.copy(X_train) if self.copy_X_train else X_train
if self.warp_inputs:
self._X_train_warped_ = np.copy(self._X_train_orig_)
if hasattr(self, "warpers_"):
for col, warper in enumerate(self.warpers_):
self._X_train_warped_[:, col] = warper(self._X_train_orig_[:, col])
# If no warpers exist yet, we begin with an unwarped input space
def warp(self, X):
"""Warp the input X using the existing warpers.
Returns X if `warp_inputs=False` or if no warpers have been fit yet.
Parameters
----------
X : ndarray, shape (n_points, n_dims)
Points in the original space which should be warped.
"""
if self.warp_inputs and hasattr(self, "warpers_"):
X_warped = np.empty_like(X)
for col, warper in enumerate(self.warpers_):
X_warped[:, col] = warper(X[:, col])
X = X_warped
return X
def unwarp(self, X):
"""Unwarp the input X back to the original input space.
Returns X if `warp_inputs=False` or if no warpers have been fit yet.
Parameters
----------
X : ndarray, shape (n_points, n_dims)
Points in the warped space which should be transformed back to the input
space.
"""
if self.warp_inputs and hasattr(self, "warpers_"):
X_orig = np.empty_like(X)
for col, unwarper in enumerate(self.unwarpers_):
X_orig[:, col] = unwarper(X[:, col])
X = X_orig
return X
def rewarp(self):
"""Apply warping again to X_train_ after parameters have changed.
Does nothing if `warp_inputs=False` or if no warpers have been fit yet.
"""
if self.warp_inputs:
if hasattr(self, "warpers_") and hasattr(self, "_X_train_orig_"):
self._X_train_warped_ = np.empty_like(self._X_train_orig_)
for col, warper in enumerate(self.warpers_):
self._X_train_warped_[:, col] = warper(self._X_train_orig_[:, col])
def create_warpers(self, alphas, betas):
"""Create Beta CDFs and inverse CDFs for input (un)warping.
Parameters
----------
alphas : ndarray, shape (n_dims)
Raw alpha parameters of the Beta distributions in log-space.
betas : ndarray, shape (n_dims)
Raw beta parameters of the Beta distributions in log-space.
"""
if self.warp_inputs:
self.warpers_ = []
self.unwarpers_ = []
self.warp_alphas_ = np.copy(alphas)
self.warp_betas_ = np.copy(betas)
for a_log, b_log in zip(alphas, betas):
a, b = np.exp(a_log), np.exp(b_log)
dist = st.beta(a=a, b=b)
self.warpers_.append(dist.cdf)
self.unwarpers_.append(dist.ppf)
@contextmanager
def noise_set_to_zero(self):
"""Context manager in which the noise of the Gaussian process is 0.
This is useful when you want to predict the epistemic uncertainty of the
Gaussian process without the noise.
"""
current_theta = self.theta
try:
# Now we set the noise to 0, but do NOT recalculate the alphas!:
white_present, white_param = _param_for_white_kernel_in_Sum(self.kernel_)
self.kernel_.set_params(**{white_param: WhiteKernel(noise_level=0.0)})
yield self
finally:
self.kernel_.theta = current_theta
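    # Usage sketch for noise_set_to_zero (illustrative; assumes a fitted
    # instance `gp` and test inputs `X_test`):
    #     with gp.noise_set_to_zero():
    #         mean, std = gp.predict(X_test, return_std=True)
    # Inside the context, `std` reflects only the epistemic uncertainty of
    # the GP; the original kernel hyperparameters are restored on exit.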
def _apply_noise_vector(self, n_instances, noise_vector):
# We apply the noise vector to self.alpha here, to avoid having to pull up
# inherited code:
if noise_vector is not None:
if not np.iterable(self.alpha):
alpha = np.ones(n_instances) * self.alpha
elif not np.iterable(self._alpha): # we already changed self.alpha before
alpha = np.ones(n_instances) * self._alpha
alpha[: len(noise_vector)] += noise_vector
self.alpha = alpha
def _log_prob_fn(self, x, priors, warp_priors):
lp = 0
if self.warp_inputs:
n_dim = self.X_train_.shape[1]
x_warp = x[-2 * n_dim :]
x_gp = x[: len(x) - 2 * n_dim]
alphas, betas = x_warp[:n_dim], x_warp[n_dim:]
self.create_warpers(alphas, betas)
self.rewarp()
for a_log, b_log in zip(alphas, betas):
if isinstance(warp_priors, Iterable):
lp += warp_priors[0](a_log)
lp += warp_priors[1](b_log)
else:
lp += warp_priors(a_log, b_log)
else:
x_gp = x
if isinstance(priors, Iterable):
for prior, val in zip(priors, x_gp):
lp += prior(val)
else: # Assume priors is a callable, which evaluates the log probability:
lp += priors(x_gp)
try:
lp = lp + self.log_marginal_likelihood(theta=x_gp)
except ValueError:
return -np.inf
if not np.isfinite(lp):
return -np.inf
return lp
def sample(
self,
X=None,
y=None,
noise_vector=None,
n_threads=1,
n_desired_samples=100,
n_burnin=0,
n_thin=1,
n_walkers_per_thread=100,
progress=False,
priors=None,
warp_priors=None,
position=None,
add=False,
**kwargs
):
"""Sample from the posterior distribution of the hyper-parameters.
Parameters
----------
X : ndarray, shape (n_points, n_dims), optional (default: None)
Points at which the function is evaluated. If None, it will use the saved
datapoints.
y : ndarray, shape (n_points,), optional (default: None)
Value(s) of the function at `X`. If None, it will use the saved values.
noise_vector :
Variance(s) of the function at `X`. If None, no additional noise is applied.
n_threads : int, optional (default: 1)
Number of threads to use during inference.
This is currently not implemented.
n_desired_samples : int, optional (default: 100)
Number of hyperposterior samples to collect during inference. Must be a
multiple of `n_walkers_per_thread`.
n_burnin : int, optional (default: 0)
Number of iterations to discard before collecting hyperposterior samples.
Needs to be increased only, if the hyperposterior samples have not reached
their typical set yet. Higher values increase the running time.
n_thin : int, optional (default: 1)
Only collect hyperposterior samples every k-th iteration. This can help
reducing the autocorrelation of the collected samples, but reduces the
total number of samples.
n_walkers_per_thread : int, optional (default: 100)
Number of MCMC ensemble walkers to employ during inference.
progress : bool, optional (default: False)
If True, show a progress bar during inference.
priors : list or callable, optional (default: None)
Log prior(s) for the kernel hyperparameters. Remember that the kernel
hyperparameters are transformed into log space. Thus your priors need to
perform the necessary change-of-variables.
warp_priors : list or callable, optional (default: None)
Log prior(s) for the parameters of the Beta distribution used to warp each
dimension. Only used, if `warp_inputs=True`.
            By default a normal prior with mean 0 and standard deviation 0.3 is
            placed on the log of each Beta parameter. This prior favors the
identity transformation and sufficient data is needed to shift towards a
stronger warping function.
position : ndarray, shape (n_walkers, n_kernel_dims), optional (default: None)
Starting position of the Markov chain. If None, it will use the current
position. If this is None as well, it will try to initialize in a small
ball.
add : bool, optional (default: False)
If True, all collected hyperposterior samples will be added to the existing
samples in `BayesGPR.chain_`. Otherwise they will be replaced.
kwargs : dict
Additional keyword arguments for emcee.EnsembleSampler
"""
        if (X is None and not hasattr(self, "X_train_")) or self.kernel_ is None:
raise ValueError(
"""
It looks like you are trying to sample from the GP posterior without
data. Pass X and y, or ensure that you call fit before sample.
"""
)
# We are only able to guess priors now, since BayesGPR can add
# another WhiteKernel, when noise is set to "gaussian":
if priors is None:
priors = guess_priors(self.kernel_)
if warp_priors is None:
warp_priors = (
st.norm(loc=0.0, scale=0.3).logpdf,
st.norm(loc=0.0, scale=0.3).logpdf,
)
# Update data, if available:
if X is not None:
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
if int(sklearn.__version__[2:4]) >= 23:
self._y_train_std = np.std(y, axis=0)
else:
self._y_train_mean = np.zeros(1)
if int(sklearn.__version__[2:4]) >= 23:
self._y_train_std = 1
if int(sklearn.__version__[2:4]) >= 23:
self.y_train_std_ = self._y_train_std
self.y_train_mean_ = self._y_train_mean
else:
self.y_train_mean_ = self._y_train_mean
self.y_train_std_ = 1
y = (y - self.y_train_mean_) / self.y_train_std_
if noise_vector is not None:
noise_vector = np.array(noise_vector) / np.power(self.y_train_std_, 2)
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
self._apply_noise_vector(len(self.y_train_), noise_vector)
n_dim = len(self.theta)
n_walkers = n_threads * n_walkers_per_thread
n_samples = int(np.ceil(n_desired_samples / n_walkers) + n_burnin)
pos = None
if position is not None:
pos = position
elif self.pos_ is not None:
pos = self.pos_
if self.warp_inputs:
added_dims = self.X_train_.shape[1] * 2
n_dim += added_dims
if pos is None:
theta = self.theta
theta[np.isinf(theta)] = np.log(self.noise_)
if self.warp_inputs:
theta = np.concatenate([theta, np.zeros(added_dims)])
pos = [
theta + 1e-2 * self.random_state.randn(n_dim) for _ in range(n_walkers)
]
self._sampler = mc.EnsembleSampler(
nwalkers=n_walkers,
ndim=n_dim,
log_prob_fn=self._log_prob_fn,
kwargs=dict(priors=priors, warp_priors=warp_priors),
threads=n_threads,
**kwargs
)
rng = np.random.RandomState(
self.random_state.randint(0, np.iinfo(np.int32).max)
)
self._sampler.random_state = rng.get_state()
pos, prob, state = self._sampler.run_mcmc(pos, n_samples, progress=progress)
# if backup_file is not None:
# with open(backup_file, "wb") as f:
# np.save(f, pos)
chain = self._sampler.get_chain(flat=True, discard=n_burnin, thin=n_thin)
if add and self.chain_ is not None:
self.chain_ = np.concatenate([self.chain_, chain])
else:
self.chain_ = chain
if self.warp_inputs:
median = geometric_median(self.chain_)
warp_params = median[len(self.theta) :]
alphas = warp_params[: self.X_train_.shape[1]]
betas = warp_params[self.X_train_.shape[1] :]
self.create_warpers(alphas, betas)
self.rewarp()
self.theta = median[: len(self.theta)]
else:
self.theta = geometric_median(self.chain_)
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta, clone_kernel=False
)
self.pos_ = pos
def fit(
self,
X,
y,
noise_vector=None,
n_threads=1,
n_desired_samples=100,
n_burnin=10,
n_walkers_per_thread=100,
progress=True,
priors=None,
warp_priors=None,
position=None,
**kwargs
):
"""Fit the Gaussian process model to the given training data.
Parameters
----------
X : ndarray, shape (n_points, n_dims)
Points at which the function is evaluated. If None, it will use the saved
datapoints.
y : ndarray, shape (n_points,)
Value(s) of the function at `X`. If None, it will use the saved values.
noise_vector :
Variance(s) of the function at `X`. If None, no additional noise is applied.
n_threads : int, optional (default: 1)
Number of threads to use during inference.
This is currently not implemented.
n_desired_samples : int, optional (default: 100)
Number of hyperposterior samples to collect during inference. Must be a
multiple of `n_walkers_per_thread`.
        n_burnin : int, optional (default: 10)
Number of iterations to discard before collecting hyperposterior samples.
Needs to be increased only, if the hyperposterior samples have not reached
their typical set yet. Higher values increase the running time.
n_walkers_per_thread : int, optional (default: 100)
Number of MCMC ensemble walkers to employ during inference.
        progress : bool, optional (default: True)
If True, show a progress bar during inference.
priors : list or callable, optional (default: None)
Log prior(s) for the kernel hyperparameters. Remember that the kernel
hyperparameters are transformed into log space. Thus your priors need to
perform the necessary change-of-variables.
position : ndarray, shape (n_walkers, n_kernel_dims), optional (default: None)
Starting position of the Markov chain. If None, it will use the current
position. If this is None as well, it will try to initialize in a small
ball.
kwargs : dict
Additional keyword arguments for BayesGPR.sample
"""
self.kernel = self._kernel
# In sklearn >= 23 the normalization includes scaling the output by the
# standard deviation. We need to scale the noise_vector accordingly here:
if (
int(sklearn.__version__[2:4]) >= 23
and self.normalize_y
and noise_vector is not None
):
y_std = np.std(y, axis=0)
noise_vector = np.array(noise_vector) / np.power(y_std, 2)
self._apply_noise_vector(len(y), noise_vector)
super().fit(X, y)
self.sample(
n_threads=n_threads,
n_desired_samples=n_desired_samples,
n_burnin=n_burnin,
n_walkers_per_thread=n_walkers_per_thread,
progress=progress,
priors=priors,
warp_priors=warp_priors,
position=position,
add=False,
**kwargs
)
def predict(
self,
X,
return_std=False,
return_cov=False,
return_mean_grad=False,
return_std_grad=False,
):
if self.warp_inputs:
validate_zeroone(X)
X = self.warp(X)
return super().predict(
X, return_std, return_cov, return_mean_grad, return_std_grad
)
def sample_y(self, X, sample_mean=False, noise=False, n_samples=1, random_state=0):
"""Sample function realizations of the Gaussian process.
Parameters
----------
X : ndarray, shape (n_points, n_dims)
Points at which to evaluate the functions.
sample_mean : bool, optional (default: False)
If True, the geometric median of the hyperposterior samples is used as the
            Gaussian process to sample from. If False, a new set of hyperposterior
            samples is used for each new sample.
noise : bool, optional (default: False)
If True, Gaussian noise is added to the samples.
n_samples : int, optional (default: 1)
Number of samples to draw from the Gaussian process(es).
        random_state : int, RandomState instance or None, optional (default: 0)
            Pseudo random number generator state used to draw the samples from
            the Gaussian process(es).
Returns
-------
result : ndarray, shape (n_points, n_samples)
Samples from the Gaussian process(es)
Raises
------
ValueError
If `warp_inputs=True` and the entries of X are not all between 0 and 1.
"""
rng = check_random_state(random_state)
if sample_mean:
if noise:
cm = nullcontext(self)
else:
cm = self.noise_set_to_zero()
with cm:
samples = super().sample_y(X, n_samples=n_samples, random_state=rng)
return samples
ind = rng.choice(len(self.chain_), size=n_samples, replace=True)
if self.warp_inputs:
current_warp_alphas = np.copy(self.warp_alphas_)
current_warp_betas = np.copy(self.warp_betas_)
current_theta = self.theta
n_dims = len(current_theta)
current_K_inv = np.copy(self.K_inv_)
current_L = np.copy(self.L_)
current_alpha = np.copy(self.alpha_)
result = np.empty((X.shape[0], n_samples))
for i, j in enumerate(ind):
if self.warp_inputs:
validate_zeroone(X)
theta = self.chain_[j][:n_dims]
warp_params = self.chain_[j][n_dims:]
alphas, betas = warp_params[: X.shape[1]], warp_params[X.shape[1] :]
self.create_warpers(alphas, betas)
self.rewarp()
else:
theta = self.chain_[j]
self.theta = theta
if noise:
cm = nullcontext(self)
else:
cm = self.noise_set_to_zero()
with cm:
result[:, i] = (
super().sample_y(X, n_samples=1, random_state=rng).flatten()
)
self.kernel_.theta = current_theta
self.K_inv_ = current_K_inv
self.alpha_ = current_alpha
if self.warp_inputs:
self.warp_alphas_ = current_warp_alphas
self.warp_betas_ = current_warp_betas
self.L_ = current_L
return result
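# Minimal usage sketch (illustrative; `Matern`, `X`, `y` and `X_new` are
# assumed to be provided by the caller, with inputs scaled to [0, 1] when
# warp_inputs=True):
#     from skopt.learning.gaussian_process.kernels import Matern
#     gp = BayesGPR(kernel=Matern(), warp_inputs=True, random_state=0)
#     gp.fit(X, y, n_desired_samples=100, n_burnin=10)
#     draws = gp.sample_y(X_new, n_samples=5)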
|
{"hexsha": "4c9aee18a5f184ff0eb9d2d4aa0ec5288f1039ce", "size": 30611, "ext": "py", "lang": "Python", "max_stars_repo_path": "bask/bayesgpr.py", "max_stars_repo_name": "kiudee/bayes-skopt", "max_stars_repo_head_hexsha": "8f1daf996e34b95af47ef0d382d57fe8a17bbae5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2020-01-16T13:38:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-18T01:13:41.000Z", "max_issues_repo_path": "bask/bayesgpr.py", "max_issues_repo_name": "kiudee/bayes-skopt", "max_issues_repo_head_hexsha": "8f1daf996e34b95af47ef0d382d57fe8a17bbae5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2019-12-31T14:35:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-30T17:18:01.000Z", "max_forks_repo_path": "bask/bayesgpr.py", "max_forks_repo_name": "kiudee/bayes-skopt", "max_forks_repo_head_hexsha": "8f1daf996e34b95af47ef0d382d57fe8a17bbae5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-04-13T14:31:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T11:43:12.000Z", "avg_line_length": 43.2358757062, "max_line_length": 88, "alphanum_fraction": 0.6165757407, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 6779}
|
using CSV, DataFrames, ScikitLearn, PyPlot
pathtodata = joinpath("julia-scripts", "model-zoo", "covid_cleaned.csv")
data = DataFrame(CSV.File(pathtodata))
X = convert(Array, data[!, Not(:covid_res)])
y = convert(Array, data[!, :covid_res])
@sk_import model_selection:train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y; test_size=0.5, random_state=42)
@sk_import naive_bayes:GaussianNB
gnb = GaussianNB()
fit!(gnb, X_train, y_train)
y_predict = predict(gnb, X_train)
@sk_import metrics:classification_report
print(classification_report(y_train, y_predict))
@sk_import metrics:plot_confusion_matrix
plot_confusion_matrix(gnb, X_train, y_train)
PyPlot.gcf()
|
{"hexsha": "ef78a6259929e843b80510f236b99e98718afde3", "size": 684, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia-scripts/model-zoo/nbclassifier.jl", "max_stars_repo_name": "coinslab/ComputationalCognitiveModeling", "max_stars_repo_head_hexsha": "14c761ab8bc6685e7ec7f2bd79e7adae6abbad92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia-scripts/model-zoo/nbclassifier.jl", "max_issues_repo_name": "coinslab/ComputationalCognitiveModeling", "max_issues_repo_head_hexsha": "14c761ab8bc6685e7ec7f2bd79e7adae6abbad92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia-scripts/model-zoo/nbclassifier.jl", "max_forks_repo_name": "coinslab/ComputationalCognitiveModeling", "max_forks_repo_head_hexsha": "14c761ab8bc6685e7ec7f2bd79e7adae6abbad92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-06T09:17:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-06T09:17:46.000Z", "avg_line_length": 29.7391304348, "max_line_length": 89, "alphanum_fraction": 0.7821637427, "num_tokens": 199}
|
\documentclass[12pt]{article}
\usepackage[margin = 1.5in]{geometry}
\setlength{\parindent}{0in}
\usepackage{amsfonts, amssymb, amsthm, mathtools, tikz, qtree, float}
\usepackage{algpseudocode, algorithm, algorithmicx}
\usepackage{DejaVuSans}
\usepackage[T1]{fontenc}
\usepackage{ae, aecompl, color}
\usepackage[pdftex, pdfauthor={Charles Shen}, pdftitle={STAT 231: Statistics}, pdfsubject={Notes from STAT 231: at the University of Waterloo}, pdfkeywords={course notes, notes, Waterloo, University of Waterloo}, pdfproducer={LaTeX}, pdfcreator={pdflatex}]{hyperref}
\usepackage{cleveref}
\usepackage{wrapfig}
\allowdisplaybreaks
\DeclarePairedDelimiter{\set}{\lbrace}{\rbrace}
\renewcommand*\familydefault{\sfdefault}
\definecolor{darkish-blue}{RGB}{25,103,185}
\hypersetup{
colorlinks,
citecolor=darkish-blue,
filecolor=darkish-blue,
linkcolor=darkish-blue,
urlcolor=darkish-blue
}
\theoremstyle{definition}
\newtheorem*{defn}{Definition}
\newtheorem*{theorem}{Theorem}
\newtheorem*{corollary}{Corollary}
\newtheorem{ex}{Example}[section]
\crefname{ex}{Example}{Example}
\setlength{\marginparwidth}{1.5in}
\newcommand{\lecture}[1]{\marginpar{{\footnotesize $\leftarrow$ \underline{#1}}}}
\makeatletter
\def\blfootnote{\gdef\@thefnmark{}\@footnotetext}
\makeatother
\begin{document}
\let\ref\Cref
\pagenumbering{roman}
\title{\bf{STAT 231: Statistics}}
\date{Spring 2016, University of Waterloo \\ \center Formulas and Notes.}
\author{Charles Shen}
\blfootnote{Feel free to email feedback to me at \href{mailto:ccshen902@gmail.com}{ccshen902@gmail.com}.}
\maketitle
\begin{center}
\textbf{Assume all $\log$ are in base $e$ unless specified.} \\
I've tried to use $\ln$ for consistency, \\
	but there may be a few inconsistencies.
\end{center}
\newpage
\tableofcontents
\newpage
\pagenumbering{arabic}
\section{Numerical Summaries}
\subsection{Measure of Location}
\subsubsection{Mean}
The \emph{sample mean}, also called the sample average is
$$\bar{y} = \frac{1}{n}\sum_{i=1}^{n}y_{i}$$
\subsubsection{Median}
The \emph{sample median} $\hat{m}$ is the middle value(s) of an ordered sample. \\
Median is less affected by a few extreme observations so it is a more robust measure of location. \\
It is also the second quartile (\ref{iqr}).
\subsubsection{Mode}
The \emph{sample mode} is the most common value of $y$ in a sample; it may not be unique when several values occur with the same highest frequency.
\subsection{Measure of Dispersion or Variability}
\subsubsection{Variance and Standard Deviation}
The \emph{sample variance} is roughly the average of the squared deviation from the mean:
\begin{align*}
s^{2} &= \frac{1}{n-1}\sum_{i=1}^{n}(y_{i} - \bar{y})^{2} \\
&= \frac{1}{n-1}
\left[
\sum_{i=1}^{n}y_{i}^{2} - n(\bar{y})^{2}
\right]
\end{align*}
In addition, \emph{standard deviation} is then
$$s = \sqrt{s^{2}}$$
\subsubsection{Quantiles and Interquartile Range} \label{iqr}
\begin{defn}
Let $\{y_{(1)}, y_{(2)}, \dots, y_{(n)}\}$ where $y_{(1)} \leq y_{(2)} \leq \dots \leq y_{(n)}$ be the ordered statistic for the data set $\{y_1, y_2, \dots, y_n\}$.
The $pth$ quantile (also called the $100pth$ percentile) is a value, call it $q(p)$, determined as follows:
\begin{itemize}
\item Let $m = (n+1)p$ where $n$ is the sample size
\item If $m$ is an integer between $1$ and $n$, then $q(p) = y_{(m)}$ which is the $mth$ largest value in the data set
\item If $m$ is not an integer but $1 < m < n$ then determine the closest integer $j$ such that $j < m < j+1$ and take $q(p) = \frac{1}{2}[y_{(j)} + y_{(j+1)}]$
\end{itemize}
\end{defn}
The first (lower) quartile is $q(0.25)$, also the 25\textsuperscript{th} percentile. \\
The second quartile is $q(0.50)$, also the 50\textsuperscript{th} percentile and the median. \\
The third(upper) quartile is $q(0.75)$, also the 75\textsuperscript{th} percentile. \\
The \emph{interquartile range} is $IQR = q(0.75) - q(0.25)$
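\begin{ex}
(Hypothetical data.) For the ordered sample $\{2, 4, 5, 7, 8, 10, 12\}$ with $n = 7$:
for $q(0.25)$, $m = (7+1)(0.25) = 2$, so $q(0.25) = y_{(2)} = 4$;
for $q(0.75)$, $m = (7+1)(0.75) = 6$, so $q(0.75) = y_{(6)} = 10$.
Hence $IQR = 10 - 4 = 6$.
\end{ex}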
\subsubsection{Range}
Range is the difference between highest and lowest value in the sample
$$range = y_{(n)} - y_{(1)}$$
where
$$y_{(1)} = min(y_{1}, \dots, y_{n})$$
and
$$y_{(n)} = max(y_{1}, \dots, y_{n})$$
\subsection{Measure of Shape}
\subsubsection{Skewness}
The skewness $g_{1}$ can be measured precisely by
\begin{align*}
g_{1} =
\frac{\frac{1}{n}\sum_{i=1}^{n}(y_{i} - \bar{y})^{3}}{\left[\frac{1}{n}\sum_{i=1}^{n}(y_{i} - \bar{y})^{2}\right]^{3/2}}
\end{align*}
It's a measure on the (lack of) symmetry in the data. \\
If $g_{1} = 0$, then data is symmetric. \\
If $g_{1} < 0$, then data is left skewed (long left tail). \\
If $g_{1} > 0$, then data is right skewed (long right tail). \\
A quick indication of skewness is the sign of $mean - median$
\subsubsection{Kurtosis}
$$g_{2} = \frac{\frac{1}{n}\sum_{i=1}^{n}(y_{i} - \bar{y})^{4}}{\left[\frac{1}{n}\sum_{i=1}^{n}(y_{i} - \bar{y})^{2}\right]^{2}}$$
measures the heaviness of the tails and the peakedness of the data relative to data that are Normally distributed. \\
Kurtosis is always positive. \\
For the Normal distribution, kurtosis is equal to 3. \\
If $g_{2} < 3$, then more stacked peaks and smaller tails. \\
If $g_{2} > 3$, then more peaked center and heavier tails.
\subsection{More Definitions}
\subsubsection{Five Numbers Summary}
\begin{defn}
The five number summary of a data set consists of the three quartiles and the minimum and maximum values of the data set. \\
That is, $q(0.25)$, $q(0.5)$, $q(0.75)$, $y_{(1)}$, and $y_{(n)}$.
\end{defn}
\subsubsection{Correlation}
\begin{defn}
The sample \emph{correlation}, denoted by $r$, for data $\{(x_{1},y_{1}), (x_{2},y_{2}), \dots, (x_{n},y_{n})\}$ is
$$
r = \frac{S_{xy}}{\sqrt{S_{xx}S_{yy}}}
$$
where
\begin{align*}
S_{xx} &= \sum_{i=1}^{n}(x_{i} - \bar{x})^{2}
= \sum_{i=1}^{n}x_{i}^{2} - n(\bar{x})^2 \\
S_{xy} &= \sum_{i=1}^{n}(x_{i} - \bar{x})(y_{i} - \bar{y})
= \sum_{i=1}^{n}x_{i}y_{i} - n\bar{x}\bar{y} \\
S_{yy} &= \sum_{i=1}^{n}(y_{i} - \bar{y})^{2}
= \sum_{i=1}^{n}y_{i}^{2} - n(\bar{y})^2 \\
\end{align*}
\end{defn}
If the value of $r$ is close to $1$, then there is a strong positive linear relationship. \\
If the value of $r$ is close to $-1$, then there is a strong negative linear relationship. \\
If the value of $r$ is close to $0$, then there is no linear relationship between the two variates.
\newpage
\section{Distribution Theory}
If $Y_{1}, Y_{2}, \dots, Y_{n} \sim N(\mu, \sigma^{2})$ are independent, then
$$\bar{Y} \sim N(\mu, \frac{\sigma^{2}}{n})$$
and
$$\frac{\bar{Y} - \mu}{\sigma/\sqrt{n}} = Z \sim N(0, 1) $$
For large $n$,
$$\frac{\bar{Y} - \mu}{s/\sqrt{n}} \approx Z \sim N(0, 1) $$
\newpage
\section{Statistical Models and Maximum Likelihood Estimation}
\begin{defn}
\emph{The \textbf{relative likelihood function} is defined as}
$$R(\theta) = \frac{L(\theta)}{L(\hat{\theta})} ~~~for~\theta \in \Omega$$
$Note~that~0 \leq R(\theta) \leq 1~for~all~\theta \in \Omega$.
\end{defn}
\begin{defn}
\emph{The \textbf{log likelihood function} is defined as}
$$l(\theta) = \ln L(\theta) ~~~for~\theta \in \Omega$$
\end{defn}
\subsection{Likelihood Function for Binomial Distribution}
The maximum likelihood estimate of $\theta$ is $\hat{\theta} = y / n$.
\subsection{Likelihood Function for Poisson Distribution}
The value $\theta = \bar{y}$ maximizes $l(\theta)$ and so $\hat{\theta} = \bar{y}$ is the maximum likelihood estimate of $\theta$.
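To see this, note that
$$l(\theta) = \left(\sum_{i=1}^{n}y_{i}\right)\ln\theta - n\theta - \sum_{i=1}^{n}\ln(y_{i}!)$$
so setting $l'(\theta) = \frac{1}{\theta}\sum_{i=1}^{n}y_{i} - n = 0$ gives $\theta = \bar{y}$.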
\subsection{Likelihood Function for Exponential Distribution}
The value $\theta = \bar{y}$ maximizes $l(\theta)$ and so $\hat{\theta} = \bar{y}$ is the maximum likelihood estimate of $\theta$ for an Exponential Distribution $\sim Exp(\theta)$.
\subsection{Likelihood Function for Gaussian Distribution}
The maximum likelihood estimate of $\theta$ is $\hat{\theta} = (\hat{\mu}, \hat{\sigma})$, where
$$
\hat{\mu} = \frac{1}{n}\sum_{i=1}^{n}y_{i} = \bar{y}
~~\text{and}~~
\hat{\sigma} = \left[\frac{1}{n}\sum_{i=1}^{n}(y_{i} - \bar{y})^{2}\right]^{1/2}
$$
Note that $\hat{\sigma} \not = s$ (sample variance).
\subsection{Invariance Property of Maximum Likelihood Estimates}
\begin{theorem}
\emph{If $\hat{\theta}$ is the maximum likelihood estimate of $\theta$, then $g(\hat{\theta})$ is the maximum likelihood estimate of $g(\theta)$.}
\end{theorem}
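\begin{ex}
If $Y_{1}, \dots, Y_{n} \sim Poi(\theta)$, then $\hat{\theta} = \bar{y}$, and by the invariance property the maximum likelihood estimate of $P(Y = 0) = e^{-\theta}$ is $e^{-\bar{y}}$.
\end{ex}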
\newpage
\section{Estimation}
\subsection{Confidence Intervals and Pivotal Quantities}
In general, construct a pivot using the estimator, use that to construct coverage interval, estimate it and find the confidence interval.
\begin{defn}
A $100p\%$, where $0 \leq p \leq 1$, confidence interval means that $100p\%$ of the intervals constructed from repeated samples will contain the true unknown value of the parameter.
\end{defn}
To determine the correct value to look for in the distribution tables, calculate $(1+p)/2$ where $100p\%$ is the level of confidence. \\
For example, the $95\%$ confidence interval needs to look at $\frac{1+0.95}{2} = 0.975$.
\begin{theorem}
\textbf{Central Limit Theorem} \\
If $n$ is large, and if $Y_{1}, \dots, Y_{n}$ are drawn from a distribution with mean $\mu$ and variance $\sigma^{2}$, then $\bar{Y}$ has approximately a $N\left(\mu, \frac{\sigma^{2}}{n}\right)$ distribution.
\end{theorem}
For a Binomial Distribution, the confidence interval is
$$\left[~\hat{\pi} \pm z^{*}\sqrt{\frac{\hat{\pi}(1 - \hat{\pi})}{n}}~\right]$$
where $\hat{\pi} = \frac{y}{n}$, $y$ is the observed data. \\
To determine the sample size
$$n \geq \left(\frac{z^{*}}{MoE}\right)^{2}\hat{\pi}(1 - \hat{\pi})$$
where $MoE$ is the margin of error. \\
To be conservative, we usually pick $\hat{\pi} = 0.5$ as it maximizes $\hat{\pi}(1 - \hat{\pi})$. \\
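\begin{ex}
For a $95\%$ confidence interval with a margin of error of $0.03$ and the conservative choice $\hat{\pi} = 0.5$,
$$n \geq \left(\frac{1.96}{0.03}\right)^{2}(0.5)(0.5) \approx 1067.1$$
so a sample of at least $1068$ is needed.
\end{ex}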
For a Poisson Distribution, $Y_1, Y_2, \dots, Y_n \sim Poi(\mu)$, the pivotal quantity is
$$\frac{\bar{Y} - \mu}{\sqrt{\frac{\bar{Y}}{n}}} = Z \sim N(0,1)$$
and the confidence interval is
$$\left[~\bar{y} \pm z^{*}\sqrt{\frac{\bar{y}}{n}}~\right]$$
For an Exponential Distribution $Y_1, Y_2, \dots, Y_n \sim Exp(\mu)$, for large $n$, the pivotal quantity is
$$\frac{\bar{Y} - \mu}{\mu / \sqrt{n}} = Z \sim N(0, 1)$$
and the confidence interval is then
$$\left[~ \frac{\bar{Y}}{1 + z^{*}\frac{1}{\sqrt{n}}}, \frac{\bar{Y}}{1 - z^{*}\frac{1}{\sqrt{n}}} ~\right]$$
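This follows since $-z^{*} \leq \frac{\bar{Y} - \mu}{\mu/\sqrt{n}} \leq z^{*}$ rearranges to $\frac{\bar{Y}}{1 + z^{*}/\sqrt{n}} \leq \mu \leq \frac{\bar{Y}}{1 - z^{*}/\sqrt{n}}$. \\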
Otherwise, consider
$$\sum_{i=1}^{n}\frac{2Y_{i}}{\mu} \sim \chi_{2n}^{2}$$
and
$$P\left(a \leq \sum_{i=1}^{n}\frac{2Y_{i}}{\mu} \leq b\right) = p$$
from which the confidence interval is
$$\left[\frac{2\sum Y_{i}}{b}, \frac{2\sum Y_{i}}{a}\right]$$
\subsection{Chi-Squared Distribution $\sim X_{k}^{2}$}
The Gamma function is
$$\Gamma(\alpha) = \int_{0}^{\infty}y^{\alpha -1}e^{-y}dy ~~\text{for}~\alpha > 0$$
Properties of the Gamma Function:
\begin{itemize}
\item $\Gamma(\alpha) = (\alpha - 1)\Gamma(\alpha - 1)$
\item $\Gamma(\alpha) = (\alpha - 1)!$
\item $\Gamma(1/2) = \sqrt{\pi}$
\end{itemize}
The $X_{k}^{2}$ distribution is a continuous family of distributions on $(0, \infty)$ with probability density function
$$f(x;k) = \frac{1}{2^{k/2}\Gamma(k/2)}x^{(k/2)-1}e^{-x/2}~~~\text{for}~x > 0$$
where $k \in \{1, 2, \dots\}$ is a parameter of the distribution. \\
$k$ is referred to as the ``degrees of freedom'' (d.f) parameter. \\
For $X \sim X_{k}^{2}$
\begin{itemize}
\item $E(X) = k$ and $Var(X) = 2k$
\item If $k = 1$, $X = Z^{2}$ and $Z \sim G(0,1)$
\item If $k = 2$, $X \sim Exp(2)$ ($\theta = 2$)
\item If $k$ is large, $X \stackrel{Appr.}{\sim} N(k, 2k)$
\item Let $X_{k_{1}}, X_{k_{2}}$ be independent random variables with $X_{k_{i}} \sim X_{k_{i}}^{2}$. \\
Then $X_{k_{1}} + X_{k_{2}} \sim X_{k_{1} + k_{2}}^{2}$.
\end{itemize}
\begin{theorem}
If $Y_{i} \sim Exp(\mu)$, then
$$\frac{2Y_{i}}{\mu} \sim Exp(2)$$
which is the same distribution as $X_{2}^{2}$.
\end{theorem}
\subsection{Student's \emph{t} Distribution}
Student's $t$ distribution has probability density function
$$
f(t;k) = c_{k}\left(1 + \frac{t^{2}}{k}\right)^{-(k+1)/2}
~~~\text{for}~t \in \Re~\text{and}~ k = 1, 2, \dots
$$
where the constant $c_{k}$ is given by
$$
c_{k} = \frac{\Gamma(\frac{k+1}{2})}{\sqrt{k\pi}\Gamma(\frac{k}{2})}
~~~k\text{ is the }degrees~of~freedom
$$
Properties of $T$:
\begin{itemize}
\item[i)] Range of $T$: $(-\infty, \infty)$
\item[ii)] T is symmetric around 0
\item[iii)] As $k \uparrow$, $T \rightarrow Z$
\end{itemize}
\begin{theorem}
\emph{Suppose $Z \sim G(0,1)$ and $U \sim X_{k}^{2}$ independently. Let}
$$T = \frac{Z}{\sqrt{U / k}}$$
\emph{Then T has \textbf{Student's} t \textbf{distribution with} k \textbf{degrees of freedom}}. \\
In particular, for a random sample from $G(\mu, \sigma)$,
$$\frac{\bar{Y} - \mu}{S / \sqrt{n}} \sim t_{n-1}$$
\end{theorem}
\subsection{Likelihood-Based Confidence Intervals}
\begin{theorem}
$A~100p\%~likelihood~interval~is~an~approximate~100q\%~confidence~interval,~where~q = 2P(Z \leq \sqrt{-2\ln p}) - 1~and~Z \sim N(0,1).$
\end{theorem}
\begin{ex}
Show that a 1\% likelihood interval is an approximate 99.8\% confidence interval. \\
Note that $p = 0.01$
\begin{align*}
q &= 2P(Z \leq \sqrt{-2\ln(0.01)}) - 1 \\
&\approx 2P(Z \leq 3.03) - 1 \\
&= 2(0.99878) - 1 \\
&= 0.998 = 99.8\%
\end{align*}
\end{ex}
\begin{theorem}
\emph{If a is a value such that}
$$P = 2P(Z \leq a) - 1 ~~ where ~ Z \sim N(0,1)$$
\emph{then the likelihood interval }$\{\theta : R(\theta) \geq e^{-a^{2}/2}\}$ \emph{is an approximate $100p\%$ confidence interval.}
\end{theorem}
\begin{ex}
Since
$$0.95 = 2P(Z \leq 1.96) - 1 ~\text{where}~Z\sim N(0,1)$$
and
$$e^{-(1.96)^{2}/2} = e^{-1.9208} \approx 0.1465 \approx 0.15$$
	therefore a $15\%$ \emph{likelihood interval} for $\theta$ is also an approximate $95\%$ \emph{confidence interval} for $\theta$.
\end{ex}
\subsection{Confidence Intervals for Parameters in the $G(\mu, \sigma)$ Model}
If $Y_{1}, \dots, Y_{n}$ are independent $N(\mu, \sigma^{2})$, then $\bar{Y} \sim N(\mu, \frac{\sigma^{2}}{n})$ and
\begin{align}
\frac{\bar{Y} - \mu}{s / \sqrt{n}}~ &\sim t_{n-1} \\
\frac{(n-1)S^{2}}{\sigma^{2}} ~&\sim X_{n-1}^{2}
\end{align}
General Rule: \\
The Confidence Interval for $\mu$ if $\sigma$ is unknown is
$$\left[~\bar{y} \pm t^{*}\frac{s}{\sqrt{n}}~\right]$$
When $\sigma$ is unknown, we replace $\sigma$ by its estimate $s$ and use the t-pivot. \\
Confidence interval when $\sigma$ is known is
$$\left[~\bar{y} \pm z^{*}\frac{\sigma}{\sqrt{n}}~\right]$$
When $\sigma$ is known, we use the z-pivot. \\
If $n$ is really large, then the $t^{*}$ value converges to the corresponding $z^{*}$ value (by Central Limit Theorem). \\
\textbf{Confidence Intervals for $\sigma^{2}$ and $\sigma$}
\begin{theorem}
\emph{Suppose} $Y_{1}, Y_{2}, \dots, Y_{n}$ \emph{is a random sample from the} $G(\mu, \sigma)$ \emph{distribution with sample variance} $S^{2}$.
\emph{Then the random variable}
$$\frac{(n-1)S^{2}}{\sigma^{2}} = \frac{1}{\sigma^{2}}\sum_{i=1}^{n}(Y_{i} - \bar{Y})^{2}$$
\emph{has a Chi-squared distribution with} $n - 1$ \emph{degrees of freedom}.
\end{theorem}
Using the theorem, we can construct a $100p\%$ confidence interval for the parameter $\sigma^{2}$ or $\sigma$. \\
Recall this is the same as the equation (2) in this sub-section. \\
We can find constants $a$ and $b$ such that
$$P(a \leq U \leq b) = p$$
where $U \sim X_{n-1}^{2}$. \\
So a $100p\%$ confidence interval for $\sigma^{2}$ is
$$\left[~\frac{(n-1)s^{2}}{b},\frac{(n-1)s^{2}}{a}~\right]$$
and a $100p\%$ confidence interval for $\sigma$ is
$$\left[~\sqrt{\frac{(n-1)s^{2}}{b}},\sqrt{\frac{(n-1)s^{2}}{a}}~\right]$$ \\
Unlike the confidence interval for $\mu$, the confidence interval for $\sigma^{2}$ is \emph{not symmetric} about $s^{2}$, the estimator of $\sigma^{2}$.
The $X_{n-1}^{2}$ distribution is not a symmetric distribution. \\
\textbf{Prediction Interval for a Future Observation} \\
Suppose a future observation $Y \sim G(\mu,\sigma)$ is \textbf{independent} of the sample $Y_{1}, \dots, Y_{n}$. Then
$$Y - \widetilde{\mu} = Y - \bar{Y} \sim N\left(0, \sigma^{2}\left(1 + \frac{1}{n}\right)\right)$$
Also
$$\frac{Y - \bar{Y}}{S\sqrt{1 + \frac{1}{n}}} \sim t_{n-1}$$
is a pivotal quantity which can be used to obtain an interval of values for $Y$. \\
Let $t^{*}$ be a value such that $P(-t^{*} \leq T \leq t^{*}) = p$ or $P(T \leq t^{*}) = (1+p)/2$ which is obtained from tables. Thus a $100p\%$ prediction interval for $Y$ is
$$\left[ ~\bar{y} \pm t^{*}s\sqrt{1 + \frac{1}{n}} ~\right]$$
\newpage
\section{Tests of Hypothesis}
\begin{defn}
A \emph{hypothesis} in statistic is a claim made about the values of a certain parameter of the population.
\end{defn}
There are \textbf{two} competing hypotheses:
\begin{itemize}
\item \emph{Null} Hypothesis, denoted $H_{0}$; current ``status quo'' assumption.
\item \emph{Alternative} Hypothesis, denoted $H_{1}$; seeks to challenge $H_{0}$.
\end{itemize}
\begin{defn}
A \emph{test statistic \emph{or} discrepancy measure} $D$ is a function of the data \textbf{Y} that is constructed to measure the degree of ``agreement'' between the data \textbf{Y} and the null hypothesis $H_{0}$.
\end{defn}
For every testing decision, there is a possibility of making two kinds of errors:
\begin{itemize}
\item[\textbf{Type I}] $H_{0}$ is true; $H_{0}$ is rejected.
\item[\textbf{Type II}] $H_{1}$ is true; $H_{0}$ is not rejected.
\end{itemize}
For a fixed sample size, if the probability of a Type I error goes down, then the probability of a Type II error goes up, and vice versa.
\subsection{p-value}
Suppose there's the test statistic $D = D(\textbf{Y})$ to test the hypothesis $H_{0}$. \\
Also suppose that $d = D(\textbf{y})$ is the observed value of $D$.
\begin{defn}
A \emph{p-value} or observed significance level of the test of hypothesis $H_{0}$ using test statistic $D$ is
$$\emph{p-value} = P(D \geq d; H_{0})$$
\end{defn}
\textbf{Caution}: The \emph{p-value} is \textbf{not} the probability that $H_{0}$ is true.
\begin{table}[H]
\caption{Interpretation of \emph{p-values}}
\begin{center}
\begin{tabular}{c | p{9cm}}
\bf \emph{p-value} & \bf Interpretation \\ \hline \hline
\emph{p-value} $> 0.1$ & No evidence against $H_0$ based on the observed data. \\ \hline
$0.05 < \emph{p-value} \leq 0.10$ & Weak evidence against $H_0$ based on the observed data. \\ \hline
$0.01 < \emph{p-value} \leq 0.05$ & Evidence against $H_0$ based on the observed data. \\ \hline
$0.001 < \emph{p-value} \leq 0.01$ & Strong evidence against $H_0$ based on the observed data. \\ \hline
\emph{p-value} $\leq 0.001$ & Very strong evidence against $H_0$ based on the observed data.
\end{tabular}
\end{center}
\end{table}
If the \emph{p-value} is not small, it \textbf{cannot be concluded that} $H_{0}$ \textbf{is true}.
It can only be said that there is \textbf{no evidence against the null hypothesis in light of the observed data}. \\
\textbf{Confidence Interval vs. Hypothesis Testing} \\
\emph{Confidence interval} is the range of ``reasonable'' values for $\theta$, given the level of confidence and sample data. \\
\emph{Hypothesis testing} tests whether a particular value of $\theta$ is ``reasonable'' given the \emph{p-value} and sample data.
\subsection{Tests of Hypotheses for Parameter in the Poi($\mu$) Model}
Suppose $Y_{1}, Y_{2}, \dots, Y_{n} \sim Poi(\mu)$ \\
$H_{0}: \mu = \mu_{0}$ and $H_{1}: \mu \not = \mu_{0}$ \\
Mean and Variance both are $\mu$ \\
By the Central Limit Theorem, $\bar{Y}$ has approximately a $N\left(\mu, \frac{\mu}{n}\right)$ distribution \\
Thus the test statistic $D$ is
\begin{align*}
&\frac{\bar{Y} - \mu}{\sqrt{\mu / n}} = Z \sim N(0,1) \\
\rightarrow~~~ &\frac{\bar{y} - \mu_{0}}{\sqrt{\mu_{0} / n}} = Z \sim N(0,1)
\end{align*}
\subsection{Tests of Hypotheses for Parameters in the $G(\mu, \sigma)$ Model}
\textbf{Hypothesis Tests for} $\mu$ \\
Using the test statistic
$$D = \frac{|\bar{Y} - \mu_0|}{S/\sqrt{n}}$$
Then using the sample mean $\bar{y}$ and standard deviation $s$, we get
$$d = \frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}$$
The \emph{p-value} can be then obtained via
\begin{align*}
\emph{p-value} &= P(D \geq d) \\
&= P(|T| \geq d) \\
&= 1 - P(-d \leq T \leq d) \\
&= 2[1 - P(T \leq d)] ~~~\text{where } T \sim t_{n-1}
\end{align*}
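\begin{ex}
(Hypothetical data.) Suppose $n = 16$, $\bar{y} = 10.5$, $s = 2$, and we test $H_{0} : \mu = 10$ against $H_{1} : \mu \not= 10$. Then
$$d = \frac{|10.5 - 10|}{2/\sqrt{16}} = 1$$
and \emph{p-value} $= 2[1 - P(T \leq 1)] \approx 2(1 - 0.833) \approx 0.33$ where $T \sim t_{15}$, so there is no evidence against $H_{0}$ based on the observed data.
\end{ex}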
\textbf{One-sided hypothesis tests} \\
Suppose that the null hypothesis is $H_{0} : \mu = \mu_{0}$ and the alternative hypothesis is $H_{1} : \mu > \mu_{0}$. \\
To test $\mu = \mu_{0}$, use the same test statistic and observed value.
Then \emph{p-value} can be obtained via
\begin{align*}
\emph{p-value} &= P(D \geq d) \\
&= P(T \geq d) \\
&= 1 - P(T \leq d) ~~~\text{where } T \sim t_{n-1}
\end{align*}
\textbf{Relationship Between Hypothesis Testing and Interval Estimation} \\
Suppose $y_1, y_2, \dots, y_n$ is an observed random sample from the $G(\mu, \sigma)$ distribution. \\
Suppose $H_0 : \mu = \mu_0$ is tested, and we have
$$\emph{p-value} \geq 0.05$$
$$\text{if and only if } P\left(\frac{|\bar{Y} - \mu_0|}{S/\sqrt{n}} \geq \frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}; H_0 : \mu = \mu_0 \text{is true}\right) \geq 0.05$$
$$
\text{if and only if } P\left(|T| \geq \vphantom{\frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}}\right.
\underbrace{\frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}}_{\text{b}}
\left.\vphantom{\frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}}\right) \geq 0.05 ~~~\text{where } T \sim t_{n-1}
$$
$$
\text{if and only if } P\left(|T| \leq \vphantom{\frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}}\right.
\underbrace{\frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}}_{\text{a}}
\left.\vphantom{\frac{|\bar{y} - \mu_0|}{s/\sqrt{n}}}\right) \leq 0.95
$$
$$\text{if and only if } \frac{|\bar{y} - \mu_0|}{s/\sqrt{n}} \leq a ~~~\text{where } P(|T| \leq a) = 0.95$$
$$\text{if and only if } \mu_0 \in \left[~\bar{y} - a\frac{s}{\sqrt{n}}, \bar{y} + a\frac{s}{\sqrt{n}}~\right]$$
which is a $95\%$ confidence interval for $\mu$. \\
In general, suppose we have data $\textbf{y}$, a model $f(\textbf{y};\theta)$ and we use the same pivotal quantity to construct a confidence interval for $\theta$ and a test of the hypothesis $H_0 : \theta = \theta_0$. \\
Then the parameter value $\theta = \theta_0$ is inside a $100q\%$ confidence interval for $\theta$ if and only if the \emph{p-value} for testing $H_0 : \theta = \theta_0$ is greater than $1 - q$. \\
The disadvantage is that we need to construct the appropriate test statistic $D$, which may be difficult if the original distribution is complicated. \\
\textbf{Hypothesis tests for} $\sigma$ \\
For testing $H_0 : \sigma = \sigma_0$, use the test statistic
$$\frac{(n-1)S^{2}}{\sigma_{0}^{2}} = U \sim \chi_{n-1}^{2}$$
Note that both large and small values of $U$ provide evidence against $H_0$, due to the asymmetric shape of Chi-squared distributions. \\
To approximate the \emph{p-value}:
\begin{itemize}
\item[1.] Let $u = (n-1)s^{2}/\sigma_{0}^{2}$ denote the observed value of $U$ from the data
\item[2.] If $u$ is large (that is, if $P(U \leq u) > 0.5$) compute the \emph{p-value} as
$$\emph{p-value} = 2P(U \geq u)$$
where $U \sim \chi^{2}_{n-1}$
\item[3.] If $u$ is small (that is, if $P(U \leq u) < 0.5$) compute the \emph{p-value} as
$$\emph{p-value} = 2P(U \leq u)$$
where $U \sim \chi^{2}_{n-1}$
\end{itemize}
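For example (hypothetical numbers): with $n = 20$, $s^{2} = 12$ and $H_{0}: \sigma_{0}^{2} = 8$, we get $u = 19(12)/8 = 28.5$ and $P(U \leq 28.5) > 0.5$, so
$$\text{p-value} = 2P(U \geq 28.5) \approx 0.15 ~~~\text{where } U \sim \chi^{2}_{19}$$
giving no evidence against $H_{0}$.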
\subsection{Likelihood Ratio Tests of Hypotheses - One Parameter}
When a pivotal quantity does not exist, a general method for finding a test statistic with good properties can be based on the likelihood function.
\begin{theorem}
Suppose
\begin{align*}
\theta &= ~\text{unknown parameter} \\
n &= ~\text{sample size} \\
\hat{\theta} &= ~\text{maximum likelihood estimate of $\theta$ (from the observed data)} \\
\widetilde{\theta} &= ~\text{maximum likelihood estimator of $\theta$ (the corresponding random variable)} \\
H_{0} &: ~\theta = \theta_{0} \\
H_{1} &: ~\theta \not = \theta_{0}
\end{align*}
Then for large n, the Likelihood Ratio Test Statistic is
\begin{align*}
\Lambda(\theta_{0}) &= -2\ln{\frac{L(\theta_{0})}{L(\widetilde{\theta})}} \sim \chi_{1}^{2} \\
\Lambda(\theta_{0}) &= 2[\ln{L(\widetilde{\theta})} - \ln{L(\theta_{0})}]
\end{align*}
Using the observed value of $\Lambda(\theta_{0})$, denoted by
$$
\lambda(\theta_{0})
= -2\ln{\left[\frac{L(\theta_{0})}{L(\hat{\theta})}\right]}
= -2\ln{R(\theta_0)}
$$
where $R(\theta_0)$ is the relative likelihood function evaluated at $\theta = \theta_0$. \\
The \emph{p-value} can then be approximated via
\begin{align*}
\emph{p-value}
&\approx P[W\geq \lambda(\theta_0)]~~~\text{where }W\sim \chi_{1}^{2} \\
&= P\left(|Z|\geq \sqrt{\lambda(\theta_0)}\right) ~~~\text{where }Z\sim G(0, 1) \\
&= 2\left[1 - P\left(Z \leq \sqrt{\lambda(\theta_0)}\right)\right]
\end{align*}
\end{theorem}
\subsubsection{Likelihood Ratio Test Statistic for Binomial}
\begin{align*}
\lambda(\theta_0)
&= -2\ln{\left[\left(\frac{\theta_0}{\hat{\theta}}\right)^{y}\left(\frac{1 - \theta_0}{1 - \hat{\theta}}\right)^{n - y}\right]}
\end{align*}
where $\hat{\theta} = y/n$
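For example (hypothetical numbers): with $y = 12$, $n = 40$ and $H_{0}: \theta = 0.5$, we have $\hat{\theta} = 0.3$ and
$$\lambda(0.5) = -2\ln{\left[\left(\frac{0.5}{0.3}\right)^{12}\left(\frac{0.5}{0.7}\right)^{28}\right]} \approx 6.59$$
so $\text{p-value} \approx P(W \geq 6.59) \approx 0.01$ where $W \sim \chi_{1}^{2}$.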
\subsubsection{Likelihood Ratio Test Statistic for Exponential}
Suppose $y_1, y_2, \dots, y_n \sim \text{Exponential}(\theta)$
\begin{align*}
\lambda(\theta_0) = -2
\ln{
\left[
\left(\frac{\hat{\theta}}{\theta_0}\right)^{n}
e^{n(1 - \hat{\theta}/\theta_0)}
\right]
}
\end{align*}
\subsubsection{Likelihood Ratio Test Statistic for $G(\mu, \sigma)$}
Suppose $Y \sim G(\mu, \sigma)$ with p.d.f.
$$
f(y; \mu, \sigma) =
\frac{1}{\sqrt{2\pi}\sigma}
\text{exp}\left[-\frac{1}{2\sigma^{2}}(y - \mu)^{2}\right]
$$
Then the likelihood ratio test statistic is
$$
\Lambda(\theta_0) =
\left(\frac{\bar{Y} - \mu_0}{\sigma/\sqrt{n}}
\right)^{2}
$$
Notice that $\Lambda(\theta_0)$ is the square of a standard Normal random variable
$$\frac{\bar{Y} - \mu_0}{\sigma/\sqrt{n}}$$
Therefore, it has exactly a $\chi_{1}^{2}$ distribution.
\newpage
\section{Simple Linear Regression Model}
$Y_{i}$ is the \emph{Response Variate}; the attribute whose variability we want to explain. \\
$X_{i}$ is the \emph{Explanatory Variable} and is given.
We want to explain $Y$ using $X$. \\
The relevant degrees of freedom for an additive model are equal to
$$n - \text{number of unknown parameters in the systematic part of the model}$$
Assumptions made:
\begin{enumerate}
\item Linearity: the mean of $Y$ is a linear function of $x$.\\
$E(Y_{i}) = \alpha + \beta x_{i}$
\item Constant variance (homoscedasticity): $Var(Y_{i}) = \sigma^{2}$ is the same for every $x$. \\
If instead $\sigma^{2} = \sigma^{2}(x)$ depends on $x$, the model is heteroscedastic.
\item Normality: the $Y_{i}$ are normally distributed. \\
$Y_{i} = \text{Constant} + \text{Normal}$ \\
$Y_{i} = \alpha + \beta x_{i} + R_{i}$ \\
where $i = 1, \dots, n$, $R_{i} \sim N(0, \sigma^{2})$, $R_{i}$ independent
\end{enumerate}
Our model is independent $Y_{i}$ such that
$$Y_{i} \sim N(\mu(x_{i}), \sigma^{2}) \text{ where } \mu(x_{i}) = \alpha + \beta x_{i}$$
\subsection{Maximum Likelihood Estimators}
Let
$$a_{i} = \frac{x_{i} - \bar{x}}{S_{xx}}$$
which has properties
\begin{align*}
\sum a_{i} &= 0 \\
\sum a_{i}x_{i} &= 1 \\
\sum a_{i}^{2} &= \frac{1}{S_{xx}}
\end{align*}
Also, we have
\begin{align*}
S_{xx} &= \sum_{i=1}^{n}(x_{i} - \bar{x})^{2}
= \sum_{i=1}^{n}(x_{i} - \bar{x})x_{i}
= \sum_{i=1}^{n}x_{i}^{2} - n\bar{x}^{2} \\
S_{xy} &= \sum_{i=1}^{n}(x_{i} - \bar{x})(Y_{i} - \bar{Y})
= \sum_{i=1}^{n}(x_{i} - \bar{x})Y_{i}
= \sum_{i=1}^{n}x_{i}Y_{i} - n\bar{x}\bar{Y} \\
S_{yy} &= \sum_{i=1}^{n}(Y_{i} - \bar{Y})^{2}
= \sum_{i=1}^{n}(Y_{i} - \bar{Y})Y_{i}
= \sum_{i=1}^{n}Y_{i}^{2} - n\bar{Y}^{2}
\end{align*}
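As a quick check with toy data $x = (1, 2, 3)$: $\bar{x} = 2$, so
$$S_{xx} = (1-2)^{2} + (2-2)^{2} + (3-2)^{2} = 2 = \underbrace{14}_{\sum x_{i}^{2}} - \underbrace{3(2)^{2}}_{n\bar{x}^{2}}$$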
\subsubsection{$\beta$}
\begin{align*}
\widetilde{\beta} &= \frac{S_{xy}}{S_{xx}} \\
&= \sum a_{i}Y_{i} ~~\text{where } a_{i} = \frac{x_{i} - \bar{x}}{S_{xx}}
\end{align*}
\textbf{Distribution for } $\widetilde{\beta}$ \\
The mean is
\begin{align*}
E(\widetilde{\beta}) &= \sum_{i=1}^{n} a_{i}E(Y_{i}) = \sum_{i=1}^{n} a_{i}(\alpha + \beta x_{i}) \\
&= \alpha\sum_{i=1}^{n}a_{i} + \beta\sum_{i=1}^{n}a_{i}x_{i} = \beta \sum_{i=1}^{n}a_{i}x_{i} ~~\text{since } \sum_{i=1}^{n} a_{i} = 0 \\
&= \beta ~~\text{since } \sum_{i=1}^{n} a_{i}x_{i} = 1
\end{align*}
Similarly, the variance is
\begin{align*}
Var(\widetilde{\beta}) &= \sum_{i=1}^{n} a_{i}^{2}Var(Y_{i}) \\
&= \sigma^{2} \sum_{i=1}^{n} a_{i}^{2} \\
&= \frac{\sigma^{2}}{S_{xx}} ~~\text{since } \sum_{i=1}^{n}a_{i}^{2} = \frac{1}{S_{xx}}
\end{align*}
Thus
$$\widetilde{\beta} \sim N\left(\beta, \frac{\sigma^{2}}{S_{xx}}\right)$$
\textbf{Confidence Interval for} $\beta$ \\
If $\sigma$ is known, then
$$\frac{\widetilde{\beta} - \beta}{\sigma / \sqrt{S_{xx}}} \sim N(0, 1)$$
and the $100p\%$ confidence interval for $\beta$ is given by
$$\left[~ \hat{\beta} \pm z^{*}\frac{\sigma}{\sqrt{S_{xx}}} ~\right]$$
where
$$P(|Z| \leq z^{*}) = p ~~~ Z \sim N(0, 1)$$
\newline
Otherwise,
$$\frac{\widetilde{\beta} - \beta}{S_{e} / \sqrt{S_{xx}}} \sim t_{n-2}$$
then the $100p\%$ confidence interval for $\beta$ is given by
$$\left[~ \hat{\beta} \pm t^{*}\frac{S_{e}}{\sqrt{S_{xx}}} ~\right]$$
where
$$P(|T| \leq t^{*}) = p, ~~~ T \sim t_{n-2}$$
\newline
\textbf{Test of Hypothesis of No Relationship} \\
To test the hypothesis of no relationship or $H_{0}: \beta = 0$, we use the test statistic
$$D = \frac{|\widetilde{\beta} - 0|}{S_{e} / \sqrt{S_{xx}}}$$
with observed value of
$$d = \frac{|\hat{\beta} - 0|}{s_{e} / \sqrt{S_{xx}}}$$
and p-value given by
$$\text{p-value} = P\left(|T| \geq \frac{|\hat{\beta} - 0|}{s_{e} / \sqrt{S_{xx}}}\right)$$
where $T \sim t_{n-2}$
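For example (hypothetical numbers): with $n = 20$, $\hat{\beta} = 0.5$ and $s_{e}/\sqrt{S_{xx}} = 0.2$,
$$d = \frac{|0.5 - 0|}{0.2} = 2.5, ~~~\text{p-value} = 2[1 - P(T \leq 2.5)] \approx 0.02 ~~~\text{where } T \sim t_{18}$$
which is evidence against the hypothesis of no relationship.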
\subsubsection{$\alpha$}
\begin{align*}
\widetilde{\alpha} &= \bar{Y} - \widetilde{\beta}\bar{x} \\
&= \bar{Y} - \left(\frac{S_{xy}}{S_{xx}}\right)\bar{x}
\end{align*}
\textbf{Distribution for } $\widetilde{\alpha}$ \\
$$\widetilde{\alpha} \sim N\left(\alpha, \sigma^{2}\left(\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}\right)\right)$$
\textbf{Confidence Interval for} $\alpha$ \\
If $\sigma$ is known
$$\frac{\widetilde{\alpha} - \alpha}{\sigma \sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}}} \sim N(0, 1)$$
then a $100p\%$ confidence interval for $\alpha$ is given by
$$\left[~ \hat{\alpha} \pm z^{*}\sigma \sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}} ~\right]$$
where
$$P(|Z| \leq z^{*}) = p ~~~\text{and } Z \sim N(0, 1)$$
\newline
Otherwise,
$$\frac{\widetilde{\alpha} - \alpha}{S_{e}\sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}}} \sim t_{n-2}$$
then a $100p\%$ confidence interval for $\alpha$ is given by
$$\left[~ \hat{\alpha} \pm t^{*}s_{e}\sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}} ~\right]$$
where
$$P(|T| \leq t^{*}) = p ~~~ T \sim t_{n-2}$$
\newline
\textbf{Test of Hypothesis} \\
To test the hypothesis or $H_{0}: \alpha = \alpha_{0}$, we use the test statistic
$$D = \frac{|\widetilde{\alpha} - \alpha_{0}|}{S_{e}\sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}}}$$
with observed value of
$$d = \frac{|\hat{\alpha} - \alpha_{0}|}{s_{e}\sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}}}$$
and p-value given by
$$\text{p-value} = P\left(~|T| \geq \frac{|\hat{\alpha} - \alpha_{0}|}{s_{e}\sqrt{\frac{1}{n} + \frac{\bar{x}^{2}}{S_{xx}}}}~\right)$$
where $T \sim t_{n-2}$
\subsubsection{$\sigma^{2}$ and $S_{e}^{2}$}
The maximum likelihood estimator of $\sigma^{2}$ is
\begin{align*}
\widetilde{\sigma}^{2} &= \frac{1}{n} \sum_{i=1}^{n} (Y_{i} - \widetilde{\alpha} - \widetilde{\beta}x_{i})^{2} \\
&= \frac{1}{n}\left[~S_{yy} - \widetilde{\beta}S_{xy}~\right]
\end{align*}
The \emph{Standard Error} is defined as
$$
S_{e}^{2}
= \frac{1}{n - 2} \sum_{i=1}^{n} (Y_{i} - \widetilde{\alpha} - \widetilde{\beta}x_{i})^{2}
= \frac{1}{n - 2}\left[~S_{yy} - \widetilde{\beta}S_{xy}~\right]
$$
\textbf{Confidence Interval for} $\sigma^{2}$ \\
Notice that
$$\frac{(n-2)S_{e}^{2}}{\sigma^{2}} \sim \chi_{n-2}^{2}$$
And the $100p\%$ confidence interval for $\sigma^{2}$ is
$$\left[~\frac{(n-2)s_{e}^{2}}{b}, \frac{(n-2)s_{e}^{2}}{a}~\right]$$
where
$$P(a \leq X \leq b) = p ~~~\text{and } X \sim \chi_{n-2}^{2}$$
Taking square roots of the endpoints gives a $100p\%$ confidence interval for $\sigma$.
\subsection{Least Squares Estimation}
Given data $(x_{i}, y_{i}), i = 1, 2, \dots, n$ \\
The goal is to obtain a line of ``best fit'': find the line which minimizes the sum of the squared vertical distances between the observed points and the fitted line $y = \alpha + \beta x$ \\
In other words, find $\alpha$ and $\beta$ to minimize the function
$$g(\alpha, \beta) = \sum_{i=1}^{n}[y_{i} - (\alpha + \beta x_{i})]^{2}$$
those minimizers are the \emph{least squares estimates}. \\
For the Gaussian model, maximizing the likelihood is equivalent to minimizing this sum of squares, so the least squares estimates coincide with the maximum likelihood estimates.
\subsection{Confidence Intervals for the Mean Response}
The Mean Response is defined as $\mu(x) = \alpha + \beta x$ \\
The maximum likelihood estimator of $\mu(x)$ is
\begin{align*}
\widetilde{\mu}(x) &= \widetilde{\alpha} + \widetilde{\beta}x \\
&= \bar{Y} + \widetilde{\beta}(x - \bar{x}) \\
&= \sum_{i=1}^{n}a_{i}Y_{i} ~~~\text{where } a_{i} = \frac{1}{n} + (x - \bar{x})\frac{(x_{i} - \bar{x})}{S_{xx}}
\end{align*}
Notice that $a_{i}$ has the following properties
$$\sum_{i=1}^{n}a_{i} = 1, ~\sum_{i=1}^{n}a_{i}x_{i} = x ~\text{and } \sum_{i=1}^{n}a_{i}^{2} = \frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}$$
Thus, $\widetilde{\mu}(x)$ has the distribution
$$\widetilde{\mu}(x) \sim N\left(\mu(x), \sigma^{2}\left(\frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}\right)\right)$$
So
$$\frac{\widetilde{\mu}(x) - \mu(x)}{\sigma\sqrt{\frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}}} \sim N(0, 1)$$
And the $100p\%$ confidence interval for $\mu(x)$ is
$$\left[~\hat{\mu}(x) \pm z^{*}\sigma\sqrt{\frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}}~\right], ~P(|Z| \leq z^{*}) = p$$
\newline
Otherwise
$$\frac{\widetilde{\mu}(x) - \mu(x)}{S_{e}\sqrt{\frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}}} \sim t_{n-2}$$
Thus,
$$\left[~\hat{\mu}(x) \pm t^{*}s_{e}\sqrt{\frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}}~\right], ~P(|T| \leq t^{*}) = p$$
\newline
Note that $\hat{\mu}(x) = \hat{\alpha} + \hat{\beta}x$ \\
\newline
\textbf{Prediction Interval for Future Response} \\
$$Y - \widetilde{\mu}(x) \sim N\left(0, \sigma^{2}\left(1 + \frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}\right)\right)$$
Thus,
$$\frac{Y - \widetilde{\mu}(x)}{\sigma\sqrt{1 + \frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}}} \sim N(0, 1)$$
Or
$$\frac{Y - \widetilde{\mu}(x)}{S_{e}\sqrt{1 + \frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}}} \sim t_{n-2}$$
\newline
The $100p\%$ prediction interval is then
$$\left[~\hat{\mu}(x) \pm t^{*}s_{e}\sqrt{1 + \frac{1}{n} + \frac{(x - \bar{x})^{2}}{S_{xx}}} ~\right]$$
\subsection{Terminologies}
Total Sum of Squares, denoted TSS, is
$$\sum (y_{i} - \bar{y})^{2} = S_{yy}$$
Regression Sum of Squares, denoted RSS, is
$$\hat{\beta}S_{xy}$$
It is the part of the variability of $Y$ that can be explained by changes in $X$.
Error Sum of Squares, denoted ESS, is
$$S_{yy} - \hat{\beta}S_{xy} = \sum [y_{i} - (\hat{\alpha} + \hat{\beta}x_{i})]^{2}$$
It is the part of the variability \textbf{not} explained by $X$.
Thus, the Total Sum of Squares is the sum of Regression Sum of Squares and Error Sum of Squares
$$TSS = RSS + ESS$$
$$\sum (y_{i} - \bar{y})^{2} = \hat{\beta}S_{xy} + [S_{yy} - \hat{\beta}S_{xy}]$$
Note that the ratio $\frac{RSS}{TSS}$ (the coefficient of determination, $r^{2}$) should be high if the model is a good fit. \\
As ESS $\uparrow$, the fit of the model worsens.
\newpage
\section{Comparing the Means of Two Populations}
This is also known as \emph{Test for Equality of Means}.
\subsection{Case I: Common Variance}
Suppose $Y_{11}, Y_{12}, \dots, Y_{1n_{1}}$ is a random sample from $N(\mu_{1}, \sigma^2)$; \\
and \emph{independently} $Y_{21}, Y_{22}, \dots, Y_{2n_{2}}$ is a random sample from $N(\mu_{2}, \sigma^2)$. \\
Both populations have the same variance $\sigma^2$. \\
Maximum Likelihood Estimators:
\begin{align*}
\widetilde{\mu_{1}} &= \frac{1}{n_{1}}\sum_{i=1}^{n_{1}}Y_{1i} = \bar{Y_{1}} \\
\widetilde{\mu_{2}} &= \frac{1}{n_{2}}\sum_{i=1}^{n_{2}}Y_{2i} = \bar{Y_{2}} \\
\widetilde{\sigma}^{2} &=
\frac{1}{n_{1} + n_{2}}\sum_{j=1}^{2}\sum_{i=1}^{n_{j}}(Y_{ji} - \widetilde{\mu_{j}})^{2}
\end{align*}
An estimator of $\sigma^2$, referred to as the \emph{pooled estimator of variance} (adjusted for degrees of freedom), is
\begin{align*}
S_{p}^{2} &= \frac{(n_{1} - 1)S_{1}^{2} + (n_{2} - 1)S_{2}^{2}}{n_{1} + n_{2} - 2} \\
&= \frac{n_{1} + n_{2}}{n_{1} + n_{2} - 2}\widetilde{\sigma}^{2}
\end{align*}
where
$$S_{j}^{2} = \frac{1}{n_{j} - 1}\sum_{i=1}^{n_{j}}(Y_{ji} - \bar{Y_{j}})^{2}, ~~ j = 1,2$$
The estimator $S_{p}^{2}$ can be written as a \emph{weighted average} of the estimators $S_{j}^{2}$, written as
$$S_{p}^{2} = \frac{w_{1}S_{1}^{2} + w_{2}S_{2}^{2}}{w_{1} + w_{2}}$$
where $w_{j} = n_{j} - 1$ \\
We use $S_{p}^{2}$ rather than $\widetilde{\sigma}^{2}$ to estimate $\sigma^{2}$ since $S_{p}^{2}$ is unbiased: $E(S_{p}^{2}) = \sigma^2$. \\
\textbf{Confidence Interval for} $\mu_{1} - \mu_{2}$ \\
$$\bar{Y}_{1} - \bar{Y}_{2} \sim N\left(\mu_{1} - \mu_{2},
\sigma^{2}\left(\frac{1}{n_{1}} + \frac{1}{n_{2}}\right)\right)$$
$$\frac{(\bar{Y}_{1} - \bar{Y}_{2}) - (\mu_{1} - \mu_{2})}
{S_{p}\sqrt{\frac{1}{n_{1}} + \frac{1}{n_{2}}}} \sim t(n_{1} + n_{2} - 2)$$
$$\frac{(n_{1} + n_{2} - 2)S_{p}^{2}}{\sigma^2} =
\frac{1}{\sigma^2}\sum_{j=1}^{2}\sum_{i=1}^{n_{j}}(Y_{ji} - \bar{Y}_{j})^{2} \sim
\chi^{2}(n_{1} + n_{2} - 2)$$
The $100p\%$ confidence interval for $\mu_{1} - \mu_{2}$ is given by
$$\left[~\bar{y}_1 - \bar{y}_2 \pm t^{*}s_{p}\sqrt{\frac{1}{n_{1}} + \frac{1}{n_{2}}}~\right]$$
where $P(|T| \leq t^{*}) = p$ and $T \sim t(n_{1} + n_{2} - 2)$. \\
\textbf{Hypothesis Testing for} $\mu_{1} - \mu_{2}$ \\
To test $H_{0} : \mu_{1} - \mu_{2} = 0$, use the test statistic
$$D = \frac{|\bar{Y}_{1} - \bar{Y}_{2} - 0|}{S_{p}\sqrt{\frac{1}{n_{1}} + \frac{1}{n_{2}}}}
= \frac{|\bar{Y}_{1} - \bar{Y}_{2}|}{S_{p}\sqrt{\frac{1}{n_{1}} + \frac{1}{n_{2}}}}$$
The observed value of the test statistic is
$$d = \frac{|\bar{y}_{1} - \bar{y}_{2}|}{s_{p}\sqrt{\frac{1}{n_{1}} + \frac{1}{n_{2}}}}$$
and
$$\text{p-value} = P(|T| \geq d) = 2[1 - P(T \leq d)]$$
where $T \sim t(n_{1} + n_{2} - 2)$.
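For example (hypothetical numbers): with $n_{1} = n_{2} = 10$, $\bar{y}_{1} - \bar{y}_{2} = 1.2$ and $s_{p} = 1.5$,
$$d = \frac{1.2}{1.5\sqrt{\frac{1}{10} + \frac{1}{10}}} \approx 1.79, ~~~\text{p-value} = 2[1 - P(T \leq 1.79)] \approx 0.09$$
where $T \sim t(18)$, giving only weak evidence against $H_{0}$.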
\subsection{Case II: Unequal Variances}
\textbf{Note}: this assumes large sample sizes
$$\frac{\bar{Y}_{1} - \bar{Y}_{2} - (\mu_{1} - \mu_{2})}
{\sqrt{\frac{S_{1}^{2}}{n_{1}} + \frac{S_{2}^{2}}{n_{2}}}}
\rightarrow Z \sim N(0, 1)$$
The $100p\%$ confidence interval for $\mu_{1} - \mu_{2}$ is given by
$$\left[~\bar{y}_1 - \bar{y}_2 \pm z^{*}\sqrt{\frac{s_{1}^{2}}{n_{1}} + \frac{s_{2}^{2}}{n_{2}}}~\right]$$
where $P(|Z| \leq z^{*}) = p$ and $Z \sim N(0, 1)$. \\
For test statistic, use
$$D = \frac{|\bar{Y}_{1} - \bar{Y}_{2} - (\mu_{1} - \mu_{2})|}
{\sqrt{\frac{S_{1}^{2}}{n_{1}} + \frac{S_{2}^{2}}{n_{2}}}}
\rightarrow Z \sim N(0, 1)$$
\subsection{Case III: Matched Pair}
Often experimental studies designed to compare means are conducted with \emph{pairs of units}, where the responses within a pair are not independent. \\
We have the following sample pair data:
\[
\begin{pmatrix}
b_{1} & \dots & b_{n} \\
a_{1} & \dots & a_{n}
\end{pmatrix}
\]
which can be constructed into a new data set $(y_1, \dots, y_n)$ where $y_i = b_i - a_i$. \\
The new model becomes
$$Y_{i} \sim N(\mu, \sigma^{2})$$
and
$$\bar{Y} \sim N(\mu, \frac{\sigma^{2}}{n})$$
The $100p\%$ confidence interval for $\mu$ is given by
$$\left[~\bar{y} \pm t^{*}\frac{s}{\sqrt{n}}~\right]$$
where $P(|T| \leq t^{*}) = p$ and $T \sim t(n - 1)$. \\
To test for equality of the means, i.e. $H_{0} : \mu = 0$, use the test statistic
$$D = \frac{|\bar{Y} - \mu_{0}|}{S/\sqrt{n}}$$
and
$$d = \frac{|\bar{y}|}{s/\sqrt{n}}$$
where p-value $= P(D \geq d) = P(|T_{n-1}| \geq d)$ \\
\textbf{Note}: an equivalent way of testing $\mu_{1} = \mu_{2}$ is to see whether 0 belongs to the confidence interval.
\newpage
\section{Multinomial Models and Goodness of Fit Tests}
Instead of 2 categories, we have $k$ mutually exclusive and exhaustive categories. \\
$\theta_{i} =$ probability that an observation falls in category $i$; fixed for every $i$.
\subsection{Likelihood Ratio Test for the Multinomial Model}
The joint probability function is
\[
f(y_{1}, y_{2}, \dots, y_{k}; \theta_{1}, \theta_{2}, \dots, \theta_{k})
=
\frac{n!}{y_{1}!y_{2}!\cdots y_{k}!}\theta_{1}^{y_{1}}\theta_{2}^{y_{2}}\cdots\theta_{k}^{y_{k}}
\]
where $y_{j} = 0, 1, \dots$ and $\sum_{j=1}^{k}y_{j} = n$ \\
The probabilities $\theta_{j}$ satisfy $0 < \theta_{j} < 1$ and $\sum_{j=1}^{k}\theta_{j} = 1$, so there are only $k - 1$ unknown parameters in this model. \\
The likelihood function is
\begin{align*}
L(\theta_{1}, \theta_{2}, \dots, \theta_{k}) &=
\frac{n!}{y_{1}!y_{2}!\cdots y_{k}!}\theta_{1}^{y_{1}}\theta_{2}^{y_{2}}\cdots\theta_{k}^{y_{k}} \\
L(\Theta) &\propto \prod_{j=1}^{k}\theta_{j}^{y_{j}}
\end{align*}
$L(\Theta)$ is maximized by $\hat{\Theta} = (\hat{\theta}_{1}, \hat{\theta}_{2}, \dots, \hat{\theta}_{k})$ where $\hat{\theta}_{j} = y_{j} / n, ~j = 1, 2, \dots, k$ \\
The likelihood ratio statistic is defined as
$$\Lambda = -2\ln\left[\frac{L(\widetilde{\Theta}_{0})}{L(\widetilde{\Theta})}\right]$$
then, under the assumption that the null hypothesis is true, it can be written as
\begin{align*}
\Lambda &= 2\sum_{j=1}^{k}
Y_{j}\ln\left[\frac{\widetilde{\theta}_{j}}{\theta_{j}(\widetilde{\alpha})}\right] \\
&= 2\sum_{j=1}^{k}Y_{j}\ln\left(\frac{Y_{j}}{E_{j}}\right) \\
\lambda &= 2\sum_{j=1}^{k}y_{j}\ln\left(\frac{y_{j}}{e_{j}}\right)
\end{align*}
noting that $\widetilde{\theta}_{j} = \frac{Y_{j}}{n}$ and defining the expected frequencies under $H_0$ as
$$E_{j} = n\theta_{j}(\widetilde{\alpha}) ~\text{for}~j=1, 2, \dots, k$$
If $n$ is large, the p-value from the observed data can be approximated via
\[
\text{p-value} = P(\Lambda \geq \lambda; H_{0}) \approx P(W \geq \lambda)
~\text{where}~ W \sim \chi^{2}(k - p - 1)
\]
\textbf{Note}: the approximation is accurate when $n$ is large ($n > 50$) and no expected frequency is too small ($e_{j} \geq 5$). \\
$p$ is the number of parameters to estimate under the null hypothesis. \\
$dim(\alpha) = p < k - 1$ \\
An alternative test statistic is the Pearson goodness of fit statistic
$$D = \sum_{j=1}^{k}\frac{(Y_{j} - E_{j})^{2}}{E_{j}} \sim \chi^{2}(k - p - 1)$$
\subsection{Goodness of Fit Tests}
The fit of a probability distribution can be checked by comparing the observed frequencies $f_{j}$ with the expected frequencies $e_{j} = n\hat{p}_{j}$.
\subsection{Contingency Table}
Used to test the independence of two classification variates in a study. \\
The degrees of freedom of the Chi-squared approximation are
$$k - p - 1 = (a - 1)(b - 1)$$
where $a$ and $b$ are the number of rows and columns respectively.
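To see this, note that the table has $k = ab$ cells, and under independence $\theta_{ij} = \alpha_{i}\beta_{j}$, which requires estimating $p = (a - 1) + (b - 1)$ parameters, so
$$k - p - 1 = ab - (a - 1) - (b - 1) - 1 = (a - 1)(b - 1)$$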
\newpage
\section{To-do}
\begin{enumerate}
\item Likelihood functions for various distributions
\end{enumerate}
\end{document}
|
{"hexsha": "93e886a37e72f1b3d8715f92913350b32bd9ed45", "size": 43251, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Latex Notes/STAT 231/stat_231.tex", "max_stars_repo_name": "cc-shen/Handy-Tools", "max_stars_repo_head_hexsha": "1fa14867e5957e3e2ce78b8acb6976a7df140a12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Latex Notes/STAT 231/stat_231.tex", "max_issues_repo_name": "cc-shen/Handy-Tools", "max_issues_repo_head_hexsha": "1fa14867e5957e3e2ce78b8acb6976a7df140a12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Latex Notes/STAT 231/stat_231.tex", "max_forks_repo_name": "cc-shen/Handy-Tools", "max_forks_repo_head_hexsha": "1fa14867e5957e3e2ce78b8acb6976a7df140a12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.7578378378, "max_line_length": 266, "alphanum_fraction": 0.5982289427, "num_tokens": 16978}
|
import numpy as np
from math import sqrt, pow
from numba import njit, prange
@njit(fastmath=True)
def KineticEnergy(J, pA) -> float:
k = 0.0
# Outside of compute loop so prange can be used.
for j in prange(J):
v2 = pow(pA[j]['vx'], 2) + pow(pA[j]['vy'], 2)
k += 0.5 * pA[j]['m'] * v2
return k
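# Minimal usage sketch (assumption: the particle array is a NumPy structured
# array with 'm', 'vx', 'vy' fields, inferred from the access pattern above).
if __name__ == "__main__":
    particle_dtype = np.dtype([('m', np.float64), ('vx', np.float64), ('vy', np.float64)])
    pA = np.zeros(3, dtype=particle_dtype)
    pA['m'] = 1.0
    pA['vx'] = [1.0, 2.0, 3.0]
    pA['vy'] = [0.0, 1.0, 0.0]
    print(KineticEnergy(len(pA), pA))  # 0.5 * (1 + 5 + 9) = 7.5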
|
{"hexsha": "7f0bf16ff8dc41acfc1dd40d29c6ecfc995f9146", "size": 338, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Equations/KineticEnergy.py", "max_stars_repo_name": "KoningJasper/Offshore-SPH", "max_stars_repo_head_hexsha": "558bb359249eb89b082322f7585e19df003281fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Equations/KineticEnergy.py", "max_issues_repo_name": "KoningJasper/Offshore-SPH", "max_issues_repo_head_hexsha": "558bb359249eb89b082322f7585e19df003281fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2018-11-27T13:33:54.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T10:30:20.000Z", "max_forks_repo_path": "src/Equations/KineticEnergy.py", "max_forks_repo_name": "KoningJasper/Offshore-SPH", "max_forks_repo_head_hexsha": "558bb359249eb89b082322f7585e19df003281fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-27T12:11:21.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-27T12:11:21.000Z", "avg_line_length": 26.0, "max_line_length": 55, "alphanum_fraction": 0.5621301775, "include": true, "reason": "import numpy,from numba", "num_tokens": 118}
|
/**
* Copyright (c) 2019 Melown Technologies SE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef mapproxy_generator_tms_raster_base_hpp_included_
#define mapproxy_generator_tms_raster_base_hpp_included_
#include <boost/optional.hpp>
#include "vts-libs/registry/extensions.hpp"
#include "vts-libs/storage/support.hpp"
#include "../support/wmts.hpp"
#include "../generator.hpp"
namespace vre = vtslibs::registry::extensions;
namespace vs = vtslibs::storage;
namespace generator {
class TmsRasterBase : public Generator {
public:
TmsRasterBase(const Params ¶ms
, const boost::optional<RasterFormat> &format
= boost::none);
protected:
struct ImageFlags {
bool dontOptimize;
bool atlas;
bool forceFormat;
ImageFlags()
: dontOptimize(false), atlas(false), forceFormat(false)
{}
bool checkFormat(RasterFormat requested, RasterFormat configured)
const;
};
virtual void generateTileImage(const vts::TileId &tileId
, const Sink::FileInfo &sfi
, RasterFormat format
, Sink &sink, Arsenal &arsenal
, const ImageFlags &imageFlags
= ImageFlags()) const = 0;
private:
virtual Task generateFile_impl(const FileInfo &fileInfo
, Sink &sink) const;
virtual Task generateVtsFile_impl(const FileInfo &fileInfo
, Sink &sink) const = 0;
Task wmtsInterface(const FileInfo &fileInfo, Sink &sink) const;
wmts::WmtsResources wmtsResources(const WmtsFileInfo &fileInfo) const;
std::string wmtsReadme() const;
const vre::Wmts& getWmts() const;
RasterFormat format_;
const vre::Wmts *wmts_;
friend class AtlasProvider;
};
inline bool TmsRasterBase::ImageFlags::checkFormat(RasterFormat requested
, RasterFormat configured)
const
{
if (atlas || forceFormat) { return true; }
return (requested == configured);
}
} // namespace generator
#endif // mapproxy_generator_tms_raster_base_hpp_included_
|
{"hexsha": "6e79a9e5474bb265d5238ea3d0e744a1f03a9eb1", "size": 3533, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "mapproxy/src/mapproxy/generator/tms-raster-base.hpp", "max_stars_repo_name": "melowntech/vts-mapproxy", "max_stars_repo_head_hexsha": "241ba43c1f7dcc226ec0f2089d47e11c699c2587", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2019-05-03T06:09:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T05:05:45.000Z", "max_issues_repo_path": "mapproxy/src/mapproxy/generator/tms-raster-base.hpp", "max_issues_repo_name": "melowntech/vts-mapproxy", "max_issues_repo_head_hexsha": "241ba43c1f7dcc226ec0f2089d47e11c699c2587", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2019-04-16T12:43:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T02:44:09.000Z", "max_forks_repo_path": "mapproxy/src/mapproxy/generator/tms-raster-base.hpp", "max_forks_repo_name": "melowntech/vts-mapproxy", "max_forks_repo_head_hexsha": "241ba43c1f7dcc226ec0f2089d47e11c699c2587", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-09-25T04:57:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T05:05:46.000Z", "avg_line_length": 34.9801980198, "max_line_length": 78, "alphanum_fraction": 0.6722332295, "num_tokens": 732}
|
# Create figures showing donor site segments that make up the
# different sequences for a variable region.
# Currently set up specifically for the V1 donor-site choices in SFig 5.
import os
import sys
import dna_features_viewer
import pandas as pd
import numpy as np
from dna_features_viewer import GraphicFeature, GraphicRecord
from matplotlib import pyplot as plt
from matplotlib import patches
from collections import Counter
# Assigns certain colors to often found 4-nucleotide repeats in V1.
def find_color(seq):
if seq == "GCAT":
return "firebrick"
# Reads table from ds_names.R - "donorsites.csv."
df = pd.read_csv("V1_donorsites.csv",sep=",")
fig = plt.figure(figsize=(20,15))
# Loops through each unique sequence in a variable region
for num,fasta_seq in enumerate(df.qseqid.unique()):
location = num + 1
# Subfigures in rows, columns, format
ax = fig.add_subplot(7,2,location)
subset = df.loc[df['qseqid']==fasta_seq]
features = []
percentage = 0
num_events = 0
qstart_list = []
ds_list = []
highlight_repeats=False
# Loops through each donor site segment for each unique sequence
for index,record in subset.iterrows():
qstart = int(record['qstart'])
qend = int(record['qend'])+1
# Finds where donor site segment is in full donor site sequence
dsstart = record['qseqseq'].find(record['sequence'])
dsend = dsstart + (qend - qstart)
features.append(
GraphicFeature(start=record['qstart'],end=record['qend']+1,strand=+1,color=record['color'],label=record['ds_name']))
# Defining variables for plotting
sequence = record['qseqseq']
aaseq = record['aaseq']
#percentage = record['percentage']
num_events += 1
qstart_list.append(int(record['qstart']))
ds_list.append(record['qseqseq'])
if("_any" in record['ds_name']):
highlight_repeats=True
# Sort donor sites by order of position
qstart_list,ds_list = zip(*sorted(zip(qstart_list,ds_list)))
# For highlighting different repeat regions
if(highlight_repeats):
color = "cornflowerblue"
ax.add_patch(patches.Rectangle((1.5,-0.85),4,0.35,facecolor=color,linewidth=0,clip_on=False))
ax.add_patch(patches.Rectangle((6.5,-0.85),4,0.35,facecolor=color,linewidth=0,clip_on=False))
ax.add_patch(patches.Rectangle((qend-4.5,-0.85),4,0.35,facecolor=color,linewidth=0,clip_on=False))
# Plotting
record = GraphicRecord(sequence_length=45,features=features,sequence=sequence,first_index = 1,labels_spacing=-500)
record.plot(ax = ax,figure_width = 13,figure_height=30, annotate_inline=True)
record.plot_sequence(ax)
plot_title = aaseq
ax.text(0,1.5,plot_title,fontsize=14,fontweight='bold')
plt.tight_layout()  # tight_layout() returns None, so don't overwrite fig
plt.subplots_adjust(hspace = -0.3)
plt.savefig("SFig 5.pdf")
|
{"hexsha": "32af60bf7ffc55e761aa3137481bfb9397133f27", "size": 2698, "ext": "py", "lang": "Python", "max_stars_repo_path": "donor_sites/plot_donor_sites.py", "max_stars_repo_name": "greninger-lab/longitudinal_tprk", "max_stars_repo_head_hexsha": "769d93ec7feeb14a7640469266f3a4531c1b6d25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "donor_sites/plot_donor_sites.py", "max_issues_repo_name": "greninger-lab/longitudinal_tprk", "max_issues_repo_head_hexsha": "769d93ec7feeb14a7640469266f3a4531c1b6d25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "donor_sites/plot_donor_sites.py", "max_forks_repo_name": "greninger-lab/longitudinal_tprk", "max_forks_repo_head_hexsha": "769d93ec7feeb14a7640469266f3a4531c1b6d25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5060240964, "max_line_length": 119, "alphanum_fraction": 0.7442550037, "include": true, "reason": "import numpy", "num_tokens": 752}
|
"""
Damavand Volcano
~~~~~~~~~~~~~~~~
Visualize 3D models of Damavand Volcano, Alborz, Iran.
This is an adaption of `Alexey Pechnikov <https://orcid.org/0000-0001-9626-8615>`_ and `A.V.Durandin <https://orcid.org/0000-0001-6468-9757>`_'s `ParaView-MoshaFault <https://github.com/mobigroup/ParaView-MoshaFault>`_.
See LinkedIn posts for more details:
- `The slices of the 3D model of the density on the Mosha fault area, North Iran <https://www.linkedin.com/posts/activity-6610080454911631360-97-V/>`_
- `Comparing Magnetic and Gravity Data to the Mosha Fault Area <https://www.linkedin.com/posts/activity-6609736436344201216-Kxls/>`_
- `North Iran, Mosha fault <https://www.linkedin.com/posts/activity-6609681862937853952-2BPG/>`_
- `North Iran <https://www.linkedin.com/posts/activity-6609486793676996608-ZF-J/>`_
Originally posted: https://github.com/banesullivan/damavand-volcano
"""
import numpy as np
# sphinx_gallery_thumbnail_number = 6
import pyvista as pv
from pyvista import examples
###############################################################################
a, _ = examples.downloads._download_file("gebco7510_49cl.stl")
b, _ = examples.downloads._download_file("gebco7510_55cl.stl")
c, _ = examples.downloads._download_file("AOI.Damavand.32639.vtp")
gebco = examples.download_damavand_volcano()
gebco_a = pv.read(a)
gebco_b = pv.read(b)
aoi = pv.read(c)
###############################################################################
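# Volume render the model with a piecewise opacity transfer function.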
opacity = [0, 0.75, 0, 0.75, 1.0]
clim = [0, 100]
p = pv.Plotter()
p.add_volume(
gebco,
cmap="magma",
clim=clim,
opacity=opacity,
opacity_unit_distance=6000,
)
p.show()
###############################################################################
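# Extract a rectangular volume of interest (VOI) from the full grid.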
voi = gebco.extract_subset([175, 200, 105, 132, 98, 170])
p = pv.Plotter()
p.add_mesh(gebco.outline(), color="k")
p.add_mesh(voi, cmap="magma")
p.show()
###############################################################################
p = pv.Plotter()
p.add_volume(voi, cmap="magma", clim=clim, opacity=opacity, opacity_unit_distance=2000)
p.camera_position = [
(531554.5542909054, 3944331.800171338, 26563.04809259223),
(599088.1433822059, 3982089.287834022, -11965.14728669936),
(0.3738545892415734, 0.244312810377319, 0.8947312427698892),
]
p.show()
###############################################################################
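# Compute isosurfaces (contours) of the scalar field at values 5, 10, ..., 50.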
contours = voi.contour(np.arange(5, 55, 5))
contours
###############################################################################
contours.plot(cmap="nipy_spectral", opacity=0.15)
###############################################################################
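# Clip the area-of-interest surface to the lateral bounds of the VOI.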
roi = [*voi.bounds[0:4], *aoi.bounds[4:6]]
aoi_clipped = aoi.clip_box(roi, invert=False)
pv.plot([aoi, pv.Box(roi).outline()], cpos="xy")
###############################################################################
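# Compose the final scene: contours, vent meshes, and the clipped AOI surface.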
p = pv.Plotter(window_size=np.array([1024, 768]) * 2)
# Add all the data we want to see
p.add_mesh(contours, cmap="nipy_spectral", opacity=0.15)
p.add_mesh(gebco_a, color="#ff0000")
p.add_mesh(gebco_b, color="#ff0000")
p.add_mesh(aoi_clipped, cmap="coolwarm", opacity=0.7)
# Add a title
p.add_text("Vent and Magma Chambers\nDamavand Volcano, Alborz")
# A nice perspective
p.camera_position = [
(544065.5831913119, 3924518.576093113, 24324.3096344195),
(597885.1732914157, 3982998.0900773173, -12587.537450058662),
(0.33162714740718435, 0.26609487244915314, 0.9051060456978746),
]
p.show()
|
{"hexsha": "167e1c0a455e8c27330f3894358611bc5e8a6adc", "size": 3469, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvista-examples/damavand.py", "max_stars_repo_name": "RichardScottOZ/banesullivan", "max_stars_repo_head_hexsha": "8b6a530fc7ea36a91f6aa6a5dc3d4d5557128d04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-14T10:44:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-26T05:47:48.000Z", "max_issues_repo_path": "pyvista-examples/damavand.py", "max_issues_repo_name": "RichardScottOZ/banesullivan", "max_issues_repo_head_hexsha": "8b6a530fc7ea36a91f6aa6a5dc3d4d5557128d04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-11-14T16:53:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-28T23:05:42.000Z", "max_forks_repo_path": "pyvista-examples/damavand.py", "max_forks_repo_name": "RichardScottOZ/banesullivan", "max_forks_repo_head_hexsha": "8b6a530fc7ea36a91f6aa6a5dc3d4d5557128d04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-11-14T10:20:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T18:01:38.000Z", "avg_line_length": 34.3465346535, "max_line_length": 219, "alphanum_fraction": 0.5955606803, "include": true, "reason": "import numpy", "num_tokens": 992}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# In[50]:
df = pd.read_csv('/home/thais/CSV/status_A1.csv')
df.sort_values(by='title', inplace=True)
images_path = sorted(glob.glob('/home/thais/Windows/Train/*'))
df['Unnamed: 0'] = np.array(images_path)
df.reset_index(inplace=True, drop=True)
df['status'] = df['status'].astype(str)
# ### Splitting the train and test sets with a 70-30% split
# In[49]:
### split using 70-30% while keeping the sample frequencies for train, validation and test balanced by the title and status columns (the network label)
X_train, X_test, y_train, y_test = train_test_split(df[['Unnamed: 0', 'title']], df['status'], test_size=0.30, random_state=42, stratify=df[['status', 'title']])
df_train = pd.concat([X_train, y_train], axis = 1)
X_train, X_val, y_train, y_val = train_test_split(df_train[['Unnamed: 0', 'title']], df_train['status'], test_size=0.30, random_state=42, stratify=df_train[['status', 'title']])
### concatenate input attributes and label into a single dataframe in order to use tensorflow's flow_from_dataframe
df_test = pd.concat([X_test, y_test], axis=1)
df_train = pd.concat([X_train, y_train], axis=1)
df_val = pd.concat([X_val, y_val], axis=1)
print('Total training images', len(df_train))
print('Total validation images', len(df_val))
print('Total test images', len(df_test))
# ### Splitting the train and test sets by distinct songs
# In[52]:
'''
songs, n = df['title'].unique(), 5
index = np.random.choice(len(songs), 5, replace=False)
selected_songs = songs[index] ## select n of the available songs for testing
df_test = df[df['title'].isin(selected_songs)] ## the test set contains all spectrograms of the n songs selected above
df_train = df[~(df['title'].isin(selected_songs))] ## the train set contains the spectrograms of every song EXCEPT those selected above for testing
#X_train, X_val, y_train, y_val = train_test_split(df_train[['Unnamed: 0', 'title']], df_train['status'], test_size=0.30, random_state=42, stratify=df_train[['status', 'title']]) ## split off 30% for validation, balanced by title and status
### concatenate input attributes and label into a single dataframe in order to use tensorflow's flow_from_dataframe
df_train = pd.concat([X_train, y_train], axis=1)
df_val = pd.concat([X_val, y_val], axis=1)
print('Total training images', len(df_train))
print('Total validation images', len(df_val))
print('Total test images', len(df_test))
'''
# In[54]:
datagen=ImageDataGenerator(rescale=1./255)
train_generator=datagen.flow_from_dataframe(dataframe=df_train, directory='/home/thais/Windows/Train/', x_col='Unnamed: 0', y_col="status", class_mode="binary", target_size=(32,32), batch_size=32)
valid_generator=datagen.flow_from_dataframe(dataframe=df_val, directory='/home/thais/Windows/Train/', x_col='Unnamed: 0', y_col="status", class_mode="binary", target_size=(32,32), batch_size=32)
test_generator=datagen.flow_from_dataframe(dataframe=df_test, directory='/home/thais/Windows/Train/', x_col='Unnamed: 0', y_col="status", class_mode="binary", target_size=(32,32), batch_size=32, shuffle=False)  # keep order aligned with df_test for the accuracy computation below
# In[87]:
mc = tf.keras.callbacks.ModelCheckpoint('resnet_model.h5', monitor='val_binary_accuracy', mode='max', save_best_only=True)
model = tf.keras.applications.ResNet50(
    include_top=True,
    weights=None,
    input_shape=(32,32,3),
    classes=1,
    classifier_activation="sigmoid"  # single sigmoid output to match the binary labels and loss below (requires TF >= 2.2)
)
'''
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), padding='same',
input_shape=(32,32,3)),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Conv2D(64, (3, 3)),
tf.keras.layers.Conv2D(128, (3, 3)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128,activation='relu'),
tf.keras.layers.Dense(2)
])
'''
model.compile(
optimizer=tf.keras.optimizers.Adam(0.001),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[tf.keras.metrics.BinaryAccuracy()],
)
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
train_history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=10,
callbacks = [mc])
# In[ ]:
STEP_SIZE_TEST = int(np.ceil(test_generator.n / test_generator.batch_size))  # ceil so every test image is predicted
print('---------------Test-------------')
test_generator.reset()
test_preds = model.predict_generator(test_generator,
                                     steps=STEP_SIZE_TEST,
                                     verbose=1)
# With a single sigmoid output, threshold at 0.5 instead of taking an argmax.
predicted_class_indices = (test_preds.ravel() > 0.5).astype(int)
labels = (train_generator.class_indices)
labels = dict((v,k) for k,v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
print(accuracy_score(df_test['status'].values, predictions))
|
{"hexsha": "ea4d6a5e3fe75ca3afebd92b6825574bfa7c8a68", "size": 5248, "ext": "py", "lang": "Python", "max_stars_repo_path": "DataBase/Neural_Networks/Resnet/resnet50.py", "max_stars_repo_name": "J0AZZ/chord-detection-challenge", "max_stars_repo_head_hexsha": "e0648d235ee0fbbf48d692911032aba7e4fedb31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-02T16:36:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T14:30:45.000Z", "max_issues_repo_path": "DataBase/Neural_Networks/Resnet/resnet50.py", "max_issues_repo_name": "J0AZZ/chord-detection-challenge", "max_issues_repo_head_hexsha": "e0648d235ee0fbbf48d692911032aba7e4fedb31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DataBase/Neural_Networks/Resnet/resnet50.py", "max_forks_repo_name": "J0AZZ/chord-detection-challenge", "max_forks_repo_head_hexsha": "e0648d235ee0fbbf48d692911032aba7e4fedb31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1641791045, "max_line_length": 264, "alphanum_fraction": 0.7223704268, "include": true, "reason": "import numpy", "num_tokens": 1353}
|
#!/usr/bin/env python
'''
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import math
bins = np.arange(256).reshape(256,1)
def hist_curve(im):
h = np.zeros((300,256,3))
if len(im.shape) == 2:
color = [(255,255,255)]
elif im.shape[2] == 3:
color = [ (255,0,0),(0,255,0),(0,0,255) ]
for ch, col in enumerate(color):
hist_item = cv.calcHist([np.uint8(im)],[ch],None,[256],[0,256])
cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
pts = np.int32(np.column_stack((bins,hist)))
cv.polylines(h,[pts],False,col)
y=np.flipud(h)
return y
def binary(img, threshold = 127):
im = img
im = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
#blur = cv.GaussianBlur(im,(3,3),0)
#ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
ret1,th3 = cv.threshold(im,threshold,255,cv.THRESH_BINARY)
return th3
def contrast(im, alpha):
    # Scale intensities; saturate at 255 where uint8 multiplication wrapped around.
    img = np.where(im*alpha < im, 255, alpha*im)
    return np.uint8(img)
def brightness(im, beta):
    # Shift intensities by beta, clipping to the valid [0, 255] range.
    img = np.clip(im.astype(np.int16) + beta, 0, 255)
    return np.uint8(img)
def main():
import sys
if len(sys.argv)>1:
fname = sys.argv[1]
#../../../practice_1_2_fft/resources/gato_2.jpg
else :
fname = 'lena.jpg'
print("usage : python hist.py <image_file>")
im = cv.imread(cv.samples.findFile(fname))
if im is None:
print('Failed to load image file:', fname)
sys.exit(1)
gray = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
print(''' Histogram plotting \n
show histogram for color image in curve mode \n
show binarization from the input image \n
Esc - exit \n
''')
cv.imshow('image',im)
    curve = hist_curve(im) # compute the histogram
    cv.imshow('histogram original image',curve) # display the histogram
img = contrast(im, 2)
cv.imshow('contrast', img)
    curve = hist_curve(img) # compute the histogram
    cv.imshow('histogram contrast',curve)
img = brightness(im, -100)
cv.imshow('brightness mine', img)
    curve = hist_curve(img) # compute the histogram
    cv.imshow('histogram brightness mine',curve)
    # contrast and brightness using OpenCV's built-in function
img = cv.convertScaleAbs(im, alpha=1, beta=100)
cv.imshow('brightness', img)
    curve = hist_curve(img) # compute the histogram
    cv.imshow('histogram brightness',curve)
    # Edge detection
    # Gaussian smoothing: cv.GaussianBlur(gray, (n, n), 0)
    img_gauss = cv.GaussianBlur(gray, (3,3), 0) # 3x3 kernel
img = binary(im, 150)
cv.imshow('image',img)
    # Canny
    # canny = cv.Canny(image, lower_hysteresis_threshold, upper_threshold)
    img_canny = cv.Canny(img, 100, 200)
cv.imshow("Canny", img_canny)
# Sobel
img_sobelx = cv.Sobel(img_gauss, cv.CV_8U, 1, 0, ksize=3)
img_sobely = cv.Sobel(img_gauss, cv.CV_8U, 0, 1, ksize=3)
img_sobel = img_sobelx + img_sobely
cv.imshow("Sobel X", img_sobelx)
cv.imshow("Sobel Y", img_sobely)
cv.imshow("Sobel", img_sobel)
# Prewitt
kernelx = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])
kernely = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])
img_prewittx = cv.filter2D(img_gauss, -1, kernelx)
img_prewitty = cv.filter2D(img_gauss, -1, kernely)
cv.imshow("Prewitt X", img_prewittx)
cv.imshow("Prewitt Y", img_prewitty)
cv.imshow("Prewitt", img_prewittx + img_prewitty)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
{"hexsha": "11697342f81f1095ad633d38f90db236b182f495", "size": 3777, "ext": "py", "lang": "Python", "max_stars_repo_path": "samples/python/practice_2p1.py", "max_stars_repo_name": "jeroFlo/robotsVision_openCV", "max_stars_repo_head_hexsha": "cbf1bf440bcb6ad2e0fec9ed9d967e05c8e9d531", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "samples/python/practice_2p1.py", "max_issues_repo_name": "jeroFlo/robotsVision_openCV", "max_issues_repo_head_hexsha": "cbf1bf440bcb6ad2e0fec9ed9d967e05c8e9d531", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "samples/python/practice_2p1.py", "max_forks_repo_name": "jeroFlo/robotsVision_openCV", "max_forks_repo_head_hexsha": "cbf1bf440bcb6ad2e0fec9ed9d967e05c8e9d531", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1726618705, "max_line_length": 72, "alphanum_fraction": 0.6176859942, "include": true, "reason": "import numpy", "num_tokens": 1179}
|
-- This test ensures that implicits bound on the RHS of a
-- record update field are correctly bound by the compiler.
record Rec where
n : Nat
data T : Rec -> Type where
C : T ({ n := Z } r)
data U : Rec -> Type where
D : U ({ n $= S } r)
|
{"hexsha": "8782a6cb2e0adcd8235c6604cdb5222c48021660", "size": 257, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "idris2/tests/idris2/record010/record.idr", "max_stars_repo_name": "chrrasmussen/Idris2-Erlang", "max_stars_repo_head_hexsha": "dfa38cd866fd683d4bdda49fc0bf2f860de273b4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 128, "max_stars_repo_stars_event_min_datetime": "2020-06-09T21:25:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:50:24.000Z", "max_issues_repo_path": "idris2/tests/idris2/record010/record.idr", "max_issues_repo_name": "chrrasmussen/Idris2-Erlang", "max_issues_repo_head_hexsha": "dfa38cd866fd683d4bdda49fc0bf2f860de273b4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-08-26T03:38:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-23T21:32:49.000Z", "max_forks_repo_path": "idris2/tests/idris2/record010/record.idr", "max_forks_repo_name": "chrrasmussen/Idris2-Erlang", "max_forks_repo_head_hexsha": "dfa38cd866fd683d4bdda49fc0bf2f860de273b4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-08-28T04:16:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T12:47:16.000Z", "avg_line_length": 17.1333333333, "max_line_length": 59, "alphanum_fraction": 0.5953307393, "num_tokens": 73}
|
# coding:utf-8
import os
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from catboost import CatBoostClassifier
from sklearn.utils import shuffle
from category_encoders import TargetEncoder
from sklearn.metrics import roc_auc_score
np.random.seed(7)
class CatBoostKfold(object):
def __init__(self, *, input_path_1, input_path_2, output_path):
self.__input_path_1 = input_path_1
self.__input_path_2 = input_path_2
self.__output_path = output_path
self.__sample_submission = None
self.__train, self.__test = [None for _ in range(2)]
self.__train_res, self.__test_res = [None for _ in range(2)]
self.__train_feature, self.__train_label = [None for _ in range(2)]
self.__test_feature = None
self.__categorical_index = None
self.__encoder = None
self.__numeric_index = None
self.__folds = None
self.__oof_preds = None
self.__sub_preds = None
self.__cat = None
def data_prepare(self):
self.__sample_submission = pd.read_csv(os.path.join(self.__input_path_1, "sample_submission.csv"))
self.__train = pd.read_csv(os.path.join(self.__input_path_1, "train_feature_df.csv"))
self.__test = pd.read_csv(os.path.join(self.__input_path_1, "test_feature_df.csv"))
self.__train_res = pd.read_csv(os.path.join(self.__input_path_2, "feature_train_res.csv"))
self.__test_res = pd.read_csv(os.path.join(self.__input_path_2, "feature_test_res.csv"))
self.__train_label = self.__train["TARGET"]
self.__train_feature = self.__train.drop(["SK_ID_CURR", "TARGET"], axis=1)
self.__test_feature = self.__test[self.__train_feature.columns]
self.__train_res = self.__train_res.drop(["EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3"], axis=1)
self.__test_res = self.__test_res.drop(["EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3"], axis=1)
self.__train_feature = pd.concat([self.__train_feature, self.__train_res], axis=1)
self.__test_feature = pd.concat([self.__test_feature, self.__test_res], axis=1)
self.__categorical_index = np.where(self.__train_feature.dtypes == "object")[0]
self.__train_feature.iloc[:, self.__categorical_index] = (
self.__train_feature.iloc[:, self.__categorical_index].fillna("missing")
)
self.__test_feature.iloc[:, self.__categorical_index] = (
self.__test_feature.iloc[:, self.__categorical_index].fillna("missing")
)
self.__encoder = TargetEncoder()
self.__encoder.fit(self.__train_feature.iloc[:, self.__categorical_index], self.__train_label)
self.__train_feature.iloc[:, self.__categorical_index] = (
self.__encoder.transform(self.__train_feature.iloc[:, self.__categorical_index])
)
self.__test_feature.iloc[:, self.__categorical_index] = (
self.__encoder.transform(self.__test_feature.iloc[:, self.__categorical_index])
)
# There are NaNs in test dataset (feature number 77) but there were no NaNs in learn dataset"
self.__numeric_index = np.where(self.__train_feature.dtypes != "object")[0]
self.__train_feature.iloc[:, self.__numeric_index] = (
self.__train_feature.iloc[:, self.__numeric_index].apply(
lambda x: x.fillna(-999999.0) if x.median() > 0 else x.fillna(999999.0)
)
)
self.__test_feature.iloc[:, self.__numeric_index] = (
self.__test_feature.iloc[:, self.__numeric_index].apply(
lambda x: x.fillna(-999999.0) if x.median() > 0 else x.fillna(999999.0)
)
)
        # Shuffle before blending; strictly speaking not needed here because StratifiedKFold shuffles later.
self.__train_feature, self.__train_label = shuffle(self.__train_feature, self.__train_label)
def model_fit(self):
self.__folds = StratifiedKFold(n_splits=5, shuffle=True)
self.__oof_preds = np.zeros(shape=self.__train_feature.shape[0])
self.__sub_preds = np.zeros(shape=self.__test_feature.shape[0])
for n_fold, (trn_idx, val_idx) in enumerate(self.__folds.split(self.__train_feature, self.__train_label)):
trn_x, trn_y = self.__train_feature.iloc[trn_idx], self.__train_label.iloc[trn_idx]
val_x, val_y = self.__train_feature.iloc[val_idx], self.__train_label.iloc[val_idx]
self.__cat = CatBoostClassifier(
iterations=6000,
od_wait=200,
od_type="Iter",
eval_metric="AUC"
)
self.__cat.fit(
trn_x,
trn_y,
eval_set=[(val_x, val_y)],
use_best_model=True
)
pred_val = self.__cat.predict_proba(val_x)[:, 1]
pred_test = self.__cat.predict_proba(self.__test_feature)[:, 1]
self.__oof_preds[val_idx] = pred_val
self.__sub_preds += pred_test / self.__folds.n_splits
print("Fold %2d AUC : %.6f" % (n_fold + 1, roc_auc_score(val_y, self.__oof_preds[val_idx])))
print("Full AUC score %.6f" % roc_auc_score(self.__train_label, self.__oof_preds))
def model_predict(self):
self.__sample_submission["TARGET"] = self.__sub_preds
self.__sample_submission.to_csv(os.path.join(self.__output_path, "sample_submission.csv"), index=False)
if __name__ == "__main__":
cbk = CatBoostKfold(
input_path_1=sys.argv[1],
input_path_2=sys.argv[2],
output_path=sys.argv[3]
)
cbk.data_prepare()
cbk.model_fit()
cbk.model_predict()
|
{"hexsha": "8c19a74f7bd5c0b5fcc39ed1c6694bd270efa739", "size": 5681, "ext": "py", "lang": "Python", "max_stars_repo_path": "20180617/CombineStackingAndCatBoostKfold/CatBoostKfold.py", "max_stars_repo_name": "fengjiaxin/Home_Credit_Default_Risk", "max_stars_repo_head_hexsha": "3407e76b4e5cfb8dd6056d24675b80fe0e82c123", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-06-13T07:34:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-07T16:38:25.000Z", "max_issues_repo_path": "20180617/CombineStackingAndCatBoostKfold/CatBoostKfold.py", "max_issues_repo_name": "fengjiaxin/Home_Credit_Default_Risk", "max_issues_repo_head_hexsha": "3407e76b4e5cfb8dd6056d24675b80fe0e82c123", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-02T12:48:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-25T09:06:22.000Z", "max_forks_repo_path": "20180617/CombineStackingAndCatBoostKfold/CatBoostKfold.py", "max_forks_repo_name": "fengjiaxin/Home_Credit_Default_Risk", "max_forks_repo_head_hexsha": "3407e76b4e5cfb8dd6056d24675b80fe0e82c123", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-08-02T11:03:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T10:42:11.000Z", "avg_line_length": 44.0387596899, "max_line_length": 114, "alphanum_fraction": 0.6629114593, "include": true, "reason": "import numpy", "num_tokens": 1403}
|
import time
import edgeiq
import numpy
from sign_monitor import SignMonitor
"""
Simultaneously use object detection to detect human faces and classification to classify
the detected faces in terms of age groups, and output results to
shared output stream.
To change the computer vision models, follow this guide:
https://dashboard.alwaysai.co/docs/application_development/changing_the_model.html
To change the engine and accelerator, follow this guide:
https://dashboard.alwaysai.co/docs/application_development/changing_the_engine_and_accelerator.html
"""
def main():
    # First make a detector to detect hands
hand_detector = edgeiq.ObjectDetection(
"alwaysai/hand_detection")
hand_detector.load(engine=edgeiq.Engine.DNN)
# Then make a detector to detect the sign of the hand
sign_detector = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
sign_detector.load(engine=edgeiq.Engine.DNN)
# Descriptions printed to console
print("Engine: {}".format(hand_detector.engine))
print("Accelerator: {}\n".format(hand_detector.accelerator))
print("Model:\n{}\n".format(hand_detector.model_id))
print("Labels:\n{}\n".format(hand_detector.labels))
print("Engine: {}".format(sign_detector.engine))
print("Accelerator: {}\n".format(sign_detector.accelerator))
print("Model:\n{}\n".format(sign_detector.model_id))
print("Labels:\n{}\n".format(sign_detector.labels))
fps = edgeiq.FPS()
    # Variables to limit inference: only run detection every DETECT_RATE-th frame
counter = 0
DETECT_RATE = 10
sign_monitor = SignMonitor()
try:
with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
edgeiq.Streamer() as streamer:
# Allow Webcam to warm up
time.sleep(2.0)
fps.start()
# Loop detection
while True:
counter += 1
if counter % DETECT_RATE == 0:
# Read in the video stream
frame = video_stream.read()
                    # Detect hands
results = hand_detector.detect_objects(
frame, confidence_level=.5)
                    # Mark up the original frame to show only labels
frame = edgeiq.markup_image(
frame, results.predictions, show_labels=True, show_confidences=False)
                    # Generate labels to display the hand detections on the streamer
text = ["Model: {}".format(hand_detector.model_id)]
text.append(
"Inference time: {:1.3f} s".format(results.duration))
text.append("Signs:")
                    # Add a counter for the sign detection label
sign_label = 1
                    # Append each prediction to the text output
for prediction in results.predictions:
                        # Append labels for hand detection & sign classification
text.append("Sign {} ".format(
sign_label))
sign_label = sign_label + 1
# Cut out the hand and use for the sign detection
hand_image = edgeiq.cutout_image(frame, prediction.box)
                        # Attempt to identify the sign object
sign_results = sign_detector.detect_objects(
hand_image, confidence_level=.9)
sign = None
# If a sign was detected, append the label
if sign_results.predictions:
sign = sign_results.predictions[0]
text.append("sign: {}, confidence: {:.2f}\n".format(
sign_results.predictions[0].label,
sign_results.predictions[0].confidence))
if sign is not None:
sign_monitor.update(sign.label)
# Send the image frame and the predictions to the output stream
streamer.send_data(frame, text)
fps.update()
if streamer.check_exit():
break
finally:
fps.stop()
print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
print("approx. FPS: {:.2f}".format(fps.compute_fps()))
print("Program Ending")
if __name__ == "__main__":
main()
|
{"hexsha": "e4b45a0e1c053d50b747f99db4551fbe07b6ea49", "size": 4613, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "alwaysai/gesture-audio-control", "max_stars_repo_head_hexsha": "9c6450ce4abcb72e7b32b799d904b30ca24421d4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "alwaysai/gesture-audio-control", "max_issues_repo_head_hexsha": "9c6450ce4abcb72e7b32b799d904b30ca24421d4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "alwaysai/gesture-audio-control", "max_forks_repo_head_hexsha": "9c6450ce4abcb72e7b32b799d904b30ca24421d4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0390625, "max_line_length": 99, "alphanum_fraction": 0.5616735313, "include": true, "reason": "import numpy", "num_tokens": 868}
|
import argparse
import bz2
import json
import os
import pickle
import random
import tempfile
import urllib.request
import pandas as pd
import glob
import pickle as pkl
import numpy as np
import boto3
import logging
from botocore.exceptions import ClientError
import xgboost
from sklearn import metrics
#from smdebug import SaveConfig
#from smdebug.xgboost import Hook
from sklearn.model_selection import StratifiedKFold
from collections import namedtuple
from fairlearn.metrics import demographic_parity_difference, demographic_parity_ratio, equalized_odds_difference
from fairlearn.metrics import selection_rate
metric = 'f1'  # NOTE: module-level default; the parsed --metric argument is not applied to this global
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--max_depth", type=int, default=10)
parser.add_argument("--eta", type=float, default=1) # 0.2
parser.add_argument("--gamma", type=int, default=2)
parser.add_argument("--min_child_weight", type=int, default=6)
parser.add_argument("--silent", type=int, default=0)
parser.add_argument("--objective", type=str, default="binary:logistic")
parser.add_argument("--num_class", type=int, default=2)
parser.add_argument("--num_round", type=int, default=30)
parser.add_argument("--metric", type=str, default='f1')
parser.add_argument("--protected", type=str, default='Gender')
parser.add_argument("--thresh", type=float, default=0.5)
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION'))
parser.add_argument('--s3_bucket', type=str, default=None)
args = parser.parse_args()
return args
def custom_asymmetric_objective(y_pred, dtrain):
#y_pred[y_pred < -1] = -1 + 1e-6
y_true = dtrain.get_label()
#print(min(y_pred), max(y_pred))
    y_pred = 1.0 / (1.0 + np.exp(-y_pred))  # sigmoid: raw margin -> probability
    # NOTE: the hard thresholding below makes the loss piecewise constant in the
    # raw margins, so grad/hess are a heuristic signal rather than true derivatives.
    y_pred = [1.0 if y > 0.5 else 0.0 for y in y_pred]
    residual = (y_true - y_pred).astype("float")
    # residual < 0 (predicted 1, label 0) is penalized 10x more heavily
    grad = np.where(residual < 0, -2 * 10.0 * residual, -2 * residual)
    hess = np.where(residual < 0, 2 * 10.0, 2.0)
return grad, hess
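# Usage sketch (an assumption for illustration, not executed here): a custom
# objective like the one above plugs into xgboost.train via the `obj` keyword:
#   bst = xgboost.train(params, dtrain, num_boost_round=30,
#                       obj=custom_asymmetric_objective)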
def huber_approx_obj(preds, dtrain):
#print('obj:', min(preds), max(preds))
preds = 1.0 / (1.0 + np.exp(-preds))
preds = [1.0 if y > 0.5 else 0.0 for y in preds]
d = dtrain.get_label()-preds #remove .get_labels() for sklearn
#print('obj-d:', min(d), max(d))
h = 1 #h is delta in the graphic
scale = 1 + (d / h) ** 2
scale_1 = 1 + (d / (4+h)) ** 2
scale_sqrt = np.sqrt(scale)
scale_sqrt_1 = np.sqrt(scale_1)
grad = np.where(d<0, -d / scale_sqrt_1, -d / scale_sqrt)
hess = np.where(d<0, 1 / scale_1 / scale_sqrt_1, 1 / scale / scale_sqrt)
return grad, hess
#difference in statistical parity
def fair_metrics(bst,data,column, thresh):
tr = list(data.get_label())
best_iteration = bst.best_ntree_limit
pred=bst.predict(data, ntree_limit=best_iteration)
pred = [1 if p > thresh else 0 for p in pred]
na0=0
na1=0
nd0=0
nd1=0
for p,c in zip(pred,column):
if (p==1 and c==0):
nd1 += 1
if (p==1 and c==1):
na1 += 1
if (p==0 and c==0):
nd0 += 1
if (p==0 and c==1):
na0 += 1
Pa1, Pd1, Pa0, Pd0 = na1/(na1+na0), nd1/(nd1+nd0), na0/(na1+na0), nd0/(nd1+nd0)
dsp_metric = np.abs(Pd1-Pa1)
#dsp_metric = np.abs((first-second)/(first+second))
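    # The hand-rolled dsp_metric above is the demographic parity difference for
    # two groups; it should agree with fairlearn's dpd_metric computed below.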
sr_metric = selection_rate(tr, pred, pos_label=1)
dpd_metric = demographic_parity_difference(tr, pred, sensitive_features=column)
dpr_metric = demographic_parity_ratio(tr, pred, sensitive_features=column)
eod_metric = equalized_odds_difference(tr, pred, sensitive_features=column)
return dsp_metric, sr_metric, dpd_metric, dpr_metric, eod_metric
def eval_fun(y_pred, dtrain):
#y_pred[y_pred < -1] = -1 + 1e-6
y_true = list(dtrain.get_label())
#print('eval:', min(y_pred), max(y_pred))
#y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred = [1.0 if y > 0.5 else 0.0 for y in y_pred]
y_pred = list(y_pred)
d = y_pred - dtrain.get_label()
#print('---', Counter(d))
#print(y_true)
#print(y_pred)
tp = sum([1 if (t==1 and p==1) else 0 for t,p in zip(y_true,y_pred)])
fn = sum([1 if (t==1 and p==0) else 0 for t,p in zip(y_true,y_pred)])
fp = sum([1 if (t==0 and p==1) else 0 for t,p in zip(y_true,y_pred)])
tn = sum([1 if (t==0 and p==0) else 0 for t,p in zip(y_true,y_pred)])
#print('eval-d:', tp/sum(y_true), fn/sum(y_true))
precision = 0 if (tp+fp)==0 else tp / (tp + fp) # positive predictive value
recall = 0 if (tp+fn)==0 else tp / (tp + fn) # true_positive rate
false_negative_rate = 0 if (fn+tp)==0 else fn / (fn+tp)
false_positive_rate = 0 if (fp+tn)==0 else fp / (fp+tn)
f1 = 0 if (precision+recall)==0 else 2 * precision * recall / (precision + recall)
if metric == 'recall':
return "recall", recall
if metric == 'f1':
return "f1", f1
def eval_f1(y_pred, dtrain):
#y_pred[y_pred < -1] = -1 + 1e-6
y_true = list(dtrain.get_label())
y_pred = [1.0 if y > 0.5 else 0.0 for y in y_pred]
y_pred = list(y_pred)
tp = sum([1 if (t==1 and p==1) else 0 for t,p in zip(y_true,y_pred)])
fn = sum([1 if (t==1 and p==0) else 0 for t,p in zip(y_true,y_pred)])
fp = sum([1 if (t==0 and p==1) else 0 for t,p in zip(y_true,y_pred)])
tn = sum([1 if (t==0 and p==0) else 0 for t,p in zip(y_true,y_pred)])
#print('eval-d:', tp/sum(y_true), fn/sum(y_true))
precision = 0 if (tp+fp)==0 else tp / (tp + fp) # positive predictive value
recall = 0 if (tp+fn)==0 else tp / (tp + fn) # true_positive rate
f1 = 0 if (precision+recall)==0 else 2 * precision * recall / (precision + recall)
return "f1", f1
def eval_auc(y_pred, dtrain):
y_true = list(dtrain.get_label())
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
return "auc", auc
def write_metadata(path, train_recall, val_recall):
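    # NOTE: the `path` argument is currently unused (metrics are written to the
    # fixed Kubeflow location below), and the local name `metrics` shadows the
    # sklearn.metrics import within this function.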
metrics = {
'metrics': [{
'name': 'train-recall', # The name of the metric. Visualized as the column name in the runs table.
'numberValue': train_recall, # The value of the metric. Must be a numeric value.
'format': "PERCENTAGE", # The optional format of the metric.
},
{
'name': 'val-recall',
'numberValue': val_recall,
'format': "PERCENTAGE"
}]
}
logging.info("Succeed in Writing Training Metrics")
with open('/opt/ml/output/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
divmod_output = namedtuple('evalMetrics', ['train_recall', 'val_recall', 'mlpipeline_metrics'])
return divmod_output(train_recall, val_recall, json.dumps(metrics))
def xgb_evaluate(bst, data):
y = list(data.get_label())
best_iteration = bst.best_ntree_limit
#print('best_iteration:',best_iteration)
pred=bst.predict(data, ntree_limit=best_iteration)
#pred = 1.0 / (1.0 + np.exp(-pred))
pred = [1 if p > 0.5 else 0 for p in pred]
tp = sum([1 if (t==1 and p==1) else 0 for t,p in zip(y,pred)])
fn = sum([1 if (t==1 and p==0) else 0 for t,p in zip(y,pred)])
fp = sum([1 if (t==0 and p==1) else 0 for t,p in zip(y,pred)])
tn = sum([1 if (t==0 and p==0) else 0 for t,p in zip(y,pred)])
recall = 0 if (tp+fn)==0 else tp / (tp + fn)
precision = 0 if (tp+fp)==0 else tp / (tp + fp)
f1 = 0 if (precision+recall)==0 else 2 * precision * recall / (precision + recall)
if metric == 'recall':
return recall
if metric == 'f1':
return f1
def main():
args = parse_args()
train_files_path = args.train
params = {
"max_depth": args.max_depth,
"eta": args.eta,
"gamma": args.gamma,
"min_child_weight": args.min_child_weight,
#"silent": args.silent,
"tree_method": 'hist',
#"disable_default_eval_metric": 1,
"objective": args.objective,
#"num_class": args.num_class
}
job_name = json.loads(os.environ['SM_TRAINING_ENV'])['job_name']
train_files_list = glob.glob(train_files_path + '/*.*')
print(train_files_list)
print('Loading training data...')
df_train = pd.concat(map(pd.read_csv, train_files_list))
print('Data loading completed.')
y = df_train.Target.values
pcol = df_train[args.protected].values
X = df_train.drop(['Target'], axis=1).values
skf = StratifiedKFold(n_splits=5, shuffle=True)
split_count = 1
best_model = []
if metric == 'auc':
auc = 0
if metric == 'f1':
f1 = 0
if metric == 'recall':
recall = 0
best_train_metric = []
best_val_metric = []
best_dsp_train = []
best_dsp_val = []
best_sr_train = []
best_sr_val = []
best_dpd_train = []
best_dpd_val = []
best_dpr_train = []
best_dpr_val = []
best_eod_train = []
best_eod_val = []
best_iteration = 1
path = os.path.join(args.s3_bucket, job_name, "hpo-debug", str(split_count))
for train_index, test_index in skf.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
pcol_train, pcol_test = pcol[train_index], pcol[test_index]
dtrain = xgboost.DMatrix(X_train, label=y_train)
dtest = xgboost.DMatrix(X_test, label=y_test)
'''
hook = Hook(
out_dir=path,
include_collections=['feature_importance', 'full_shap', 'average_shap', 'labels', 'predictions'],
train_data=dtrain,
validation_data=dtest,
hyperparameters=params,
save_config=SaveConfig(save_interval=10)
)
'''
watchlist = [(dtrain, "train"), (dtest, "validation")]
evals_result=dict()
bst = xgboost.train(
params=params,
dtrain=dtrain,
#obj=custom_asymmetric_objective,
feval=eval_f1,
maximize=True,
evals=watchlist,
early_stopping_rounds=10,
evals_result=evals_result,
num_boost_round=args.num_round)
#callbacks=[hook])
print('evals_result: ', evals_result)
print('best iteration: ',bst.best_ntree_limit)
tr_result = xgb_evaluate(bst,dtrain)
te_result = xgb_evaluate(bst,dtest)
tr_dsp, tr_sr, tr_dpd, tr_dpr, tr_eod = fair_metrics(bst, dtrain, list(pcol_train), args.thresh)
te_dsp, te_sr, te_dpd, te_dpr, te_eod = fair_metrics(bst, dtest, list(pcol_test), args.thresh)
print('Best Stratified Model: train_f1= {}, val_f1= {}'.format(tr_result,te_result))
        print('Fairness Metrics (train): dsp={}, sr={}, dpd={}, dpr={}, eod={}'.format(tr_dsp,tr_sr,tr_dpd,tr_dpr,tr_eod))
print('Fairness Metrics (val): dsp={}, sr={}, dpd={}, dpr={}, eod={}'.format(te_dsp,te_sr,te_dpd,te_dpr,te_eod))
if te_result > f1:
best_model = bst
best_train_metric = tr_result
best_val_metric = te_result
best_dsp_train = tr_dsp
best_dsp_val = te_dsp
best_sr_train = tr_sr
best_sr_val = te_sr
best_dpd_train = tr_dpd
best_dpd_val = te_dpd
best_dpr_train = tr_dpr
best_dpr_val = te_dpr
best_eod_train = tr_eod
best_eod_val = te_eod
results=evals_result
best_iteration = bst.best_ntree_limit
f1 = best_val_metric
#if evals_result['validation']['recall'][args.num_round-1] > recall:
# best_model = bst
# best_train_metric = evals_result['train']['recall'][args.num_round-1]
# best_val_metric = evals_result['validation']['recall'][args.num_round-1]
# recall = evals_result['validation']['recall'][args.num_round-1]
file_path = os.path.join(args.s3_bucket, job_name, "metrics")
#train_metrics = write_metadata(file_path, best_train_metric, best_val_metric)
#print("train-dsp:{},validation-dsp:{}".format(best_dsp_train, best_dsp_val))
#print("train-sr:{},validation-sr:{}".format(best_sr_train, best_sr_val))
#print("train-dpd:{},validation-dpd:{}".format(best_dpd_train, best_dpd_val))
#print("train-dpr:{},validation-dpr:{}".format(best_dpr_train, best_dpr_val))
#print("train-eod:{},validation-eod:{}".format(best_eod_train, best_eod_val))
print("train-f1: {},validation-f1: {}".format(best_train_metric, best_val_metric))
print("best iteration: ", best_iteration)
model_dir = os.environ.get('SM_MODEL_DIR')
with open(model_dir + '/model.bin', 'wb') as f:
pkl.dump(best_iteration, f)
pkl.dump(best_model, f)
with open(model_dir + '/best_metrics.pkl', 'wb') as f:
pkl.dump(best_train_metric, f)
pkl.dump(best_val_metric, f)
pkl.dump(best_dsp_train, f)
pkl.dump(best_dsp_val, f)
pkl.dump(best_sr_train, f)
pkl.dump(best_sr_val, f)
pkl.dump(best_dpd_train, f)
pkl.dump(best_dpd_val, f)
pkl.dump(best_dpr_train, f)
pkl.dump(best_dpr_val, f)
pkl.dump(best_eod_train, f)
pkl.dump(best_eod_val, f)
#return train_metrics
if __name__ == "__main__":
main()
|
{"hexsha": "58bfc912578912e3647b92d204dac7680c2d45dd", "size": 13433, "ext": "py", "lang": "Python", "max_stars_repo_path": "container/train.py", "max_stars_repo_name": "tvkpz/ml-innovate-2021", "max_stars_repo_head_hexsha": "30ef7fed40ad70ad4e2a32d8843de0ed0e808a8a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "container/train.py", "max_issues_repo_name": "tvkpz/ml-innovate-2021", "max_issues_repo_head_hexsha": "30ef7fed40ad70ad4e2a32d8843de0ed0e808a8a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "container/train.py", "max_forks_repo_name": "tvkpz/ml-innovate-2021", "max_forks_repo_head_hexsha": "30ef7fed40ad70ad4e2a32d8843de0ed0e808a8a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.38, "max_line_length": 123, "alphanum_fraction": 0.6213057396, "include": true, "reason": "import numpy", "num_tokens": 3931}
|
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import scipy.misc
import os
import csv
import itertools
import tensorflow.contrib.slim as slim
#imageio.plugins.ffmpeg.download()
# This is a simple function to reshape our game frames.
def processState(state1):
    return np.reshape(state1, [21168])  # flatten an 84x84x3 frame (84*84*3 = 21168)
# Crop and resize a game frame to x-by-x, then flatten it.
# NOTE: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in 1.3;
# on newer SciPy, use e.g. PIL.Image.resize or skimage.transform.resize.
def processImage(frame, x):
s = frame[10:-10,30:-30]
if len(frame.shape) == 2:
s = scipy.misc.imresize(s,[x,x])
else:
s = scipy.misc.imresize(s,[x,x,frame.shape[2]])
s = np.reshape(s,[np.prod(s.shape)])
return s
# This is a simple function to reshape our game buffers and create an N-channel image, N being the number of images passed to the function
def processBuffers(image_size, d, l, s):
d = d[10:-10,30:-30]
d = scipy.misc.imresize(d,[image_size, image_size])
l = l[10:-10,30:-30]
l = scipy.misc.imresize(l,[image_size, image_size])
s = s[10:-10,30:-30]
s = scipy.misc.imresize(s,[image_size, image_size])
dls = np.array([d,l,s])
dls = np.rollaxis(dls, 0, 3)
dls = np.reshape(dls,[np.prod(dls.shape)])
return dls
# These functions allow us to update the parameters of our target network with those of the primary network.
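# Soft ("Polyak") update: theta_target <- tau * theta_primary + (1 - tau) * theta_target.
# This assumes the first half of tfVars belongs to the primary network and the
# second half to the target network, i.e., network creation order matters.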
def updateTargetGraph(tfVars, tau):
total_vars = len(tfVars)
op_holder = []
for idx, var in enumerate(tfVars[0:total_vars // 2]):
op_holder.append(tfVars[idx + total_vars // 2].assign(
(var.value() * tau) + ((1 - tau) * tfVars[idx + total_vars // 2].value())))
return op_holder
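# Usage sketch (an assumption for illustration): build the primary network
# first and the target network second, then
#   targetOps = updateTargetGraph(tf.trainable_variables(), tau=0.001)
#   ...
#   updateTarget(targetOps, sess)  # inside a tf.Session, after each train step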
def updateTarget(op_holder, sess):
for op in op_holder:
sess.run(op)
total_vars = len(tf.trainable_variables())
a = tf.trainable_variables()[0].eval(session=sess)
b = tf.trainable_variables()[total_vars // 2].eval(session=sess)
    # Sanity check: compare the first primary and target variables elementwise.
    # (The original `a.all() == b.all()` compared reduced truth values, not the
    # arrays; the variables only coincide once the soft update has converged.)
    if np.allclose(a, b):
        print("Target Set Success")
    else:
        print("Target Set Failed")
# Record performance metrics and episode logs for the Control Center.
def saveToCenter(i, rList, jList, bufferArray, summaryLength, h_size, sess, mainQN, time_per_step, img_x, img_z, chls, path):
with open(path + '/log.csv', 'a') as myfile:
state_display = (np.zeros([1, h_size]), np.zeros([1, h_size]))
imagesS = []
'''
for idx, z in enumerate(np.vstack(bufferArray[:, 0])):
img, state_display = sess.run([mainQN.salience, mainQN.rnn_state],
feed_dict={
mainQN.scalarInput: np.reshape(bufferArray[idx, 0], [1, img_x*img_x*img_z]) / 255.0, \
mainQN.trainLength: 1, mainQN.state_in: state_display,
mainQN.batch_size: 1})
imagesS.append(img)
imagesS = (imagesS - np.min(imagesS)) / (np.max(imagesS) - np.min(imagesS))
imagesS = np.vstack(imagesS)
imagesS = np.resize(imagesS, [len(imagesS), img_x, img_x, img_z])
luminance = np.max(imagesS, img_z)
imagesS = np.multiply(np.ones([len(imagesS), img_x, img_x, img_z]), np.reshape(luminance, [len(imagesS), img_x, img_x, 1]))
make_gif(np.ones([len(imagesS), img_x, img_x, img_z]), './Center/frames/sal' + str(i) + '.gif',
duration=len(imagesS) * time_per_step, true_image=False, salience=True, salIMGS=luminance)
'''
#print(bufferArray[:, 0])
images = list(zip(bufferArray[:, 0]))
images.append(bufferArray[-1, 3])
#print(images)
images = np.vstack(images)
images = np.resize(images, [len(images), img_x, img_x, img_z])
#When we have depth and label buffer stacks, just keep depth buffer to display
if img_z > 1:
d = np.array([images[...,0]])
d = np.rollaxis(d, 0, 4)
l = np.array([images[...,1]])
l = np.rollaxis(l, 0, 4)
s = np.array([images[...,2]])
s = np.rollaxis(s, 0, 4)
make_gif(l, path + '/frames/l/labels' + str(i) + '.gif',
duration=len(images) * time_per_step,
true_image=True, salience=False)
make_gif(d, path + '/frames/d/depth' + str(i) + '.gif',
duration=len(images) * time_per_step,
true_image=True, salience=False)
make_gif(s, path + '/frames/image' + str(i) + '.gif',
duration=len(images) * time_per_step,
true_image=True, salience=False)
else:
s = np.array([images[...,0]])
s = np.rollaxis(s, 0, 4)
make_gif(s, path + '/frames/image' + str(i) + '.gif',
duration=len(images) * time_per_step,
true_image=True, salience=False)
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL, lineterminator = '\n')
wr.writerow([i, np.mean(jList[-summaryLength:]), np.mean(rList[-summaryLength:]),
path + '/frames/image' + str(i) + '.gif',
path + '/frames/log' + str(i) + '.csv',
path + '/frames/sal' + str(i) + '.gif'])
myfile.close()
with open(path + '/frames/log' + str(i) + '.csv', 'w') as myfile:
state_train = (np.zeros([1, h_size]), np.zeros([1, h_size]))
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL, lineterminator = '\n')
#Hard-coded number of actions. Need to work on that!
#wr.writerow(["ACTION", "REWARD", "A0", "A1", 'A2', 'V'])
'''
a_size = 3
wr.writerow(["ACTION", "REWARD"] + ["A" + str(act_idx) for act_idx in range(0, a_size)] + ['V'])
test_im = list(zip(bufferArray[:, 0]))
test_im = np.vstack(test_im)
if chls == 2:
test_s = test_im[:,0:-img_x*img_x]
else:
test_s = test_im
a, v = sess.run([mainQN.Advantage, mainQN.Value],
feed_dict={mainQN.scalarInput: np.vstack(test_s) / 255.0,
mainQN.trainLength: len(bufferArray),
mainQN.state_in: state_train,
mainQN.batch_size: 1})
'''
#Hard-code number of actions Need to work on that
#a_comprehension = [a[:, act_idx] for act_idx in range(0,a_size)]
#a_list = [bufferArray[:, 1] + bufferArray[:, 2]] + a_comprehension + [v[:, 0]]
#to_write = list(zip(a_list))
#wr.writerows(to_write)
#wr.writerows(list(zip(bufferArray[:, 1], bufferArray[:, 2], a[:,0], a[:,1], a[:,2], v[:, 0])))
# This code allows gifs to be saved of the training episode for use in the Control Center.
def make_gif(images, fname, duration=2, true_image=False, salience=False, salIMGS=None):
import moviepy.editor as mpy
def make_frame(t):
try:
#print('trying to print...')
x = images[int(len(images) / duration * t)]
#print(x)
        except IndexError:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x + 1) / 2 * 255).astype(np.uint8)
def make_mask(t):
try:
x = salIMGS[int(len(salIMGS) / duration * t)]
        except IndexError:
x = salIMGS[-1]
return x
clip = mpy.VideoClip(make_frame, duration=duration)
    if salience == True:
        mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
        # Chain the calls so the mask is not discarded (the original re-assigned
        # clipB from `clip`, dropping the mask).
        clipB = clip.set_mask(mask).set_opacity(0)
        mask = mask.set_opacity(0.1)
        mask.write_gif(fname, fps=len(images) / duration, verbose=False)
# clipB.write_gif(fname, fps = len(images) / duration,verbose=False)
else:
clip.write_gif(fname, fps=len(images) / duration, verbose=False)
|
{"hexsha": "18bad0dcc96be5a8e90f691f09d18743839f9390", "size": 8002, "ext": "py", "lang": "Python", "max_stars_repo_path": "helper2.py", "max_stars_repo_name": "Ohara124c41/DeepRL-AgentsDB", "max_stars_repo_head_hexsha": "5e5d1b1e983e1e5c1412e2c21227442050d3b555", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "helper2.py", "max_issues_repo_name": "Ohara124c41/DeepRL-AgentsDB", "max_issues_repo_head_hexsha": "5e5d1b1e983e1e5c1412e2c21227442050d3b555", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "helper2.py", "max_forks_repo_name": "Ohara124c41/DeepRL-AgentsDB", "max_forks_repo_head_hexsha": "5e5d1b1e983e1e5c1412e2c21227442050d3b555", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-01T21:41:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-01T21:41:22.000Z", "avg_line_length": 41.2474226804, "max_line_length": 137, "alphanum_fraction": 0.556735816, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2072}
|
%auto-ignore
%
\providecommand{\MainFolder}{..}
\documentclass[\MainFolder/Text.tex]{subfiles}
\begin{document}
\section{String topology and Chen's iterated integrals}
String topology of a manifold~$M$ is the study of the \emph{free loop space}
\[ \Loop M = \{\gamma: \Sph{1}\rightarrow M\text{ continuous}\}, \]
which is equipped with the compact-open topology, and of natural structures on it.
Each loop~$\gamma$ is parametrized, with base-point~$1$, and there is a natural $\Sph{1}$-action changing the base-point.
Therefore, we can distinguish the following two homology theories:
\begin{center}
\begin{tabular}{rl}
$\H(\Loop M)\quad\dotsc$& the \emph{singular homology} and \\[1ex]
$\H^{\Sph{1}}\!(\Loop M)\quad\dotsc$ & \parbox[t]{10cm}{the \emph{equivariant homology} --- ``the singular homology of the space of parametrized loops with the base-point forgotten.''}
\end{tabular}
\end{center}
In this thesis, we consider $\H^{\Sph{1}}\!(\Loop M)$ with coefficients in $\R$ only.
If $M=\Sigma$ is an oriented surface, we consider immersed loops with transverse double points and define a bracket and cobracket by Figure~\ref{Fig:ConstrLoop}.
%\footnote{If one wishes, and is allowed to, one can cut holes inside of the loops and make some non-contractible.}
\begin{figure}[t]
\begin{equation*}
\begin{aligned}
\StringOp_2\left(
\parbox[c]{2.85cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[green,dashed,thick,decoration={markings, mark=at position 0.25 with {\arrow{>}}},postaction={decorate}] ([shift=(0:\rad)]0,0) arc (0:360:\rad);
%\draw[green,thick,dotted] ([shift=(280:\rad)]0,0) arc (280:360:\rad);
\draw[red,thick,decoration={markings, mark=at position 0.25 with {\arrow{>}}},postaction={decorate}] (1.5*\rad,0) circle (\rad);
\end{tikzpicture}}
\right)
&=\parbox[c]{2.85cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[blue,thick] ([shift=(90:\rad)]0,0) arc (90:360:\rad); %Big loop on the lft
\draw[blue,thick] ([shift=(0:\rad)]0,0) arc (0:180:.25*\rad); %Small connecting loop
\draw[blue,thick] ([shift=(180:\rad)]1.5*\rad,0) arc (180:450:\rad); %Big loop on the right
\draw[blue,thick,decoration={markings, mark=at position 0.5 with {\arrow{<}}},
postaction={decorate}] ([shift=(90:\rad)]0,0) to ([shift=(90:\rad)]1.5*\rad,0); %The oriented connecting line
\end{tikzpicture}}
\ -\
\parbox[c]{2.85cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[blue,thick] ([shift=(0:\rad)]0,0) arc (0:270:\rad); %Big loop on the left
\draw[blue,thick] ([shift=(180:\rad)]1.5*\rad,0) arc (180:360:.25*\rad); %Small connecting loop
\draw[blue,thick] ([shift=(270:\rad)]1.5*\rad,0) arc (270:540:\rad); %Big loop on the right
\draw[blue,thick,decoration={markings, mark=at position 0.5 with {\arrow{>}}},
postaction={decorate}] ([shift=(270:\rad)]0,0) to ([shift=(270:\rad)]1.5*\rad,0);
\end{tikzpicture}}\\
\StringCoOp_2\left(\hspace{-.4em}
\parbox[c]{3.3cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[blue,thick,decoration={markings, mark=at position 0.25 with {\arrow{>}}},
postaction={decorate}] ([shift=(45:\rad)]0,0) arc (45:315:\rad);
\draw[blue,thick,decoration={markings, mark=at position 0.25 with {\arrow{<}}},
postaction={decorate}] ([shift=(-135:\rad)]2*\rad,0) arc (-135:135:\rad);
\draw[blue,thick] (45:\rad) to[out=-45,in=135] ($(-135:\rad)+(2*\rad,0)$);
\draw[blue,thick] (-45:\rad) to[out=45,in=225] ($(135:\rad)+(2*\rad,0)$);
\end{tikzpicture}}\right)
&=\parbox[c]{1.64cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[green,thick,dashed,decoration={markings, mark=at position 0.25 with {\arrow{>}}},
postaction={decorate}] (0,0) circle (\rad);
\end{tikzpicture}}\otimes
\parbox[c]{1.64cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[red,thick,decoration={markings, mark=at position 0.25 with {\arrow{<}}},
postaction={decorate}] (0,0) circle (\rad);
\end{tikzpicture}}\ -\
\parbox[c]{1.64cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[red,thick,decoration={markings, mark=at position 0.25 with {\arrow{<}}},
postaction={decorate}] (0,0) circle (\rad);
\end{tikzpicture}} \otimes
\parbox[c]{1.64cm}{
\begin{tikzpicture}
\def\rad{.8cm}
\draw[green,dashed,thick,decoration={markings, mark=at position 0.25 with {\arrow{>}}},
postaction={decorate}] (0,0) circle (\rad);
\end{tikzpicture}}
\end{aligned}
\end{equation*}
\caption[Bracket and cobracket in equivariant string topology.]{The bracket and cobracket in equivariant string topology. Imagine some genus so that the operations are homotopically non-trivial.}
\label{Fig:ConstrLoop}
\end{figure}
In words:\ToDo[noline,caption={How is it with loop parametrization}]{How is it precisely with the speeds?}
\begin{description}[leftmargin=*]
\item[$\StringOp_2$:] Imagine putting one base-point $b_1$ on the first loop $\gamma_1$ and another base-point $b_2$ on the second loop $\gamma_2$ in all possible positions. Whenever $\gamma_1(b_1) = \gamma_2(b_2) = p$, construct a new loop~$\gamma = \gamma_1 \Star_p \gamma_2$ by running first along $\gamma_1$ with double speed starting and ending at~$b_1$ and continuing along $\gamma_2$ starting and ending at $b_2$. Forget the base-points and multiply $\gamma$ with the sign of the intersection $\varepsilon(p;\gamma_1, \gamma_2)$. Should more intersections occur, take the sum.
\item[$\StringCoOp_2$:] Imagine putting the base-points $b_1$ and $b_2$ on $\gamma$ in all possible positions such that $b_1 \neq b_2$. Whenever $\gamma(b_1) = \gamma(b_2)=p$, split $\gamma$ into~$\gamma_1$ and $\gamma_2$ as follows. The first loop~$\gamma_1$ is the portion of~$\gamma$ from $b_1$ to $b_2$ and the second loop $\gamma_2$ is the portion from $b_2$ to $b_1$, each run with correspondingly rescaled speed. Forget the base-points, form the tensor product $\gamma_1 \otimes \gamma_2$ and multiply it with $\varepsilon(p; \gamma)$. Should more self-intersections occur, take the sum of the tensors.
\end{description}
The operations $\StringOp_2$ and $\StringCoOp_2$ are known as the \emph{Goldman bracket} and the \emph{Turaev cobracket} and were defined and studied in \cite{Goldman1986} and \cite{Turaev1991}, respectively.
In \cite{Sullivan1999}, it was demonstrated that the construction of~$\StringOp_2$ extends to families of loops and to an arbitrary dimension~$n$ of an oriented manifold~$M$; it produces a Lie bracket on the equivariant homology.
The construction of $\StringCoOp_2$ generalizes too and gives a Lie cobracket; this is explained for instance in~\cite{Cieliebak2007}.
The picture is always Figure~\ref{Fig:ConstrLoop}, just the intersection points come from transverse intersections of smooth parameter spaces of points on loops in $M$; consequently, both~$\StringOp_2$ and~$\StringCoOp_2$ have degrees $2-n$.
Another definition of the coproduct is used in \cite{Basu2011}, where loops in~$M$ are viewed as open strings in $M\times M$ with endpoints at the diagonal.
In order to make these geometric constructions rigorous, the most straightforward way (which would work over $\Z$) is to use a \emph{geometric homology theory} of $M$ based on smooth chains such that the transverse intersection of two smooth chains is again a smooth chain.
Such theory for smooth manifolds was constructed in \cite{Lipyanskiy2014}.
A~version for general topological spaces is proposed in \cite{Cieliebak2013}, and some details regarding triangulations are addressed in~\cite{Hajek2014} (see also the discussion at \cite{MO157762}).
Note that $\StringOp_2$ and $\StringCoOp_2$ are only ``transversally defined'' on the chain level, and in order to define them on homology, it is important that we can homotop to a generic situation within the homology class.
The main theorem of \cite{Sullivan2002} asserts that $\StringOp_2$ and $\StringCoOp_2$ induce the structure of an \emph{involutive bi-Lie algebra}, abbreviated $\IBL$, of degree $2-n$ on the equivariant homology $H^{\Sph{1}}\!(\Loop M,M)$ relative to constant loops $M\xhookrightarrow{}\Loop M$.
Modding out constant loops is necessary for $\StringCoOp_2$ to be well defined because of the phenomenon of \emph{``vanishing of small loops''} illustrated for instance in~\cite{Cieliebak2007}: Let $\sigma\in C_1(\Loop M)$ be a $1$-chain supported on $[0,1]$ which for $t=0$ agrees with the loop in the argument of $\StringCoOp_2$ in Figure~\ref{Fig:ConstrLoop}, next, for $t\in (0,1)$, the left knot~$L$ contracts to the mid-point $p$, and for $t=1$, only the right knot $R$ remains (thus the name ``vanishing of small loops'').
It is then easy to see that
\[ 0 \neq \Bdd \StringCoOp_2(\sigma) - \StringCoOp_2(\Bdd \sigma) = p\otimes R - L \otimes p \in C(M)\otimes C(\Loop M) + C(\Loop M)\otimes C(M). \]
%In fact, there might be another problem with the restriction of $\StringOp_2$ to $H^{\Sph{1}}\!(\Loop M,M)$ as constant loops might not form an ideal (this is easy to see for $M=\T^2$).
Since the string bracket $\StringOp_2$ applied to a chain of constant loops gives a degenerate chain, it restricts to a Lie bracket on the relative homology.\footnote{This contrasts with the situation on the non-equivariant homology $\H(\Loop M)$, where constant loops do not always form an ideal for the associative loop product; this is easy to see in the case of the torus~$\T^2$. They do form an ideal, however, provided that the Euler characteristic $\chi(M)$ is non-zero, see \cite{Tamanoi2010}.
On the other hand, if $\chi(M)=0$, then the loop coproduct admits an extension to $\H(\Loop M)$; this is possibly dependent on the choice of a non-vanishing vector field, see \cite{Basu2011}.}
%Our current understanding of this issue is that there should be an $\IBL$-structure either on $\H^{\Sph{1}}\!(\Loop M)$, if the Euler characteristics $\chi(M)$ vanishes, or on the homology $\H^{\Sph{1}}\!(\Loop M,\mathrm{pt})$ relative to one point, if $\chi(M) \neq 0$. However, the extension of $\StringCoOp_2$ requires choices (of a nowhere vanishing vector field, see \cite{Basu2011}), and it is not clear to which extent the $\IBL$-structure depends on it.
In work in progress~\cite{CieliebakHingston2018}, Poincar\'e duality on the Rabinowitz-Floer homology of the unit cotangent bundle of $M$ is introduced and related to the non-equivariant string topology via a long exact sequence.
This shall give a canonical extension of $\StringCoOp_2$ to $\H^{\Sph{1}}\!(\Loop M,\mathrm{pt})$, the relative homology modulo one point, which together with $\StringOp_2$ would make $\H^{\Sph{1}}\!(\Loop M,\mathrm{pt})$ into a bi-Lie algebra.
In fact, this is what our chain model is supposed to compute.
Being aware of this context, we will use the symbol $\RedEquivHom(\Loop M)$ as an avatar for either $\H^{\Sph{1}}\!(\Loop M,M)$ or $\H^{\Sph{1}}\!(\Loop M,\mathrm{pt})$.
It is expected that the $\IBL$-structure on $\RedEquivHom(\Loop M)$ is induced from a much richer and in some sense natural algebraic structure on the chain level, whose homotopy type is an invariant of $M$.
In fact, there is a notion of \emph{strong homotopy involutive bi-Lie algebra}, abbreviated $\IBLInfty$, which was developed in~\cite{Cieliebak2015}.
An $\IBLInfty$-algebra consists of operations $(\OPQ_{klg})$ for $k$, $l \ge 1$ and $g\ge 0$, where $\OPQ_{110}$ is a boundary operator, $\OPQ_{210}$ a bracket and $\OPQ_{120}$ a cobracket which satisfy the $\IBL$-relations up to a coherent system of higher homotopies~$(\OPQ_{klg})$.
Consider the string space
\[ \StringSpace M \coloneqq (\EG\Sph{1}\times \Loop M)/\Sph{1}, \]
i.e., the homotopically correct version of the quotient $\Loop M/\Sph{1}$, and let $(C(\StringSpace M),\Bdd)$ be the singular chain complex of $\StringSpace M$.
Recall that~$\StringOp_2$ and~$\StringCoOp_2$ are partially defined on transverse smooth chains therein.
An \emph{$\IBLInfty$-chain model for equivariant string topology} is an $\IBLInfty$-algebra $(\Model,(\OPQ_{klg}))$ ($\Model$ stands for ``model'') together with a weak homotopy equivalence ($\coloneqq$~zig-zag of quasi-isomorphisms) of $(C(\StringSpace M),\Bdd)$ and $(\Model,\OPQ_{110})$ which induces an isomorphism of $\IBL$-algebras
\[ (\RedEquivHom(\Loop M),\StringOp_2,\StringCoOp_2) \simeq (\H(\Model,\OPQ_{110}), \OPQ_{210}, \OPQ_{120}). \]
Note that there can be various non-homotopically equivalent $\IBLInfty$-chain models.
On the other hand, the properad $\IBLInfty$ is a quasi-free resolution of the properad $\IBL$ and as such has convenient homotopy theoretical properties; for example, homotopy inverses of quasi-isomorphisms exist.
These properties imply that any two weakly homotopy equivalent $\IBLInfty$-algebras are homotopy equivalent.
In this thesis, we use a version of \emph{perturbative Chern-Simons theory} for an oriented compact Riemannian manifold~$M$ to construct an $\IBLInfty$-chain model for the equivariant string topology of $M$.
The chains are cyclic Hochschild cochains of the de Rham cohomology $\HDR\coloneqq\HDR(M)$, and the homotopy type of the model is supposed to be an invariant of~$M$ (perhaps topological).
The construction involves a version of Feynman integrals, and the proof that it is well-defined relies on the theory of integration on certain compactifications of configuration spaces, which is currently being developed in~\cite{Cieliebak2018}.
The concrete form of this chain model was sketched in \cite{Cieliebak2015}.
For the sake of the big picture we remark that it is expected that evaluations at boundaries of pseudo-holomorphic curves in the symplectization of the unit cotangent bundle of $M$ induce an $\IBLInfty$-quasi-isomorphism of the corresponding symplectic field theory and the $\IBLInfty$-chain model of string topology; see~\cite{Cieliebak2007}.
We now describe the underlying chain complex of our $\IBLInfty$-chain model and the quasi-isomorphism to string topology in more details.
Let $\DR\coloneqq\DR(M)$ be the space of smooth de Rham forms on $M$, and let $\BCyc \DR$ be the graded vector space generated by cyclic words
\[ \omega_1\dotsb\omega_k = (-1)^{\Abs{\omega_k}(\Abs{\omega_1}+\dotsb+\Abs{\omega_{k-1}})} \omega_k\omega_1 \dotsb \omega_{k-1} \]
with homogeneous components $\omega_1$, $\dotsc$, $\omega_k \in \DR$ for $k\ge 1$.
The grading satisfies $\Abs{\omega_i} = \Deg(\omega_i) - 1$, where $\Deg(\omega_i)$ denotes the form-degree of $\omega_i$.
We call $\BCyc \DR$ the \emph{cyclic bar complex of $\DR$} (it might be described as ``reduced'' because we omit $k=0$).
On $\BCyc \DR$, we consider the \emph{Hochschild differential}
\[ \Hd(\omega_1 \dotsb \omega_k) = \begin{aligned}[t]
&\sum_{i=1}^k (-1)^{\Abs{\omega_1} + \dotsb + \Abs{\omega_{i-1}}}\omega_1 \dotsb\Dd\omega_i\dotsb \omega_k \\
+&\sum_{i=1}^{k-1} (-1)^{\Abs{\omega_1}+\dotsb + \Abs{\omega_{i-1}} + \Abs{\omega_i} + 1}\omega_1 \dotsb \omega_i \wedge \omega_{i+1} \dotsb \omega_k\\
+&(-1)^{\Abs{\omega_k}(\Abs{\omega_1} + \dotsb + \Abs{\omega_{k-1}}) + \Abs{\omega_k} + 1}\omega_k \wedge \omega_1 \dotsb \omega_{k-1}.
\end{aligned}\]
It descends from the Hochschild differential on the bar construction $\B \DR$, which is defined as the sum of the unique extensions of degree shifts of $\Dd$ and $\wedge$ to coderivations of $\B \DR$ plus the wrap-around term.
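For instance, unwinding the definition for a word of length two gives
\[ \Hd(\omega_1\omega_2) = \Dd\omega_1\,\omega_2 + (-1)^{\Abs{\omega_1}}\omega_1\,\Dd\omega_2 + (-1)^{\Abs{\omega_1}+1}\,\omega_1\wedge\omega_2 + (-1)^{\Abs{\omega_1}\Abs{\omega_2}+\Abs{\omega_2}+1}\,\omega_2\wedge\omega_1, \]
where the last two summands are cyclic words of length one.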
\emph{Chen's iterated integral} is the map
\[ I: \begin{aligned}[t]
\BCyc \DR& \longrightarrow C^*(\StringSpace M)\\
\omega_1 \dotsb \omega_k & \longmapsto \Bigl(\sigma \mapsto \varepsilon(\omega) \int_{K_\sigma \times \Delta^k} \omega_1(\tilde{\sigma}(x,t_1)) \dotsb \omega_k(\tilde{\sigma}(x,t_k))\Bigr),
\end{aligned}\]
where $\varepsilon(\omega)$ is the sign
\[ \varepsilon(\omega) = (-1)^{(k-1)(\Abs{\omega_1} + 1) + (k-2)(\Abs{\omega_2} + 1) + \dotsb + \Abs{\omega_{k-1}} + 1}, \]
$K_\sigma$ is a smooth chain in $M$, i.e., a manifold with corners, and $\tilde{\sigma}: K_\sigma\times\Sph{1} \rightarrow M$ is the projection of a lift $K_\sigma \xrightarrow{\hspace{.2em}\sigma\hspace{.2em}}\StringSpace M \dasharrow \EG \Sph{1} \times \Loop M$ to the second factor.
We also identify $\Sph{1} = \R/\Z$.
The map $I$ is a chain map with respect to the grading of $\BCyc\DR$ by $\Abs{\omega_1 \dotsc \omega_k} = \Abs{\omega_1} + \dotsb + \Abs{\omega_k}$.
If $\pi_1(M) = \{1\}$, then $I$ induces an isomorphism
\begin{equation}\label{Eq:IsomKai}
\H(\BCyc \DR,\Hd)/\Span\{[1^{2k-1}]\mid k\in\N\} \simeq \H^*(\StringSpace M)/\R[u],
\end{equation}
where $\Span\{[1^{2k-1}]\mid k\in\N\} = \H(\BCyc \R,\Hd)$ and the polynomial ring $\R[u]$ with $\Abs{u} = 2$ comes from the module structure on $\H^*(\StringSpace M)$ induced from $\StringSpace M = (\EG \Sph{1} \times \Loop M)/\Sph{1} \rightarrow \EG\Sph{1}/\Sph{1} = \CP^{\infty}$.
Note that the quotient on the left hand side agrees with the homology of $\coker(\BCyc \R \xhookrightarrow{} \BCyc \DR)$.
Proving \eqref{Eq:IsomKai} is the goal of \cite{Cieliebak2018b}.
They study different totalizations of Connes' cyclic bicomplex of $\DR$ and identify the one to which $(\BCyc \DR,\Hd)$ is weakly equivalent.
Then they use the isomorphism from \cite{Getzler}.
\section{IBL-infinity chain model and Chern-Simons theory}
We consider the \emph{dual cyclic bar complex}
\[ \CDBCyc \DR = \bigoplus_{d\in\Z} \prod_{k=1}^\infty (\BCyc_k \DR)^{d*}, \]
where $(\BCyc_k \DR)^{d*}$ denotes the linear dual to the degree $d$ component of the weight $k$ component $\BCyc_k \DR$ of $\BCyc \DR$ ($k$ is the number of letters in the generating word).
We equip $\CDBCyc \DR$ with the dual Hochschild differential~$\Hd^*$.
By taking~$\prod_k$, i.e., the completion of~$\bigoplus_k$ with respect to the filtration by weights, we allow ``bubbling off'' of forms $\omega_i$ of form-degree~$1$ with $\Abs{\omega_i}=0$ and constants $1$ with $\Abs{1} = -1$.
Notice that if we take $\HDR$ instead of $\DR$, then the completion is relevant only if $\HDR^1 \neq 0$.
The dual of Chen's iterated integral map provides a quasi-isomorphism
\[I^*: (C(\StringSpace M),\Bdd) \rightarrow (\CDBCyc\DR,\Hd^*) \]
for simply-connected $M$.
%(in fact, $ \CDBCyc \DR$ is the graded dual of $\BCyc \DR$).
In the following discussion, which does not aim to be rigorous at all, we introduce a \emph{physical interpretation.}
%and reflects the author's wild imagination in the night before submitting the thesis. For more serious mathematics, please, see the rest 250+ pages.
We think of
\begin{itemize}
\item elements of $\DR$ as \emph{fields,}
\item elements of $\BCyc \DR$ as \emph{field strings} (not to be confused with string fields :-)) and
\item elements of the space
\[ \Fun(\BCyc \DR[1]) \coloneqq \hat{\Sym}(\DBCyc\DR[1])\COtimes \R((\hbar)) \]
as \emph{observables on field strings.}
Here, $\R((\hbar))$ is the ring of Laurent series in Planck's constant $\hbar$, $\hat{\Sym}$ the completed symmetric algebra and $\hat{\otimes}$ the completed tensor product.
\end{itemize}
If $\sigma:\Sph{1}\rightarrow M$ is a string, the observable $I^*(\sigma)$ ``localizes'' on field strings which approximate $\sigma$; for instance, it holds $I^*(\sigma)(\omega) = \int_{\sigma} \omega$, and thus $I^*(\sigma)$ ``localizes'' at fundamental forms of $\sigma(\Sph{1})$. We imagine that we decorate $\sigma$ with a field string $\omega_1 \dotsc \omega_k$ as in Figure~\ref{Fig:GeomStr} and get a number $I^*(\sigma)(\omega_1\dotsc\omega_k)$. The isomorphism \eqref{Eq:IsomKai} guarantees that the observable~$I^*(\sigma)$ determines $\sigma$ up to a boundary term.
\begin{figure}[t]
\centering
\def\rad{2}
\def\len{.4}
\def\smalllen{.1}
\def\num{6}
\begin{tikzpicture}
\tikzset{point/.style = {draw, circle, fill=black, minimum size=2pt,inner sep=0pt}}
\coordinate (C) at (0,0);
\draw([shift=(0:\rad)]C) arc (0:360:\rad);
\foreach \x in {1,...,\num} {
\node at ([shift=(\x*360/\num-360/\num:\rad+\len)]C) {$\omega_\x$};
%\draw ([shift=(\x*360/\num:\rad-\smalllen)]C) -- ([shift=(\x*360/\num:\rad+\smalllen)]C);
\node[point,style={fill=white}] at ([shift=(\x*360/\num-360/\num:\rad)]C) {};
}
\end{tikzpicture}
%\includegraphics[trim=1.2cm 24cm 14cm .3cm]{\GraphicsFolder/kruh.pdf}
\caption{Inserting fields on strings.}
\label{Fig:GeomStr}
\end{figure}
We will be dealing with \emph{two ``dynamical'' theories:} one is the theory of fields $\omega\in \DR$ and one of field strings $\omega_1 \dotsb \omega_k \in \BCyc \DR$.
The field theory is at hand.
We know that $(\DR,\Dd,\wedge,\langle\cdot,\cdot\rangle)$, where $\langle\omega_1,\omega_2\rangle = \int_M \omega_1 \wedge \omega_2$ for $\omega_1$, $\omega_2\in \DR$, is a \emph{symmetric dg-Frobenius algebra}.
It is well-known that finite-dimensional symmetric Frobenius algebras $V$ are equivalent to $2d$ topological quantum field theories (TQFT).
%One needs finitely many dimensions so that one can write the identity as $T = \sum \pm e^i \otimes e_i$ is well-defined.
Finite-dimensionality is necessary so that one can write the identity as $\Id = \sum \langle \cdot,e^i\rangle e_i$, or, in other words, so that the identity propagator
\[
T = \sum \pm e^i\otimes e_i
\]
is well defined.
We will ignore this issue and substitute $V=\DR$ for now, although we will soon transfer to $\HDR$, where everything works just fine.
%One can also take the non-degenerate quotient of the small subalgebra of $\DR$ with respect to the canonical Hodge decomposition associated to a Riemannian metric on $M$. We denote it by $\VansQuotient(\VansSmall(\DR))$ and define in Chapter~\ref{Chap:5}. If our conjectures are correct, the results of our construction should be homotopy equivalent.
We will represent interactions of fields via Feynman graphs drawn on surfaces --- the trivial cylinder for fields, i.e., the free propagation, will be a line, and the pair of pants, i.e., the interaction via the intersection~$\wedge$, will be a point with $3$ segments emanating from it (we do not have to distinguish inputs and outputs by cyclic symmetry).
Let us now consider field strings. Figure \ref{Fig:OpCoOpDiag} defines the operations
\[ \OPQ_{210}: \Ext_2 \DBCyc V \longrightarrow \DBCyc V \quad\text{and}\quad \OPQ_{120}: \DBCyc V \longrightarrow \Ext_2\DBCyc V,
\]
where $\Ext_k \DBCyc V$ denotes the $k$-th exterior power of $\DBCyc V$ seen as the $k$-th symmetric power of the degree shift $(\DBCyc V)[1]$.
We read the diagram from the top to the bottom but imagine field strings $\omega\in \BCyc V$ being fed into $\psi\in \DBCyc V$ from the bottom to the top.
We might think of these diagrams as \emph{string interaction diagrams} for strings freely moving in a topological space $M$, connecting and disconnecting.
\begin{figure}[t]
\centering
%\includegraphics[trim=2cm 24cm 10cm .3cm]{\GraphicsFolder/op.pdf}
\input{\GraphicsFolder/pair_of_pants.tex}
\caption[Operations $\OPQ_{210}$ and $\OPQ_{120}$.]{Operations $\OPQ_{210}$ and $\OPQ_{120}$. Fields propagate from the bottom to the top. An additional dualization is required when two outputs/inputs are connected; hence the emergence of the identity propagator.}
\label{Fig:OpCoOpDiag}
\end{figure}
%$I^*(\sigma)$ localizes which
%The output string will be localized at any field string which arose by propagating the focalized string fields along the diagram. I.e. on those which are obtained by propagating those on which the input strings are non-zero along the diagram.
%
%From the point of view of the TQFT, these propagators are trivial cylinders, and hence nothing is happening.
This suggests that $\OPQ_{210}$ and $\OPQ_{120}$ are related to $\StringOp_2$ and $\StringCoOp_2$.
Formulas for~$\OPQ_{210}$ and~$\OPQ_{120}$ were written down in \cite{Cieliebak2015}; they are also clear from the figure by decorating the world-lines with the identity (or $T$) and evaluating in a straightforward way.
It was proven that $\OPQ_{210}$ and $\OPQ_{120}$ indeed constitute an $\IBL$-algebra on~$\DBCyc V$ (note that this is not a TQFT for strings!).
\ToDo[noline,caption={Which degree shift}]{Need to sort out which degree shift for the bialgebra one needs!
How is it with $2-n$.}
As a mathematical remark, we will show that $\OPQ_{210}$ is obtained from the Gerstenhaber bracket on Hochschild cochains via cyclization by $\langle\cdot,\cdot\rangle$ and that $\OPQ_{120}$ is a factorization of an extension of Schwarz's canonical $\BV$-operator on $\Fun(V[1])$ to cyclic invariants with respect to the cyclic shuffle product.
This makes sense because an odd degree shift of a finite-dimensional symmetric dg-Frobenius algebra is an odd symplectic vector space.
Since $\DBCyc V [1]$ is not naturally an odd symplectic vector space, there is no Schwarz $\BV$-operator on $\Fun(\DBCyc V [1])$. However, the following canonical operator
\[ \BVOp_{\mathrm{s}} \coloneqq \hat{\OPQ}_{120} + \hbar \hat{\OPQ}_{210}: \Fun(\DBCyc V [1]) \rightarrow \Fun(\DBCyc V [1]), \]
where $\hat{\cdot}$ denotes the canonical extension to (co)derivations, is a $\BV$-operator with respect to the function multiplication; we call it the \emph{string $\BV$-operator.}
In physics, a $\BV$-operator $\BVOp$ on $\Fun(U)$, where $U$ is the space of fields (typically an odd cotangent bundle with classical fields in the base and ghost fields in the fibers), is related to the measure in the path integral $\int \mu$.
An action $S\in \Fun(U)$ satisfying the \emph{quantum master equation} (QME)
\[ \BVOp S + \frac{1}{2}\{S,S\} = 0 \]
defines a new measure $e^{-S} \mu$, and the corresponding twisted $\BV$-operator (or rather $\BVInfty$-operator) satisfies $\BVOp^S = e^{-S} \BVOp e^{S}$.
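For a $\BV$-operator of order two, a direct computation gives $e^{-S}\BVOp(e^{S}F) = \BVOp F + \{S,F\} + \bigl(\BVOp S + \tfrac{1}{2}\{S,S\}\bigr)F$, so the QME is precisely the condition that the twisted operator $\BVOp^S = \BVOp + \{S,\cdot\}$ squares to zero again; in the $\BVInfty$-setting, higher-order brackets contribute analogous correction terms.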
For field strings, we define the following \emph{actions} $S_{\mathrm{free}}$, $S_{\mathrm{int}}\in \Fun(\BCyc V[1])$, which remind us of the \emph{Chern-Simons functional:}
\[
S_{\mathrm{free}}(\omega_1 \omega_2) \coloneqq \pm \hbar^{-1}\int_M \omega_1 \wedge \Dd \omega_2 \quad\text{and}\quad S_{\mathrm{int}}(\omega_1 \omega_2 \omega_3) \coloneqq \pm \hbar^{-1}\int_M \omega_1 \wedge \omega_2 \wedge \omega_3.
\]
More precisely, $S_{\mathrm{free}}$ and $S_{\mathrm{int}}$ are linear functions on $\BCyc V[1]$ which vanish everywhere but on field strings of lengths $2$ and $3$, respectively.
It turns out that $S_{\mathrm{free}}$ and $S\coloneqq S_{\mathrm{free}} + S_{\mathrm{int}}$ satisfy the QME for $\BVOp_{s}$.
The twisted $\BV$-operators look like
\[
\BVOp^{S_{\mathrm{free}}}_{s} = \hat{\OPQ}_{110} + \BVOp_{\mathrm{s}} \quad \text{and}\quad \BVOp^{S}_{s} = \hat{\OPQ}_{110} + \reallywidehat{\OPQ_{210}(S_{\mathrm{int}},\cdot)} + \BVOp_{\mathrm{s}}.
\]
Figure~\ref{Fig:NewTerm} depicts the new terms $\OPQ_{110}$ and $\OPQ_{210}(S_{\mathrm{int}},\cdot)$ in $\BVOp^\Action_s$.
\begin{figure}[t]
\centering
\def\caphght{.6}
\def\BddMin{.2}
\def\BddMaj{.4}
\def\HorLen{2}
\def\PMCVert{1}
\def\PantsVert{2}
\def\PantsPlunge{.5}
\newcommand{\BddSurf}[6][0]{
% #1 rotation (the optional argument)
% #2 is the center, e.g., C1 or 0:1
% #3 is the major semiaxis
% #4 is the minor semiaxis
% #5 is the style of the upper half
% #6 is the style of the lower half
\draw[#5,rotate=#1] ([shift=(0:{#3} and {#4})]#2) arc (0:180:{#3} and {#4});
\draw[#6,rotate=#1] ([shift=(180:{#3} and {#4})]#2) arc (180:360:{#3} and {#4});
%
}
\vcenterline{
\begin{tikzpicture}[scale=1.5]
\tikzset{point/.style = {draw, circle, fill=black, minimum size=2pt,inner sep=0pt}}
\node at (0,.9) {};
\coordinate (CT) at (0,0);
\coordinate (CB) at (0,-\PantsVert);
\draw ([shift={(-\BddMaj,0)}]CB) -- ([shift={(-\BddMaj,0)}]CT);
\draw ([shift={(\BddMaj,0)}]CB) -- ([shift={(\BddMaj,0)}]CT);
\BddSurf{CB}{\BddMaj}{\BddMin}{dotted}{}
\BddSurf{CT}{\BddMaj}{\BddMin}{}{}
\node[point,style={fill=white}] (NT1) at ([shift=(-40:{\BddMaj} and {\BddMin})]CT) {};
\node[point,style={fill=white}] (NT2) at ([shift=(-100:{\BddMaj} and {\BddMin})]CT) {};
\node[point,style={fill=white}] (NT3) at ([shift=(140:{\BddMaj} and {\BddMin})]CT) {};
\node[point,style={fill=white}] (NT4) at ([shift=(70:{\BddMaj} and {\BddMin})]CT) {};
\node[point,style={fill=white}] (NB1) at ([shift=(-40:{\BddMaj} and {\BddMin})]CB) {};
\node[point,style={fill=white}] (NB2) at ([shift=(-100:{\BddMaj} and {\BddMin})]CB) {};
\node[point,style={fill=white}] (NB3) at ([shift=(140:{\BddMaj} and {\BddMin})]CB) {};
\node[point,style={fill=white}] (NB4) at ([shift=(70:{\BddMaj} and {\BddMin})]CB) {};
\draw (NT1) -- (NB1);
\draw (NT2) -- (NB2);
\draw[dashed] (NT3) -- (NB3);
\draw[dashed] (NT4) -- (NB4);
\node[fill=white] at ($.5*(NT2)+.5*(NB2)$) {$\color{green}\Dd$};
\end{tikzpicture}}
\qquad + \qquad\vcenterline{
\begin{tikzpicture}[scale=1.5]
\tikzset{point/.style = {draw, circle, fill=black, minimum size=2pt,inner sep=0pt}}
\coordinate (C1) at (0,0); % left most
\coordinate (CC) at ($(C1) + (\HorLen,0)$); % the connection
\coordinate (CV) at ($(C1) + (.5*\HorLen,\PMCVert)$); % the vertical one
\coordinate (C2) at ($(CC) + (.5*\HorLen,-\PantsVert)$); % the right bottom one
\coordinate (CP) at ($(CC) + (.5*\HorLen,-\PantsPlunge)$); % the middle of propagator
\coordinate (C3) at ($(CC) + (\HorLen,0)$); % the right one
\coordinate (CT) at ($(CC) + (0,\caphght)$);
\draw ($(CC) + (-\BddMaj,0)$) to[out=90,in=180] (CT);
\draw (CT) to[out=0,in=90] ($(CC)+(\BddMaj,0)$);
\BddSurf{C2}{\BddMaj}{\BddMin}{dotted}{}
\BddSurf{C3}{\BddMaj}{\BddMin}{}{}
\BddSurf{CC}{\BddMaj}{\BddMin}{dotted}{dotted}
\draw ([shift={(-\BddMaj,0)}]CC) to[out=-90,in=90] ([shift={(-\BddMaj,0)}]C2);
\draw ([shift={(\BddMaj,0)}]C2) to[out=90,in=-90] ([shift={(\BddMaj,0)}]C3);
\draw ([shift={(\BddMaj,0)}]CC) to[out=-90,in=180] (CP);
\draw (CP) to[out=0,in=-90] ([shift={(-\BddMaj,0)}]C3);
\draw[thick,red] (CT) to[out=-50,in=90] ([shift=(-40:{\BddMaj} and {\BddMin})]CC) to[out=-85,in=180] ([shift={(0,.-.25*\PantsPlunge)}]CP) to[out=0,in=-90] ([shift=(-140:{\BddMaj} and {\BddMin})]C3);
% The identity line
%START: Inputs from C2
\draw ([shift=(245:{\BddMaj} and {\BddMin})]C2) to[out=90,in=-80] ([shift=(240:{\BddMaj} and {\BddMin})]CC) to[out=90,in=-120] (CT);
\draw[dashed] ([shift=(110:{\BddMaj} and {\BddMin})]C2) to[out=90, in=-85] ([shift=(90:{\BddMaj} and {\BddMin})]CC) to[out=90,in=-90] (CT);
\draw ([shift=(-60:{\BddMaj} and {\BddMin})]C2) to[out=90, in=-110] ([shift=(-40:{\BddMaj} and {\BddMin})]C3);
\draw[dashed] ([shift=(70:{\BddMaj} and {\BddMin})]C2) to[out=90, in=-90] ([shift=(80:{\BddMaj} and {\BddMin})]C3);
%END: Inputs from C2
%\node[label={[yshift=.1cm] $\psi$}] at (C3) {};
\node[label={[yshift=-.8cm] $\color{red}\mathrm{T}$}] at (CP) {};
\node[point,style={fill=white}] at ([shift=(80:{\BddMaj} and {\BddMin})]C3) {};
\node[point,style={fill=white}] at ([shift=(-40:{\BddMaj} and {\BddMin})]C3) {};
\node[point,style={fill=white}] at ([shift=(-140:{\BddMaj} and {\BddMin})]C3) {};
\node[point,style={fill=white}] at ([shift=(245:{\BddMaj} and {\BddMin})]C2) {};
\node[point,style={fill=white}] at ([shift=(110:{\BddMaj} and {\BddMin})]C2) {};
\node[point,style={fill=white}] at ([shift=(-60:{\BddMaj} and {\BddMin})]C2) {};
\node[point,style={fill=white}] at ([shift=(70:{\BddMaj} and {\BddMin})]C2) {};
\node[point,label={[above]$\color{green}\wedge$}] at (CT) {};
\end{tikzpicture}}
%\includegraphics[trim=1cm 24.5cm 12.6cm .4cm]{\GraphicsFolder/diff.pdf}
\caption{Adding $\Dd$ and $\wedge$ via $\Action$.}
\label{Fig:NewTerm}
\end{figure}
The corresponding $\dIBL$-algebra reads
\[ \Bigl(\DBCyc V,\OPQ_{110}^\MC\coloneqq \OPQ_{110} + \OPQ_{210}(S_{\mathrm{int}},\cdot),\OPQ_{210},\OPQ_{120}\Bigr). \]
One can show that $\OPQ_{110}^\MC$ is the Hochschild differential.
If this were well-defined for $V = \DR$, then it would surely be a model of string topology.
%They are all define $\dIBL$-structures on $\DR$ then
%$\Model \coloneqq \CycC(\DR)$ with the $\dIBL$-structure $(\OPQ_{110} + \OPQ_{210}(S_{\text{free}},\cdot), \OPQ_{210}, \OPQ_{120})$ would be the correct $\IBLInfty$-chain model for string topology.
%Making from operations $V^{\otimes k} \rightarrow V$ is called cyclization and we normaly non-degenerate pairing for that.
As in quantum field theories, we are going to ``formally'' \emph{integrate out redundant degrees of freedom in the path integral} of our ill-defined theory and obtain a well-defined theory on $\DBCyc\HDR$, which is ``formally'' homotopy equivalent to the original one.
We pick a Riemannian metric on $M$ and consider the Hodge decomposition
\[
\DR = \Harm \oplus \Dd\DR \oplus \CoDd\DR,
\]
where $\Harm \simeq \HDR$ is the space of harmonic forms defined by $\Dd \omega = \CoDd \omega = 0$.
One may interpret $\Dd \omega = 0$ as the Euler-Lagrange equation and $\CoDd \omega = 0$ as the Lorenz gauge.
The inverse of $\Dd: \CoDd\DR \rightarrow \Dd\DR$ extended by $0$ to $\Harm$ and $\CoDd\DR$ is called the \emph{standard Hodge homotopy $\HtpStd$;} equivalently, it is the unique coexact solution of
\[
\Dd \Htp + \Htp \Dd = \Id - \pi_\Harm,
\]
where $\pi_\Harm: \DR \rightarrow \Harm$ is the orthogonal projection.
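Indeed, both sides of this equation vanish on $\Harm$, while on an exact form $\Dd\alpha$ with $\alpha\in\CoDd\DR$, respectively on a coexact form $\beta$, both sides evaluate to $\Dd\alpha$, respectively to $\beta$.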
The Schwartz kernel of $\HtpStd$ is the \emph{standard Hodge propagator} $\PrpgStd$.
A formula for the \emph{effective action} $W\in\Fun(\BCyc\HDR[1])$ was given in \cite{Cieliebak2015}; in their terminology, $W$ is equivalent to the \emph{(formal) pushforward Maurer-Cartan element}.
We have
\[ W = \hbar^{-1}\sum_{l\ge 1, g\ge 0} \PMC_{lg} \hbar^{g}, \]
where $\PMC_{lg} \in \hat{\Ext}_l \DBCyc\HDR$ is computed by summing over $(l+g-1)$-loop Feynman diagrams with interaction vertices $\wedge$ and propagator $\PrpgStd$.
We remark that the Feynman diagrams in~$W$ have at least one external vertex.
One might try to construct a refinement $(W^0_{lg})_{l\ge 1, g\ge 0}$ of the Chern-Simons invariant by summing over diagrams with no external vertex, but it seems to be unrelated to the $\IBLInfty$-theory so far.
%One can think of trying to push the field strings to their representatives consisting of harmonic forms.
The twisted string $\BV$-operator on $\Fun(\BCyc\HDR[1])$ reads
\[ \BVOp^W_s = \hat{\OPQ}^{\PMC}_{110} + \hbar\hat{\OPQ}_{210} + \sum_{l\ge 2, g\ge 0} \hat{\OPQ}^{\PMC}_{1lg} \hbar^{g}, \]
where $\OPQ_{110}^\PMC = \OPQ_{210}(\PMC_{10},\cdot) = \OPQ_{210} \circ_1 \PMC_{10}$ and $\OPQ_{1lg}^\PMC = \OPQ_{210}\circ_1 \PMC_{lg}$; here $\circ_1$ means that precisely one output of the first operation is connected to precisely one input of the following operation.
The resulting $\IBLInfty$-structure on $\CDBCyc\HDR$ has lots of vanishing operations.
It is in fact a \emph{quantum $\CoLInfty$-algebra $(\OPQ_{1lg}^\PMC)$ with Drinfeld-compatible Lie bracket $\OPQ_{210}$.}
The boundary operator $\OPQ_{110}^\PMC$ is precisely the Hochschild differential of the homotopy transferred $\AInfty$-structure $(m_k)$ on $\Harm$.
The quasi-isomorphism of $(\CDBCyc\HDR,\OPQ_{110}^\PMC)$ and $(C(\StringSpace M),\Bdd)$ inducing an isomorphism of the $\IBL$-structure on homology is given by the composition $F\circ I^*: C(\StringSpace M) \rightarrow \CDBCyc \HDR$ for
\[ F = \HTP_{110} + \HTP_{210}\circ_1 \MC_{10} + \frac{1}{2!} \HTP_{310}\circ_{1,1}(\MC_{10},\MC_{10}) + \dotsb, \]
where $\HTP_{110} = \iota^*$ for $\iota: \HDR\simeq \Harm \xhookrightarrow{} \DR$ and $\HTP_{k10}\circ_{1,\dotsc,1}(\MC_{10},\dotsc,\MC_{10})$ is obtained by summing over trivalent trees as in Figure~\ref{Fig:KSTree}.
\begin{figure}[t]
\centering
\[\underbrace{\vcenterline{\begin{tikzpicture}[scale=1,
every label/.append style={font=\scriptsize},
point/.style = {draw, circle, fill=black, minimum size=2pt,inner sep=0pt},
leaf/.style = {draw, circle, fill=white, minimum size=2pt,inner sep=0pt},
]
\def\vertdist{.8}
\def\hordist{.6}
\node[leaf] (R) at (0,0) {};
\node[point, label={[right,yshift=-1mm] $\color{olive}\wedge$}] (RU) at ($(R) + (0,\vertdist)$) {};
\node[point, label={[right] $\color{olive}\wedge$}] (RUL) at ($(RU) + (-2*\hordist,\vertdist)$) {};
\node[point, label={[right] $\color{olive}\wedge$}] (RUR) at ($(RU) + (2*\hordist,\vertdist)$) {};
\node[point, label={[right] $\color{olive}\wedge$}] (RULL) at ($(RUL) + (-1*\hordist,1*\vertdist)$) {};
\coordinate (RULR) at ($(RUL) + (\hordist,\vertdist)$);
\coordinate (RURR) at ($(RUR) + (\hordist,\vertdist)$);
\node[leaf, label={[right] $h_1$}] (RULLL) at ($(RULL) + (-1*\hordist,1*\vertdist)$){};
\node[leaf, label={[right] $h_2$}] (RULLR) at ($(RULL) + (1*\hordist,1*\vertdist)$) {};
\node[leaf, label={[right] $h_3$}] (RULRR) at ($(RULR) + (1*\hordist,1*\vertdist)$) {};
\node[leaf, label={[right] $h_k$}] (RURRR) at ($(RURR) + (1*\hordist,1*\vertdist)$) {};
\node[] (RURRL) at ($(RURR) + (-1*\hordist,1*\vertdist)$) {\ \,\dots};
\node (RURL) at ($(RUR) + (-\hordist,\vertdist)$) {\dots};
\draw (R) edge (RU);
\draw[thick,blue] (RU) -- (RUL) node[below,midway,shift={(-2mm,1mm)}] {$\Htp$};
\draw[thick,blue] (RUL) -- (RULL) node[below,midway,shift={(-2mm,1.5mm)}] {$\Htp$};
\draw (RULL) edge (RULLL);
\draw (RULL) edge (RULLR);
\draw (RUL) edge (RULRR);
\draw[thick,blue] (RU) -- (RUR) node[below,midway,shift={(2mm,1mm)}] {$\Htp$};
\draw[thick,blue] (RUR) edge (RURL);
\draw (RUR) edge (RURRR);
\end{tikzpicture}}}_{\begin{multlined} \wedge \circ (\Htp \otimes \Htp)\circ (\wedge \otimes \wedge)\circ (\Htp \otimes \Id \otimes \dotsb \otimes \Id)\\
\circ (\wedge \otimes \Id \otimes \dotsb \otimes \Id)(h_1, h_2, h_3, \dots, h_k).
\end{multlined}}\]
\caption{Kontsevich-Soibelman evaluation of a decorated tree.}
\label{Fig:KSTree}
\end{figure}
Note that in order to evaluate trees, we need neither the Schwartz kernel $\Prpg$ nor any pairing $\langle\cdot,\cdot\rangle$.
The homotopy $\Htp$ is enough because the graph is directed and we can distinguish inputs and outputs.
On the other hand, an evaluation of the $1$-loop Feynman graph in Figure~\ref{Fig:OneLoopDiag} contributing to $\OPQ_{120}^\PMC$ requires $\Prpg$, and hence also $\langle\cdot,\cdot\rangle$.
It is well-known from Sullivan's minimal model theory of a simply-connected manifold~$M$ that the homotopy type of the homotopy transferred $\AInfty$-structure $(m_k)$ on $\Harm$ is a topological invariant which encodes the rational homotopy theory of $M$.
The $\IBLInfty$-construction is associated to the Poincar\'e $\DGA$ $(\DR,\Dd,\wedge, \langle\cdot,\cdot\rangle)$, i.e., a $\DGA$ whose homology is a Poincar\'e duality algebra.
It is not clear yet to which extent it depends on the pairing and what kind of invariant of $M$ it is.
%the weak homotopy type of the Poincar\'e $\DGA$ and what kind of invariant is the homotopy type of the twisted $\IBLInfty$-algebra on $\CDBCyc \HDR$.
However, if $M$ is formal in the sense of $\DGA$'s, then $M$ is formal in the sense of Poincar\'e $\DGA$'s, and we conjecture that it is formal also in the sense of $\IBLInfty$-algebras; by this we mean that the twisted $\IBLInfty$-algebra on $\CDBCyc \HDR$ is homotopy equivalent to the canonical $\IBL$-algebra on $\CDBCyc \HDR$.
\begin{figure}[t]
\centering
\def\BddMin{.2}
\def\BddMaj{.4}
\def\HorLen{2}
\def\PMCVert{1}
\def\PantsVert{2}
\def\PantsPlunge{.5}
\newcommand{\BddSurf}[6][0]{
% #1 rotation (the optional argument)
% #2 is the center, e.g., C1 or 0:1
% #3 is the major semiaxis
% #4 is the minor semiaxis
% #5 is the style of the upper half
% #6 is the style of the lower half
\draw[#5,rotate=#1] ([shift=(0:{#3} and {#4})]#2) arc (0:180:{#3} and {#4});
\draw[#6,rotate=#1] ([shift=(180:{#3} and {#4})]#2) arc (180:360:{#3} and {#4});
%
}
\begin{tikzpicture}[scale=1.5]
\tikzset{point/.style = {draw, circle, fill=black, minimum size=2pt,inner sep=0pt}}
\coordinate (C1) at (0,0);
\coordinate (CC) at ($(C1) + (\HorLen,0)$);
\coordinate (CV) at ($(C1) + (.5*\HorLen,\PMCVert)$);
\coordinate (C2) at ($(CC) + (.5*\HorLen,-\PantsVert)$);
\coordinate (CP) at ($(CC) + (.5*\HorLen,-\PantsPlunge)$);
\coordinate (C3) at ($(CC) + (\HorLen,0)$);
\BddSurf{C1}{\BddMaj}{\BddMin}{dotted}{}
\BddSurf{C2}{\BddMaj}{\BddMin}{dotted}{}
\BddSurf{C3}{\BddMaj}{\BddMin}{}{}
\BddSurf{CC}{\BddMaj}{\BddMin}{dotted}{dotted}
\draw ([shift={(\BddMaj,0)}]C1) to[out=90,in=180] ([shift={(0,-\BddMaj)}]CV);
\draw ([shift={(0,-\BddMaj)}]CV) to[out=0,in=90] ([shift={(-\BddMaj,0)}]CC);
\draw ([shift={(-\BddMaj,0)}]CC) to[out=-90,in=90] ([shift={(-\BddMaj,0)}]C2);
\draw ([shift={(\BddMaj,0)}]C2) to[out=90,in=-90] ([shift={(\BddMaj,0)}]C3);
% Lower countour
\draw ([shift={(-\BddMaj,0)}]C1) to[out=90,in=180] ([shift={(0,\BddMaj)}]CV);
\draw ([shift={(0,\BddMaj)}]CV) to[out=0,in=90] ([shift={(\BddMaj,0)}]CC);
\draw ([shift={(\BddMaj,0)}]CC) to[out=-90,in=180] (CP);
\draw (CP) to[out=0,in=-90] ([shift={(-\BddMaj,0)}]C3);
% Upper contour
\BddSurf[90]{CV}{\BddMaj}{\BddMin}{dashed,thick,blue}{thick,blue}
% The joint
\draw[red,thick] ([shift=(45:{\BddMin} and {\BddMaj})]CV) to[out=0,in=95] ([shift=(-40:{\BddMaj} and {\BddMin})]CC) to[out=-85,in=180] ([shift={(0,-.25*\PantsPlunge)}]CP) to[out=0,in=-90] ([shift=(-140:{\BddMaj} and {\BddMin})]C3);
% The identity line
%START: Inputs from C1
\draw ([shift=(-60:{\BddMaj} and {\BddMin})]C1) to[out=90,in=180] ([shift=(-20:{\BddMin} and {\BddMaj})]CV);
\draw ([shift=(-110:{\BddMaj} and {\BddMin})]C1) to[out=90,in=180] ([shift=(20:{\BddMin} and {\BddMaj})]CV);
\draw[dashed] ([shift=(125:{\BddMaj} and {\BddMin})]C1) to[out=80,in=180] ([shift=(145:{\BddMin} and {\BddMaj})]CV);
%END: Inputs from C1
%START: Inputs from C2
\draw ([shift=(245:{\BddMaj} and {\BddMin})]C2) to[out=90,in=-80] ([shift=(240:{\BddMaj} and {\BddMin})]CC) to[out=100,in=0] ([shift=(-50:{\BddMin} and {\BddMaj})]CV);
\draw[dashed] ([shift=(110:{\BddMaj} and {\BddMin})]C2) to[out=90, in=-85] ([shift=(90:{\BddMaj} and {\BddMin})]CC) to[out=95, in=15] ([shift=(190:{\BddMin} and {\BddMaj})]CV);
\draw ([shift=(-60:{\BddMaj} and {\BddMin})]C2) to[out=90, in=-110] ([shift=(-40:{\BddMaj} and {\BddMin})]C3);
\draw[dashed] ([shift=(70:{\BddMaj} and {\BddMin})]C2) to[out=90, in=-90] ([shift=(80:{\BddMaj} and {\BddMin})]C3);
%END: Inputs from C2
\node[point,style={fill=white}] at ([shift=(-140:{\BddMaj} and {\BddMin})]C3) {};
\node[point] at ([shift=(190:{\BddMin} and {\BddMaj})]CV) {};
\node[point] at ([shift=(45:{\BddMin} and {\BddMaj})]CV) {};
\node[point] at ([shift=(-50:{\BddMin} and {\BddMaj})]CV) {};
\node[point] at([shift=(20:{\BddMin} and {\BddMaj})]CV) {};
\node[point] at ([shift=(-20:{\BddMin} and {\BddMaj})]CV) {};
\node[point] at ([shift=(145:{\BddMin} and {\BddMaj})]CV) {};
\node[label={[yshift=.2cm] $\psi$}] at (C3) {};
%\node[label={[yshift=-.9cm] $\omega_1$}] at (C1) {};
%\node[label={[yshift=-.9cm] $\omega_2$}] at (C2) {};
\node[label={[yshift=-.8cm] $\color{red}\mathrm{T}$}] at (CP) {};
\node at ([shift={(0,-\BddMin)}]CV) {};
% External labels at the first boundary component
\node[point,style={fill=white},label={[below,yshift=-.1cm,xshift=.1cm] $\scriptstyle h_{13}$}] at ([shift=(-60:{\BddMaj} and {\BddMin})]C1) {};
\node[point,style={fill=white},label={[below,yshift=-.1cm,xshift=-.1cm] $\scriptstyle h_{12}$}] at ([shift=(-110:{\BddMaj} and {\BddMin})]C1) {};
\node[point,style={fill=white},label={[below,yshift=+.1cm,xshift=-.6cm] $\scriptstyle h_{11}$}] at ([shift=(125:{\BddMaj} and {\BddMin})]C1) {};
% External labels at the second boundary component
\node[point,style={fill=white},label={[below,xshift=-.1cm,yshift=-.1cm] $\scriptstyle h_{22}$}] at ([shift=(245:{\BddMaj} and {\BddMin})]C2) {};
\node[point,style={fill=white},label={[left,xshift=-.4cm] $\scriptstyle h_{21}$}] at ([shift=(110:{\BddMaj} and {\BddMin})]C2) {};
\node[point,style={fill=white},label={[below,xshift=.1cm,yshift=-.1cm] $\scriptstyle h_{23}$}] at ([shift=(-60:{\BddMaj} and {\BddMin})]C2) {};
\node[point,style={fill=white},label={[right,xshift=.4cm] $\scriptstyle h_{24}$}] at ([shift=(70:{\BddMaj} and {\BddMin})]C2) {};
% Internal labels of vertices
\node[label={[above,yshift=.03cm] $\scriptstyle x_1$}] at ([shift=(145:{\BddMin} and {\BddMaj})]CV) {};
\node[label={[above,yshift=-.05cm,xshift=.1cm] $\scriptstyle x_2$}] at ([shift=(45:{\BddMin} and {\BddMaj})]CV) {};
\node[label={[right,xshift=-.04cm,yshift=-.18cm] $\scriptstyle x_3$}] at ([shift=(20:{\BddMin} and {\BddMaj})]CV) {};
\node[label={[right,yshift=-.25cm,xshift=-.05cm] $\scriptstyle x_4$}] at ([shift=(-20:{\BddMin} and {\BddMaj})]CV) {};
\node[label={[below,yshift=-.27cm] $\scriptstyle x_5$}] at ([shift=(-50:{\BddMin} and {\BddMaj})]CV) {};
\node[label={[left,yshift=-.24cm,xshift=.05cm] $\scriptstyle x_6$}] at ([shift=(190:{\BddMin} and {\BddMaj})]CV) {};
\node[font=\footnotesize] (ZZ) at ([shift={(0,-4.5ex)}]CV) {$\color{blue}\Prpg$};
\end{tikzpicture}
\[\begin{aligned}
=&\sum_{a,b}\sum_{c=1}^4 \pm \mathrm{T}^{ab} \psi(e_a h_{2,c+2} h_{2,c+3}) \Bigl(\int_{x_1 x_2 x_3 x_4 x_5 x_6} \Prpg(x_1,x_2)\Prpg(x_2,x_3)\Prpg(x_3,x_4)\\
&\Prpg(x_4,x_5)\Prpg(x_5,x_6)\Prpg(x_6,x_1)\bigl( h_{11}(x_1) h_{12}(x_3)
h_{13}(x_4)\bigr)\bigl(e_b(x_2) h_{2,c}(x_6) h_{2,c+1}(x_5)\bigr)\Bigr)
\end{aligned}
\]
\caption{A $1$-loop diagram and its contribution to the twisted cobracket.}
\label{Fig:OneLoopDiag}
\end{figure}
As a final remark, it is well-known from the theory of Koszul (pr)operads that $\IBL$ is Koszul dual to $\Frob$, i.e., $\IBL^! = \Frob$ and $\Frob^! = \IBL$.
Here, $\Frob$ is the properad of Frobenius bialgebras.
%(the coproduct is obtained from the product by dualization).
It follows that $\IBLInfty = \Omega(\Frob^*)$, where $\Omega$ denotes the cobar construction and ${}^*$ the linear dual coproperad.
This precisely reflects our situation of having a Frobenius bialgebra structure on $\HDR$, where the coproduct is obtained from $\wedge$ via dualization, and an $\IBLInfty$-structure on $\DBCyc \HDR$ if $\BCyc$ is understood as a cyclic version of the cobar construction.
\section{Other relevant work}
In \cite{Cohen2001}, a homotopy theoretical realization of the Chas-Sullivan loop product on $\H(\Loop M)$ was described, constructing the ``wrong way map'' using the Thom-Pontryagin construction.
Note that having the loop product, one constructs $\StringOp_2$ on $\H^{\Sph{1}}(\Loop M)$ via the Gysin sequence for the Borel construction.
In \cite{Chen2012}, the Chas-Sullivan $\BV$-algebra on $\H(\Loop M)$ and the gravity algebra on $\H^{\Sph{1}}(\Loop M)$ were constructed using an algebraic model based on Whitney polynomial forms with coefficients in $\Q$.
The advantage of Whitney forms $A$ over de Rham forms $\DR$ is that the dualization of the product gives a complete coproduct with values in the currents $C = A^*$, which together with the product on $A$ constitutes a dg Frobenius-like algebra.
It is shown that $A\hat{\otimes}\hat{\Omega}(C)$, where $\hat{\Omega}$ denotes the complete cobar construction, carries a natural dg-algebra structure which corresponds to the loop product on $\H(\Loop M)$ under the Jones et al.\ quasi-isomorphism from the singular chain complex of $\Loop M$.
The equivariant case is handled with methods of Connes' cyclic homology.
In \cite{Irie2014}, de Rham chains of marked Moore loops and their fiber products and concatenations are used to construct a non-symmetric dg operad $\mathcal{O}$ with a cyclic structure, multiplication and unit together with a morphism $\mathcal{O}\to \End_\DR$. The cyclic Deligne conjecture is applied to obtain an algebra $\widetilde{\mathcal{O}}$ over a chain model of the framed little disk operad (whose homology is the $\BV$-operad) such that the induced quasi-isomorphism from $\widetilde{\mathcal{O}}$ to Hochschild cochains induces an isomorphism of the $\BV$-structures on homology. The latter is known to be isomorphic to the $\BV$-structure on $\H(\Loop M)$ via iterated integrals.
In addition, they use the homotopy transfer from $\widetilde{\mathcal{O}}$ to obtain $\AInfty$- and $\LInfty$-structures on $\H(LM)$ whose operations with $2$ inputs are the loop product and the Gerstenhaber bracket, respectively.
Interestingly, their chain model works for non-simply connected $M$.
%In \cite{Cohen2006}, a TQFT on the non-equivariant loop space homology was constructed.
In~\cite{DrummondCole2015}, they use diffuse intersection and short geodesic segments to associate to metric chord diagrams operations on the singular chain complex of $\Loop M$.
They should recover the full positive boundary TQFT on $\H(\Loop M)$ described in \cite{Cohen2009}.
In \cite{Sullivan2005}, a rich structure of operations on equivariant chains of $\Loop M$ parametrized by chains in a certain compactification of the moduli space of Riemann surfaces is discussed.
A part of this structure is an $\IBLInfty$-chain model on the reduced chains.
%In fact, it is just the genus $0$ part of its quantum version there is more string operations coming from stacks of Sullivan diagrams than just the bracket and cobracket.
In \cite{ViterboThm}, it is proven that the $\BV$-algebra of symplectic homology of the cotangent bundle of an oriented manifold $M$ is isomorphic to the $\BV$-algebra~$\H(\Loop M)$.
In \cite{Cieliebak2007}, they sketch a proof that symplectic field theory of the unit cotangent bundle of $M$ and chain level equivariant string topology of $M$ are $\IBLInfty$-quasi-isomorphic via a map induced from evaluations at boundaries of holomorphic curves in symplectizations.
From this point of view, string topology operations arise naturally from the structure of codimension $1$ boundary strata of the moduli space of holomorphic curves.
In fact, a precise formulation of this correspondence was perhaps the main reason for Cieliebak \& Latschev to think about an $\IBLInfty$-chain model of string topology.\ToDo[caption={Precise formulation of SFT},noline]{How is it precisely, what kind of boundary conditions and punctures?}
In \cite{Fukaya2006}, it is argued that the compactified moduli space $\widehat{\Model}$ of holomorphic discs with boundaries on a Lagrangian submanifold $L$ of a symplectic manifold $M$ gives rise to a filtered $\AInfty$-structure on $\H^*(L)$.
Evaluation at the boundary makes it possible to interpret~$\widehat{\Model}$ as a chain in the free loop space.
The structure of codimension 1 boundary strata of~$\widehat{\Model}$ implies the relation $\Bdd \widehat{\Model} + \frac{1}{2}\{\widehat{\Model},\widehat{\Model}\} = 0$, where~$\{\cdot,\cdot\}$ is the chain level string bracket.
Under iterated integrals, this translates to the Maurer-Cartan equation on the cyclic bar construction of~$\DR(L)$ with the Gerstenhaber bracket.
The twisted coderivation gives the $\AInfty$-structure on $\DR(L)$ which is then homotopy transferred to $\H^*(L)$.
\ToDo[caption={DONE What paper?},noline]{In what paper is this? Application of Floer homology of Langrangian submanifolds to symplectic topology}
%(modulo problems with modding out constant loops and with degree shifts) In fact, there is a richer structure
%They defined the loop product $\LoopPr$ and the unary operator $\BVOp$ on~$\H(\Loop M)$. In fact, $\StringOp_2$ and $\StringCoOp_2$ descend from $\LoopPr$ and $\LoopCoPr$, respectively, using the maps $\Mark$, $\Erase$ maps. The following phenomenons occur for a general $n$ and families of loops:
%\begin{itemize}
% \item Shifted grading ---
% \item Vanishing of small loops ---
% \item Not being an ideal ---
%\end{itemize}
%A perfect reference to start with string topology from the point of view of algebraic topology is \cite{Basu2011}. More details and various advanced topics can be found in \cite{LoopSpaces}.
%The precise statement is then the following
%
%\begin{Proposition}[The Chas-Sullivan $\IBL$-structure in the equivariant string topology]
%
%\end{Proposition}
\section{Summary of results}
\begin{enumerate}[label=\arabic*)]
\item The starting point was setting up a formalism and deducing signs for a definition of the formal-pushforward Maurer-Cartan element, aka Chern-Simons Maurer-Cartan element, and the $\IBLInfty$-chain model in the de Rham setting.
A big part of the work consisted of trying to understand what is happening and of discovering and formulating the structure and the possible claims.
\item We compute the $\IBLInfty$-chain model for $\Sph{n}$ with $n\neq 2$ by finding an explicit Hodge propagator and computing Feynman integrals.
In fact, for $n\ge 3$, all integrals which are relevant for the $\IBLInfty$-theory vanish.
A trick from \cite{Mnev2009} is based on modifying an abstract Hodge propagator to obtain special properties implying vanishing of the integrals.
The author of this thesis was not aware of this trick and tried to compute the integrals with an explicit propagator in spherical coordinates for around three years until he rediscovered a part of this trick himself.
The interesting thing is that the discovery was made via explicit computations, and it was a coincidence that the constructed Hodge propagator satisfied the special properties.
\item Using the trick from \cite{Mnev2009}, we generalize the previous computation to geometrically formal manifolds and show that the Feynman integrals vanish provided that $\HDR^1(M) = 0$.
For a general manifold, all higher coproducts vanish unless $M$ is a surface or a $3$-manifold with $\HDR^1(M) \neq 0$.
In fact, the homotopy type of the $\IBLInfty$-chain model for a manifold with $\HDR^1(M) = 0$ is determined by the tree-level perturbative Chern-Simons theory for a special Hodge propagator.
\item We conjecture that the $\IBLInfty$-chain model for formal manifolds with $\HDR^1(M) = 0$ is homotopy equivalent to the canonical $\dIBL$-structure.
\item There are two approaches of associating an $\IBLInfty$-homotopy type to a Poincar\'e $\DGA$ like $\DR(M)$.
One uses the homotopy transfer and integrals as explained in the previous section (the geometric approach), and the other takes a Poincar\'e duality model $\Model$ and constructs the canonical $\dIBL$-structure on cyclic cochains of $\Model$ (the algebraic approach).
We study both and conjecture that they are equivalent.
\item We study $\DGA$'s of Hodge type and give an alternative proof of the existence of a Poincar\'e duality model in the category of $\PDGA$'s.
In the $\DGA$ category, this is originally due to Lambrechts \& Stanley.
The new method is based on adding exact partners to non-degenerates rather than adding killers of orphans.
\item We prove a proposition that the cyclic homology of a strictly unital $\AInfty$-algebra can be computed from its reduced version.
We do it by extending Loday's cyclic homology theory for $\DGA$'s to $\AInfty$-algebras.
\item We relate $\OPQ_{210}$ to Gerstenhaber bracket and its cyclization and $\OPQ_{120}$ to the Schwarz's $\BV$-operator and cyclic shuffle product.
\item We extend the $\MV$-formalism to filtered $\MV$-formalism and use it to construct a $\BV$-formulation of the weak $\IBLInfty$-theory.
This has the advantage that the exponentials are honest exponentials and honest maps.
This will be useful for studying $\BV$-chain complexes.
\item We formulate the composition at $k$-common channels $\circ_k$ using ``heart with veins'' which appears in the iterated bialgebra compatibility condition.
\item We propose a $\BV$-formulation of the $\IBLInfty$-theory with an action, effective action and quantum master equation.
\item We find the standard Hodge propagator for $\Sph{2}$ up to a constant and prove that it smoothly extends to spherical blow-up.
\end{enumerate}
%\section{Author's commentary}
%
%The idea of Prof.~Cieliebak, first one playing with the formalism and realizing what has been somewhere. The thesis is not about proving theorems but rather about getting acquitented with the field and discovering what might be interesting and how it relates to other fields what is interesting and what has been done somewhere else. Huge part lies on
\end{document}
|
{"hexsha": "7467c53c5078c7874401c66d8f8b58a1c41e8b6f", "size": 55313, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Subfiles/ThesisIntroduction.tex", "max_stars_repo_name": "p135246/phd-thesis", "max_stars_repo_head_hexsha": "0e124466a3d0ff988c012225400fadb0b170aa9e", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Subfiles/ThesisIntroduction.tex", "max_issues_repo_name": "p135246/phd-thesis", "max_issues_repo_head_hexsha": "0e124466a3d0ff988c012225400fadb0b170aa9e", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Subfiles/ThesisIntroduction.tex", "max_forks_repo_name": "p135246/phd-thesis", "max_forks_repo_head_hexsha": "0e124466a3d0ff988c012225400fadb0b170aa9e", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 78.2362093352, "max_line_length": 695, "alphanum_fraction": 0.7018241643, "num_tokens": 18628}
|
\newpage
\chapter{Plug flow reactor}
\section{Introduction}
The plug flow reactor model of Camflow simulates a one-dimensional plug flow reactor with gas-phase chemistry. The model can handle a number of temperature conditions such as isothermal, non-isothermal, or user-defined temperature profiles.
\section{Fundamentals}
The plug flow reactor model solves the governing equations for continuity
\begin{equation}
\frac{d(\rho u A_c)}{dz} = 0,
\end{equation}
species continuity
\begin{equation}
\rho u A_c\frac{dY_k}{dz} = W_kA_c\dot{\omega}_k \quad k=1\ldots K_g,
\end{equation}
energy equation
\begin{equation}
\rho u A_c c_p\frac{dT}{dz} + \sum_{k=1}^{K_g} \dot{\omega}_kh_kW_kA_c = UA_s(T_w-T),
\end{equation}
and the equation of state
\begin{equation}
p\bar{W}=\rho R T.
\end{equation}
In the above equations, $\rho$ is the density in kg/m$^3$, $u$ is the velocity in m/s, $A_c$ is the area of cross section in m$^2$, $Y_k$ is the mass fraction of the $k$-th chemical species, $W_k$ is the molecular mass in kg/mol of the $k$-th chemical species, $\dot{\omega}_k$ is the molar production rate in mol/m$^3$-s of the $k$-th chemical species, $c_p$ the specific heat at constant pressure in J/mol-K, $T$ the temperature in K, $T_w$ is the temperature of the wall in K, $A_s$ is the surface area per unit volume, and $U$ is the overall heat transfer coefficient in J/m$^2$sK.
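As a toy illustration of these equations (not Camflow's actual solver), the following Python sketch integrates the species-continuity equation for a hypothetical isothermal first-order reaction A $\rightarrow$ B at constant cross section; all property values and the rate constant are invented placeholders, and equal molar masses are assumed so that the two mass fractions remain complementary.
\begin{verbatim}
from scipy.integrate import solve_ivp

RHO, U, W, KF = 1.2, 0.1, 0.046, 5.0   # density, velocity, molar mass, rate

def rhs(z, Y):
    C_A = RHO * Y[0] / W               # molar concentration of A [mol/m^3]
    omega_A = -KF * C_A                # molar production rate of A
    dYA = W * omega_A / (RHO * U)      # species continuity, constant area
    return [dYA, -dYA]

sol = solve_ivp(rhs, (0.0, 0.05), [0.1, 0.9])  # integrate over 5 cm
print(sol.y[:, -1])                            # outlet mass fractions
\end{verbatim}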
\section{Input file}
An example of ``camflow.xml'' is shown below
{\scriptsize{\begin{verbatim}
<?xml version="1.0" encoding="ISO-8859-1"?>
<camflow>
<reactor model="plug">
<diameter unit="m">0.015</diameter>
<length unit="cm">5</length>
</reactor>
<op_condition>
<step_ignite>10</step_ignite>
<temperature>isothermal</temperature>
<twall unit="K">1073</twall>
<pressure unit="Pa">1e5</pressure>
</op_condition>
<inlet>
<fuel>
<velocity unit="m/s">0.1</velocity>
<temperature unit="C">800</temperature>
<!--flowrate unit="cgs">4.63e-3</flowrate-->
<molefrac>
<species name="NO2">0.1</species>
<species name="N2">*</species>
</molefrac>
</fuel>
</inlet>
<solver mode="coupled" solver="cvode">
<tols>
<species>
<aTol>1.e-06</aTol>
<rTol>1.e-06</rTol>
</species>
<temperature>
<aTol>1.e-03</aTol>
<rTol>1.e-03</rTol>
</temperature>
<flow>
<aTol>1.e-03</aTol>
<rTol>1.e-03</rTol>
</flow>
</tols>
</solver>
<initialize>
<Tprofile unit_L="cm" unit_T="K">
<position x="0.0">373.7</position>
<position x="0.125">484.5</position>
<position x="0.25">583.7</position>
<position x="0.375">672.2</position>
<position x="0.5">753.5</position>
<position x="0.75">901.4</position>
<position x="1.0">1027.0</position>
<position x="1.25">1120.0</position>
<position x="1.5">1184.0</position>
<position x="2.0">1260.0</position>
<position x="3.0">1348.0</position>
<position x="6.0">1475</position>
<position x="10.0">1524.0</position>
</Tprofile>
</initialize>
<report species="mole">
</report>
</camflow>
\end{verbatim}}
}
The input file follows the XML specification with a number of child elements. Each child element is described in detail below.
\begin{itemize}
\item \textbf{reactor} : The reactor element specifies which reactor model is to be simulated, and for a plug flow reactor Camflow expects plug as the model attribute value. The reactor element also holds child elements for specifying the reactor diameter and the reactor length, and each child element is given with the unit attribute. The unit of the value specified can be ``cm'', ``m'', or ``in'' (inches). The appropriate attribute must be specified.
\item \textbf{op\_condition} : The element op\_condition describes the operating conditions for the plug flow reactor. This includes the specification of the pressure and the condition applied to the solution of the energy equation. The reactor pressure may be specified in the units of ``Pa'', ``atm'', or ``bar''. The temperature unit can be either ``K'' or ``C''. The temperature element can take the values ``isothermal'', ``adiabatic'', ``userdefined'', or ``nonisothermal''. In the case of an isothermal calculation, the energy equation is not solved and the reactor is assumed to be at the same temperature as the incoming fuel. The user may also perform the integration for a pre-calculated or measured temperature profile. In this case the temperature child element must be assigned the value ``userdefined'', and the user-defined temperature profile can be specified (explained later). For adiabatic calculations, provide the temperature element with the value ``adiabatic''. Radiation heat losses from the reactor are completely neglected. For a non-isothermal calculation it is mandatory to specify the reactor wall temperature. The heat transfer coefficient is calculated internally as a function of reactor position using the following correlation (a small numerical sketch of the correlation is given after this list).
\begin{equation}
Nu= \frac{hD}{k},
\end{equation}
where $Nu$ is the Nusselt number, $h$ the heat transfer coefficient, $D$ the diameter, and $k$ the thermal conductivity. The Nusselt number is given by the correlation
\begin{equation}
Nu = 3.657+8.827\bigg(\frac{1000}{\mathrm{Gz}}\bigg)^{-0.545} \exp\bigg(\frac{-48.2}{\mathrm{Gz}}\bigg),
\end{equation}
and the Graetz number Gz is defined as
\begin{equation}
\mathrm{Gz} = \frac{D Re Pr}{z}
\end{equation}
with $D$ the diameter, $Re$ the Reynolds number, $Pr$ the Prandtl number, and $z$ the axial position in the reactor. In this version, the overall heat transfer coefficient is replaced with the heat transfer coefficient calculated from the Nusselt number. Strictly speaking, the correlations presented above are valid only for a non-reacting multi-component gas mixture. However, they are used for the reacting case as well due to the non-availability of better formulations.\\
\textbf{step\_ignite} is an optional element that evaluates the minimum temperature required to ignite the gas mixture. If the user is not interested in the ignition temperature, this element should not be present in the input file. If it is present, then \textbf{step\_ignite} must specify the step for the temperature increment, and the \textbf{temperature} specification must be ``adiabatic''. The ignition temperature calculated is not printed to the output file; rather, only a screen output is generated.\\
\item \textbf{inlet} : The inlet element holds the information on the reactants, the reactant temperature at the axial position z=0, and the flow rate or velocity at z=0; either the velocity or the flow rate needs to be specified. The velocity may be specified in m/s or in cm/s, while the flow rate may be specified in ``cgs'' units or in ``si'' units. The temperature of the reactants must be specified using the temperature element with the appropriate units. The mass or mole fractions of the reactant species need to be specified within the element molefrac or massfrac. The mass fractions or mole fractions of the reactant species must sum to 1. Instead of specifying the mass/mole fractions of all species, the last species can be assigned *. In this case the mole/mass fraction of the last species will be 1 minus the sum of the others.
\item \textbf{solver}: The solver element holds the solver control specifications. The attribute ``mode'' should always be specified as ``coupled'' for plug flow reactor simulation. The solver name is essentially provided to switch from one solver to another. However, the present version of Camflow uses only CVode as the numerical integrator, and therefore accepts only ``cvode'' as the solver name. The element ``tols'' holds the various tolerances that can be applied to the species, energy, and continuity equations. For species, a relative tolerance of at least 10$^{-6}$ should be used. The user may need to adjust the tolerance values for the species in case of solution difficulties.
\item \textbf{initialize} The initialize element can be used to specify various initial conditions. However, for the plug flow reactor model, the only initial property that can be specified is the user-defined temperature profile. The temperature profile can be specified by using the ``Tprofile'' element with two attributes, namely ``unit\_L'' for the length unit and ``unit\_T'' for the temperature unit. The length unit can be ``cm'' or ``m'', whereas the temperature unit can be either ``K'' or ``C''. The actual temperature as a function of reactor position is specified with the child elements position with the attribute ``x'', which stands for the position within the reactor. If the length unit is specified as ``cm'' then ``x'' is the position from the reactor inlet in ``cm'', and the value for the position element is the temperature at position ``x''.
\item \textbf{report}: The desired output for the species composition must be specified in this element using the species attribute. ``mole'' or ``mass'' may be used as the attribute values, and correspondingly the output will be produced either in mole fraction or mass fractions.
\end{itemize}
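To make the correlation above concrete, the following Python sketch evaluates the heat transfer coefficient at a given axial position. It is an illustration only, not Camflow code, and the gas properties in the example call are invented placeholders.
\begin{verbatim}
import math

def heat_transfer_coefficient(z, D, u, rho, mu, cp, k):
    Re = rho * u * D / mu      # Reynolds number
    Pr = cp * mu / k           # Prandtl number
    Gz = D * Re * Pr / z       # Graetz number
    Nu = 3.657 + 8.827 * (1000.0 / Gz) ** (-0.545) * math.exp(-48.2 / Gz)
    return Nu * k / D          # h = Nu k / D

# Example: air-like properties 1 cm downstream in a 15 mm tube.
print(heat_transfer_coefficient(z=0.01, D=0.015, u=0.1,
                                rho=0.35, mu=4e-5, cp=1100.0, k=0.07))
\end{verbatim}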
\section{Executing the binary}
The plug flow reactor model of Camflow expects three input files, namely ``camflow.xml'', ``therm.dat'', and ``chem.inp''. For performing a non-isothermal calculation, an additional file ``tran.dat'' specifying the transport data of all chemical species present in the system needs to be provided. All these files must be present in the working directory. Upon successful execution, the output file ``profile.dat'' is produced, containing the axial position (m), density (kg/m$^3$), velocity (m/s), mass flow rate (kg/m$^2$s), residence time (s), temperature (K), and the species compositions in mass or mole fractions.
\section{Results}
The following figure shows the species profiles and temperature for the hydrogen oxidation reaction.
\begin{figure*}[h]
\centering
\includegraphics[scale=0.6]{plug_profile.eps}
\caption{Species profiles for hydrogen oxidation with a user-defined temperature profile}
\end{figure*}
%===============================================================================================
%
%
%
%===============================================================================================
|
{"hexsha": "475ef8101dcfa1e2b311650b40468885105c3301", "size": 10335, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/supporting-information/camflow/plug.tex", "max_stars_repo_name": "sm453/MOpS", "max_stars_repo_head_hexsha": "f1a706c6552bbdf3ceab504121a02391a1b51ede", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-08T14:06:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-04T07:52:19.000Z", "max_issues_repo_path": "doc/supporting-information/camflow/plug.tex", "max_issues_repo_name": "sm453/MOpS", "max_issues_repo_head_hexsha": "f1a706c6552bbdf3ceab504121a02391a1b51ede", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/supporting-information/camflow/plug.tex", "max_forks_repo_name": "sm453/MOpS", "max_forks_repo_head_hexsha": "f1a706c6552bbdf3ceab504121a02391a1b51ede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-15T05:18:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T13:51:20.000Z", "avg_line_length": 73.2978723404, "max_line_length": 1266, "alphanum_fraction": 0.7205611998, "num_tokens": 2596}
|
import os
import bz2
import pickle
import numpy as np
from autodp.reader.base_reader import BaseReader
from autodp import cf
@BaseReader.register
class SQReader(BaseReader):
"""This class implements a data reader that will read a file of data sequentially without shuffling."""
def __init__(self, path, num_epoch=1):
super().__init__(path, num_epoch)
# Get full file name of input file
file = os.listdir(self._path)[0]
self._file = os.path.join(self._path, file)
@staticmethod
def _get_batch(data_file, batch_size):
"""Get a batch of data instances."""
images, labels = [], []
for _ in range(batch_size):
try:
line = pickle.load(data_file)
images.append(line["i"].astype(np.float32))
labels.append(line["l"])
except EOFError:
break
return images, labels
def get_batch(self, batch_size=cf.batch_size, sess=None):
"""This function implements the abstract method of the super class and is used to read data as batch."""
for epoch in range(self._num_epoch):
with bz2.BZ2File(self._file, "rb") as df:
while True:
images, labels = self._get_batch(df, batch_size)
                    if not images: break  # an empty batch signals end of file
yield (images, labels)
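

# Hypothetical smoke test (the path and batch size are assumptions); it
# expects a directory containing a single bz2-compressed stream of pickled
# records with "i" (image) and "l" (label) keys, as read by _get_batch.
if __name__ == "__main__":
    reader = SQReader("/tmp/sq_data", num_epoch=1)
    for images, labels in reader.get_batch(batch_size=4):
        print(len(images), len(labels))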
|
{"hexsha": "c362440b37e2b2a9ff010e6d6b31521d0a3f1127", "size": 1391, "ext": "py", "lang": "Python", "max_stars_repo_path": "autodp/reader/sq_reader.py", "max_stars_repo_name": "IBM/automation-of-image-data-preprocessing", "max_stars_repo_head_hexsha": "a5327b1b6da3f5fc92dae4dfeb235c5f24378589", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-11-26T16:31:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T23:11:19.000Z", "max_issues_repo_path": "autodp/reader/sq_reader.py", "max_issues_repo_name": "IBM/automation-of-image-data-preprocessing", "max_issues_repo_head_hexsha": "a5327b1b6da3f5fc92dae4dfeb235c5f24378589", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autodp/reader/sq_reader.py", "max_forks_repo_name": "IBM/automation-of-image-data-preprocessing", "max_forks_repo_head_hexsha": "a5327b1b6da3f5fc92dae4dfeb235c5f24378589", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3488372093, "max_line_length": 112, "alphanum_fraction": 0.6046010065, "include": true, "reason": "import numpy", "num_tokens": 307}
|
# My algorithm: draw ROI plots with boundary points, check whether each grid point lies inside or outside the boundary, and based on this place sliding windows with details.
#
import xml.etree.ElementTree as ET
import fnmatch
import matplotlib.pyplot as plt
import numpy as np
import math
import os
#rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/CEFSL_slices_only/slice22/ROI for +C_3D_AXIAL_IRSPGR_Fast_IM-0005-0022.xml'
# test if all XML data can plot ROI and check inside pts
rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset'
#outputDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm/boundarycheck/'
outputDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithmPlot_V2/Boundarycheck2/'
def drawbox(xcoord,ycoord,dis,color):
localx1 = xcoord - dis
localx2 = xcoord + dis
localy1 = ycoord - dis
localy2 = ycoord + dis
drawplot(localx1, localx2, localy1, localy2, color)
# increase sample points in boundary points of XML file
# def increasesamplepoly(poly):
# newpoly = list()
# bcchecklist = list()
# n = len(poly)
#
# p1x, p1y = poly[0]
#
# for i in range(1,n + 1):
# #for sampleinterval in np.arange(0,1,0.25):
#
# p2x, p2y = poly[i % n]
#
# checkx1c = int(np.rint(p1x))
# checky1c = int(np.rint(p1y))
# bcchecklist.append(list())
# bcchecklist[len(bcchecklist) - 1].append(checkx1c)
# bcchecklist[len(bcchecklist) - 1].append(checky1c)
#
# # set sample ratio to get more points
# for ratio in range(1):
# pix = (ratio/2) * (p2x - p1x)+p1x
# piy = (ratio/2) * (p2y - p1y)+p1y
#
# # new poly after using increasing sample points method
# newpoly.append([p1x, p1y])
# newpoly.append([pix, piy])
#
# # get close int of x/y and store in bcchecklist
# checkxic = int(np.rint(pix))
# checkyic = int(np.rint(piy))
# bcchecklist.append(list())
# bcchecklist[len(bcchecklist) - 1].append(checkxic)
# bcchecklist[len(bcchecklist) - 1].append(checkyic)
#
# p1x, p1y = p2x, p2y
#
# return bcchecklist
# check if point is inside ROI boundary or outside boundary
def point_inside_polygon(x,y,poly):
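    # Ray casting: walk the polygon edges and count crossings of a
    # horizontal ray from (x, y); an odd number of crossings means the
    # point lies inside the polygon.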
n = len(poly)
inside =False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# draw contour rectangle plots
def drawplot(contourx1,contourx2,contoury1,contoury2,color):
plt.plot([contourx1, contourx1], [contoury1, contoury2], color)
plt.plot([contourx1, contourx2], [contoury1, contoury1], color)
plt.plot([contourx1, contourx2], [contoury2, contoury2], color)
plt.plot([contourx2, contourx2], [contoury1, contoury2], color)
# draw whole rectangle plots (get ceil and floor)
def drawSameRangeScalePlot(contourx1,contourx2,contoury1,contoury2, interval,color):
# fix X and Y axises range in plot
plt.xlim(contourx1, contourx2)
plt.ylim(contoury1, contoury2)
for yinterval in range(contoury1, contoury2, interval):
plt.plot([contourx1, contourx2], [yinterval, yinterval], color)
for xinterval in range(contourx1, contourx2, interval):
plt.plot([xinterval, xinterval], [contoury1, contoury2], color)
# T1 boundary check
def T1chooseinoutcoord(contourx1, contourx2, contoury1, contoury2, xycoord, bcchecklist):
# 0: inside boundary, 1: on the boundary, 2: outside boundary
# xyboundarypos0 = list()
# xyboundarypos1 = list()
# xyboundarypos2 = list()
# for each point inside rectangle plot, check if each point inside boundary or outside boundary, inside: True, outside: False
for testx in range(contourx1, contourx2 + 1):
for testy in range(contoury1, contoury2 + 1):
# check if point is inside boundary or not
inorout = point_inside_polygon(testx, testy, xycoord)
if inorout == True:
if [testx, testy] in bcchecklist:
plt.plot(testx, testy, 'r+')
drawbox(testx, testy, 0.5, 'r')
# check if box covers part of boundary
def checkboxinout(testx,testy,xycoord):
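    # Probe the four corners and four edge midpoints of an 8x8 box centred
    # at (testx, testy); if any probe lands outside the ROI polygon, the
    # box overlaps the boundary.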
b1 = point_inside_polygon(testx - 4, testy - 4, xycoord)
b1h = point_inside_polygon(testx, testy - 4, xycoord)
b2 = point_inside_polygon(testx - 4, testy + 4, xycoord)
b2h = point_inside_polygon(testx - 4, testy, xycoord)
b3 = point_inside_polygon(testx + 4, testy - 4, xycoord)
b3h = point_inside_polygon(testx, testy + 4, xycoord)
b4 = point_inside_polygon(testx + 4, testy + 4, xycoord)
b4h = point_inside_polygon(testx + 4, testy, xycoord)
if b1 != True or b1h != True or b2 != True or b2h != True or b3 != True or b3h != True or b4 != True or b4h != True:
# in boundary
return False
else:
return True
# check if coords inside T2 boundary or outside T2 boundary
def T2chooseinoutcoord(contourx1,contourx2,contoury1,contoury2,xycoord):
# 0: inside boundary, 1: on the boundary, 2: outside boundary
# xyboundarypos0 = list()
# xyboundarypos1 = list()
# xyboundarypos2 = list()
# for each point inside rectangle plot, check if each point inside boundary or outside boundary, inside: True, outside: False
# Version 2 for T2 boundary definition: if box in T2 cover boundary, which means one of 4 box pts is outside boundary, then center is in boundary
# previous version: if center is near boundary (in boundary checklist) then it is in boundary
for testx in range(contourx1,contourx2+1):
for testy in range(contoury1,contoury2+1):
# check if point is inside boundary or not
inorout = point_inside_polygon(testx, testy, xycoord)
if inorout == True:
if checkboxinout(testx,testy,xycoord) == False:
# False means in boundary
plt.plot(testx, testy, 'r+')
drawbox(testx,testy,4,'r')
# draw ROI from coordinates in XML file
def ParseXMLDrawROI(rootDir,T):
tree = ET.parse(rootDir)
root = tree.getroot()
childnum = 0
xcoordlist = list()
ycoordlist = list()
xycoordlist = list()
bcchecklist = list()
for child in root.iter('string'):
if not fnmatch.fnmatch(child.text,'*{*}*'):
continue
childnum+=1
#print child.text
#xycoord = list()
xcoords = str(child.text).split(',')[0]
ycoords = str(child.text).split(',')[1]
xc = float(xcoords.split('{')[1])
yc = float(ycoords.split('}')[0].replace(' ',''))
checkxc = int(np.rint(xc))
checkyc = int(np.rint(yc))
xcoordlist.append(xc)
ycoordlist.append(yc)
xycoordlist.append(list())
xycoordlist[len(xycoordlist) - 1].append(xc)
xycoordlist[len(xycoordlist) - 1].append(yc)
bcchecklist.append(list())
bcchecklist[len(bcchecklist) - 1].append(checkxc)
bcchecklist[len(bcchecklist) - 1].append(checkyc)
xcoordlist.append(xcoordlist[0])
ycoordlist.append(ycoordlist[0])
#bcchecklist = increasesamplepoly(xycoordlist)
#xycoordlist.append(xycoordlist[0])
# get x/y min/max in coords
xmin = min(xcoordlist)
ymin = min(ycoordlist)
xmax = max(xcoordlist)
ymax = max(ycoordlist)
# draw contour rectangle plot
drawplot(xmin,xmax,ymin,ymax,'k')
# ceil: get higher int
# floor: get lower int
xmin = int(math.floor(xmin))
xmax = int(math.ceil(xmax))
ymin = int(math.floor(ymin))
ymax = int(math.ceil(ymax))
# draw whole rectangle plot
drawSameRangeScalePlot(xmin,xmax,ymin,ymax,8,'k')
# check if coords inside boundary or outside boundary
if T == 'T1':
T1chooseinoutcoord(xmin,xmax,ymin,ymax,xycoordlist,bcchecklist)
else:
T2chooseinoutcoord(xmin,xmax,ymin,ymax,xycoordlist)
# draw boundary plot of ROI
plt.plot(xcoordlist,ycoordlist,'b')
def checkallXML(rootDir):
for texturemapfile in os.listdir(rootDir):
if texturemapfile.startswith('.'):
continue
if texturemapfile.startswith('..'):
continue
        print(texturemapfile)
patientname = texturemapfile.split('_')[0]
if fnmatch.fnmatch(patientname,"*FSL*"):
newpatientname = patientname.replace("FSL","")
elif fnmatch.fnmatch(patientname,"*h*"):
newpatientname = patientname.replace("h","")
else:
newpatientname = patientname
        print(newpatientname)
slicepathfile = os.path.join(rootDir,texturemapfile)
for slicefile in os.listdir(slicepathfile):
if slicefile.startswith('.'):
continue
if slicefile.startswith('..'):
continue
            print(slicefile)
dcmxmlfilepath = os.path.join(slicepathfile,slicefile)
for xmlfile in os.listdir(dcmxmlfilepath):
if not fnmatch.fnmatch(xmlfile, '*.xml'):
continue
if fnmatch.fnmatch(xmlfile, '*NECROSIS*'):
continue
if fnmatch.fnmatch(xmlfile, '*C*SPGR*') or fnmatch.fnmatch(xmlfile, '*+C*T1*') or fnmatch.fnmatch(
xmlfile, '*T1*+C*'):
T1xmlfile = xmlfile
                    print(T1xmlfile)
if fnmatch.fnmatch(xmlfile, '*T2*'):
T2xmlfile = xmlfile
                    print(T2xmlfile)
T1xmlfilepath = os.path.join(dcmxmlfilepath,T1xmlfile)
T2xmlfilepath = os.path.join(dcmxmlfilepath,T2xmlfile)
# original image T1
plt.figure()
ParseXMLDrawROI(T1xmlfilepath,'T1')
plt.title(newpatientname + ' ' + ' ' + slicefile + ' T1')
plt.savefig(outputDir + newpatientname + ' ' + ' ' + slicefile + ' T1.png')
plt.cla()
plt.close()
# original image T2
plt.figure()
ParseXMLDrawROI(T2xmlfilepath,'T2')
plt.title(newpatientname + ' ' + ' ' + slicefile + ' T2')
plt.savefig(outputDir + newpatientname + ' ' + ' ' + slicefile + ' T2.png')
plt.cla()
plt.close()
checkallXML(rootDir)
|
{"hexsha": "16c5d9d1f42e97284bf1c176f298b87c0d9a8b9f", "size": 10671, "ext": "py", "lang": "Python", "max_stars_repo_path": "GBM/GBMSlidingwindows_V2/GBMSlidingWindows_QualityControl/Boundarycheck.py", "max_stars_repo_name": "joshlyman/TextureAnalysis", "max_stars_repo_head_hexsha": "bfbedbd53f62396fdef383408089b37e5ab511d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-06T01:47:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-06T01:47:49.000Z", "max_issues_repo_path": "GBM/GBMSlidingwindows_V2/GBMSlidingWindows_QualityControl/Boundarycheck.py", "max_issues_repo_name": "kumarneeraj2005/TextureAnalysis", "max_issues_repo_head_hexsha": "bfbedbd53f62396fdef383408089b37e5ab511d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GBM/GBMSlidingwindows_V2/GBMSlidingWindows_QualityControl/Boundarycheck.py", "max_forks_repo_name": "kumarneeraj2005/TextureAnalysis", "max_forks_repo_head_hexsha": "bfbedbd53f62396fdef383408089b37e5ab511d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-22T08:26:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-22T08:26:10.000Z", "avg_line_length": 33.1397515528, "max_line_length": 168, "alphanum_fraction": 0.6166244963, "include": true, "reason": "import numpy", "num_tokens": 3046}
|
"""
Benchmark an implementation of the Black–Scholes model.
"""
import math
import numpy as np
# Taken from numba.tests.test_blackscholes
# XXX this data should be shared with bench_cuda.py
# (see https://github.com/spacetelescope/asv/issues/129)
N = 16384
RISKFREE = 0.02
VOLATILITY = 0.30
A1 = 0.31938153
A2 = -0.356563782
A3 = 1.781477937
A4 = -1.821255978
A5 = 1.330274429
RSQRT2PI = 0.39894228040143267793994605993438
callResultGold = np.zeros(N)
putResultGold = np.zeros(N)
stockPrice = np.random.RandomState(0).uniform(5.0, 30.0, N)
optionStrike = np.random.RandomState(1).uniform(1.0, 100.0, N)
optionYears = np.random.RandomState(2).uniform(0.25, 10.0, N)
args = (callResultGold, putResultGold, stockPrice, optionStrike,
optionYears, RISKFREE, VOLATILITY)
def setup():
"""
Precompile jitted functions.
"""
global cnd, blackscholes
from numba import jit
@jit(nopython=True)
def cnd(d):
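        # Abramowitz & Stegun 26.2.17 polynomial approximation to the
        # standard normal CDF: compute the tail value for |d| and reflect
        # it when d > 0.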
K = 1.0 / (1.0 + 0.2316419 * math.fabs(d))
ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
if d > 0:
ret_val = 1.0 - ret_val
return ret_val
@jit(nopython=True)
def blackscholes(callResult, putResult, stockPrice, optionStrike,
optionYears, Riskfree, Volatility):
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
for i in range(len(S)):
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd(d1)
cndd2 = cnd(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
blackscholes(*args)
class BlackScholes:
def time_blackscholes(self):
for i in range(10):
blackscholes(*args)
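

# Hypothetical manual run; under asv, setup() and the timing method are
# invoked by the benchmark runner rather than by hand.
if __name__ == "__main__":
    setup()
    BlackScholes().time_blackscholes()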
|
{"hexsha": "bfc807bf8b4539cafd6f9d4a2648ae9504e4d886", "size": 2041, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/bench_blackscholes.py", "max_stars_repo_name": "abitrolly/numba-benchmark", "max_stars_repo_head_hexsha": "4bea9c23276fd0399df26452d19f13810a6496c7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-10-19T09:18:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T10:37:10.000Z", "max_issues_repo_path": "benchmarks/bench_blackscholes.py", "max_issues_repo_name": "abitrolly/numba-benchmark", "max_issues_repo_head_hexsha": "4bea9c23276fd0399df26452d19f13810a6496c7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2015-03-03T09:50:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-13T08:34:06.000Z", "max_forks_repo_path": "benchmarks/bench_blackscholes.py", "max_forks_repo_name": "abitrolly/numba-benchmark", "max_forks_repo_head_hexsha": "4bea9c23276fd0399df26452d19f13810a6496c7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-09-09T17:38:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-24T15:06:19.000Z", "avg_line_length": 24.2976190476, "max_line_length": 81, "alphanum_fraction": 0.5771680549, "include": true, "reason": "import numpy,from numba", "num_tokens": 696}
|
\PassOptionsToPackage{unicode=true}{hyperref} % options for packages loaded elsewhere
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[]{article}
\usepackage{stata}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provides euro and other symbols
\else % if luatex or xelatex
\usepackage{unicode-math}
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\usepackage{hyperref}
\hypersetup{
pdftitle={Stata Markdown Tutorial},
pdfauthor={Cyrus Samii},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
% set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\usepackage{multicol}
\usepackage{tabularx}
\usepackage{booktabs}
\usepackage{lscape}
\usepackage{fullpage}
\usepackage{pgffor}
\title{Stata Markdown Tutorial}
\author{Cyrus Samii}
\date{January 2019}
\begin{document}
\maketitle
\hypertarget{overview}{%
\section{Overview}\label{overview}}
Here are some notes and examples for using the Stata Markdown package.
The Stata Markdown package was written by German Rodriguez. These notes
offer some basic guidance on using the package. For instructions on
installation and dependencies, refer to the Stata Markdown website:
\url{https://data.princeton.edu/stata/markdown/}
I give examples of some things we might want to do in social science
related projects.
\hypertarget{markdown}{%
\section{Markdown}\label{markdown}}
Markdown is a simple markup language that, through Pandoc, can be
rendered in a variety of formats, including pdf (via tex), html, or
docx.\\
If you are used to writing latex or html, then markdown will be easy,
since it admits a lot of the syntax used in those languages.
There are lots of cheatsheets out there, such as:
\url{https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet}
Lots of things are done very simply in Markdown. E.g., here is a
numbered list:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Foo
\item
Foo 2
\item
Foo 3
\end{enumerate}
The header of this document is a YAML header for Markdown, which
contains meta instructions for the Markdown-\textgreater{}Pandoc
compilation. I am rendering this document in PDF through Tex, and so you
will see that in my YAML header I have included some Tex instructions.
\hypertarget{workflow}{%
\section{Workflow}\label{workflow}}
The way I work is to type into this document and then compile by running
the requisite commands that I have put into a separate .do file called
``stata-markdown-example-do.do''. That way, I can load the various
compilation options (that is, the options to the \texttt{markstat}
function) in a way that I can easily recall them later. Using the
\texttt{do} button in the Stata .do file editor gives me one button
compilation. I also have commands there to set the working directory and
to load dependencies (e.g., the \texttt{stata.sty} file needed to
compile to PDF).
I may also have another Stata .do file that I use as a scratch pad for
working out the kinks of the Stata code that I then insert as code
chunks into this document.
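
For concreteness, the driver file might contain little more than the
following; the path is a placeholder, and the \texttt{markstat} options
shown are assumptions:

\begin{verbatim}
// stata-markdown-example-do.do (sketch)
cd "~/projects/stata-markdown"
markstat using stata-markdown-example, pdf
\end{verbatim}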
\hypertarget{simple-script-example}{%
\section{``Simple Script'' Example}\label{simple-script-example}}
Here we replicate German Rodriguez's ``Simple Script'' example, tweaking
a few things to make some additional points.
Stata code appears below in ``chunks'' that are demarcated in the
following manner:
For code chunks that you want to appear in the rendered document:
\begin{verbatim}```{s}
[code here]
```
\end{verbatim}
For code chunks that you DO NOT want to appear in the rendered document:
\begin{verbatim}```{s/}
[code here]
```
\end{verbatim}
Now we can proceed with the simple example. First read in the fuel
efficiency data that is shipped with Stata:
\begin{stlog}
. sysuse auto, clear
(1978 Automobile Data)
\end{stlog}
To study how fuel efficiency depends on weight it is useful to transform
the dependent variable from ``miles per gallon'' to ``gallons per 100
miles'':
\begin{stlog}
. gen gphm = 100/mpg
\end{stlog}
We can then plot the relationship. We will run this code in a manner
that is not echoed in the resulting output file (PDF, docx, etc.).
\begin{stlog}
{\smallskip}
\end{stlog}
\begin{figure}
\centering
\includegraphics[width=0.75\linewidth]{auto.png}
\caption{Fuel Efficiency}
\end{figure}
\hypertarget{regression-table-with-esttab}{%
\section{\texorpdfstring{Regression table with
\texttt{esttab}}{Regression table with esttab}}\label{regression-table-with-esttab}}
Something that we frequently need to do is to report regression tables.
We can use the \texttt{esttab} function in Stata and insert its output
here:
\begin{stlog}
{\smallskip}
{\smallskip}
\end{stlog}
\begin{center}
\input{reg-example.tex}
\end{center}
(If you look at the Stata Markdown .stmd file, you will see that I used
tex commands to insert the regression table and center it.)
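
For reference, a hidden chunk along the following lines could produce
such a table; the exact model and \texttt{esttab} options used in the
original are assumptions:

\begin{verbatim}
. quietly regress gphm weight
. esttab using reg-example.tex, se label booktabs replace
\end{verbatim}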
\hypertarget{summary-stats-with-esttab}{%
\section{\texorpdfstring{Summary stats with
\texttt{esttab}}{Summary stats with esttab}}\label{summary-stats-with-esttab}}
Sometimes we want nice summary stats tables. Here is an example:
\begin{stlog}
{\smallskip}
\end{stlog}
\begin{center}
\input{sum-stats-example.tex}
\end{center}
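
The hidden chunk here is roughly of the following form; the variable
list and the \texttt{cells()} specification are assumptions:

\begin{verbatim}
. estpost summarize price weight mpg
. esttab using sum-stats-example.tex, cells("mean sd min max") booktabs replace
\end{verbatim}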
\hypertarget{loop-with-display}{%
\section{Loop with display}\label{loop-with-display}}
\begin{stlog}
{\smallskip}
2. sum `varUp', detail
3. hist `varUp'
4. {\rbr}
{\smallskip}
Price
\HLI{61}
Percentiles Smallest
1\% 3291 3291
5\% 3748 3299
10\% 3895 3667 Obs 74
25\% 4195 3748 Sum of Wgt. 74
{\smallskip}
50\% 5006.5 Mean 6165.257
Largest Std. Dev. 2949.496
75\% 6342 13466
90\% 11385 13594 Variance 8699526
95\% 13466 14500 Skewness 1.653434
99\% 15906 15906 Kurtosis 4.819188
(bin=8, start=3291, width=1576.875)
{\smallskip}
Weight (lbs.)
\HLI{61}
Percentiles Smallest
1\% 1760 1760
5\% 1830 1800
10\% 2020 1800 Obs 74
25\% 2240 1830 Sum of Wgt. 74
{\smallskip}
50\% 3190 Mean 3019.459
Largest Std. Dev. 777.1936
75\% 3600 4290
90\% 4060 4330 Variance 604029.8
95\% 4290 4720 Skewness .1481164
99\% 4840 4840 Kurtosis 2.118403
(bin=8, start=1760, width=385)
{\smallskip}
\end{stlog}
\end{document}
|
{"hexsha": "3b3bfa3c4429161b0643284ed92fa67b84cb36b3", "size": 8485, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "stata-dynamic/stata-markdown-example.tex", "max_stars_repo_name": "cdsamii/cds-demos", "max_stars_repo_head_hexsha": "422fe62caeb961dac6b24efb3443d9475cea9318", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-03-12T23:51:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-18T00:28:35.000Z", "max_issues_repo_path": "stata-dynamic/stata-markdown-example.tex", "max_issues_repo_name": "cdsamii/cds-demos", "max_issues_repo_head_hexsha": "422fe62caeb961dac6b24efb3443d9475cea9318", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stata-dynamic/stata-markdown-example.tex", "max_forks_repo_name": "cdsamii/cds-demos", "max_forks_repo_head_hexsha": "422fe62caeb961dac6b24efb3443d9475cea9318", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-10T13:37:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-10T13:37:51.000Z", "avg_line_length": 29.4618055556, "max_line_length": 85, "alphanum_fraction": 0.6999410725, "num_tokens": 2455}
|
[STATEMENT]
lemma tr_tfr:
assumes "A' \<in> set (tr A [])" and "tfr\<^sub>s\<^sub>s\<^sub>t A" and "fv\<^sub>s\<^sub>s\<^sub>t A \<inter> bvars\<^sub>s\<^sub>s\<^sub>t A = {}"
shows "tfr\<^sub>s\<^sub>t A'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
have *: "trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A
[PROOF STEP]
using tr_trms_subset[OF assms(1)]
[PROOF STATE]
proof (prove)
using this:
trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` set []
goal (1 subgoal):
1. trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
hence "SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)"
[PROOF STATE]
proof (prove)
using this:
trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A
goal (1 subgoal):
1. SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
[PROOF STEP]
using SMP_mono
[PROOF STATE]
proof (prove)
using this:
trms\<^sub>s\<^sub>t A' \<subseteq> trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A
?A \<subseteq> ?B \<Longrightarrow> SMP ?A \<subseteq> SMP ?B
goal (1 subgoal):
1. SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
have "tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
tfr\<^sub>s\<^sub>s\<^sub>t A
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
[PROOF STEP]
unfolding tfr\<^sub>s\<^sub>s\<^sub>t_def
[PROOF STATE]
proof (prove)
using this:
tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) \<and> list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
[PROOF STEP]
have 1: "tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>t A')"
[PROOF STATE]
proof (prove)
using this:
SMP (trms\<^sub>s\<^sub>t A') \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>t A')
[PROOF STEP]
by (metis tfr_subset(2)[OF _ *])
[PROOF STATE]
proof (state)
this:
tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>t A')
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
have **: "list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
tfr\<^sub>s\<^sub>s\<^sub>t A
goal (1 subgoal):
1. list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
[PROOF STEP]
unfolding tfr\<^sub>s\<^sub>s\<^sub>t_def
[PROOF STATE]
proof (prove)
using this:
tfr\<^sub>s\<^sub>e\<^sub>t (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) \<and> list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
goal (1 subgoal):
1. list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
have "pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - Var`\<V>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
[PROOF STEP]
using setops\<^sub>s\<^sub>s\<^sub>t_are_pairs
[PROOF STATE]
proof (prove)
using this:
?t \<in> pair ` setops\<^sub>s\<^sub>s\<^sub>t ?A \<Longrightarrow> \<exists>s s'. ?t = pair (s, s')
goal (1 subgoal):
1. pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
[PROOF STEP]
unfolding pair_def
[PROOF STATE]
proof (prove)
using this:
?t \<in> (\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t ?A \<Longrightarrow> \<exists>s s'. ?t = (case (s, s') of (t, t') \<Rightarrow> Fun Pair [t, t'])
goal (1 subgoal):
1. (\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> (\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
hence ***: "\<forall>t \<in> pair`setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t' \<in> pair`setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'"
[PROOF STATE]
proof (prove)
using this:
pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
goal (1 subgoal):
1. \<forall>t\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t'\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
tfr\<^sub>s\<^sub>s\<^sub>t A
goal (1 subgoal):
1. \<forall>t\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t'\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'
[PROOF STEP]
unfolding tfr\<^sub>s\<^sub>s\<^sub>t_def tfr\<^sub>s\<^sub>e\<^sub>t_def
[PROOF STATE]
proof (prove)
using this:
pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<subseteq> SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var
(\<forall>s\<in>SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var. \<forall>t\<in>SMP (trms\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` setops\<^sub>s\<^sub>s\<^sub>t A) - range Var. (\<exists>\<delta>. Unifier \<delta> s t) \<longrightarrow> \<Gamma> s = \<Gamma> t) \<and> list_all tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p A
goal (1 subgoal):
1. \<forall>t\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t'\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>t\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t'\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
have 2: "list_all tfr\<^sub>s\<^sub>t\<^sub>p A'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_all tfr\<^sub>s\<^sub>t\<^sub>p A'
[PROOF STEP]
using tr_tfr\<^sub>s\<^sub>s\<^sub>t\<^sub>p[OF assms(1) ** assms(3)] ***
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<forall>(t, s)\<in>set []. (fv t \<union> fv s) \<inter> bvars\<^sub>s\<^sub>s\<^sub>t A = {}; \<forall>t\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` set []. \<forall>t'\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A \<union> pair ` set []. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'\<rbrakk> \<Longrightarrow> list_all tfr\<^sub>s\<^sub>t\<^sub>p A'
\<forall>t\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t'\<in>pair ` setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'
goal (1 subgoal):
1. list_all tfr\<^sub>s\<^sub>t\<^sub>p A'
[PROOF STEP]
unfolding pair_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<forall>(t, s)\<in>set []. (fv t \<union> fv s) \<inter> bvars\<^sub>s\<^sub>s\<^sub>t A = {}; \<forall>t\<in>(\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t A \<union> (\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` set []. \<forall>t'\<in>(\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t A \<union> (\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` set []. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'\<rbrakk> \<Longrightarrow> list_all tfr\<^sub>s\<^sub>t\<^sub>p A'
\<forall>t\<in>(\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t A. \<forall>t'\<in>(\<lambda>d. case d of (t, t') \<Rightarrow> Fun Pair [t, t']) ` setops\<^sub>s\<^sub>s\<^sub>t A. (\<exists>\<delta>. Unifier \<delta> t t') \<longrightarrow> \<Gamma> t = \<Gamma> t'
goal (1 subgoal):
1. list_all tfr\<^sub>s\<^sub>t\<^sub>p A'
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
list_all tfr\<^sub>s\<^sub>t\<^sub>p A'
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tfr\<^sub>s\<^sub>t A'
[PROOF STEP]
by (metis 1 2 tfr\<^sub>s\<^sub>t_def)
[PROOF STATE]
proof (state)
this:
tfr\<^sub>s\<^sub>t A'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5228, "file": "Stateful_Protocol_Composition_and_Typing_Stateful_Typing", "length": 34}
|
[STATEMENT]
lemma GreatestIB:
fixes n :: \<open>nat\<close> and P
assumes a:\<open>\<exists>k\<le>n. P k\<close>
shows GreatestBI: \<open>P (GREATEST k. k\<le>n \<and> P k)\<close> and GreatestB: \<open>(GREATEST k. k\<le>n \<and> P k) \<le> n\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P (GREATEST k. k \<le> n \<and> P k) &&& (GREATEST k. k \<le> n \<and> P k) \<le> n
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. P (GREATEST k. k \<le> n \<and> P k)
2. (GREATEST k. k \<le> n \<and> P k) \<le> n
[PROOF STEP]
show \<open>P (GREATEST k. k\<le>n \<and> P k)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P (GREATEST k. k \<le> n \<and> P k)
[PROOF STEP]
using GreatestI_ex_nat[OF assms]
[PROOF STATE]
proof (prove)
using this:
(\<And>y. y \<le> n \<and> P y \<Longrightarrow> y \<le> ?b) \<Longrightarrow> (GREATEST k. k \<le> n \<and> P k) \<le> n \<and> P (GREATEST k. k \<le> n \<and> P k)
goal (1 subgoal):
1. P (GREATEST k. k \<le> n \<and> P k)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
P (GREATEST k. k \<le> n \<and> P k)
goal (1 subgoal):
1. (GREATEST k. k \<le> n \<and> P k) \<le> n
[PROOF STEP]
show \<open>(GREATEST k. k\<le>n \<and> P k) \<le> n\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (GREATEST k. k \<le> n \<and> P k) \<le> n
[PROOF STEP]
using GreatestI_ex_nat[OF assms]
[PROOF STATE]
proof (prove)
using this:
(\<And>y. y \<le> n \<and> P y \<Longrightarrow> y \<le> ?b) \<Longrightarrow> (GREATEST k. k \<le> n \<and> P k) \<le> n \<and> P (GREATEST k. k \<le> n \<and> P k)
goal (1 subgoal):
1. (GREATEST k. k \<le> n \<and> P k) \<le> n
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(GREATEST k. k \<le> n \<and> P k) \<le> n
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 839, "file": "IFC_Tracking_IFC", "length": 8}
|
% !TeX spellcheck = en_GB
\section{Simulations Experiments}
\subsection{Study on Response Time Limits}
Before choosing the extremes of the \textbf{mean inter-arrival time} factor, we studied how the \textbf{mean response time} behaves as this factor changes, comparing the limit values of the other factors (all the ranges are listed in the next section).
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/BufferExplosion.pdf}
\caption{Response Time Behaviour changing the Mean Inter-arrival Time}
\label {img: rtLimits}
\end{figure}
\noindent Basically, two different zones of interest for the mean inter-arrival time were found:
\begin{itemize}
\item the first one between \textbf{[25ms, 55ms]}, for which the mean response time diverges for the majority of the combinations of the other factors' limits. In this area \textbf{only information about the throughput} can be extracted, because the \textbf{mean response time}, since it diverges, is \textbf{dependent on the simulation duration}.
\item the second one between \textbf{[125ms, 500ms]}, for which information about both the \textbf{throughput} and the \textbf{response time} can be extracted.
\end{itemize}
For the second interval, the left bound was chosen where the mean response time begins to be comparable with the time-slot duration (which will be 5ms) and the 95\% CI whiskers are small in comparison with the scale of the response time (basically, where the standard deviation becomes very small). Here is a ``zoom'' on the rightmost part of the previous plot:
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/BufferExplosionZoom.png}
\caption{Buffer Explosion between 95ms and 145ms: the 95\% CI are too small to be seen on the right part of the plot}
\label {img: bufferExplosion}
\end{figure}
\subsection{Scenario Calibration}
In order to calibrate the simulator parameters, the following ranges of values were used:
\paragraph{Limited Response Time Scenario}
\begin{itemize}
\item \textbf{Number of Couples Tx-Rx (N)}: [5, 30]
\item \textbf{Number of Channels (C)} : [6, 100] (Resource Blocks in LTE for different Frequencies)
\item \textbf{Mean Inter-arrival Time ($\frac{1}{\lambda}$)}: [125ms, 500ms]
\item \textbf{Time-slot duration ($T_{slot}$)}: 5 ms
\item \textbf{Send Probability (p)}: [0.1, 0.5]
\end{itemize}
\paragraph{Explosion of Response Time Scenario}
\begin{itemize}
\item \textbf{Number of Couples Tx-Rx (N)}: [5, 30]
\item \textbf{Number of Channels (C)} : [6, 100]
\item \textbf{Mean Inter-arrival Time ($\frac{1}{\lambda}$)}: [25ms, 55ms]
\item \textbf{Time-slot duration ($T_{slot}$)}: 5 ms
\item \textbf{Send Probability (p)}: [0.1, 1]
\end{itemize}
\subsection{Calibration of Warm-Up Period and Simulation duration}
For calibrating the warm-up period, different simulations were run (with the factor ranges given in the previous paragraph). After various tests we found that the KPI that weighs most heavily on the choice of the warm-up time is the \textit{Response Time}.\\
The worst case in terms of convergence time was encountered with \textbf{N = 30, C = 6, $\dfrac{1}{\lambda}$ = 125ms, p = 0.1}
%The worst case in terms of convergence time was encountered with the \textbf{mean throughput} with N = 5, C = 6, $\dfrac{1}{\lambda}$ = 500ms, p = 0.5:
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/warmup.png}
\caption{Worst Case Warm-up Response Time}
\label {img: warmUp}
\end{figure}
%With the \textbf{mean response time} the worst case is the following with N = 5, C = 100, $\dfrac{1}{\lambda}$ = 25ms, p = 0.5:
\noindent\textbf{A warm-up period of 250s was chosen}.\\
For the simulation duration, a trade-off was made between the \textbf{memory and time consumption} required to store data and run the simulations, and the tendency of the KPIs to reach a \textbf{more stable standard deviation}. This is reasonable because there are no stochastic elements in the model (such as an error event with a very low probability) that would need a particularly long time to manifest. Obviously, the duration has to be greater than the warm-up duration. All things considered, \textbf{a simulation duration of 5000s was chosen}.
\subsection{Limited Response Time Scenario Study}
In this chapter we look more deeply into the limited response time scenario, in order to find insights about the \textbf{throughput} and the \textbf{response time} (we can study the latter because in this interval it converges).
\subsubsection{Factorial Analysis $2^kr$ on Throughput}
In order to analyse the contribution of the factors on the throughput performance, we perform a $2^kr$ analysis with $r=5$ and $k=4$ (so we perform $5\cdot2^4 = 80$ experiments). We take into account the following factors:
\begin{itemize}
\item Number of Couples Tx-Rx (\textbf{N}): [5, 30] \textbf{(A)}
\item Number of Channels (\textbf{C}) : [6, 100] \textbf{(B)}
\item Send Probability (\textbf{p}): [0.1, 0.5] \textbf{(C)}
\item Mean Inter-arrival Time ($\dfrac{1}{\lambda}$): [125ms, 500ms] \textbf{(D)}
\end{itemize}
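\noindent For reference, the effects $qi$ and the fractions of variability reported below follow the standard $2^kr$ computation (the textbook formulation, which we assume matches the scripts used for our analysis):
\begin{equation*}
qi = \frac{1}{2^k}\sum_{j=1}^{2^k} s_{ij}\,\bar{y}_j, \qquad
\mathrm{variation}(i) = \frac{2^k r \, qi^2}{SST}, \qquad
SST = \sum_{j=1}^{2^k}\sum_{m=1}^{r}\left(y_{jm}-\bar{\bar{y}}\right)^2,
\end{equation*}
where $s_{ij}\in\{-1,+1\}$ is the sign of the factor (or factor combination) $i$ in experiment $j$, $\bar{y}_j$ is the mean response of experiment $j$ over its $r$ repetitions, and $\bar{\bar{y}}$ is the grand mean.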
\noindent The first step is to \textbf{check the hypotheses}; in particular, we have to verify that the \textbf{residuals are normal} and that their \textbf{standard deviation is constant} (a.k.a. homoskedasticity). For the normality hypothesis, it is possible to see (Figure \ref{img: qqplot_throughput}) that the QQ plot of residuals vs normal quantiles \textbf{shows a linear tendency}, and so the \textbf{hypothesis is verified}.
\noindent For \textbf{homoskedasticity}, we have a plot of residuals vs predicted response, and we can see (Figure \ref{img: homoskedasticity_throughput}) that there is indeed a trend; however, the \textbf{errors} (y axis) \textbf{are two orders of magnitude below the predicted response} (x axis), and so \textbf{we can ignore the trend} and state that the \textbf{homoskedasticity hypothesis is respected}.
\begin{figure}[H]
\centering
\includegraphics[width=0.85\textwidth]{img/QQplot_2kr_throughput.png}
\caption{QQ Plot for testing the normal hypothesis}
\label {img: qqplot_throughput}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.85\textwidth]{img/homoskedasticity_2kr_throughput.png}
\caption{QQ Plot for testing homoskedasticity}
\label {img: homoskedasticity_throughput}
\end{figure}
\noindent Now we can analyse the obtained results. The most relevant ones are listed below; the other factors have a very low impact on the variability ($\ll 1\%$) and so they are not relevant:
\begin{itemize}
\item \textbf{Number of Couples (N)}
\noindent It has a \textbf{positive impact} on throughput, in particular $qi = [0,312532; 0,312537]$\footnote{This and the following are 95\% confidence intervals.} and it accounts for \textbf{48,44\%} of the variability. This means that \textbf{the higher the number of couples, the higher the throughput}. In fact, with more transmitters we have more packets, and so a higher throughput.
\item \textbf{Mean Inter-Arrival Time ($\dfrac{1}{\lambda}$)}
\noindent It has a \textbf{negative impact}: $qi = [-0,262444; -0,262439]$ and it accounts for \textbf{34,15\%} of the variability. Thus we can say that \textbf{the higher the mean inter-arrival time, the lower the throughput}. This happens because when we increase the mean inter-arrival time it is more likely that a transmitter has an empty buffer and so no packets to transmit; the throughput then decreases.
\item \textbf{Joint Effect of Number of Couples and Mean Inter-Arrival Time}
\noindent The \textbf{joint effect of the above factors} accounts for \textbf{17,39\%} of the variability and has a \textbf{negative impact} ($qi = [-0,187301; -0,187296]$). This is because the \textbf{effect of the mean inter-arrival time outweighs that of the number of couples}, so \textbf{if both increase, the throughput decreases}. Indeed, if we have a higher number of transmitters but most of them have an empty buffer (due to the phenomenon explained above, caused by the increase of the mean inter-arrival time), the throughput decreases because there are too few packets to transmit.
\end{itemize}
\subsubsection{Factorial Analysis $2^kr$ on Response Time}
Now let us analyse the contribution of the factors on the other KPI, the \textbf{Response Time}. Also in this case, as previously, we perform a $2^kr$ analysis with $r=5$ and $k=4$, and so 80 experiments in total. The factors are the same as in the previous analysis.
\noindent %\colorbox{yellow}{We can see the plots to check} the hypothesis in figure \ref{img: qqplot_responsetime} (for what concerns the normal hp) and in figure \ref{img: homoskedasticity_responsetime} (for what concerns the homoskedasticity).
%\begin{figure}[H]
% \centering
% \includegraphics[width=0.8\textwidth]{img/qqplot_2kr_responsetime.png}
% \caption{\colorbox{yellow}{QQ Plot for testing} the normal hypothesis}
% \label {img: qqplot_responsetime}
%\end{figure}
%\begin{figure}[H]
% \centering
% \includegraphics[width=0.8\textwidth]{img/homoskedasticity_2kr_responsetime.png}
% \caption{\colorbox{yellow}{QQ Plot for testing} homoskedasticity}
% \label {img: homoskedasticity_responsetime}
%\end{figure}
%\noindent \colorbox{yellow}{The plot about the normal hp} is obtained through a logarithmic transformation and it shows an approximating linear trend. Instead for the homoskedasticity we can see that there is a trend but the errors are at least one order of magnitude below the predicted response, and so also this hypothesis is verified.
\noindent After verifying the hypotheses as shown in the previous section, the most relevant contributions are the ones explained in the following list; the others have a negligible impact on the variability w.r.t. the following ones:
\begin{itemize}
\item \textbf{Send Probability p}
\noindent It has a \textbf{negative impact} on the response time, in particular $qi = [-0,033928; -0,033926]$, and it accounts for \textbf{65,67\%} of the variability. This means that \textbf{the higher the send probability, the lower the response time}. In fact, with a high send probability it is more likely that a packet will be sent (both at the first transmission and at a retransmission after a collision), and so it does not remain in the transmitter queue increasing its response time.
\item \textbf{Mean Inter-Arrival Time}
\noindent It has a \textbf{negative impact}: $qi = [-0,012955; -0,012954]$ and it accounts for \textbf{9,57\%} of the variability. Thus we can say that \textbf{the higher the mean inter-arrival time, the lower the response time}. This happens because when we increase the mean inter-arrival time the transmitters send fewer packets per time slot, so the probability of collisions, and hence the need to retransmit a packet, is lower. Moreover, even if a collision occurs, with a higher mean inter-arrival time there are few packets in the transmitter queue, and so the response time decreases.
\end{itemize}
\newpage
\subsubsection{$2^kr$ Overall Results}
As a reference, in table \ref{tab: 2kr_results} we can see the results obtained through the $2^kr$ analysis.
\begin{itemize}
\item Number of Couples Tx-Rx: [5, 30] \textbf{(A)}
\item Number of Channels C : [6, 100] \textbf{(B)}
\item Send Probability p: [0.1, 0.5] \textbf{(C)}
\item Mean Inter-arrival Time: [125ms, 500ms] \textbf{(D)}
\end{itemize}
\begin{table}[H]
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
\textbf{} & \multicolumn{2}{c|}{\textit{\textbf{Throughput}}} & \multicolumn{2}{c|}{\textit{\textbf{Response Time}}} \\ \hline
Factors & qi & Impact on Variability (\%) & qi & Impact on Variability (\%) \\ \hline
A & 0,312534 & 48,44\% & 0,006183 & 2,18\% \\ \hline
B & -4,342100 $\times 10^{-7}$ & 9,35 $\times 10^{-11}$\% & -0,006719 & 2,57\% \\ \hline
C & -6,710519 $\times 10^{-7}$ & 2,23 $\times 10^{-10}$\% & -0,033927 & 65,67\% \\ \hline
D & -0,262442 & 34,15\% & -0,012954 & 9,57\% \\ \hline
AB & -1,447366 $\times 10^{-7}$ & 1,03 $\times 10^{-11}$\% & -0,005873 & 1,96\% \\ \hline
AC & -9,078937 $\times 10^{-7}$ & 4,08 $\times 10^{-10}$\% & -0,004269 & 1,04\% \\ \hline
AD & -0,187298 & 17,39\% & -0,005481 & 1,71\% \\ \hline
BC & 5,657888 $\times 10^{-7}$ & 1,58 $\times 10^{-10}$\% & 0,004619 & 1,21\% \\ \hline
BD & 4,342100 $\times 10^{-7}$ & 9,35 $\times 10^{-11}$\% & 0,005906 & 1,99\% \\ \hline
CD & 1,973682 $\times 10^{-7}$ & 1,93 $\times 10^{-11}$\% & 0,010951 & 6,84\% \\ \hline
ABC & 4,868415 $\times 10^{-7}$ & 1,17 $\times 10^{-10}$\% & 0,004054 & 0,93\% \\ \hline
ABD & 1,447366 $\times 10^{-7}$ & 1,03 $\times 10^{-11}$\% & 0,005238 & 1,56\% \\ \hline
ACD & 8,552622 $\times 10^{-7}$ & 3,62 $\times 10^{-10}$\% & 0,003929 & 0,88\% \\ \hline
BCD & -6,710519 $\times 10^{-7}$ & 2,23 $\times 10^{-10}$\% & -0,004240 & 1,02\% \\ \hline
ABCD & -4,868415 $\times 10^{-7}$ & 1,17 $\times 10^{-10}$\% & -0,003761 & 0,80\% \\ \hline
\end{tabular}
\caption{Results of the $2^kr$ analysis for Throughput and for Response Time, at 95\% confidence.}
\label{tab: 2kr_results}
\end{table}
\subsection{Limited Response Time Scenario: Result Analysis}
Our objective is (as stated at the beginning of the report) the \textit{Assessment of the Effectiveness of the Slotted Random-Access Network Protocol}. To this end we choose the mean response time and the mean throughput as Key Performance Indicators. For each KPI we set up 2 scenarios: one for a \textbf{low traffic condition} (in terms of Transmitters and Channels) and one for a \textbf{high traffic condition}. For each factor variation of each scenario of each KPI we perform 35 repetitions, in order to obtain meaningful and statistically valid data. Data are computed with a confidence level of 95\% (the intervals are too small to be seen in the plots). By analysing these two scenarios and studying the evolution of the KPIs in them, we are able to reach our aim and give some insights on the network protocol.
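\noindent For reference, we assume the usual Student-$t$ construction for these confidence intervals over the $n=35$ repetitions of each configuration:
\begin{equation*}
\bar{y} \pm t_{0.975;\,n-1}\,\frac{s}{\sqrt{n}}, \qquad n=35,
\end{equation*}
where $\bar{y}$ and $s$ are the sample mean and standard deviation over the repetitions.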
\subsubsection{Throughput}
For what concerns the throughput we set these two scenarios:
\begin{enumerate}
\item \textit{High Traffic Scenario}
N = 30, C = 6, Send Probability = 0.1
\item \textit{Low Traffic Scenario}
N = 5, C = 100, Send Probability = 0.1
\end{enumerate}
\noindent In both scenarios we \textbf{vary the mean inter-arrival time} from 125 ms to 500 ms with a step of 75 ms. In fact, as we saw in the $2^kr$ analysis, it is the most relevant factor for the throughput.
\noindent We can see the results obtained in the outlined scenarios when the mean inter-arrival time grows from 125 ms to 500 ms in figure \ref{img: insight1_throughput}.
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{img/insight1_throughput.png}
\caption{Insight 1}
\label{img: insight1_throughput}
\end{figure}
\noindent From the plot we can state that the \textbf{throughput is higher in the high traffic scenario}, because the more the traffic, the more the packets. We can also see that the \textbf{best result is obtained with the lowest mean inter-arrival time}. This is because we have to consider the time-slot duration, which is 5 ms and thus much smaller than the mean inter-arrival time: as the mean inter-arrival time increases, there are slots in which no packets are transmitted (because transmitters have no packets in the queue), and this lowers the mean throughput; that is why the throughput decreases as the mean inter-arrival time increases. This is a further validation of what we obtained from the $2^kr$ analysis. In the end we can say that the \textbf{throughput depends mainly on the traffic and on the mean inter-arrival time}. This is good news, because it means that collisions do not affect the overall performance of the network protocol much in this particular scenario: in the high traffic condition there are more collisions w.r.t. the low traffic scenario, but the throughput is nevertheless not affected by this.
%\noindent \colorbox{yellow}{Now let analyse the fairness} of the network protocol for what regards the throughput. It's possible to see the Lorenz Curve for both scenarios in figure \ref{img: insight2_throughput}, we show one figure for both cases because they are indeed equals.
%\begin{figure}[H]
% \centering
% \includegraphics[width=0.7\textwidth]{img/lorenz_throughput.png}
% \caption{\colorbox{yellow}{Insight 2}}
% \label{img: insight2_throughput}
%\end{figure}
%\noindent \colorbox{yellow}{As we can see in the plot in all cases} the network protocol is fair for what regards the throughput.
\subsubsection{Response Time}
For what concerns the response time we set these two scenarios:
\begin{enumerate}
\item \textit{High Traffic Scenario}
N = 30, C = 6, Mean Inter-Arrival Time = 125 ms
\item \textit{Low Traffic Scenario}
N = 5, C = 100, Mean Inter-Arrival Time = 125 ms
\end{enumerate}
\noindent In both scenarios we \textbf{vary the send probability} from 0.1 to 1 with a step of 0.1; in fact, for the response time this is the most relevant factor (as we saw in the $2^kr$ analysis), and so it is the only one whose variation causes a big difference.
\noindent The results obtained in the previously described scenarios when the send probability (p) varies from 0.1 to 1 are shown in figure \ref{img: insight1_respTime}.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/MeanResponseTimeInsight.png}
\caption{Insight 1}
\label{img: insight1_respTime}
\end{figure}
\noindent From the figure we can infer that in the \textbf{high traffic scenario the response time is higher} than in the low traffic scenario, which is expected. We can also say that the \textbf{mean response time decreases as the send probability increases} (as we expected from the $2^kr$ analysis) and that the \textbf{differences between the two scenarios shrink when p increases}. The largest value is about 0.17 seconds, i.e. 170 ms, which is not a good response time but an acceptable one, and a response time that can be experienced with other network protocols as well.
%\noindent \colorbox{yellow}{Now let focus on the fairness} of the protocol as regards the response time. We can see Lorenz Curve for the high traffic scenario in figure \ref{img: insight2_respTime} and Lorenz Curve for the low traffic scenario in figure \ref{img: insight3_respTime}.
%\begin{figure}[H]
% \centering
% \includegraphics[width=0.9\textwidth]{img/LorenzHighTraffic.png}
% \caption{Insight 2}
% \label{img: insight2_respTime}
%\end{figure}
%\begin{figure}[H]
% \centering
% \includegraphics[width=0.9\textwidth]{img/LorenzLowTraffic.png}
% \caption{Insight 3}
% \label{img: insight3_respTime}
%\end{figure}
%\noindent \colorbox{yellow}{We can see that in both cases} the curve related to the simulation scenarios are very close to the line of maximum fairness (in the case of low traffic they are overlapping). So we can say that the network protocol is absolutely fair for what regards the response time, whatsoever be the traffic condition.
\subsection{Response Time Explosion Scenario Study}
In this chapter we study more deeply the case in which we have a \textbf{low mean inter-arrival time}, which leads to the ``explosion'' of the response time. Since a \textbf{huge number of collisions is expected}, in order to obtain insights on the throughput we created two further \textbf{sub-scenarios} with different behaviour upon the occurrence of a collision:
\begin{itemize}
\item \textbf{(1)} \textit{Change of Channel in case of collision}: when retrying the transmission of a collided packet (after the back-off period), the channel is picked randomly again (in general this is the default choice).
\item \textbf{(2)} \textit{No-Change of Channel in case of collision}
\end{itemize}
We repeated the $2^{k}r$ analysis on the throughput for both sub-scenarios and, \textbf{after verifying the hypotheses} (as shown before) and \textbf{checking that the $q_{i}$ of the most important factors do not include 0 in their 95\% CI}, we obtained the following results:
\begin{itemize}
\item In both sub-scenarios the number of \textbf{Couples Tx-Rx N} is the \textbf{main factor} in terms of impact on the \textbf{Throughput} (in both cases an impact greater than 50\%)
\item In both sub-scenarios the number of \textbf{Channels C} has a lower impact on the throughput (around 10\%)
\item In both sub-scenarios the success probability of the Bernoullian experiment \textbf{p} had a low impact (4--5\%) on the \textbf{Throughput}
\item With respect to the limited response time scenario, the \textbf{mean inter-arrival time} has a \textbf{low impact on the throughput} (around 4\%)
\end{itemize}
\subsubsection{$2^kr$ Overall Results}
As a reference, in table \ref{tab: 2kr_results_explosion} we can see the results obtained through the $2^kr$ analysis.
\begin{itemize}
\item Number of Couples Tx-Rx: [5, 30] \textbf{(A)}
\item Number of Channels C : [6, 100] \textbf{(B)}
\item Send Probability p: [0.1, 1] \textbf{(C)}
\item Mean Inter-arrival Time: [25ms, 55ms] \textbf{(D)}
\end{itemize}
\begin{table}[H]
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
\textbf{} & \multicolumn{2}{c|}{\textit{\textbf{Throughput Change}}} & \multicolumn{2}{c|}{\textit{\textbf{Throughput No-Change}}} \\ \hline
Factors & qi & Impact on Variability (\%) & qi & Impact on Variability (\%) \\ \hline
A & 1.064 & 55.93\% & 1.032 & 53.00\% \\ \hline
B & 0.432 & 9.218\% & 0.464 & 10.71\% \\ \hline
C & 0.308 & 4.684\% & 0.290 & 4.207\% \\ \hline
D & -0.285 & 4.033\% & -0.284 & 4.036\% \\ \hline
AB & 0.427 & 9.004\% & 0.458 & 10.47\% \\ \hline
AC & 0.177 & 1.551\% & 0.159 & 1.268\% \\ \hline
AD & -0.143 & 1.0225\% & -0.143 & 1.018\% \\ \hline
BC & 0.144 & 1.026\% & 0.162 & 1.309\% \\ \hline
BD & -0.216 & 2.314\% & -0.216 & 2.338\% \\ \hline
CD & -0.260 & 3.345\% & -0.260 & 3.372\% \\ \hline
ABC & 0.149 & 1.099\% & 0.167 & 1.397\% \\ \hline
ABD & -0.211 & 2.210\% & -0.211 & 2.230\% \\ \hline
ACD & -0.129 & 0.830\% & -0.129 & 0.834\% \\ \hline
BCD & -0.191 & 1.817\% & -0.192 & 1.847\% \\ \hline
ABCD & -0.196 & 1.912\% & -0.197 & 1.945\% \\ \hline
\end{tabular}
\caption{Results of the $2^kr$ analysis for Throughput.}
\label{tab: 2kr_results_explosion}
\end{table}
\subsection{Response Time Explosion Scenario: Result Analysis}
Since the $2^{k}r$ analysis underlined the importance of the Number of Couples Tx-Rx (\textbf{N}), this factor is taken into consideration for further analysis. The number of Channels and the mean inter-arrival time are, from now on, held constant due to their limited impact: C=6 and $\frac{1}{\lambda}$ = 35ms. \\
In the following plot we can see more clearly the effect of changing N: the \textbf{throughput increases as N increases}.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/MeanThroughputBufferExplosion.png}
\caption{Mean Throughput with variation of N and p}
\label{img: insight4}
\end{figure}
The plot was also drawn for different values of \textbf{p}, even though this factor does not have a very big impact. This was done because, thinking about the real world, the parameter \textbf{p} is the simplest thing to change; so, even if its impact is not huge, it can be a good idea to show which values of p tune the \textbf{throughput}. All things considered: \textbf{increasing p slightly increases the throughput}, although this increase becomes smaller for bigger values of p.\\
For what concerns the \textbf{comparison between the Change Channel sub-scenario and the No-Change one}, the following plot describes the differences well:
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/MeanThroughputBufferExplosionChangeVSNoChange.png}
\caption{Mean Throughput with variation of N and p with different sub-scenarios}
\label{img: insight3_respTime}
\end{figure}
As we can notice, the dotted lines and the continuous lines represent the same simulation in terms of the number N, but with the channel either changed or not changed in case of collision. So, we can clearly see that for higher values of N there is a consistent difference between the two scenarios: \textbf{the sub-scenario without change of channel is worse in terms of throughput}.
|
{"hexsha": "50b2fe50dfbd18cf46b62c13453247599ea0eb9b", "size": 24781, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/chapters/simulation_experiments.tex", "max_stars_repo_name": "gerti98/PerformanceEvaluationGroupProject", "max_stars_repo_head_hexsha": "055c30da1352aa22c128456bc2407c6a7619d4b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/chapters/simulation_experiments.tex", "max_issues_repo_name": "gerti98/PerformanceEvaluationGroupProject", "max_issues_repo_head_hexsha": "055c30da1352aa22c128456bc2407c6a7619d4b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/chapters/simulation_experiments.tex", "max_forks_repo_name": "gerti98/PerformanceEvaluationGroupProject", "max_forks_repo_head_hexsha": "055c30da1352aa22c128456bc2407c6a7619d4b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-06T09:39:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-06T09:39:48.000Z", "avg_line_length": 76.9596273292, "max_line_length": 1205, "alphanum_fraction": 0.7332633873, "num_tokens": 7396}
|
[STATEMENT]
lemma set_takeWhileD: "x \<in> set (takeWhile P xs) \<Longrightarrow> x \<in> set xs \<and> P x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> set (takeWhile P xs) \<Longrightarrow> x \<in> set xs \<and> P x
[PROOF STEP]
by (induct xs) (auto split: if_split_asm)
|
{"llama_tokens": 116, "file": null, "length": 1}
|
// Copyright Oleg Maximenko 2014.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://github.com/svgpp/svgpp for library home page.
#pragma once
#include <svgpp/definitions.hpp>
#include <boost/mpl/set.hpp>
namespace svgpp { namespace traits
{
/*
6.16 User agent style sheet
The user agent shall maintain a user agent style sheet ([CSS2], section 6.4) for elements in the SVG namespace
for visual media ([CSS2], section 7.3.1). The user agent style sheet below is expressed using CSS syntax; however,
user agents are required to support the behavior that corresponds to this default style sheet even if CSS style
sheets are not supported in the user agent:
svg, symbol, image, marker, pattern, foreignObject { overflow: hidden }
...
The first line of the above user agent style sheet will cause the initial clipping path to be established at the
bounds of the initial viewport. Furthermore, it will cause new clipping paths to be established at the bounds of
the listed elements, all of which are elements that establish a new viewport. (Refer to the description of SVG's
use of the 'overflow' property for more information.)
*/
typedef boost::mpl::set6<
tag::element::svg,
tag::element::symbol,
tag::element::image,
tag::element::marker,
tag::element::pattern,
tag::element::foreignObject
> default_overflow_hidden_elements;
}}
|
{"hexsha": "d26e4b635083dc31eece02ed8c04e98b12217bc0", "size": 1480, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/svgpp/traits/overflow_hidden_elements.hpp", "max_stars_repo_name": "RichardCory/svgpp", "max_stars_repo_head_hexsha": "801e0142c61c88cf2898da157fb96dc04af1b8b0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 428.0, "max_stars_repo_stars_event_min_datetime": "2015-01-05T17:13:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:25:47.000Z", "max_issues_repo_path": "include/svgpp/traits/overflow_hidden_elements.hpp", "max_issues_repo_name": "andrew2015/svgpp", "max_issues_repo_head_hexsha": "1d2f15ab5e1ae89e74604da08f65723f06c28b3b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 61.0, "max_issues_repo_issues_event_min_datetime": "2015-01-08T14:32:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T16:55:11.000Z", "max_forks_repo_path": "include/svgpp/traits/overflow_hidden_elements.hpp", "max_forks_repo_name": "andrew2015/svgpp", "max_forks_repo_head_hexsha": "1d2f15ab5e1ae89e74604da08f65723f06c28b3b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 90.0, "max_forks_repo_forks_event_min_datetime": "2015-05-19T04:56:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T16:42:50.000Z", "avg_line_length": 36.0975609756, "max_line_length": 115, "alphanum_fraction": 0.7554054054, "num_tokens": 352}
|
import numpy as np
import fns
from . import PLSRregressionMethods
from . import PLSRsave
import tkinter
import copy
import sklearn.model_selection
import types
from . import PLSRclassifiers
def get_buttons():
buttons=[
{'key': 'RNNtab2name', 'type': 'tabname', 'text': 'Wavelength Selection', 'tab': 2} ,
{'key': 'RegressionL3', 'type': 'label', 'text': 'Type of wavelength selection:', 'tab': 2, 'row': 2} ,
{'key': 'regression_wavelength_selection', 'type': 'radio:vertical:text', 'texts': ['No wavelength selection', 'Moving Window', 'Genetic Algorithm','Sequential Feature Selector'], 'tab': 2, 'row': 3} ,
{'key': 'moving_window_min', 'type': 'txt:float', 'text': 'Min window', 'default': '30', 'width': 4, 'tab': 2, 'row': 4} ,
{'key': 'moving_window_max', 'type': 'txt:float', 'text': 'Max window', 'default': '100', 'width': 4, 'tab': 2, 'row': 4} ,
{'key': 'RegressionL4', 'type': 'label', 'text': 'GA options ', 'tab': 2, 'row': 5} ,
{'key': 'GA_number_of_individuals', 'type': 'txt:int', 'text': 'GA num. Individuals', 'default': '100', 'width': 4, 'tab': 2, 'row': 5} ,
{'key': 'GA_crossover_rate', 'type': 'txt:float', 'text': 'GA crossover rate', 'default': '0.8', 'width': 4, 'tab': 2, 'row': 5} ,
{'key': 'GA_mutation_rate', 'type': 'txt:float', 'text': 'GA mutation rate', 'default': '0.001', 'width': 6, 'tab': 2, 'row': 5} ,
{'key': 'GA_max_number_of_generations', 'type': 'txt:int', 'text': 'GA generations', 'default': '20', 'width': 3, 'tab': 2, 'row': 5} ,
{'key': 'SFS type', 'type': 'radio:text', 'texts': ['Forward', 'Backward'], 'tab': 2, 'row': 6} ,
{'key': 'SFS_floating', 'type': 'check', 'text': 'Floating', 'tab': 2, 'row': 6} ,
{'key': 'SFS_num_after_min', 'type': 'txt:int', 'text': 'Iterations after min', 'default': '30', 'width': 4, 'tab': 2, 'row': 6 },
{'key': 'SFS_target', 'type': 'txt:int', 'text': 'Target number', 'default': '20', 'width': 4, 'tab': 2, 'row': 6 },
{'key': 'SFS_max_iterations', 'type': 'txt:int', 'text': 'Max iterations', 'default': '300', 'width': 4, 'tab': 2, 'row': 6 },
{'key': 'WS_loss_type', 'type': 'radio:text', 'texts': ['X-validation on training', 'RMSEC on training', 'RMSEP on validation'], 'tab': 2, 'row': 8} ,
{'key': 'WS_cross_val_N', 'type': 'txt:int', 'text': 'WS cross val fold', 'default': '1', 'width': 4, 'tab': 2, 'row': 9} ,
{'key': 'WS_cross_val_max_cases', 'type': 'txt:int', 'text': 'WS cross val num cases', 'default': '-1', 'width': 4, 'tab': 2, 'row': 9} ,
]
return buttons
def MW(case,ui,common_variables,keywords={}):
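	# Moving Window wavelength selection: every contiguous window with a width
	# between 'moving_window_min' and 'moving_window_max' (in cm^-1) is scored,
	# and a boolean mask of the wavenumbers in the best-scoring window is returned.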
T=case.T
V=case.V
wavenumbers=case.wavenumbers
folder=case.folder
	try:
		keywords=case.keywords
	except AttributeError:
		keywords={}
WS_getCrossvalSplits([0,1],T,V,ui,use_stored=False)
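	# prime the cached cross-validation splits so later calls can pass use_stored=True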
# get regression module
reg_module=PLSRregressionMethods.getRegModule(ui['reg_type'],keywords)
# Set what datapoints to include, the parameter 'wavenum' is in units cm^-1
if ui['save_check_var']:
common_variables.tempax.fig=common_variables.tempfig
#len_wavenumbers=len(wavenumbers)
dw=wavenumbers[0]-wavenumbers[1]
# Windowsize is input in cm^-1, transform to indexes
MWmax=int(round(ui['moving_window_max']/abs(dw),0))
MWmin=int(round(ui['moving_window_min']/abs(dw),0))
Wresults=np.zeros((len(wavenumbers),MWmax+1-MWmin))
Wsizes=np.arange(MWmin,MWmax+1)
# do moving window
for i,Wsize in enumerate(Wsizes):
		trail_active_wavenumbers=[]
		# default the clipped-range markers so the slice into Wresults below is
		# valid even when every window fits inside the spectrum
		k = -1
		l = len(wavenumbers)
for j, Wcenter in enumerate(wavenumbers):
Wstart=j-Wsize//2
Wend=Wstart+Wsize
#if Wsize < MWmax+1 and i < len(wavenumbers)+1:
if Wstart<0:
k=j
continue
elif Wend>len(wavenumbers):
l=j
break
else:
trail_active_wavenumbers.append(np.arange(Wstart,Wend))
#Wresults[j,i]=WS_getRMSEP(reg_module,trail_active_wavenumbers[-1],T,V,use_stored=False)
print('moving window row '+str(i)+' of '+str(len(Wsizes)))
Wresults[k+1:l,i], _ = WS_evaluate_chromosomes(reg_module,
T, V, trail_active_wavenumbers,
use_stored=True)
# done moving window
Wresults=Wresults+(Wresults==0)*np.max(Wresults) # set empty datapoints to max value
j,i=np.unravel_index(Wresults.argmin(), Wresults.shape)
bestVal=Wresults[j,i]
bestSize=Wsizes[i]
bestStart=j-bestSize//2
# plot MWresults
Wresults=np.array(Wresults)
# make plot
Wwindowsize,Wwavenumbers = np.meshgrid(Wsizes*abs(dw), wavenumbers)
unique_keywords=PLSRsave.get_unique_keywords_formatted(common_variables.keyword_lists,keywords)
PLSRsave.PcolorMW(Wwavenumbers,Wwindowsize,Wresults,fns.add_axis(common_variables.fig,ui['fig_per_row'],ui['max_plots']),unique_keywords[1:],ui)
if ui['save_check_var']:
tempCbar=PLSRsave.PcolorMW(Wwavenumbers,Wwindowsize,Wresults,common_variables.tempax,unique_keywords[1:],ui)
common_variables.tempfig.subplots_adjust(bottom=0.13,left=0.15, right=0.97, top=0.9)
plotFileName=folder+ui['reg_type']+unique_keywords.replace('.','p')+'_moving_window'
common_variables.tempfig.savefig(plotFileName+ui['file_extension'])
tempCbar.remove()
# set result as keywords, so that they are saved
bestEnd=bestStart+bestSize
Wwidth=wavenumbers[bestStart]-wavenumbers[bestEnd-1] #cm-1
Wcenter=0.5*(wavenumbers[bestStart]+wavenumbers[bestEnd-1]) #cm-1
keywords['MW width']=str(round(Wwidth,1))+r' cm$^{-1}$'
keywords['MW center']=str(round(Wcenter,1))+r' cm$^{-1}$'
# prepare return vector
active_wavenumers=np.zeros(len(wavenumbers), dtype=bool)
active_wavenumers[bestStart:bestEnd]=True
return active_wavenumers
def WS_evaluate_chromosomes(reg_module,T,V,trail_active_wavenumbers,ui=None,use_stored=False,backup_reg_module=None):
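	# Score a batch of candidate wavenumber subsets ("chromosomes"); if the main
	# regressor fails on a subset, fall back to backup_reg_module and flag it.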
used_mlr=False
losses=np.zeros(len(trail_active_wavenumbers))
for i,active_wavenumers in enumerate(trail_active_wavenumbers):
#print(,i,' of ',len(active_wavenumers))
#i+=1
		try:
			losses[i]=WS_getRMSEP(reg_module,active_wavenumers,T,V,ui=ui,use_stored=use_stored)
		except Exception:
			# the main regressor could not handle this wavenumber subset; score it
			# with the backup regressor instead and flag that it was used
			used_mlr=True
			losses[i]=WS_getRMSEP(backup_reg_module,active_wavenumers,T,V,ui=ui,use_stored=use_stored)
return losses, used_mlr
def WS_getRMSEP(reg_module,chromosome,T,V,ui=None,use_stored=False):
# ui is optional only if use_stored=True
	Ts,Vs=WS_getCrossvalSplits(chromosome,T,V,ui=ui,use_stored=use_stored)
RMSEP=[]
percent_cor_classified_list=[]
for curT,curV in zip(Ts,Vs):
reg_module.fit(curT.X, curT.Y)
curV.pred = reg_module.predict(curV.X)[:,0]
if reg_module.type=='regression':
			RMSEP.append(np.sqrt(np.sum((curV.pred-curV.Y)**2)/len(curV.Y)))
else: #reg_module.type=='classifier'
percent_cor_classified_list.append(PLSRclassifiers.get_correct_categorized(curV.pred,curV.Y))
if reg_module.type=='regression':
return np.sqrt(np.sum(np.array(RMSEP)**2)/len(RMSEP))
else:
return 1-np.average(percent_cor_classified_list)
def WS_getCrossvalSplits(chromosome,T,V,ui=None,use_stored=False):
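	# Build (or reuse) train/validation splits, then restrict X to the selected
	# wavenumbers. Splits are cached in module-level globals so that repeated
	# chromosome evaluations see identical partitions.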
global stored_XvalTs
global stored_XvalVs
if use_stored==True:
XvalTs = copy.deepcopy(stored_XvalTs)
XvalVs = copy.deepcopy(stored_XvalVs)
else:
XvalTs=[]
XvalVs=[]
if ui['WS_loss_type']=='X-validation on training':
if ui['WS_cross_val_N']==1 and ui['WS_cross_val_max_cases']==-1:
splitmodule=sklearn.model_selection.LeaveOneOut()
else:
splitmodule=sklearn.model_selection.ShuffleSplit(n_splits=ui['WS_cross_val_max_cases'], test_size=ui['WS_cross_val_N'])
for train,val in splitmodule.split(T.X):
XvalTs.append(types.SimpleNamespace())
XvalTs[-1].X=np.array(T.X[train])
XvalTs[-1].Y=np.array(T.Y[train])
XvalVs.append(types.SimpleNamespace())
XvalVs[-1].X=np.array(T.X[val])
XvalVs[-1].Y=np.array(T.Y[val])
elif ui['WS_loss_type']=='RMSEC on training':
XvalTs.append(copy.deepcopy(T))
XvalVs=XvalTs # pointer to object, no need to copy it
else:# ui['WS_loss_type']=='RMSEP on validation':
XvalTs.append(copy.deepcopy(T))
XvalVs.append(copy.deepcopy(V))
stored_XvalTs = copy.deepcopy(XvalTs)
stored_XvalVs = copy.deepcopy(XvalVs)
for T in XvalTs:
T.X=T.X[:,chromosome]
	if len(XvalVs[0].X[0])>len(XvalTs[0].X[0]): # V is a distinct, not-yet-sliced object (when V is T they share storage and were sliced above)
for V in XvalVs:
V.X=V.X[:,chromosome]
return XvalTs,XvalVs
|
{"hexsha": "7c3033396dee72ca48175006b61899349e0822a0", "size": 8090, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/libs/PLSRwavelengthSelection.py", "max_stars_repo_name": "jernelv/SpecAnalysis", "max_stars_repo_head_hexsha": "175875ea14f200ecd5de8eaa5b228c32c6621e46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-01-04T10:30:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:31:19.000Z", "max_issues_repo_path": "modules/libs/PLSRwavelengthSelection.py", "max_issues_repo_name": "jernelv/SpecAnalysis", "max_issues_repo_head_hexsha": "175875ea14f200ecd5de8eaa5b228c32c6621e46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/libs/PLSRwavelengthSelection.py", "max_forks_repo_name": "jernelv/SpecAnalysis", "max_forks_repo_head_hexsha": "175875ea14f200ecd5de8eaa5b228c32c6621e46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-11-17T13:07:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-28T15:03:02.000Z", "avg_line_length": 44.2076502732, "max_line_length": 202, "alphanum_fraction": 0.7019777503, "include": true, "reason": "import numpy", "num_tokens": 2647}
|
import tensorflow as tf
import numpy as np
from data import *
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# set up placeholders
ph_s1_x = tf.placeholder(tf.float32, [N_BATCH, L], name="ph_s1_x")
ph_s1_y = tf.placeholder(tf.float32, [N_BATCH, L], name="ph_s1_y")
ph_s1_o = tf.placeholder(tf.float32, [N_BATCH, 2], name="ph_s1_o")
ph_s2_x = tf.placeholder(tf.float32, [N_BATCH, L], name="ph_s2_x")
ph_s2_y = tf.placeholder(tf.float32, [N_BATCH, L], name="ph_s2_y")
ph_s2_o = tf.placeholder(tf.float32, [N_BATCH, 2], name="ph_s2_o")
ph_obs_x = [tf.placeholder(tf.float32, [N_BATCH, L],
name="ph_ob_x"+str(i)) for i in range(OBS_SIZE)]
ph_obs_y = [tf.placeholder(tf.float32, [N_BATCH, L],
name="ph_ob_y"+str(j)) for j in range(OBS_SIZE)]
ph_obs_tf = [tf.placeholder(tf.float32, [N_BATCH, 2],
name="ph_ob_tf"+str(k)) for k in range(OBS_SIZE)]
ph_new_ob_x = tf.placeholder(tf.float32, [N_BATCH, L], name="ph_new_ob_x")
ph_new_ob_y = tf.placeholder(tf.float32, [N_BATCH, L], name="ph_new_ob_y")
ph_new_ob_tf = tf.placeholder(tf.float32, [N_BATCH,2], name="ph_new_ob_tf")
def gen_feed_dict(s1_x, s1_y, s1_o, s2_x, s2_y, s2_o,
obs_x, obs_y, obs_tf,
new_ob_x, new_ob_y, new_ob_tf):
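  # build the feed_dict mapping every placeholder above to its numpy batch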
ret = {}
ret[ph_s1_x] = s1_x
ret[ph_s1_y] = s1_y
ret[ph_s1_o] = s1_o
ret[ph_s2_x] = s2_x
ret[ph_s2_y] = s2_y
ret[ph_s2_o] = s2_o
for a, b in zip(ph_obs_x, obs_x):
ret[a] = b
for a, b in zip(ph_obs_y, obs_y):
ret[a] = b
for a, b in zip(ph_obs_tf, obs_tf):
ret[a] = b
ret[ph_new_ob_x] = new_ob_x
ret[ph_new_ob_y] = new_ob_y
ret[ph_new_ob_tf] = new_ob_tf
return ret
# some constants
n_hidden = 400
n_pred_hidden = 400
# a list of variables for different tasks
VAR_inv = []
VAR_pred = []
# --------------------------------------------------------------------- initial hidden h(X)
# set up weights for input outputs!
state = tf.zeros([N_BATCH, n_hidden])
# ------------------------------------------------------------------ convolve in the observations
# initialize some weights
# stacked lstm
lstm = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.LSTMCell(100), tf.nn.rnn_cell.LSTMCell(100)])
hiddens = [state]
with tf.variable_scope("LSTM") as scope:
for i in range(OBS_SIZE):
if i > 0:
scope.reuse_variables()
cell_input = tf.concat(1, [ph_obs_x[i], ph_obs_y[i], ph_obs_tf[i]])
output, state = lstm(cell_input, state)
hiddens.append(state)
lstm_variables = [v for v in tf.all_variables()
if v.name.startswith("LSTM")]
print lstm_variables
VAR_inv += lstm_variables
VAR_pred += lstm_variables
# ----------------------------------------------------------------- answer the inversion
W_inv_s1_x = weight_variable([n_hidden, L])
b_inv_s1_x = bias_variable([L])
W_inv_s1_y = weight_variable([n_hidden, L])
b_inv_s1_y = bias_variable([L])
W_inv_s1_o = weight_variable([n_hidden, 2])
b_inv_s1_o = bias_variable([2])
W_inv_s2_x = weight_variable([n_hidden, L])
b_inv_s2_x = bias_variable([L])
W_inv_s2_y = weight_variable([n_hidden, L])
b_inv_s2_y = bias_variable([L])
W_inv_s2_o = weight_variable([n_hidden, 2])
b_inv_s2_o = bias_variable([2])
VAR_inv += [\
W_inv_s1_x,
b_inv_s1_x,
W_inv_s1_y,
b_inv_s1_y,
W_inv_s1_o,
b_inv_s1_o,
W_inv_s2_x,
b_inv_s2_x,
W_inv_s2_y,
b_inv_s2_y,
W_inv_s2_o,
b_inv_s2_o]
eL = tf.constant(1e-10, shape=[N_BATCH, L])
e2 = tf.constant(1e-10, shape=[N_BATCH, 2])
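# small epsilon constants keep tf.log() finite when a softmax output
# underflows to exactly zero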
inv_s1_xs = [tf.nn.softmax(tf.matmul(h, W_inv_s1_x) + b_inv_s1_x)+eL for h in hiddens]
inv_s1_ys = [tf.nn.softmax(tf.matmul(h, W_inv_s1_y) + b_inv_s1_y)+eL for h in hiddens]
inv_s1_os = [tf.nn.softmax(tf.matmul(h, W_inv_s1_o) + b_inv_s1_o)+e2 for h in hiddens]
inv_s2_xs = [tf.nn.softmax(tf.matmul(h, W_inv_s2_x) + b_inv_s2_x)+eL for h in hiddens]
inv_s2_ys = [tf.nn.softmax(tf.matmul(h, W_inv_s2_y) + b_inv_s2_y)+eL for h in hiddens]
inv_s2_os = [tf.nn.softmax(tf.matmul(h, W_inv_s2_o) + b_inv_s2_o)+e2 for h in hiddens]
print "inv_s1_x shape ", show_dim(inv_s1_xs)
inv_s1x_costs = [-tf.reduce_sum(ph_s1_x * tf.log(op)) for op in inv_s1_xs]
inv_s1y_costs = [-tf.reduce_sum(ph_s1_y * tf.log(op)) for op in inv_s1_ys]
inv_s1o_costs = [-tf.reduce_sum(ph_s1_o * tf.log(op)) for op in inv_s1_os]
inv_s2x_costs = [-tf.reduce_sum(ph_s2_x * tf.log(op)) for op in inv_s2_xs]
inv_s2y_costs = [-tf.reduce_sum(ph_s2_y * tf.log(op)) for op in inv_s2_ys]
inv_s2o_costs = [-tf.reduce_sum(ph_s2_o * tf.log(op)) for op in inv_s2_os]
# print "costs shapes ", show_dim(query_pred_costs)
cost_inv = sum(inv_s1x_costs) + sum(inv_s1y_costs) + sum(inv_s1o_costs) +\
sum(inv_s2x_costs) + sum(inv_s2y_costs) + sum(inv_s2o_costs)
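# summing the cross-entropy at every prefix length trains the network to
# infer the ships from each partial observation sequence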
# ----------------------------------------------------------------- answer the query
# feed the predicted ship distributions back in alongside the LSTM state
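# input width: n_hidden (LSTM state) + 2L (query x/y one-hots) + 4L + 4 (the
# six predicted distributions from the inversion head above)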
W_query1 = weight_variable([n_hidden + L + L + 4 * L + 2 + 2, n_pred_hidden])
b_query1 = bias_variable([n_pred_hidden])
W_query2 = weight_variable([n_pred_hidden, 2])
b_query2 = bias_variable([2])
VAR_pred += [W_query1, b_query1, W_query2, b_query2]
hiddens_zip = zip(hiddens, inv_s1_xs, inv_s1_ys, inv_s1_os, inv_s2_xs, inv_s2_ys, inv_s2_os)
hiddens_zip = [tf.concat(1, x) for x in hiddens_zip]
hidden_cat_query = [tf.nn.relu(\
tf.matmul(tf.concat(1, [ph_new_ob_x, ph_new_ob_y, hidden]),W_query1) + b_query1)\
for hidden in hiddens_zip]
print "hidden_cat_query shape ", show_dim(hidden_cat_query)
query_preds = [tf.nn.softmax(tf.matmul(hcq, W_query2) + b_query2)+e2 for hcq in hidden_cat_query]
print "query_preds shape ", show_dim(query_preds)
query_pred_costs = [-tf.reduce_sum(ph_new_ob_tf * tf.log(op)) for op in query_preds]
print "costs shapes ", show_dim(query_pred_costs)
cost_query_pred = sum(query_pred_costs)
# ------------------------------------------------------------------------ training steps
optimizer = tf.train.RMSPropOptimizer(0.0002)
train_query_pred = optimizer.minimize(cost_query_pred, var_list = VAR_pred)
train_inv = optimizer.minimize(cost_inv, var_list = VAR_inv)
# Before starting, initialize the variables. We will 'run' this first.
init = tf.initialize_all_variables()
# ------------------------------------------------------------------------- using the model!
# train the model and save a checkpoint to save_loc
def train_model(save_loc):
# Launch the graph.
sess = tf.Session()
sess.run(init)
saver = tf.train.Saver()
for i in range(5000001):
s1_x, s1_y, s1_o, s2_x, s2_y, s2_o, obs_x, obs_y, obs_tfs, new_ob_x, new_ob_y, new_ob_tf = gen_data()
feed_dic = gen_feed_dict(s1_x, s1_y, s1_o, s2_x, s2_y, s2_o, obs_x, obs_y, obs_tfs, new_ob_x, new_ob_y, new_ob_tf)
    # train the inversion head
cost_inv_pre = sess.run([cost_inv], feed_dict=feed_dic)[0]
sess.run([train_inv], feed_dict=feed_dic)
cost_inv_post = sess.run([cost_inv], feed_dict=feed_dic)[0]
print "train inv ", cost_inv_pre, " ", cost_inv_post, " ", True if cost_inv_post < cost_inv_pre else False
# train query prediction
cost_query_pred_pre = sess.run([cost_query_pred], feed_dict=feed_dic)[0]
sess.run([train_query_pred], feed_dict=feed_dic)
cost_query_pred_post = sess.run([cost_query_pred], feed_dict=feed_dic)[0]
print "train query pred ", cost_query_pred_pre, " ", cost_query_pred_post, " ", True if cost_query_pred_post < cost_query_pred_pre else False
if i % 100 == 0:
# print "for inversion "
# ran_predss = sess.run(query_preds, feed_dict=feed_dic)[9]
print "for query prediction"
print "query loc "
print new_ob_x[0]
print new_ob_y[0]
print "predicted <===> true"
ran_predss = sess.run(query_preds, feed_dict=feed_dic)[9]
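      # index 9 picks the predictions conditioned on the first 9 observations
      # (hiddens[0] is the pre-observation state)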
total_cor = 0.0
      for pred, truth in zip(ran_predss, new_ob_tf):
        print(pred, truth)
        if np.argmax(pred) == np.argmax(truth):
          total_cor += 1
cor_ratio = total_cor / len(ran_predss)
print "total correct ", cor_ratio
      # keep an exponentially smoothed estimate of the query error rate in
      # the module-level RAND_HIT
      global RAND_HIT
      NEW_RAND_HIT = 1.0 - cor_ratio
      RAND_HIT = RAND_HIT * 0.9 + NEW_RAND_HIT * 0.1
      print("rand hit now ", RAND_HIT)
save_path = saver.save(sess, save_loc)
print("Model saved in file: %s" % save_path)
# load the model and give back a session
def load_model(saved_loc):
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, saved_loc)
print("Model restored.")
return sess
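# A minimal usage sketch (the checkpoint path here is arbitrary):
#   train_model("/tmp/battleship.ckpt")
#   sess = load_model("/tmp/battleship.ckpt")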
|
{"hexsha": "08ec9e71222ba8318e3b1f376d1e4310a7446cca", "size": 8525, "ext": "py", "lang": "Python", "max_stars_repo_path": "battleship_lstm/model.py", "max_stars_repo_name": "evanthebouncy/nnhmm", "max_stars_repo_head_hexsha": "acd76edaa1b3aa0c03d39f6a30e60d167359c6ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "battleship_lstm/model.py", "max_issues_repo_name": "evanthebouncy/nnhmm", "max_issues_repo_head_hexsha": "acd76edaa1b3aa0c03d39f6a30e60d167359c6ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "battleship_lstm/model.py", "max_forks_repo_name": "evanthebouncy/nnhmm", "max_forks_repo_head_hexsha": "acd76edaa1b3aa0c03d39f6a30e60d167359c6ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.970464135, "max_line_length": 145, "alphanum_fraction": 0.6724926686, "include": true, "reason": "import numpy", "num_tokens": 2628}
|
import os
import numpy as np
from enn_zoo.griddly import create_env
from entity_gym.environment import CategoricalActionSpace, DenseCategoricalActionMask
init_path = os.path.dirname(os.path.realpath(__file__))
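# create_env builds an entity-gym environment class from a Griddly YAML
# description; the assertions below pin down the derived observation and
# action spaces.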
def test_griddly_wrapper() -> None:
env_class = create_env(os.path.join(init_path, "env_descriptions/test.yaml"))
# Check the observation space is being created correctly from the test environment
observation_space = env_class.obs_space()
assert len(observation_space.entities) == 3
assert observation_space.entities["__global__"].features == [
"_steps",
"test_global_variable",
]
# TODO: currently we pass all possible variables to each feature, this should be fixed once features API is in a more consistent state
assert observation_space.entities["entity_1"].features == [
"x",
"y",
"z",
"orientation",
"player_id",
"entity_1_variable",
]
assert observation_space.entities["entity_2"].features == [
"x",
"y",
"z",
"orientation",
"player_id",
"entity_2_variable",
]
    # Check the action space is being created correctly for the test environment
action_space = env_class.action_space()
assert isinstance(action_space["move_one"], CategoricalActionSpace)
assert action_space["move_one"].choices == [
"NOP",
"Left",
"Up",
"Right",
"Down",
]
assert isinstance(action_space["move_two"], CategoricalActionSpace)
assert action_space["move_two"].choices == [
"NOP",
"Do a little dance",
"Make a little love",
"Get down tonight",
]
# Check that observation is created correctly
env = env_class()
observation = env._make_observation()
# Check the entities in the observation
assert np.all(
observation.entities["entity_1"]
== np.array([[2, 2, 0, 0, 1, 5]], dtype=np.float32)
)
print(np.sort(observation.entities["entity_2"], axis=0))
print(np.array([[2, 3, 0, 0, 0, 10], [4, 4, 0, 0, 0, 10]], dtype=np.float32))
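    # np.sort along axis 0 makes the comparison independent of the order in
    # which the entities are reported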
assert np.all(
np.sort(observation.entities["entity_2"], axis=0)
== np.array([[2, 3, 0, 0, 0, 10], [4, 4, 0, 0, 0, 10]], dtype=np.float32)
)
# Check the masks in the observation
assert isinstance(observation.action_masks["move_one"], DenseCategoricalActionMask)
assert np.all(
observation.action_masks["move_one"].mask
== np.array([[1, 1, 1, 1, 0]]) # can do everything but move down
)
assert isinstance(observation.action_masks["move_two"], DenseCategoricalActionMask)
assert np.all(observation.action_masks["move_two"].mask == np.array([[1, 1, 1, 1]]))
|
{"hexsha": "b2811359567f3849ca0739f160fa780a2038a937", "size": 2751, "ext": "py", "lang": "Python", "max_stars_repo_path": "enn_zoo/enn_zoo/griddly/test_griddly_env.py", "max_stars_repo_name": "batu/incubator", "max_stars_repo_head_hexsha": "11f0f60de24102af4356c9738cbb9793ea6aa334", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "enn_zoo/enn_zoo/griddly/test_griddly_env.py", "max_issues_repo_name": "batu/incubator", "max_issues_repo_head_hexsha": "11f0f60de24102af4356c9738cbb9793ea6aa334", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "enn_zoo/enn_zoo/griddly/test_griddly_env.py", "max_forks_repo_name": "batu/incubator", "max_forks_repo_head_hexsha": "11f0f60de24102af4356c9738cbb9793ea6aa334", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-30T14:40:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T14:40:03.000Z", "avg_line_length": 33.5487804878, "max_line_length": 138, "alphanum_fraction": 0.6426753908, "include": true, "reason": "import numpy", "num_tokens": 695}
|