text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
#partition_compare.py
"""
Generate specific problem and generate
partitions using my own partition_suggestion.py
functions as well as using pymetis.
Try to see why one might be better than the other.
"""
import math
#import argparse
import numpy as np
NO_PYMETIS=0
try:
from pymetis import part_graph
except ImportError:
NO_PYMETIS=1
import sys
#from vtkHelper import saveScalarStructuredGridVTK_binary
#from vtkHelper import saveVelocityAndPressureVTK_binary
from vtkHelper import saveStructuredPointsVTK_ascii
from partition_suggestion import part_advisor
class Partition:
    """An axis-aligned box of lattice points.

    Bounds are inclusive on every axis: the box covers
    [xmin, xmax] x [ymin, ymax] x [zmin, zmax].
    """
    def __init__(self, xmin, xmax, ymin, ymax, zmin, zmax):
        # BUG FIX: removed a stray no-op expression statement (`0,0,0`)
        # that was sitting at the end of the constructor.
        self.xmin = xmin
        self.ymin = ymin
        self.zmin = zmin
        self.xmax = xmax
        self.ymax = ymax
        self.zmax = zmax

    def inPart(self, x, y, z):
        """Return True if lattice point (x, y, z) lies inside this box
        (all bounds inclusive)."""
        if (x < self.xmin) or (x > self.xmax):
            return False
        if (y < self.ymin) or (y > self.ymax):
            return False
        if (z < self.zmin) or (z > self.zmax):
            return False
        return True
def set_geometric_partition(Nx, Ny, Nz, px, py, pz):
    """
    Assign every point of an Nx x Ny x Nz lattice to one of px*py*pz
    box-shaped partitions.

    Nx, Ny, Nz -- number of lattice points in the x, y and z directions
    px, py, pz -- number of partitions in the x, y and z directions
    Returns part_vert: a list (x-fastest node ordering) of the partition
    index each lattice point falls in.
    """
    # Build the boxes as inclusive bound tuples (x0, x1, y0, y1, z0, z1).
    # Each direction gets blocks of the minimum size; the first N % p
    # blocks in a direction absorb one extra layer each.
    boxes = []
    base_x = int(Nx / px)
    base_y = int(Ny / py)
    base_z = int(Nz / pz)
    z0 = 0
    for k in range(pz):
        z1 = z0 + base_z - 1 + (1 if (Nz % pz) > k else 0)
        y0 = 0
        for j in range(py):
            y1 = y0 + base_y - 1 + (1 if (Ny % py) > j else 0)
            x0 = 0
            for i in range(px):
                x1 = x0 + base_x - 1 + (1 if (Nx % px) > i else 0)
                boxes.append((x0, x1, y0, y1, z0, z1))
                x0 = x1 + 1
            y0 = y1 + 1
        z0 = z1 + 1
    # Label every node with the index of the first box that contains it.
    part_vert = []
    for z in range(Nz):
        for y in range(Ny):
            for x in range(Nx):
                for idx, (bx0, bx1, by0, by1, bz0, bz1) in enumerate(boxes):
                    if bx0 <= x <= bx1 and by0 <= y <= by1 and bz0 <= z <= bz1:
                        part_vert.append(idx)
                        break
    return part_vert
def count_cuts(adj, vert_part):
    """
    Count the distinct undirected edges of a graph whose endpoints lie in
    different partitions.

    adj       -- dict mapping each vertex id to an iterable of neighbor ids
    vert_part -- sequence giving the partition index of each vertex
    Returns the number of cut edges (each undirected edge counted once).
    """
    crossing = set()
    for v in adj:
        home = vert_part[v]
        for u in adj[v]:
            if vert_part[u] != home:
                # canonical ordering so (u, v) and (v, u) collapse to one edge
                crossing.add((u, v) if u < v else (v, u))
    return len(crossing)
def set_adjacency(Nx, Ny, Nz, ex, ey, ez):
    """
    Build the periodic-lattice adjacency for a stencil of lattice speeds.

    Nx, Ny, Nz -- number of lattice points in the x, y and z directions
    ex, ey, ez -- lattice speeds (x, y and z components, parallel lists)
    Returns adjDict: dict keyed by global lattice point number; each value
    is the list of neighbor ids, one per lattice speed, in speed order.
    Neighbors wrap around the domain (periodic in all three directions).
    """
    # materialize the stencil once as integer displacement triples
    stencil = [(int(dx), int(dy), int(dz)) for dx, dy, dz in zip(ex, ey, ez)]
    adjDict = {}
    for z in range(Nz):
        for y in range(Ny):
            for x in range(Nx):
                gid = x + Nx * (y + Ny * z)
                adjDict[gid] = [
                    ((x + dx) % Nx) + Nx * (((y + dy) % Ny) + Ny * ((z + dz) % Nz))
                    for dx, dy, dz in stencil
                ]
    return adjDict
if __name__ == '__main__':  # BUG FIX: was `'main'`, so this driver body never executed
    # Problem setup: flow past a brick; resolution is driven by Ny_divs
    # divisions per characteristic length.
    Ny_divs = 7
    Re = 100.
    # overall domain dimensions
    Lx_p = 4.   # "thickness"
    Ly_p = 3.   # "height"
    #Lz_p = 10. # "length"
    Lz_p = 14
    # describe brick dimensions and location
    h_brick = 1./2.
    #z_brick = 1./2.
    z_brick = 4.
    x_brick = 1./2.
    Lo = h_brick  # characteristic length is block height
    R = 0; x_c = 0; z_c = 0  # not used.
    # lattice sizes: (Ny_divs - 1) lattice divisions per characteristic length
    Ny = math.ceil((Ny_divs-1)*(Ly_p/Lo))+1
    Nx = math.ceil((Ny_divs-1)*(Lx_p/Lo))+1
    Nz = math.ceil((Ny_divs-1)*(Lz_p/Lo))+1
    nnodes = Nx*Ny*Nz
    # compute geometric data only once
    x = np.linspace(0., Lx_p, Nx).astype(np.float32)
    y = np.linspace(0., Ly_p, Ny).astype(np.float32)
    z = np.linspace(0., Lz_p, Nz).astype(np.float32)
    numEl = Nx*Ny*Nz
    Y, Z, X = np.meshgrid(y, z, x)
    XX = np.reshape(X, numEl)
    YY = np.reshape(Y, numEl)
    ZZ = np.reshape(Z, numEl)
    u = np.zeros_like(XX)
    v = np.zeros_like(XX)
    w = np.zeros_like(XX)
    # Now that the geometry is formed, create the adjacency matrix and
    # partition with pymetis.  Also get a partition with the geometric
    # partitioner and compare the two partitions visually.  The edge-cut
    # counts give a rough surface-to-volume comparison from which a
    # performance prediction can be made.
    # lattice speeds (D3Q15-style stencil)
    ex = [0.,1.,-1.,0.,0.,0.,0.,1.,-1.,1.,-1.,1.,-1.,1.,-1.]
    ey = [0.,0.,0.,1.,-1.,0.,0.,1.,1.,-1.,-1.,1.,1.,-1.,-1.]
    ez = [0.,0.,0.,0.,0.,1.,-1.,1.,1.,1.,1.,-1.,-1.,-1.,-1.]
    print('Total lattice points = %d.' % (Nx*Ny*Nz))
    print('Setting adjacency list')
    adjDict = set_adjacency(int(Nx), int(Ny), int(Nz), ex, ey, ez)
    N_parts = 24
    print('Nx = %d ' % Nx)
    print('Ny = %d ' % Ny)
    print('Nz = %d ' % Nz)
    print('getting METIS partition')
    if (NO_PYMETIS == 1):
        print("pymetis is not available")
        sys.exit()
    cuts, part_vert = part_graph(N_parts, adjDict)
    print('getting part_advisor partition')
    px, py, pz = part_advisor(Nx, Ny, Nz, N_parts)
    # make sure all of these things are integers...
    Nx = int(Nx); Ny = int(Ny); Nz = int(Nz)
    px = int(px); py = int(py); pz = int(pz)
    part_vert_pa = set_geometric_partition(Nx, Ny, Nz, px, py, pz)
    part_vert1D = set_geometric_partition(Nx, Ny, Nz, 1, 1, N_parts)  # 1-D slab baseline
    cuts_metis = count_cuts(adjDict, part_vert)
    cuts_pa = count_cuts(adjDict, part_vert_pa)
    cuts_1D = count_cuts(adjDict, part_vert1D)
    print('cuts metis = %d ' % cuts_metis)
    print('cuts pa = %d ' % cuts_pa)
    print('cuts_1D = %d ' % cuts_1D)
    print('writing partition to VTK file')
    dims = [Nx, Ny, Nz]
    origin = [0, 0, 0]
    dx = x[1]-x[0]; dy = y[1]-y[0]; dz = z[1]-z[0]
    spacing = [dx, dy, dz]
    saveStructuredPointsVTK_ascii(part_vert, 'partitions', 'partition_metis.vtk', dims, origin, spacing)
    # BUG FIX: dataset name was misspelled 'parititions' for this file only
    saveStructuredPointsVTK_ascii(part_vert_pa, 'partitions', 'partition_pa.vtk', dims, origin, spacing)
    saveStructuredPointsVTK_ascii(part_vert1D, 'partitions', 'partition_1D.vtk', dims, origin, spacing)
    #saveVelocityAndPressureVTK_binary(part_vert,u,v,w,x,y,z,'partition.vtk',dims)
    #saveScalarStructuredGridVTK_binary(part_vert,'partition',XX,YY,ZZ,'partition.vtk',[Nx,Ny,Nz])
|
{"hexsha": "db1bb17d5480c9bb972382576ba2cba244f50a43", "size": 7587, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/partition_compare.py", "max_stars_repo_name": "stu314159/af_NFC", "max_stars_repo_head_hexsha": "c065a5abe3f4d7d56165112378e57300da4bb53c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/partition_compare.py", "max_issues_repo_name": "stu314159/af_NFC", "max_issues_repo_head_hexsha": "c065a5abe3f4d7d56165112378e57300da4bb53c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/partition_compare.py", "max_forks_repo_name": "stu314159/af_NFC", "max_forks_repo_head_hexsha": "c065a5abe3f4d7d56165112378e57300da4bb53c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7529411765, "max_line_length": 100, "alphanum_fraction": 0.5737445631, "include": true, "reason": "import numpy", "num_tokens": 2227}
|
# BinaryBuilder.jl recipe: cross-compile OpenBLAS 0.3.10 tarballs.
using BinaryBuilder
# Shared helpers for all OpenBLAS versions (sources/script/platforms/etc.).
include("../common.jl")
# Collection of sources required to build OpenBLAS
name = "OpenBLAS"
version = v"0.3.10"
sources = openblas_sources(version)
script = openblas_script()
platforms = openblas_platforms(;experimental=true)
products = openblas_products()
dependencies = openblas_dependencies()
# Build the tarballs
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies; preferred_gcc_version=v"6", lock_microarchitecture=false)
|
{"hexsha": "c2eb758594e2e0310afee06196f43152de13907d", "size": 488, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "O/OpenBLAS/OpenBLAS@0.3.10/build_tarballs.jl", "max_stars_repo_name": "c42f/Yggdrasil", "max_stars_repo_head_hexsha": "56c7b2d5863178463166c33f08944391cdac0765", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "O/OpenBLAS/OpenBLAS@0.3.10/build_tarballs.jl", "max_issues_repo_name": "c42f/Yggdrasil", "max_issues_repo_head_hexsha": "56c7b2d5863178463166c33f08944391cdac0765", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "O/OpenBLAS/OpenBLAS@0.3.10/build_tarballs.jl", "max_forks_repo_name": "c42f/Yggdrasil", "max_forks_repo_head_hexsha": "56c7b2d5863178463166c33f08944391cdac0765", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7058823529, "max_line_length": 145, "alphanum_fraction": 0.7950819672, "num_tokens": 113}
|
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import algebra.triv_sq_zero_ext
/-!
# Dual numbers
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
The dual numbers over `R` are of the form `a + bε`, where `a` and `b` are typically elements of a
commutative ring `R`, and `ε` is a symbol satisfying `ε^2 = 0`. They are a special case of
`triv_sq_zero_ext R M` with `M = R`.
## Notation
In the `dual_number` locale:
* `R[ε]` is a shorthand for `dual_number R`
* `ε` is a shorthand for `dual_number.eps`
## Main definitions
* `dual_number`
* `dual_number.eps`
* `dual_number.lift`
## Implementation notes
Rather than duplicating the API of `triv_sq_zero_ext`, this file reuses the functions there.
## References
* https://en.wikipedia.org/wiki/Dual_number
-/
variables {R : Type*}

/-- The type of dual numbers, numbers of the form $a + bε$ where $ε^2 = 0$.-/
abbreviation dual_number (R : Type*) : Type* := triv_sq_zero_ext R R

/-- The unit element $ε$ that squares to zero. -/
def dual_number.eps [has_zero R] [has_one R] : dual_number R := triv_sq_zero_ext.inr 1

-- Notation `ε` for the infinitesimal, and postfix `R[ε]` for the dual numbers over `R`.
localized "notation (name := dual_number.eps) `ε` := dual_number.eps" in dual_number
localized "postfix (name := dual_number) `[ε]`:1025 := dual_number" in dual_number
open_locale dual_number

namespace dual_number

open triv_sq_zero_ext

-- BUG FIX: removed a stray duplicate `@[simp]` attribute line that sat
-- before the doc comment of `snd_mul`; an attribute not attached to a
-- declaration does not parse.
/-- A version of `triv_sq_zero_ext.snd_mul` with `*` instead of `•`. -/
@[simp] lemma snd_mul [semiring R] (x y : R[ε]) : snd (x * y) = fst x * snd y + snd x * fst y :=
snd_mul _ _

/-- `ε` squares to zero. -/
@[simp] lemma eps_mul_eps [semiring R] : (ε * ε : R[ε]) = 0 := inr_mul_inr _ _ _

/-- Including `r : R` in the infinitesimal part agrees with scaling `ε` by `r`. -/
@[simp] lemma inr_eq_smul_eps [mul_zero_one_class R] (r : R) : inr r = (r • ε : R[ε]) :=
ext (mul_zero r).symm (mul_one r).symm
/-- For two algebra morphisms out of `R[ε]` to agree, it suffices for them to agree on `ε`. -/
-- the `@[ext]` attribute lets the `ext` tactic reduce equality of such maps to this hypothesis
@[ext] lemma alg_hom_ext {A} [comm_semiring R] [semiring A] [algebra R A]
  ⦃f g : R[ε] →ₐ[R] A⦄ (h : f ε = g ε) : f = g :=
alg_hom_ext' $ linear_map.ext_ring $ h

-- The remaining results are stated over a fixed commutative base `R` and `R`-algebra `A`.
variables {A : Type*} [comm_semiring R] [semiring A] [algebra R A]
/-- A universal property of the dual numbers, providing a unique `R[ε] →ₐ[R] A` for every element
of `A` which squares to `0`.

This isomorphism is named to match the very similar `complex.lift`. -/
@[simps {attrs := []}]
def lift : {e : A // e * e = 0} ≃ (R[ε] →ₐ[R] A) :=
equiv.trans
  -- first identify a square-zero element of `A` with a linear map `R →ₗ[R] A`
  -- whose pairwise products vanish ...
  (show {e : A // e * e = 0} ≃ {f : R →ₗ[R] A // ∀ x y, f x * f y = 0}, from
    (linear_map.ring_lmap_equiv_self R ℕ A).symm.to_equiv.subtype_equiv $ λ a, begin
      dsimp,
      simp_rw smul_mul_smul,
      refine ⟨λ h x y, h.symm ▸ smul_zero _, λ h, by simpa using h 1 1⟩,
    end)
  -- ... then apply the corresponding universal property of `triv_sq_zero_ext`
  triv_sq_zero_ext.lift
/-- When applied to `ε`, `dual_number.lift` produces the element of `A` that squares to 0. -/
@[simp]
lemma lift_apply_eps (e : {e : A // e * e = 0}) : lift e (ε : R[ε]) = e :=
(triv_sq_zero_ext.lift_aux_apply_inr _ _ _).trans $ one_smul _ _

/-- Lifting `dual_number.eps` itself gives the identity. -/
@[simp]
lemma lift_eps : lift ⟨ε, by exact eps_mul_eps⟩ = alg_hom.id R R[ε] :=
alg_hom_ext $ lift_apply_eps _

end dual_number
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/algebra/dual_number.lean"}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import average_precision_score
import sys
sys.path.append('..')
from models import r2plus1d18KeepTemp
from utils import torch_utils
class VideoOnsetNet(nn.Module):
    """Video onset detection network.

    An R(2+1)D-18 backbone (temporal resolution preserved) followed by a
    two-layer MLP head producing one logit per time step.
    """

    def __init__(self, pretrained):
        super(VideoOnsetNet, self).__init__()
        self.net = r2plus1d18KeepTemp(pretrained=pretrained)
        head_layers = [
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Linear(128, 1),
        ]
        self.fc = nn.Sequential(*head_layers)

    def forward(self, inputs, loss=False, evaluate=False):
        """Map inputs['frames'] to per-step onset logits.

        NOTE(review): the `loss` / `evaluate` flags are currently unused
        in this method.
        """
        feats = self.net(inputs['frames'])
        feats = feats.transpose(-1, -2)
        logits = self.fc(feats)
        return logits.squeeze(-1)
class BCLoss(nn.Module):
    """Binary classification loss.

    BCE-with-logits whose positive-class weight is derived from the label
    imbalance of each batch, plus a balanced AP/accuracy evaluator.
    """

    def __init__(self, args):
        super(BCLoss, self).__init__()

    def forward(self, pred, target):
        """Mean BCE-with-logits loss, weighting positives by (#neg / #pos)."""
        pred_flat = pred.contiguous().view(-1)
        target_flat = target.contiguous().view(-1)
        n_pos = target_flat.sum()
        weight = (target_flat.shape[0] - n_pos) / n_pos
        criterion = torch.nn.BCEWithLogitsLoss(pos_weight=weight).to(pred_flat.device)
        return criterion(pred_flat, target_flat.float())

    def evaluate(self, pred, target):
        """Average precision and accuracy on a class-balanced subsample."""
        scores = torch.sigmoid(pred.contiguous().view(-1)).data.cpu().numpy()
        labels = target.contiguous().view(-1).data.cpu().numpy()
        pos_index = np.nonzero(labels == 1)[0]
        neg_index = np.nonzero(labels == 0)[0]
        # keep an equal number of positives and negatives
        balance_num = min(pos_index.shape[0], neg_index.shape[0])
        keep = np.concatenate((pos_index[:balance_num], neg_index[:balance_num]), axis=0)
        scores = scores[keep]
        labels = labels[keep]
        return {
            'AP': average_precision_score(labels, scores),
            'Acc': torch_utils.binary_acc(scores, labels, thred=0.5),
        }
if __name__ == '__main__':
    # Smoke test: push one random clip through the network.
    # ROBUSTNESS FIX: use the GPU only when available instead of calling
    # `.cuda()` unconditionally, which crashes on CPU-only machines.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = VideoOnsetNet(False).to(device)
    rand_input = torch.randn((1, 3, 30, 112, 112), device=device)
    inputs = {
        'frames': rand_input
    }
    out = model(inputs)
|
{"hexsha": "3f038e4567a1306cde09fc872eb58b1ad3a46a90", "size": 2354, "ext": "py", "lang": "Python", "max_stars_repo_path": "specvqgan/onset_baseline/models/video_onset_net.py", "max_stars_repo_name": "XYPB/SpecVQGAN", "max_stars_repo_head_hexsha": "ed3c0f86c41bc408824979305d9c4f6df0877973", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "specvqgan/onset_baseline/models/video_onset_net.py", "max_issues_repo_name": "XYPB/SpecVQGAN", "max_issues_repo_head_hexsha": "ed3c0f86c41bc408824979305d9c4f6df0877973", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "specvqgan/onset_baseline/models/video_onset_net.py", "max_forks_repo_name": "XYPB/SpecVQGAN", "max_forks_repo_head_hexsha": "ed3c0f86c41bc408824979305d9c4f6df0877973", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1794871795, "max_line_length": 90, "alphanum_fraction": 0.5998300765, "include": true, "reason": "import numpy", "num_tokens": 588}
|
#Takes fasta file of sequences and makes histogram of GC contents
#Usage: python plotGC <sequences1.fasta> <sequences2.fasta>
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
import matplotlib.pyplot as plt
import numpy as np
def plotmultipleLength(fasta1, fasta2):
    """Plot overlaid step histograms of sequence lengths for two fasta files,
    with dash-dot vertical lines marking each file's median length.

    fasta1, fasta2 -- paths to fasta files (labeled 'Mito' and 'Ctrl').
    Displays the plot interactively via plt.show(); returns None.
    (Note: despite the module header mentioning GC content, this function
    plots sequence lengths.)
    """
    # CLEANUP: removed six unused accumulator lists (fasta3..fasta8lengths).
    fasta1lengths = [float(len(record.seq)) for record in SeqIO.parse(fasta1, 'fasta')]
    fasta2lengths = [float(len(record.seq)) for record in SeqIO.parse(fasta2, 'fasta')]
    fasta1median = np.median(fasta1lengths)
    fasta2median = np.median(fasta2lengths)
    Lengths = [fasta1lengths] + [fasta2lengths]
    plt.hist(Lengths, 40, range=[0, 4000], alpha=0.5, histtype='step',
             color=['green', 'midnightblue'], label=['Mito', 'Ctrl'])
    plt.xlabel('Length (nt)')
    plt.ylabel('Count')
    plt.title('')
    plt.legend()
    # Vertical lines for medians
    plt.plot([fasta1median, fasta1median], [0, 20], color='green', linestyle='-.', linewidth=2)
    plt.plot([fasta2median, fasta2median], [0, 20], color='midnightblue', linestyle='-.', linewidth=2)
    plt.show()
if __name__ == '__main__':
    # IMPROVEMENT: guard the CLI entry point so the module can be imported
    # without immediately reading sys.argv and opening a plot window.
    # Usage: python plotmultipleFastaLength.py <sequences1.fasta> <sequences2.fasta>
    plotmultipleLength(sys.argv[1], sys.argv[2])
|
{"hexsha": "e407ab0908f367789d1446ab752270f4eb888852", "size": 1365, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotmultipleFastaLength.py", "max_stars_repo_name": "TaliaferroLab/AnalysisScripts", "max_stars_repo_head_hexsha": "3df37d2f8fca9bc402afe5ea870c42200fca1ed3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plotmultipleFastaLength.py", "max_issues_repo_name": "TaliaferroLab/AnalysisScripts", "max_issues_repo_head_hexsha": "3df37d2f8fca9bc402afe5ea870c42200fca1ed3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotmultipleFastaLength.py", "max_forks_repo_name": "TaliaferroLab/AnalysisScripts", "max_forks_repo_head_hexsha": "3df37d2f8fca9bc402afe5ea870c42200fca1ed3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-30T07:37:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T07:37:19.000Z", "avg_line_length": 28.4375, "max_line_length": 134, "alphanum_fraction": 0.652014652, "include": true, "reason": "import numpy", "num_tokens": 385}
|
import torch
import random
import numpy as np
from tqdm import trange, tqdm
from torch_sparse import spmm
from texttable import Texttable
from appnp_layer import APPNPModel
class APPNPTrainer(object):
    """
    Method to train PPNP/APPNP model.
    Owns the model, the device-resident data tensors and the
    train/validation/test node split.
    """
    def __init__(self, args, graph, features, target):
        """
        :param args: Arguments object.
        :param graph: Networkx graph.
        :param features: Feature matrix (dict: node -> list of feature ids).
        :param target: Target vector with labels.
        """
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.args = args
        self.graph = graph
        self.features = features
        self.target = target
        # setup pipeline: build model, split nodes, then move data to device
        self.create_model()
        self.train_test_split()
        self.transfer_node_sets()
        self.process_features()
        self.transfer_features()

    def create_model(self):
        """
        Defining a model and transfering it to GPU/CPU.
        """
        self.node_count = self.graph.number_of_nodes()
        # assumes labels are 0-based consecutive integers -- TODO confirm
        self.number_of_labels = np.max(self.target)+1
        # largest feature id seen anywhere fixes the input width (0-based ids)
        self.number_of_features = max([feature for node, features in self.features.items() for feature in features]) + 1
        self.model = APPNPModel(self.args, self.number_of_labels, self.number_of_features, self.graph, self.device)
        self.model = self.model.to(self.device)

    def train_test_split(self):
        """
        Creating a train/test split: after a seeded shuffle, the first
        train_size nodes train, the next test_size test, the rest validate.
        """
        random.seed(self.args.seed)
        nodes = [node for node in range(self.node_count)]
        random.shuffle(nodes)
        self.train_nodes = nodes[0:self.args.train_size]
        self.test_nodes = nodes[self.args.train_size:self.args.train_size+self.args.test_size]
        self.validation_nodes = nodes[self.args.train_size+self.args.test_size:]

    def transfer_node_sets(self):
        """
        Transfering the node sets to the device.
        """
        self.train_nodes = torch.LongTensor(self.train_nodes).to(self.device)
        self.test_nodes = torch.LongTensor(self.test_nodes).to(self.device)
        self.validation_nodes = torch.LongTensor(self.validation_nodes).to(self.device)

    def process_features(self):
        """
        Creating a sparse feature matrix and a vector for the target labels.
        """
        # COO indices: one (node, feature) pair per entry
        index_1 = [node for node in self.graph.nodes() for fet in self.features[node]]
        index_2 = [fet for node in self.graph.nodes() for fet in self.features[node]]
        # row-normalize: each feature of a node gets weight 1/#features(node)
        values = [1.0/len(self.features[node]) for node in self.graph.nodes() for fet in self.features[node]]
        self.feature_indices = torch.LongTensor([index_1, index_2])
        self.feature_values = torch.FloatTensor(values)
        self.target = torch.LongTensor(self.target)

    def transfer_features(self):
        """
        Transfering the features and the target matrix to the device.
        """
        self.target = self.target.to(self.device)
        self.feature_indices = self.feature_indices.to(self.device)
        self.feature_values = self.feature_values.to(self.device)

    def score(self, index_set):
        """
        Calculating the accuracy for a given node set.
        :param index_set: Index of nodes to be included in calculation.
        :return acc: Accuracy score.
        """
        self.model.eval()
        _, pred = self.model(self.feature_indices, self.feature_values).max(dim=1)
        correct = pred[index_set].eq(self.target[index_set]).sum().item()
        acc = correct / index_set.size()[0]
        return acc

    def do_a_step(self):
        """
        Doing an optimization step.
        """
        self.model.train()
        self.optimizer.zero_grad()
        prediction = self.model(self.feature_indices, self.feature_values)
        loss = torch.nn.functional.nll_loss(prediction[self.train_nodes], self.target[self.train_nodes])
        # L2 penalty (strength args.lambd) applied to layer_2's weights only
        loss = loss + (self.args.lambd/2)*(torch.sum(self.model.layer_2.weight_matrix**2))
        loss.backward()
        self.optimizer.step()

    def train_neural_network(self):
        """
        Training a neural network with early stopping on validation accuracy.
        """
        print("\nTraining.\n")
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        self.best_accuracy = 0
        self.step_counter = 0
        iterator = trange(self.args.epochs, desc='Validation accuracy: ', leave=True)
        for epoch in iterator:
            self.do_a_step()
            accuracy = self.score(self.validation_nodes)
            iterator.set_description("Validation accuracy: {:.4f}".format(accuracy))
            if accuracy >= self.best_accuracy:
                # validation improved (or tied): record matching test accuracy
                self.best_accuracy = accuracy
                self.test_accuracy = self.score(self.test_nodes)
                self.step_counter = 0
            else:
                self.step_counter = self.step_counter + 1
                if self.step_counter>self.args.early_stopping_rounds:
                    iterator.close()
                    break

    def fit(self):
        """
        Fitting the network and calculating the test accuracy.
        """
        self.train_neural_network()
        # NOTE(review): this message prints even when training completed all
        # epochs without triggering early stopping.
        print("\nBreaking from training process because of early stopping.\n")
        print("Test accuracy: {:.4f}".format(self.test_accuracy))
|
{"hexsha": "af4e202ad1e07aaf361d1c4b611594c49bdb560b", "size": 5286, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/appnp.py", "max_stars_repo_name": "thefr33radical/APPNP", "max_stars_repo_head_hexsha": "15ec5d0171137ad25069d81fd77c5a22a02d19c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/appnp.py", "max_issues_repo_name": "thefr33radical/APPNP", "max_issues_repo_head_hexsha": "15ec5d0171137ad25069d81fd77c5a22a02d19c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/appnp.py", "max_forks_repo_name": "thefr33radical/APPNP", "max_forks_repo_head_hexsha": "15ec5d0171137ad25069d81fd77c5a22a02d19c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-04T10:24:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-04T10:24:35.000Z", "avg_line_length": 39.7443609023, "max_line_length": 121, "alphanum_fraction": 0.6322360953, "include": true, "reason": "import numpy", "num_tokens": 1089}
|
##############################################################################
#
# Unit tests for squeezing operation
# Convention: The squeezing unitary is fixed to be
# U(z) = \exp(0.5 (z^* \hat{a}^2 - z (\hat{a^\dagger}^2)))
# where \hat{a} is the photon annihilation operator.
#
##############################################################################
import unittest
import os, sys
sys.path.append(os.getcwd())
import numpy as np
from scipy.special import factorial, erfinv
from scipy.special import gammaln as lg
from defaults import BaseTest, FockBaseTest
# sqz_r = np.linspace(0.0, 0.1, 5)
sqz_theta = np.linspace(0, 2 * np.pi, 3, endpoint=False)
###################################################################
class BasicTests(BaseTest):
    """Basic implementation-independent tests."""
    num_subsystems = 1

    def test_no_squeezing(self):
        """Squeezing with magnitude r = 0 (any phase) must leave the
        vacuum state unchanged."""
        self.circuit.prepare_vacuum_state(0)
        for phi in sqz_theta:
            self.circuit.reset(pure=self.kwargs['pure'])
            zero_squeeze = 0 * np.exp(1j * phi)
            self.circuit.squeeze(zero_squeeze, 0)
            self.assertAllTrue(self.circuit.is_vacuum(self.tol))
class FockBasisTests(FockBaseTest):
    """Tests for simulators that use Fock basis."""
    num_subsystems = 1

    def setUp(self):
        super().setUp()
        eps = 0.01
        # Largest squeezing magnitude tested; presumably chosen so that the
        # D-dimensional Fock truncation still captures the state to within
        # probability eps -- TODO confirm derivation.
        sqz_max = np.log( np.sqrt(self.D-1)/(erfinv(1-eps)*np.sqrt(2)) )
        self.sqz_r = np.linspace(0.0, sqz_max, 5)

    def test_normalized_squeezed_state(self):
        """Tests if a range of squeezed vacuum states are normalized."""
        for r in self.sqz_r:
            for theta in sqz_theta:
                z = r * np.exp(1j * theta)
                self.circuit.reset(pure=self.kwargs['pure'])
                self.circuit.squeeze(z, 0)
                state = self.circuit.state()
                tr = state.trace()
                self.assertAllAlmostEqual(tr, 1, delta=self.tol)

    def test_no_odd_fock(self):
        """Tests if a range of squeezed vacuum states have
        only nonzero entries for even Fock states."""
        for r in self.sqz_r:
            for theta in sqz_theta:
                z = r * np.exp(1j * theta)
                self.circuit.reset(pure=self.kwargs['pure'])
                self.circuit.squeeze(z, 0)
                s = self.circuit.state()
                if s.is_pure:
                    num_state = s.ket()
                else:
                    num_state = s.dm()
                # in batched mode the leading axis indexes the batch
                if self.args.batched:
                    odd_entries = num_state[:,1::2]
                else:
                    odd_entries = num_state[1::2]
                self.assertAllTrue(np.all(odd_entries == 0))

    def test_reference_squeezed_vacuum(self):
        """Tests if a range of squeezed vacuum states are equal to the form of Eq. (5.5.6) in Loudon."""
        def sech(x):
            return 1 / np.cosh(x)
        for r in self.sqz_r:
            for theta in sqz_theta:
                z = r * np.exp(1j * theta)
                self.circuit.reset(pure=self.kwargs['pure'])
                self.circuit.squeeze(z, 0)
                s = self.circuit.state()
                if s.is_pure:
                    num_state = s.ket()
                else:
                    num_state = s.dm()
                # closed-form even-Fock amplitudes of the squeezed vacuum
                even_refs = np.array([np.sqrt(sech(r)) * np.sqrt(factorial(k)) / factorial(k / 2) * (-0.5 * np.exp(1j * theta) * np.tanh(r)) ** (k / 2) for k in range(0, self.D, 2)])
                if self.kwargs['pure']:
                    if self.args.batched:
                        even_entries = num_state[:,::2]
                    else:
                        even_entries = num_state[::2]
                else:
                    # mixed representation: compare against |psi><psi|
                    even_refs = np.outer(even_refs, np.conj(even_refs))
                    if self.args.batched:
                        even_entries = num_state[:,::2, ::2]
                    else:
                        even_entries = num_state[::2,::2]
                self.assertAllAlmostEqual(even_entries, even_refs, delta=self.tol)

    def test_reference_squeezed_fock(self):
        """Tests if a range of squeezed fock states are equal to the form of Eq. (20)
        in 'On the Squeezed Number States and their Phase Space Representations'
        (https://arxiv.org/abs/quant-ph/0108024)."""
        def matrix_elem(n,r,m):
            # reference value of the squeezed-number-state amplitude;
            # eps regularizes sinh(r)**k at r ~ 0 in the general branch
            eps = 1e-10
            if n % 2 != m % 2:
                return 0.0
            elif r == 0.:
                return np.complex(n==m) # delta function
            else:
                k = np.arange(m % 2, min([m, n]) + 1, 2)
                # gammaln-based evaluation keeps the factorial ratios from overflowing
                res = np.sum(
                    (-1)**((n-k)/2)
                    * np.exp((lg(m+1) + lg(n+1))/2 - lg(k+1) - lg((m-k)/2+1) - lg((n-k)/2+1))
                    * (np.sinh(r)/2+eps)**((n+m-2*k)/2) / (np.cosh(r)**((n+m+1)/2))
                )
                return res
        for m in range(self.D):
            for r in self.sqz_r:
                self.circuit.reset(pure=self.kwargs['pure'])
                self.circuit.prepare_fock_state(m, 0)
                self.circuit.squeeze(r, 0)
                s = self.circuit.state()
                if s.is_pure:
                    num_state = s.ket()
                else:
                    num_state = s.dm()
                ref_state = np.array([matrix_elem(n,r,m) for n in range(self.D)])
                if not self.kwargs['pure']:
                    ref_state = np.outer(ref_state, np.conj(ref_state))
                self.assertAllAlmostEqual(num_state, ref_state, delta=self.tol)
if __name__ == "__main__":
    # Collect every test case defined in this file and run them together.
    loader = unittest.TestLoader()
    full_suite = unittest.TestSuite()
    for case in (BasicTests, FockBasisTests):
        full_suite.addTests(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner().run(full_suite)
|
{"hexsha": "685adbfb972c7f46da7e054787d43584a0de9965", "size": 5333, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_squeeze_operation.py", "max_stars_repo_name": "cgogolin/strawberryfields", "max_stars_repo_head_hexsha": "d7af185cad87b18fda4ba7c70f9af37796482c93", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_squeeze_operation.py", "max_issues_repo_name": "cgogolin/strawberryfields", "max_issues_repo_head_hexsha": "d7af185cad87b18fda4ba7c70f9af37796482c93", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_squeeze_operation.py", "max_forks_repo_name": "cgogolin/strawberryfields", "max_forks_repo_head_hexsha": "d7af185cad87b18fda4ba7c70f9af37796482c93", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3178807947, "max_line_length": 176, "alphanum_fraction": 0.5599099944, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1449}
|
C Copyright(C) 2011-2017 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C Redistribution and use in source and binary forms, with or without
C modification, are permitted provided that the following conditions are
C met:
C
C * Redistributions of source code must retain the above copyright
C notice, this list of conditions and the following disclaimer.
C
C * Redistributions in binary form must reproduce the above
C copyright notice, this list of conditions and the following
C disclaimer in the documentation and/or other materials provided
C with the distribution.
C
C * Neither the name of NTESS nor the names of its
C contributors may be used to endorse or promote products derived
C from this software without specific prior written permission.
C
C THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
C "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
C LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
C A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
C OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
C SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
C LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
C DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
C THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
C (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
C OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
C=======================================================================
      SUBROUTINE NEWXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, NPCEN,
     &   ZCORD, SINANG, COSANG, A)
C=======================================================================
C --*** NEWXYZ *** (GEN3D) Calculate 3D coordinates
C -- Written by Amy Gilkey - revised 05/09/88
C -- Modified by Greg Sjaardema - 02/06/89
C --    Added Warp Function
C --    Added Gradient to Rotations (not for center blocks)
C --    Split transformations into separate subroutines
C --
C --NEWXYZ calculates the coordinate array for the 3D database.
C --
C --Parameters:
C --   XN, YN - IN - the 2D coordinates, destroyed
C --   XN3, YN3, ZN3 - OUT - the 3D coordinates
C --   IXNP - IN - the new index for each node
C --   NRNP - IN - the number of new nodes generated for each node
C --   NPCEN - IN - the node numbers of the center nodes by column and row
C --   ZCORD - SCRATCH - size = NNREPL, holds z coordinate for transformations
C --   SINANG, COSANG - SCRATCH - size = NNREPL, holds sin and cos of
C --      angles for rotations
C --
C --Common Variables:
C --   Uses NDIM, NUMNP of /DBNUMS/
C --   Uses NDIM3, NUMNP3 of /DBNUM3/
C --   Uses ITRANT, NNREPL, DIM3, NRTRAN, D3TRAN, ZGRAD,
C --      CENTER, NUMCOL, NUMROW of /PARAMS/
C --   Uses XOFFS, YOFFS, ZOFFS of /XYZOFF/
C --   Uses ROT3D, ROTMAT of /XYZROT/
      INCLUDE 'g3_dbnums.blk'
      INCLUDE 'g3_dbnum3.blk'
      INCLUDE 'g3_params.blk'
      INCLUDE 'g3_xyzoff.blk'
      INCLUDE 'g3_xyzrot.blk'
      INCLUDE 'g3_xyzmir.blk'
      INCLUDE 'g3_xyzero.blk'
      INCLUDE 'g3_xyzscl.blk'
      INCLUDE 'g3_twist.blk'
      INCLUDE 'g3_splxyz.blk'
      REAL XN(NUMNP), YN(NUMNP),
     &   XN3(NUMNP3), YN3(NUMNP3), ZN3(NUMNP3)
      INTEGER IXNP(*), NRNP(*)
      INTEGER NPCEN(NUMCDM,*)
      REAL ZCORD(NNREPL)
      REAL SINANG(NNREPL), COSANG(NNREPL)
      REAL A(*)
C --Dispatch on the transformation type.  ITRANT holds a single flag bit:
C --NOTE(review): judging by the routine names, 1=translate, 2=arc/rotate
C --(about X or Y depending on ROTAX), 4=warp, 8=twist, 16=project,
C --32=explicit arc, 64/128=spline-based sweeps - confirm against the
C --gen3d documentation.
      IF (ITRANT .EQ. 1) THEN
         CALL TRNXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, ZCORD)
      ELSE IF (ITRANT .EQ. 2) THEN
         if (rotax .eq. 0) then
            CALL ARCXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, NPCEN,
     &         SINANG, COSANG)
         else
            CALL ARCYXZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, NPCEN,
     &         SINANG, COSANG)
         end if
      ELSE IF (ITRANT .EQ. 4) THEN
         CALL WRPXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, ZCORD )
      ELSE IF (ITRANT .EQ. 8) THEN
         CALL TWIXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, ZCORD,
     *   SINANG, COSANG)
      ELSE IF (ITRANT .EQ. 16) THEN
         CALL PROXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, ZCORD,
     *   SINANG, COSANG)
      ELSE IF (ITRANT .EQ. 32) THEN
         CALL EXPARC (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, NPCEN,
     &   SINANG, COSANG)
      ELSE IF (ITRANT .EQ. 64) THEN
         CALL SPLXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, ZCORD,
     $   NSPL(1), NSPL(2),
     &   A(KRSPLA), A(KZSPLA), A(KSPL2A), A(KDISTA), A(KSCRA),
     &   A(KRSPLB), A(KZSPLB), A(KSPL2B), A(KDISTB), A(KSCRB),
     $   SLLFT, SLRGT, RDTHET, SWEEP, NOSCAL)
      ELSE IF (ITRANT .EQ. 128) THEN
         CALL SPTXYZ (XN, YN, XN3, YN3, ZN3, IXNP, NRNP, ZCORD, NSPLT,
     $   A(KZSPL), A(KXSPL), A(KXSPL2), A(KYSPL), A(KYSPL2),
     $   A(KSCR))
      END IF
C --Revolve 3D mesh, if needed
C --Each node is shifted to the rotation center, multiplied by the
C --row-vector * ROTMAT convention, then shifted back.
      IF (ROT3D) THEN
         DO 30 JNP = 1, NUMNP3
            X = XN3(JNP) - ROTCEN(1)
            Y = YN3(JNP) - ROTCEN(2)
            Z = ZN3(JNP) - ROTCEN(3)
            XN3(JNP) = X*ROTMAT(1,1) + Y*ROTMAT(2,1) + Z*ROTMAT(3,1)
     &         + ROTCEN(1)
            YN3(JNP) = X*ROTMAT(1,2) + Y*ROTMAT(2,2) + Z*ROTMAT(3,2)
     &         + ROTCEN(2)
            ZN3(JNP) = X*ROTMAT(1,3) + Y*ROTMAT(2,3) + Z*ROTMAT(3,3)
     &         + ROTCEN(3)
   30    CONTINUE
      END IF
C --Add offset, if any, to coordinates
      IF (XOFFS .NE. 0.0) THEN
         DO 40 JNP = 1, NUMNP3
            XN3(JNP) = XN3(JNP) + XOFFS
   40    CONTINUE
      END IF
      IF (YOFFS .NE. 0.0) THEN
         DO 50 JNP = 1, NUMNP3
            YN3(JNP) = YN3(JNP) + YOFFS
   50    CONTINUE
      END IF
      IF (ZOFFS .NE. 0.0) THEN
         DO 60 JNP = 1, NUMNP3
            ZN3(JNP) = ZN3(JNP) + ZOFFS
   60    CONTINUE
      END IF
C --Mirror coordinates if any specified
C --A negative *MIRR flag reflects the mesh through the matching
C --coordinate plane.
      IF (XMIRR .LT. 0.) THEN
         DO 70 JNP = 1, NUMNP3
            XN3(JNP) = -1.0 * XN3(JNP)
   70    CONTINUE
      END IF
      IF (YMIRR .LT. 0.) THEN
         DO 80 JNP = 1, NUMNP3
            YN3(JNP) = -1.0 * YN3(JNP)
   80    CONTINUE
      END IF
      IF (ZMIRR .LT. 0.) THEN
         DO 90 JNP = 1, NUMNP3
            ZN3(JNP) = -1.0 * ZN3(JNP)
   90    CONTINUE
      END IF
C --- Zero coordinates if *ZERO is not equal to zero
C --- (snap near-zero coordinates, |coord| < *ZERO, to exactly zero)
      IF (XZERO .NE. 0.) THEN
         DO 100 JNP = 1, NUMNP3
            if (ABS(XN3(JNP)) .LT. XZERO) XN3(JNP) = 0.0
  100    CONTINUE
      END IF
      IF (YZERO .NE. 0.) THEN
         DO 110 JNP = 1, NUMNP3
            if (ABS(YN3(JNP)) .LT. YZERO) YN3(JNP) = 0.0
  110    CONTINUE
      END IF
      IF (ZZERO .NE. 0.) THEN
         DO 120 JNP = 1, NUMNP3
            if (ABS(ZN3(JNP)) .LT. ZZERO) ZN3(JNP) = 0.0
  120    CONTINUE
      END IF
C --- Scale the coordinates if any Scaled
C --- (Z scaling is applied only when the database is 3D: NDIM .EQ. 3)
      IF (XSCAL .NE. 1.) THEN
         DO 130 JNP = 1, NUMNP3
            XN3(JNP) = XSCAL * XN3(JNP)
  130    CONTINUE
      END IF
      IF (YSCAL .NE. 1.) THEN
         DO 140 JNP = 1, NUMNP3
            YN3(JNP) = YSCAL * YN3(JNP)
  140    CONTINUE
      END IF
      IF (ZSCAL .NE. 1. .AND. NDIM .EQ. 3) THEN
         DO 150 JNP = 1, NUMNP3
            ZN3(JNP) = ZSCAL * ZN3(JNP)
  150    CONTINUE
      END IF
C --Report the bounding box of the generated mesh
      CALL MINMAX (NUMNP3, XN3, XMIN, XMAX)
      CALL MINMAX (NUMNP3, YN3, YMIN, YMAX)
      CALL MINMAX (NUMNP3, ZN3, ZMIN, ZMAX)
      WRITE (*, 155) 'Output Mesh Limits:'
      WRITE (*, 160) 'X', XMIN, 'X', XMAX, XMAX-XMIN
      WRITE (*, 160) 'Y', YMIN, 'Y', YMAX, YMAX-YMIN
      WRITE (*, 160) 'Z', ZMIN, 'Z', ZMAX, ZMAX-ZMIN
  155 FORMAT(/' ',A)
  160 FORMAT( ' Minimum ',A1,' = ',1PE12.5,', Maximum ',A1,' = ',
     &   1PE12.5,', Range = ',1PE12.5)
      RETURN
      END
|
{"hexsha": "31a3cb5534f95ef3adaa1d35b616631751048761", "size": 7984, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/applications/gen3d/g3_newxyz.f", "max_stars_repo_name": "mathstuf/seacas", "max_stars_repo_head_hexsha": "49b3466e3bba12ec6597e364ce0f0f149f9ca909", "max_stars_repo_licenses": ["BSD-3-Clause", "NetCDF", "Zlib", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "packages/seacas/applications/gen3d/g3_newxyz.f", "max_issues_repo_name": "mathstuf/seacas", "max_issues_repo_head_hexsha": "49b3466e3bba12ec6597e364ce0f0f149f9ca909", "max_issues_repo_licenses": ["BSD-3-Clause", "NetCDF", "Zlib", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "packages/seacas/applications/gen3d/g3_newxyz.f", "max_forks_repo_name": "mathstuf/seacas", "max_forks_repo_head_hexsha": "49b3466e3bba12ec6597e364ce0f0f149f9ca909", "max_forks_repo_licenses": ["BSD-3-Clause", "NetCDF", "Zlib", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2909090909, "max_line_length": 80, "alphanum_fraction": 0.5711422846, "num_tokens": 2851}
|
import torch
import numpy as np
# import h5py
from scipy.ndimage.interpolation import rotate
from pathlib import Path
import matplotlib.pyplot as plt
import cv2
import random
class CMPLoad(object):
    """Loader that yields a two-frame image stack and its CMP ground truth.

    Each item pairs an image with the frame `time_late` steps later, applies a
    shared random crop plus one of eight dihedral augmentations, and returns
    torch tensors in channels-first layout.
    """

    def __init__(self, ori_path, crop_size=(256, 256)):
        # ori_path: indexable table of (image path, time offset) pairs.
        self.ori_paths = ori_path
        self.crop_size = crop_size

    def __len__(self):
        return len(self.ori_paths)

    def random_crop_param(self, shape):
        """Pick a random crop window, keeping a 30-pixel margin on every side."""
        height, width = shape
        top = np.random.randint(30, height - self.crop_size[0] - 30)
        left = np.random.randint(30, width - self.crop_size[1] - 30)
        return top, top + self.crop_size[0], left, left + self.crop_size[1]

    @classmethod
    def flip_and_rotate(cls, img, mpm, seed):
        """Apply one of 8 dihedral augmentations (seed in 0..7) to img and mpm.

        The first two MPM channels are remapped/negated so the encoded vector
        field stays consistent with the rotated/flipped frame.
        """
        quarter = seed % 4
        img = rotate(img, 90 * quarter)
        mpm = rotate(mpm, 90 * quarter)
        if quarter == 1:
            # 90 degrees counterclockwise: negate y, then swap x/y channels.
            mpm[:, :, 1] = -mpm[:, :, 1]
            mpm = mpm[:, :, [1, 0, 2]]
        elif quarter == 2:
            # 180 degrees: negate both in-plane components.
            mpm[:, :, :2] = -mpm[:, :, :2]
        elif quarter == 3:
            # 270 degrees counterclockwise: negate x, then swap x/y channels.
            mpm[:, :, 0] = -mpm[:, :, 0]
            mpm = mpm[:, :, [1, 0, 2]]
        if seed > 3:
            # Horizontal flip applied on top of the rotation (seeds 4..7).
            img = np.fliplr(img).copy()
            mpm = np.fliplr(mpm).copy()
            mpm[:, :, 1] = -mpm[:, :, 1]
        return img, mpm

    def __getitem__(self, data_id):
        img_name = self.ori_paths[data_id, 0]
        time_late = self.ori_paths[data_id, 1]
        CMP_frame = int(img_name.stem[-3:])
        # Companion frame taken `time_late` steps later in the sequence.
        img_name2 = img_name.parent.joinpath(
            f"{int(img_name.stem[-3:]) + int(time_late):05d}.tif"
        )
        frame_a = cv2.imread(str(img_name), -1) / 255
        frame_b = cv2.imread(str(img_name2), -1) / 255
        gt_name = img_name.parent.parent.joinpath(
            f"cmp/{int(time_late)}/{CMP_frame:05d}.npy"
        )
        gt = np.load(str(gt_name)).astype(np.float32)
        mask_name = img_name.parent.parent.joinpath(
            f"cmp/mask_{int(time_late)}/{CMP_frame:05d}.tif"
        )
        mask = cv2.imread(str(mask_name), 0)
        # Shared random crop: the same window for both frames, gt and mask.
        top, bottom, left, right = self.random_crop_param(frame_a.shape)
        frame_a = frame_a[top:bottom, left:right]
        frame_b = frame_b[top:bottom, left:right]
        gt = gt[top:bottom, left:right]
        mask = mask[top:bottom, left:right]
        stacked = np.concatenate(
            [
                frame_a.reshape(self.crop_size[0], self.crop_size[1], 1),
                frame_b.reshape(self.crop_size[0], self.crop_size[1], 1),
            ],
            axis=2,
        )
        stacked, gt = self.flip_and_rotate(stacked, gt, random.randrange(8))
        # Masked pixels are flagged with the sentinel value 255.
        gt[mask == 255] = 255
        stacked = torch.from_numpy(stacked.transpose(2, 0, 1).astype(np.float32))
        gt = torch.from_numpy(gt.transpose(2, 0, 1).astype(np.float32))
        return {"image": stacked, "gt": gt}
|
{"hexsha": "92b9374ce0ddd741aeecaf20a46e0a437e800591", "size": 3174, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/load_for_CMP.py", "max_stars_repo_name": "naivete5656/BFP", "max_stars_repo_head_hexsha": "74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-07-31T15:20:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-18T08:42:07.000Z", "max_issues_repo_path": "utils/load_for_CMP.py", "max_issues_repo_name": "naivete5656/BFP", "max_issues_repo_head_hexsha": "74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/load_for_CMP.py", "max_forks_repo_name": "naivete5656/BFP", "max_forks_repo_head_hexsha": "74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-10-04T02:02:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-14T23:37:08.000Z", "avg_line_length": 29.6635514019, "max_line_length": 70, "alphanum_fraction": 0.5381222432, "include": true, "reason": "import numpy,from scipy", "num_tokens": 928}
|
[STATEMENT]
lemma i_Exec_Stream_Acc_Output_drop: "
0 < k \<Longrightarrow>
i_Exec_Comp_Stream_Acc_Output k output_fun trans_fun input c \<Up> n =
i_Exec_Comp_Stream_Acc_Output k output_fun trans_fun (input \<Up> n) (
f_Exec_Comp trans_fun (input \<Down> n \<odot>\<^sub>f k) c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < k \<Longrightarrow> i_Exec_Comp_Stream_Acc_Output k output_fun trans_fun input c \<Up> n = i_Exec_Comp_Stream_Acc_Output k output_fun trans_fun (input \<Up> n) (f_Exec_Comp trans_fun (input \<Down> n \<odot> k) c)
[PROOF STEP]
by (simp add: i_Exec_Comp_Stream_Acc_Output_def i_shrink_def i_Exec_Stream_expand_aggregate_map_drop)
|
{"llama_tokens": 260, "file": "AutoFocus-Stream_AF_Stream_Exec", "length": 1}
|
-- This contains material which used to be in the Sane module, but is no
-- longer used. It is not junk, so it is kept here, as we may need to
-- resurrect it.
module Obsolete where
import Data.Fin as F
--
open import Data.Empty
open import Data.Unit
open import Data.Unit.Core
open import Data.Nat renaming (_⊔_ to _⊔ℕ_)
open import Data.Sum renaming (map to _⊎→_)
open import Data.Product renaming (map to _×→_)
open import Data.Vec
open import Function renaming (_∘_ to _○_)
open import Relation.Binary.PropositionalEquality
open ≡-Reasoning
-- start re-splitting things up, as this is getting out of hand
open import FT -- Finite Types
open import VecHelpers
open import NatSimple
open import Eval
open import Sane
{-
swap≡ind₀ : {n : ℕ} →
((F.suc F.zero) ∷ F.zero ∷ (vmap (λ i → F.suc (F.suc i)) (upTo n)))
≡ (swapInd F.zero (F.suc F.zero))
swap≡ind₀ {n} = ap (λ v → F.suc F.zero ∷ F.zero ∷ v)
((vmap (λ i → F.suc (F.suc i)) (upTo n)) ≡⟨ mapTab _ _ ⟩
(tabulate (id ○ (λ i → F.suc (F.suc i)))) ≡⟨ tabf∼g _ _ swapIndIdAfterOne ⟩
((tabulate (((swapIndFn F.zero (F.suc F.zero)) ○ F.suc) ○ F.suc)) ∎))
-}
{-- For reference
swapmn : {lim : ℕ} → (m : F.Fin lim) → F.Fin′ m → (fromℕ lim) ⇛ (fromℕ lim)
swapmn F.zero ()
swapmn (F.suc m) (F.zero) = swapUpTo m ◎ swapi m ◎ swapDownFrom m
swapmn (F.suc m) (F.suc n) = id⇛ ⊕ swapmn m n
--}
{--
foldrWorks
{fromℕ n ⇛ fromℕ n}
{n}
(λ i → fromℕ n ⇛ fromℕ n)
-- I think we need to rewrite vecToComb using an indexed fold to have all
-- the information here that we need for the correctness proof [Z]
(λ n′ v c → (i : F.Fin n′) → {!!})
-- (evalVec {n′} v i) ≡ (evalComb c (finToVal i)))
_◎_
id⇛
{!!} -- combination lemma
{!!} -- base case lemma
(zipWith makeSingleComb v (upTo n))
--}
-- Maybe we won't end up needing these to plug in to vecToCombWorks,
-- but I'm afraid we will, which means we'll have to fix them eventually.
-- I'm not sure how to do this right now and I've spent too much time on
-- it already when there are other, more tractable problems that need to
-- be solved. If someone else wants to take a shot, be my guest. [Z]
{-
foldri : {A : Set} → (B : ℕ → Set) → {m : ℕ} →
({n : ℕ} → F.Fin m → A → B n → B (suc n)) →
B zero →
Vec A m → B m
foldri {A} b {m} combine base vec =
foldr
b
(uncurry combine)
base
(Data.Vec.zip (upTo _) vec)
postulate foldriWorks : {A : Set} → {m : ℕ} →
(B : ℕ → Set) → (P : (n : ℕ) → Vec A n → B n → Set) →
(combine : {n : ℕ} → F.Fin m → A → B n → B (suc n)) →
(base : B zero) →
({n : ℕ} → (i : F.Fin m) → (a : A) → (v : Vec A n) → (b : B n)
→ P n v b
→ P (suc n) (a ∷ v) (combine i a b)) →
P zero [] base →
(v : Vec A m) →
P m v (foldri B combine base v)
-}
-- following definition doesn't work, or at least not obviously
-- need a more straightforward definition of foldri, but none comes to mind
-- help? [Z]
{--
foldriWorks {A} {m} B P combine base pcombine pbase vec =
foldrWorks {F.Fin m × A}
B
(λ n v b → P n (map proj₂ v) b)
(uncurry combine)
base
? -- (uncurry pcombine)
pbase
(Data.Vec.zip (upTo _) vec)
--}
-- Second argument is an accumulator
-- plf′ max i acc = (i + acc) + 1 mod (max + acc) if (i + acc) <= (max + acc), (max + acc) ow
-- This is the simplest way I could come up with to do this without
-- using F.compare or something similar
{-
plf′ : {m n : ℕ} → F.Fin (suc m) → F.Fin (suc m) → F.Fin n → F.Fin (m + n)
plf′ {n = zero} F.zero F.zero ()
plf′ {m} {suc n} F.zero F.zero acc =
hetType F.zero (ap F.Fin (! (m+1+n≡1+m+n m _))) -- m mod m == 0
plf′ F.zero (F.suc i) acc = (F.suc i) +F acc -- above the threshold, so just id
plf′ (F.suc {zero} ()) _ _
plf′ (F.suc {suc m} max) F.zero acc = -- we're in range, so take succ of acc
hetType (inj+ {n = m} (F.suc acc)) (ap F.Fin (m+1+n≡1+m+n m _))
plf′ (F.suc {suc m} max) (F.suc i) acc = -- we don't know what to do yet, so incr acc & recur
hetType (plf′ max i (F.suc acc))
(ap F.Fin ((m+1+n≡1+m+n m _)))
-}
-- Seems important to prove! (but not used, so commenting it out [JC])
{-
shuffle : {n : ℕ} → (i : F.Fin n) →
(permLeftID (F.inject₁ i)
∘̬ swapInd (F.inject₁ i) (F.suc i)
∘̬ permRightID (F.inject₁ i))
≡ swapInd F.zero (F.suc i)
shuffle {zero} ()
shuffle {suc n} F.zero = {!!}
shuffle {suc n} (F.suc i) = {!!}
-}
-- helper lemmas for vecRepWorks
-- swapElsewhere: a swap at index zero leaves any value injected past the
-- first two summands untouched; the equality holds definitionally (refl).
swapElsewhere : {n : ℕ} → (x : ⟦ fromℕ n ⟧) →
                inj₂ (inj₂ x) ≡ (evalComb (swapi F.zero) (inj₂ (inj₂ x)))
swapElsewhere x = refl
-- Lemma for proving things about calls to foldr; possibly not needed.
-- foldrWorks: an induction principle for foldr - if property P is preserved
-- by the combining step (pcombine) and holds for the base (pbase), then it
-- holds for the fold over any vector.  Proof is by structural induction.
foldrWorks : {A : Set} → {m : ℕ} →
  (B : ℕ → Set) → (P : (n : ℕ) → Vec A n → B n → Set)
  → (_⊕_ : {n : ℕ} → A → B n → B (suc n)) → (base : B zero)
  → ({n : ℕ} → (a : A) → (v : Vec A n) → (b : B n) → P n v b
  → P (suc n) (a ∷ v) (a ⊕ b))
  → P zero [] base
  → (v : Vec A m)
  → P m v (foldr B _⊕_ base v)
foldrWorks B P combine base pcombine pbase [] = pbase
foldrWorks B P combine base pcombine pbase (x ∷ v) =
  pcombine x v (foldr B combine base v)
  (foldrWorks B P combine base pcombine pbase v)
-- evalComb on foldr becomes a foldl of flipped evalComb
-- evalComb∘foldr: evaluating a right-fold composition (_◎_) of combinators on
-- input i equals a left fold of pointwise evaluation over the combinator
-- vector.  Induction on the vector; the n = zero input case is impossible.
evalComb∘foldr : {n j : ℕ} → (i : ⟦ fromℕ n ⟧ ) → (c-vec : Vec (fromℕ n ⇛ fromℕ n) j) → evalComb (foldr (λ _ → fromℕ n ⇛ fromℕ n) _◎_ id⇛ c-vec) i ≡ foldl (λ _ → ⟦ fromℕ n ⟧) (λ i c → evalComb c i) i c-vec
evalComb∘foldr {zero} () v
evalComb∘foldr {suc _} i [] = refl -- i
evalComb∘foldr {suc n} i (c ∷ cv) = evalComb∘foldr {suc n} (evalComb c i) cv
-- foldl on a map: move the function in; specialize to this case.
-- foldl∘map: pushes the folded function inside a map, specialized to the
-- lookup-and-combine shape used by lemma3 below.  Induction on z; the
-- accumulator absorbs one element per step.
foldl∘map : {n m : ℕ} {A C : Set} (f : C → A → C)
  (j : C) (g : F.Fin m → F.Fin m → A) → (v : Vec (F.Fin m) m) → (z : Vec (F.Fin m) n) →
  foldl (λ _ → C) f j (map (λ i → g (v !! i) i) z) ≡
  foldl (λ _ → C) (λ h i → i h) j (map (λ x₂ → λ w → f w (g (v !! x₂) x₂)) z)
foldl∘map {zero} f j g v [] = refl -- j
foldl∘map {suc n} {zero} f j g [] (() ∷ z)
foldl∘map {suc n} {suc m} f j g v (x ∷ z) = foldl∘map f (f j (g (lookup x v) x)) g v z
-- lemma3: unfolds the evaluation of (vecToComb v) at a basis input into a
-- left fold of the individual single-swap combinators.  Chains the two
-- rewriting lemmas above via equational reasoning.
lemma3 : {n : ℕ} →
  (v : Vec (F.Fin n) n) → (i : F.Fin n) →
  (evalComb (vecToComb v) (finToVal i)) ≡
  foldl
    (λ _ → ⟦ fromℕ n ⟧)
    (λ h i₁ → i₁ h)
    (finToVal i)
    (replicate
      (λ x₂ → evalComb (makeSingleComb (lookup x₂ v) x₂)) ⊛
      tabulate (λ x → x))
lemma3 {n} v i = begin
  evalComb (vecToComb v) (finToVal i)
    -- Step 1: turn the composed combinator into a left fold of evaluations.
    ≡⟨ evalComb∘foldr
       (finToVal i)
       (map (λ i → makeSingleComb (v !! i) i) (upTo n)) ⟩
  foldl
    (λ _ → ⟦ fromℕ n ⟧)
    (λ j c → evalComb c j)
    (finToVal i)
    (map (λ i → makeSingleComb (v !! i) i) (upTo n))
    -- Step 2: push evaluation inside the map via foldl∘map.
    ≡⟨ foldl∘map (λ j c → evalComb c j) (finToVal i) makeSingleComb v (upTo n) ⟩
  foldl
    (λ _ → ⟦ fromℕ n ⟧)
    (λ h i₁ → i₁ h)
    (finToVal i)
    (replicate
      (λ x₂ → evalComb (makeSingleComb (lookup x₂ v) x₂)) ⊛
      tabulate id)
  ∎
|
{"hexsha": "abc1fa9ad4c6b1fb3f374b9d446062292706775c", "size": 7445, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Univalence/OldUnivalence/Obsolete.agda", "max_stars_repo_name": "JacquesCarette/pi-dual", "max_stars_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2015-08-18T21:40:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-05T01:07:57.000Z", "max_issues_repo_path": "Univalence/OldUnivalence/Obsolete.agda", "max_issues_repo_name": "JacquesCarette/pi-dual", "max_issues_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-06-07T16:27:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-29T20:41:23.000Z", "max_forks_repo_path": "Univalence/OldUnivalence/Obsolete.agda", "max_forks_repo_name": "JacquesCarette/pi-dual", "max_forks_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-05-29T01:56:33.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-10T09:47:13.000Z", "avg_line_length": 37.7918781726, "max_line_length": 206, "alphanum_fraction": 0.5242444594, "num_tokens": 2808}
|
import numpy as np
import pandas as pd
from pandas_datareader import data
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.callbacks import History, CSVLogger
"""
Created by Mohsen Naghipourfar on 7/23/18.
Email : mn7697np@gmail.com or naghipourfar@ce.sharif.edu
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
tickers = ['AAPL', 'MSFT', '^GSPC']  # Apple, Microsoft and S&P500 index
# We would like all available data from 01/01/2000 until 12/31/2016.
# NOTE(review): the dates below actually start at 2010-01-01, not 2000 -
# the comment above is stale.  Also note `tickers` is never used; only
# 'INPX' is fetched below.
start_date = '2010-01-01'
end_date = '2016-12-31'
# NOTE(review): the 'google' data source of pandas-datareader was shut down
# by Google Finance and raises an error in current library versions; this
# call likely needs a different data source to run today.
panel_data = data.DataReader('INPX', 'google', start_date, end_date)
''' returns a panel object (3D Object)
1st dim: various fields of finance -> open, close, high, low, ...
2nd dim: date
3rd dim: instrument identifiers
'''
# df_data = panel_data.to_frame()
# Business-day index used to re-align the series below.
all_weekdays = pd.date_range(start_date, end_date, freq='B')
# NOTE(review): most datareader backends expose 'Close' (capitalized) -
# confirm the lowercase key against the actual returned columns.
close = panel_data['close']
close = close.reindex(all_weekdays)
# Forward-fill gaps introduced by the business-day reindex (holidays).
# NOTE(review): fillna(method='ffill') is deprecated in recent pandas;
# the modern spelling is .ffill().
close = close.fillna(method='ffill')
# 20- and 100-business-day moving averages of the closing price.
short_rolling = close.rolling(window=20).mean()
long_rolling = close.rolling(window=100).mean()
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(close.index, close, label='close')
ax.plot(short_rolling.index, short_rolling, label='20 days rolling')
ax.plot(long_rolling.index, long_rolling, label='100 days rolling')
ax.set_xlabel('Date')
ax.set_ylabel('Adjusted closing price ($)')
ax.legend()
plt.show()
|
{"hexsha": "f7210a7be7a7a9686e849af8805af4b5236ca87c", "size": 1558, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/finance.py", "max_stars_repo_name": "Naghipourfar/TraderBot", "max_stars_repo_head_hexsha": "2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-06T09:45:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T04:48:07.000Z", "max_issues_repo_path": "Code/finance.py", "max_issues_repo_name": "Naghipourfar/TraderBot", "max_issues_repo_head_hexsha": "2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/finance.py", "max_forks_repo_name": "Naghipourfar/TraderBot", "max_forks_repo_head_hexsha": "2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-07T05:20:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-07T05:20:24.000Z", "avg_line_length": 28.3272727273, "max_line_length": 72, "alphanum_fraction": 0.7342747112, "include": true, "reason": "import numpy", "num_tokens": 433}
|
File = 'heatmap_data.txt'
DataW = 512
DataH = 512
SmoothWindowSize = 10

import numpy as np


def load_counts(path, width, height):
    """Return a (height, width) int array of hit counts parsed from *path*.

    Each line must contain exactly four whitespace-separated fields; the
    first two are the x and y coordinates of one hit.  Exits with status 1
    on a malformed line (same behaviour as the original script).
    """
    counts = np.zeros((height, width), dtype=int)
    # 'with' guarantees the file is closed (the original leaked the handle).
    with open(path, 'r') as f:
        for line in f:
            point = line.split()
            if len(point) != 4:
                raise SystemExit(1)
            counts[int(point[1]), int(point[0])] += 1
    return counts


def smooth(counts, window):
    """Box-sum *counts* over a symmetric, inclusive square window.

    Fixes two problems with the original nested-loop version: the window
    ranges `range(i-w, i+w)` excluded the upper edge (asymmetric filter,
    off-by-one), and the pure-Python quadruple loop was extremely slow.
    Here each output cell sums the clipped slice [i-w, i+w] x [j-w, j+w].
    """
    height, width = counts.shape
    out = np.zeros_like(counts)
    for i in range(height):
        lo_i = max(0, i - window)
        hi_i = min(height, i + window + 1)
        for j in range(width):
            lo_j = max(0, j - window)
            hi_j = min(width, j + window + 1)
            out[i, j] = counts[lo_i:hi_i, lo_j:hi_j].sum()
    return out


if __name__ == "__main__":
    # matplotlib is imported lazily here so that the parsing/smoothing
    # helpers above can be imported without a display stack installed.
    import matplotlib.pyplot as plt

    data = load_counts(File, DataW, DataH)
    color = smooth(data, SmoothWindowSize)
    plt.pcolor(color)
    plt.axis('tight')
    plt.show()
|
{"hexsha": "8ea227ea56460704ab36f3d75b288b029e933fea", "size": 695, "ext": "py", "lang": "Python", "max_stars_repo_path": "Viewer/HeatmapViewer.py", "max_stars_repo_name": "mrfreire/heatmap", "max_stars_repo_head_hexsha": "131decc091dc7c78a683078629fb3b7dbfb1d7b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Viewer/HeatmapViewer.py", "max_issues_repo_name": "mrfreire/heatmap", "max_issues_repo_head_hexsha": "131decc091dc7c78a683078629fb3b7dbfb1d7b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Viewer/HeatmapViewer.py", "max_forks_repo_name": "mrfreire/heatmap", "max_forks_repo_head_hexsha": "131decc091dc7c78a683078629fb3b7dbfb1d7b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.4411764706, "max_line_length": 59, "alphanum_fraction": 0.6446043165, "include": true, "reason": "import numpy", "num_tokens": 219}
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:charles
# datetime:18-10-11 下午8:28
# software:PyCharm
import component as ct
import numpy as np
import os
NUM_CLASS = 8
def static_data(dir):
    """Print per-class label counts for every data file under *dir*.

    Each file is expected to hold an array reshapeable to (-1, 6), with the
    class label stored in column 5.  Output format (file name line, count
    list, blank line) is identical to the original implementation.
    """
    tool = ct.InputData()
    names = tool.load_subnames(dir)
    for file_name in names:
        file_path = os.path.join(dir, file_name)
        ndata = np.reshape(np.load(file_path), (-1, 6))
        labels = ndata[:, 5]
        # One vectorized equality count per class.  (The original re-defined
        # a helper function inside the loop and materialized a masked copy
        # of the array for every class.)
        weight = [int(np.count_nonzero(labels == l)) for l in range(NUM_CLASS)]
        print("文件:" + file_name)
        print(weight)
        print("")
if __name__ == "__main__":
    print("统计生成文件数据:")
    # Candidate dataset directories.  Only the 180-degree split (npy_180)
    # is actually inspected below; the others are kept for quick switching.
    lidar_path = "/home/mengweiliang/lzh/SqueezeSeg/data/lidar_2d"
    npy_path = "/home/mengweiliang/lzh/SqueezeSeg/data/npy"
    npy_180 = "/home/mengweiliang/lzh/SqueezeSeg/data/npy180"
    npy_360 = "/home/mengweiliang/lzh/SqueezeSeg/data/npy360"
    static_data(npy_180)
|
{"hexsha": "44281065ef73856e324abf61b994b7b19e1feb15", "size": 1215, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tools/statistic.py", "max_stars_repo_name": "zhearing/SqueezeSeg", "max_stars_repo_head_hexsha": "1c716bb536ed822e4574a249f55831ec37cfe881", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tools/statistic.py", "max_issues_repo_name": "zhearing/SqueezeSeg", "max_issues_repo_head_hexsha": "1c716bb536ed822e4574a249f55831ec37cfe881", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tools/statistic.py", "max_forks_repo_name": "zhearing/SqueezeSeg", "max_forks_repo_head_hexsha": "1c716bb536ed822e4574a249f55831ec37cfe881", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0909090909, "max_line_length": 66, "alphanum_fraction": 0.5654320988, "include": true, "reason": "import numpy", "num_tokens": 334}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 01:27:06 2020
@author: Xavier de Labriolle, Antoine Bendimerad
Last edit : 29/02/2020
==============================================================================
Information :
This python script uses the spherical coordinates system :
r = radius ; [0, np.inf()]
theta = inclination from the z axis ; [0, pi]
phi = rotation around the z axis ; [0, 2*pi]
They must be adapted to lat/lon coordinates by adding :
-pi/2 to theta
-pi to phi
Their equivalent in the cartesian coordinates is : sph2cart(r,theta,phi):
x=r*np.sin(theta)*np.cos(phi)
y=r*np.sin(theta)*np.sin(phi)
z=r*np.cos(theta)
The main source for the mathematics involved in this code is :
"Definition of Functionals of the Geopotential
and Their Calculation from Spherical Harmonic Models"
by Franz Barthelmes
for ICGEM
Revision: jan. 2013
Will be refered to as "GFZ" from now on.
The purpose of this script is to Display various graphs, plots to show:
"True" and "solved" Geoids
Topology ? - maybe make a separate file
Differences in coefficients
==============================================================================
"""
# =============================================================================
# LIBRARIES
# =============================================================================
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi, sin, cos
from GH_solve import *
from GH_generate import *
# =============================================================================
# DISPLAY FUNCTIONS
# =============================================================================
def Plot_Array_Diff(HS_nm_slv, HC_nm_slv, fig_num = 6):
    """Plot the difference between official and solved harmonic coefficients.

    Parameters
    ----------
    HS_nm_slv : 2D array of solved SINE harmonic coefficients
    HC_nm_slv : 2D array of solved COSINE harmonic coefficients
    fig_num : matplotlib figure number to draw into (default 6)
    """
    print("plotting coeff difference")

    # Fetch the official coefficients and crop them to the solved degree.
    HC, HS = Fetch_Coef()
    # BUG FIX: NumPy slicing returns *views*, so the original in-place `-=`
    # on HS[...]/HC[...] silently corrupted the arrays returned by
    # Fetch_Coef().  Out-of-place subtraction allocates fresh arrays.
    HS_nm_sz = HS[:len(HS_nm_slv), :len(HS_nm_slv)] - HS_nm_slv
    HC_nm_sz = HC[:len(HC_nm_slv), :len(HC_nm_slv)] - HC_nm_slv

    plt.figure(fig_num)
    plt.clf()
    plt.suptitle("Harmonic coeff diference between official and solved; degree: "+str(len(HS_nm_sz)-1))

    # One line per degree n, plotted over the orders m = 0..n.
    for n in range (0, len(HC_nm_sz)):
        Ms_n = np.arange(0, n+1)
        HC_ni = HC_nm_sz[n, :n+1]
        HS_ni = HS_nm_sz[n, :n+1]
        plt.subplot(211)
        plt.plot(Ms_n, HC_ni,'-*', label='n='+str(n))
        plt.subplot(212)
        plt.plot(Ms_n, HS_ni,'-*', label='n='+str(n))

    plt.subplot(211)
    plt.ylabel("COSINE coeff diff")
    plt.grid(True)
    plt.legend(loc = 'upper right', title = 'Degree n', fontsize = 5)

    plt.subplot(212)
    plt.ylabel("SINE coeff diff")
    plt.grid(True)
    plt.xlabel("order m of derivation (log)")

    plt.show()
# =============================================================================
# MAIN
# =============================================================================
# The triple-quoted string below is a bare expression statement used as a
# block comment describing the intended workflow; it has no runtime effect.
"""
Process :
1. Generate the acceleration values (from orbit data or raw simulation)
2. Solve for the coefficients using the functions in this script
3. Generate a Geoid map from solved coefficients
4. Compare Geoids or coefficients
"""
# Confirmation printed when the module is imported/executed.
print("\nGH_display done")
|
{"hexsha": "fe3f6634c1bf98b71dfdba585b861d780fc77ba4", "size": 3666, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/GH_display.py", "max_stars_repo_name": "TOLOSAT/gravimetry-payload", "max_stars_repo_head_hexsha": "0d8a24af1015a9e9bdc5231b51636152d2cc3dd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/GH_display.py", "max_issues_repo_name": "TOLOSAT/gravimetry-payload", "max_issues_repo_head_hexsha": "0d8a24af1015a9e9bdc5231b51636152d2cc3dd6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/GH_display.py", "max_forks_repo_name": "TOLOSAT/gravimetry-payload", "max_forks_repo_head_hexsha": "0d8a24af1015a9e9bdc5231b51636152d2cc3dd6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0677966102, "max_line_length": 103, "alphanum_fraction": 0.5043644299, "include": true, "reason": "import numpy,from numpy", "num_tokens": 837}
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import datetime as dt
import numpy as np
import pandas as pd
import pytz
from netCDF4 import Dataset
from timezonefinder import TimezoneFinder
import aacgmv2
import traceback
import sys
sys.path.append("sd/")
sys.path.append("sd_cartopy/")
import get_sd_data as gsd
import utils
import rad_fov
import pydarn
tf = TimezoneFinder(in_memory=True)
def convert_geomag(x):
    """Attach AACGM magnetic coordinates to a record.

    `x` is a pandas row (or dict-like) with "lat", "lon" and "date" fields.
    Magnetic latitude/longitude/local-time are computed at 150 km altitude,
    rounded to two decimals, and stored back on the record.
    """
    coords = aacgmv2.get_aacgm_coord(x["lat"], x["lon"], 150, x["date"])
    x["mlat"], x["mlon"], x["mlt"] = [np.round(c, 2) for c in coords]
    return x
def get_isr_data(f="stats/isr-jro-data/jul20050907_avg_150km.001.txt"):
    """Load JRO ISR 150-km drift observations into a DataFrame.

    Parses the whitespace-delimited text file `f` (header line skipped,
    "missing" entries become NaN), converts the timestamps from the site's
    local timezone to UTC shifted by -5 h, pins the day to the 7th, and adds
    AACGM magnetic coordinates per record via `convert_geomag`.

    Returns a pandas DataFrame with columns:
    date, lat, lon, vipn2, dvipn2, vipe1, dvipe1, mlat, mlon, mlt.
    """
    dates, lat, lon, vipn2, dvipn2, vipe1, dvipe1 = [], [], [], [], [], [], []
    # Use a distinct name for the handle: the original `with open(f) as f`
    # shadowed the filename parameter.
    with open(f, "r") as fh:
        lines = fh.readlines()
    for l in lines[1:]:
        # Split on runs of spaces; drop empty fields.
        l = list(filter(None, l.replace("\n", "").replace("missing", "NaN").split(" ")))
        d = dt.datetime(int(l[0]), int(l[1]), int(l[2]), int(l[3]), int(l[4]), int(l[5]))
        dates.append(d)
        lat.append(float(l[6]))
        lon.append(float(l[7]))
        vipn2.append(float(l[11]))
        dvipn2.append(float(l[12]))
        vipe1.append(float(l[13]))
        dvipe1.append(float(l[14]))
    u = pd.DataFrame()
    u["date"], u["lat"], u["lon"], u["vipn2"], u["dvipn2"], u["vipe1"], u["dvipe1"] = \
        dates, lat, lon, vipn2, dvipn2, vipe1, dvipe1
    # Localize timestamps to the site's timezone, convert to UTC, shift -5 h.
    local_time_zone = tf.timezone_at(lng=u.lon.mean(), lat=u.lat.mean())
    timezone = pytz.timezone(local_time_zone)
    u.date = [timezone.localize(d).astimezone(pytz.utc) + dt.timedelta(hours=-5) for d in u.date]
    # Force every timestamp onto the 7th (event day).
    u.date = [d.replace(day=7) for d in u.date]
    u = u.apply(convert_geomag, axis=1)
    return u
def get_index(lats, lons, lat, lon):
    """Return (i, j): indices of the grid latitude/longitude nearest (lat, lon)."""
    lat_idx = np.argmin(np.abs(lats - lat))
    lon_idx = np.argmin(np.abs(lons - lon))
    return lat_idx, lon_idx
def fetch_file(o):
    """Sample one WACCM-X snapshot at requested magnetic/geographic points.

    `o` is a tuple (date, mlats, mlons, glats, glons).  The matching
    gzip-compressed netCDF file is decompressed, conductances and E-fields
    are sampled on the magnetic grid and ion winds on the geographic grid
    (at the model levels nearest 300 km and 100 km), and the file is
    re-compressed afterwards.

    Returns a DataFrame with one row per requested location (empty columns /
    no rows if the file does not exist or cannot be read).
    """
    d, mlatsx, mlonsx, glatsx, glonsx = o[0], o[1], o[2], o[3], o[4]
    f = "data/op/2005.09.07.17.40/waccmx/%s.nc.gz" % d.strftime("%Y.%m.%d.%H.%M")
    pc, hc, ed1, ed2, dns, uimax, vimax, wimax, uimin, vimin, wimin = \
        [], [], [], [], [], [], [], [], [], [], []
    o = pd.DataFrame()
    if os.path.exists(f):
        os.system("gzip -d " + f)
        f = f.replace(".gz", "")
        ds = Dataset(f)
        try:
            mlats, mlons = ds.variables["mlat"][:], ds.variables["mlon"][:]
            glats, glons = ds.variables["lat"][:], ds.variables["lon"][:]
            ED1, ED2 = ds.variables["ED1f"][:], ds.variables["ED2f"][:]
            PC, HC = ds.variables["PCf"][:], ds.variables["HCf"][:]
            Z = ds.variables["ZGf"][:]*1e-3  # geometric height converted to km
            UI, VI, WI = ds.variables["UIf"][:], ds.variables["VIf"][:], ds.variables["WIf"][:]
            for mlat, mlon, glat, glon in zip(mlatsx, mlonsx, glatsx, glonsx):
                # Conductances / E-field components on the magnetic grid.
                i, j = get_index(mlats, mlons, mlat, mlon)
                pc.append(PC[0, i, j])
                hc.append(HC[0, i, j])
                ed1.append(ED1[0, i, j])
                ed2.append(ED2[0, i, j])
                dns.append(d)
                # Ion winds on the geographic grid, nearest ~300 km / ~100 km.
                i, j = get_index(glats, glons, glat, glon)
                kmax, kmin = np.argmin(np.abs(Z[0, :, i, j]-300)), np.argmin(np.abs(Z[0, :, i, j]-100))
                uimax.append(UI[0, kmax, i, j])
                vimax.append(VI[0, kmax, i, j])
                wimax.append(WI[0, kmax, i, j])
                uimin.append(UI[0, kmin, i, j])
                vimin.append(VI[0, kmin, i, j])
                wimin.append(WI[0, kmin, i, j])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; unreadable files are logged and skipped.
            traceback.print_exc()
        ds.close()
        os.system("gzip " + f)
        print(" Done date:", d)
    o["date"], o["PC"], o["HC"], o["ED1"], o["ED2"] = dns, pc, hc, ed1, ed2
    o["UI_max"], o["VI_max"], o["WI_max"] = uimax, vimax, wimax
    o["UI_min"], o["VI_min"], o["WI_min"] = uimin, vimin, wimin
    o["mlat"], o["mlon"], o["glat"], o["glon"] = mlatsx, mlonsx, glatsx, glonsx
    return o
def get_field_data(dates, mlats, mlons, glats, glons, dname):
    """Return model samples for all `dates`, cached as CSV at `dname`.

    If `dname` exists it is read back (with the `date` column parsed);
    otherwise each date is processed in parallel with `fetch_file` and the
    concatenated result is written to `dname` for reuse.
    """
    if os.path.exists(dname):
        _o = pd.read_csv(dname, parse_dates=["date"])
    else:
        import multiprocessing
        objs = [(d, mlats, mlons, glats, glons) for d in dates]
        _o = pd.DataFrame()
        # Context-manage the pool so worker processes are always reclaimed
        # (the original Pool(24) was never closed/joined).
        with multiprocessing.Pool(24) as p:
            for o in p.imap(fetch_file, objs):
                if len(o) > 0:
                    _o = pd.concat([_o, o])
        _o.to_csv(dname, index=False, header=True)
    return _o
def get_sd_data_files(dates, dname, rad="wal"):
    """Sample the model fields at the centre of a SuperDARN radar's FoV.

    The radar hardware file gives the field of view; its mean geographic
    location is converted to AACGM magnetic coordinates at 250 km altitude
    and forwarded to `get_field_data`, which caches results in `dname`.
    """
    hdw = pydarn.read_hdw_file(rad)
    fov = rad_fov.CalcFov(hdw=hdw)
    glats = np.array([fov.latFull.mean()])
    glons = np.array([fov.lonFull.mean()])
    mag = [aacgmv2.get_aacgm_coord(la, lo, 250, dates[0])[:2]
           for la, lo in zip(glats, glons)]
    mlats = np.array([m[0] for m in mag])
    mlons = np.array([m[1] for m in mag])
    return get_field_data(dates, mlats, mlons, glats, glons, dname)
def get_EC_field(d, key="f"):
    """Read full-grid conductances and E-fields from one WACCM-X snapshot.

    `key` selects the variable suffix ("f" by default; callers use "d" for
    the baseline variables).  Returns
    (PC, HC, ED1, ED2, mlats, mlons, mlt).

    Robustness fix: all outputs are pre-initialised to None, so a missing or
    unreadable file yields a tuple of Nones instead of the NameError the
    original raised at `return`.
    """
    PC = HC = ED1 = ED2 = mlats = mlons = mlt = None
    f = "data/op/2005.09.07.17.40/waccmx/%s.nc.gz" % d.strftime("%Y.%m.%d.%H.%M")
    if os.path.exists(f):
        os.system("gzip -d " + f)
        f = f.replace(".gz", "")
        ds = Dataset(f)
        try:
            mlats, mlons = ds.variables["mlat"][:], ds.variables["mlon"][:]
            mlt = aacgmv2.convert_mlt(mlons, d)
            ED1, ED2 = ds.variables["ED1" + key][0, :, :], ds.variables["ED2" + key][0, :, :]
            PC, HC = ds.variables["PC" + key][0, :, :], ds.variables["HC" + key][0, :, :]
        except Exception:
            # Narrowed from a bare `except:`; interrupts still propagate.
            traceback.print_exc()
        ds.close()
        os.system("gzip " + f)
    print(" Done date:", d)
    return PC, HC, ED1, ED2, mlats, mlons, mlt
def zips():
    """Re-compress any decompressed .nc snapshot files left behind."""
    import glob
    for path in glob.glob("data/op/2005.09.07.17.40/waccmx/*.nc"):
        print(path)
        os.system("gzip " + path)
    return
# -----------------------------------------------------------------------------
# Script driver: `case` selects which analysis/figure branch runs below.
# -----------------------------------------------------------------------------
case = "drift-analysis"
if case == "run-cmd":
    # Launch the model simulation for the 2005-09-07 17:40 event in the
    # background.
    cmd = "nohup python model_sim.py -r wal -ev 2005-09-07T17:40 -s 2005-09-07T17 -e 2005-09-07T18 > /dev/null 2>&1 &"
    os.system(cmd)
elif case == "run-plot":
    # Same simulation, with the plotting switch (-ps) enabled.
    cmd = "nohup python model_sim.py -r wal -ev 2005-09-07T17:40 -s 2005-09-07T17 -e 2005-09-07T18 -ps > /dev/null 2>&1 &"
    os.system(cmd)
elif case == "field-analysis":
    # Panel (a): vertical ion drift from JRO ISR vs. WACCM-X at the JRO
    # location; panel (b): WACCM-X drift vs. SuperDARN WAL line-of-sight
    # velocity.
    f = "stats/isr-jro-data/jul20050907_avg_150km.001.txt"
    dates = [dt.datetime(2005,9,7,14) + dt.timedelta(minutes=i) for i in range(421)]
    u = get_isr_data(f)
    o = get_field_data(dates, np.array([u.mlat.tolist()[0]]), np.array([u.mlon.tolist()[0]]),
            np.array([u.lat.tolist()[0]]), np.array([u.lon.tolist()[0]]), "stats/isr-jro-data/jro_model.csv")
    zips()
    fig = plt.figure(figsize=(7,6), dpi=120)
    ax = fig.add_subplot(211)
    ax.text(0.9,0.6, "(a)", ha="center", va="center", transform=ax.transAxes)
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    ax.set_ylabel(r"$\omega_I$, $ms^{-1}$")
    ax.errorbar(u.date, u.vipn2, yerr=u.dvipn2, fmt="o", ecolor="r", color="b", capthick=0.5, lw=0.5, ms=1., capsize=1, label="JRO")
    # Mid-point of the WI_min/WI_max model winds (~100 km / ~300 km levels in
    # fetch_file), with the two levels as asymmetric error bars.
    y = 0.5*(o.WI_min+o.WI_max)
    up, lo = o.WI_max-y, y-o.WI_min
    ax.errorbar(o.date[::5], y[::5], yerr=[lo[::5], up[::5]], fmt="o", ecolor="r", color="g", capthick=0.5,
            lw=0.5, ms=1., capsize=1, label="WACCM-X")
    ax.legend(loc=1)
    ax.set_ylim(0,30)
    ax.set_xlim(dt.datetime(2005,9,7,14), dt.datetime(2005,9,7,21))
    # Reference times 17:20 / 17:37 UT — NOTE(review): presumably event
    # onset/peak markers; confirm against the event definition.
    ax.axvline(dt.datetime(2005,9,7,17,20), color="b", ls="--", lw=0.8)
    ax.axvline(dt.datetime(2005,9,7,17,37), color="r", ls="--", lw=0.8)
    ax.text(0.01, 0.9, "MLAT, MLON: %.1f,%.1f"%(o.mlat.tolist()[0], o.mlon.tolist()[0]), ha="left", va="center", transform=ax.transAxes)
    ax.text(0.99, 1.05, "Radar: JRO, ISR", ha="right", va="center", transform=ax.transAxes)
    ax.text(0.01, 1.05, "Date: 2005-09-07", ha="left", va="center", transform=ax.transAxes)
    ax = fig.add_subplot(212)
    dates = [dt.datetime(2005,9,7,14) + dt.timedelta(minutes=i) for i in range(420)]
    o = get_sd_data_files(dates, "stats/isr-jro-data/wal_model.csv", rad="wal")
    o = o.groupby("date").agg([np.mean, np.std]).reset_index()
    y = 0.5*(o.WI_min["mean"]+o.WI_max["mean"])
    up, lo = o.WI_max["mean"]-y, y-o.WI_min["mean"]
    ax.errorbar(o.date[::5], y[::5], yerr=[lo[::5], up[::5]], fmt="o", ecolor="r", color="g", capthick=0.5,
            lw=0.5, ms=1., capsize=1, label="WACCM-X")
    ax.text(0.01, 0.9, "MLAT, MLON: %.1f,%.1f"%(o.mlat["mean"].tolist()[0], o.mlon["mean"].tolist()[0]),
            ha="left", va="center", transform=ax.transAxes)
    ax.set_ylabel(r"$\omega_I (WACCM-X)$, $ms^{-1}$", fontdict={"color":"darkgreen"})
    ax.set_ylim(-5, 10)
    ax.axvline(dt.datetime(2005,9,7,17,20), color="b", ls="--", lw=0.8)
    ax.axvline(dt.datetime(2005,9,7,17,37), color="r", ls="--", lw=0.8)
    ax.text(0.9,0.6, "(b)", ha="center", va="center", transform=ax.transAxes)
    # Second y-axis: SuperDARN WAL median LoS velocity, median-filtered and
    # resampled to 5-minute bins.
    ax = ax.twinx()
    fd = gsd.FetchData("wal", [dt.datetime(2005,9,7,14), dt.datetime(2005,9,7,21)])
    beams, _ = fd.fetch_data(by="beams")
    u = fd.convert_to_pandas(beams)
    u = u[(u.slist>=10) & (np.abs(u.v_e)<100.)]
    X, Y, Z = utils.get_gridded_parameters(u, xparam="time", yparam="slist", zparam="v")
    Z = utils.medfilt2D_weight(Z, tau=0.99)
    M = np.nanmedian(Z.T, axis=0)
    u = pd.DataFrame(); u["date"], u["m"]= X[0,:], M
    md, sd = u.set_index("date").resample("300s").mean().reset_index(), u.set_index("date").resample("300s").std().reset_index()
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    ax.errorbar(md.date, md.m, yerr=0.3*sd.m, fmt="o", ecolor="r", color="b", capthick=0.5,
            lw=0.5, ms=1., capsize=1, label="WAL")
    ax.text(0.99, 1.05, "Radar: WAL, SD", ha="right", va="center", transform=ax.transAxes)
    ax.set_xlim(dt.datetime(2005,9,7,14), dt.datetime(2005,9,7,21))
    ax.set_ylabel(r"LoS, $ms^{-1}$", fontdict={"color":"b"})
    ax.set_ylim(-10, 50)
    ax.set_xlabel("Time, UT")
    fig.autofmt_xdate()
    fig.savefig("stats/plots/figures/jro_field.png", bbox_inches="tight")
elif case == "drift-analysis":
    # Conductances (Pedersen/Hall/derived) and E-field at JRO (panel a) and
    # at the WAL field-of-view centre (panel b).
    f = "stats/isr-jro-data/jul20050907_avg_150km.001.txt"
    #dates = [dt.datetime(2005,9,7,0,10) + dt.timedelta(minutes=i) for i in range(1420)]
    dates = [dt.datetime(2005,9,7,14) + dt.timedelta(minutes=i) for i in range(421)]
    u = get_isr_data(f)
    o = get_field_data(dates, np.array([u.mlat.tolist()[0]]), np.array([u.mlon.tolist()[0]]),
            np.array([u.lat.tolist()[0]]), np.array([u.lon.tolist()[0]]), "stats/isr-jro-data/jro_model.csv")
    # Derived conductance Sigma_C = Sigma_P * (1 + Sigma_H^2 / Sigma_P^2)
    # — NOTE(review): appears to be the Cowling conductance; confirm.
    o["CC"] = o.PC * ( 1 + o.HC**2/o.PC**2 )
    zips()
    fig = plt.figure(figsize=(7,6), dpi=120)
    ax = fig.add_subplot(211)
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    ax.plot(o.date, o.PC, "ro", ms=1, label=r"$\Sigma_P$")
    ax.plot(o.date, o.HC, "bo", ms=1, label=r"$\Sigma_H$")
    ax.plot(o.date, o.CC, "mo", ms=1, label=r"$\Sigma_C$")
    ax.set_ylabel(r"$\Sigma$, Siemens")
    ax.set_ylim(50, 600)
    ax.text(0.01, 0.9, "MLAT, MLON: %.1f,%.1f"%(o.mlat.tolist()[0], o.mlon.tolist()[0]), ha="left", va="center", transform=ax.transAxes)
    ax.text(0.99, 1.05, "Radar: JRO, ISR", ha="right", va="center", transform=ax.transAxes)
    ax.text(0.01, 1.05, "Date: 2005-09-07", ha="left", va="center", transform=ax.transAxes)
    ax.text(0.9,0.5,"(a)",ha="center", va="center",transform=ax.transAxes)
    ax.legend(loc=1)
    ax = ax.twinx()
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    ax.plot(o.date, o.ED1*1e3, "go", ms=1)
    ax.set_xlim(dt.datetime(2005,9,7,14), dt.datetime(2005,9,7,21))
    ax.set_ylabel(r"$\epsilon$, $\times 10^{-3} Vm^{-1}$", fontdict={"color":"darkgreen"})
    ax.set_ylim(2e-1, 6e-1)
    ax.axvline(dt.datetime(2005,9,7,17,20), color="b", ls="--", lw=0.8)
    ax.axvline(dt.datetime(2005,9,7,17,37), color="r", ls="--", lw=0.8)
    ax = fig.add_subplot(212)
    o = get_sd_data_files(dates, "stats/isr-jro-data/wal_model.csv", rad="wal")
    o["CC"] = o.PC * ( 1 + o.HC**2/o.PC**2 )
    o = o.groupby("date").agg([np.mean, np.std]).reset_index()
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    # NOTE(review): the error bars below are synthetic (np.random.uniform),
    # not derived from the data.
    ax.errorbar(o.date, o.PC["mean"], yerr=np.random.uniform(1,2,len(o)), fmt="o", ecolor="k", color="r", capthick=0.5,
            lw=0.15, ms=1., capsize=0.3, label=r"$\Sigma_P$")
    ax.errorbar(o.date, o.HC["mean"], yerr=np.random.uniform(1,2,len(o)), fmt="o", ecolor="k", color="b", capthick=0.5,
            lw=0.15, ms=1., capsize=0.3, label=r"$\Sigma_H$")
    ax.errorbar(o.date, o.CC["mean"], yerr=np.random.uniform(1,2,len(o)), fmt="o", ecolor="k", color="m", capthick=0.5,
            lw=0.15, ms=1., capsize=0.3, label=r"$\Sigma_C$")
    ax.set_ylabel(r"$\Sigma$, Siemens")
    ax.set_ylim(0, 60)
    ax.text(0.01, 0.9, "MLAT, MLON: %.1f,%.1f"%(o.mlat["mean"].tolist()[0], o.mlon["mean"].tolist()[0]),
            ha="left", va="center", transform=ax.transAxes)
    ax.text(0.9,0.5,"(b)",ha="center", va="center",transform=ax.transAxes)
    ax.text(0.99, 1.05, "Radar: WAL, SD", ha="right", va="center", transform=ax.transAxes)
    ax.set_xlim(dt.datetime(2005,9,7,14), dt.datetime(2005,9,7,21))
    ax.legend(loc=1)
    ax.axvline(dt.datetime(2005,9,7,17,20), color="b", ls="--", lw=0.8)
    ax.axvline(dt.datetime(2005,9,7,17,37), color="r", ls="--", lw=0.8)
    ax.set_xlabel("Time, UT")
    ax = ax.twinx()
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
    ax.errorbar(o.date, o.ED1["mean"]*1e3, yerr=np.random.uniform(0.1,0.5,len(o)), fmt="o", ecolor="k", color="g", capthick=0.5,
            lw=0.15, ms=1., capsize=0.3)
    ax.set_xlim(dt.datetime(2005,9,7,14), dt.datetime(2005,9,7,21))
    ax.set_ylabel(r"$\epsilon$, $\times 10^{-3} Vm^{-1}$", fontdict={"color":"darkgreen"})
    ax.set_ylim(-2, 6)
    fig.autofmt_xdate()
    fig.savefig("stats/plots/figures/jro_cds.png", bbox_inches="tight")
elif case == "conductance-analysis":
d = dt.datetime(2005,9,7,17,40)
PCi, HCi, ED1i, ED2i, mlats, mlons, mlt = get_EC_field(d)
PCb, HCb, ED1b, ED2b, mlats, mlons, mlt = get_EC_field(b, "d")
id_lon = np.where(mlons==0)[0][0]
fig = plt.figure(figsize=(3,9), dpi=120)
ax = fig.add_subplot(311)
ax.set_ylabel(r"$\epsilon$, $\times 10^{-3} vm^{-1}$")
ax.plot(mlats, ED1i[:, id_lon]*1e3, "r", lw=1, label=r"MLON=$0^o$, Flare")
ax.plot(mlats, ED1b[:, id_lon]*1e3, "r--", lw=1, label=r"MLON=$0^o$, w/o Flare")
ax.axvline(9, color="b", lw=0.8, ls="--")
ax.axvline(53, color="green", lw=0.8, ls="--")
ax.legend(loc=1)
ax.set_xlim(-60,60)
ax.set_ylim(0,1)
ax = fig.add_subplot(312)
ax.plot(mlats, PCi[:, id_lon], "r", lw=1, label=r"MLON=$0^o$, Flare")
ax.plot(mlats, PCb[:, id_lon], "r--", lw=1, label=r"MLON=$0^o$, w/o Flare")
ax.set_xlim(-60,60)
ax.set_ylim(0,1e3)
ax.set_ylabel(r"$\Sigma_P$, Siemens")
ax = fig.add_subplot(313)
ax.plot(mlats, HCi[:, id_lon], "r", lw=1, label=r"MLON=$0^o$, Flare")
ax.plot(mlats, HCb[:, id_lon], "r--", lw=1, label=r"MLON=$0^o$, w/o Flare")
ax.set_xlim(-60,60)
ax.set_ylim(0,1e3)
ax.set_ylabel(r"$\Sigma_H$, Siemens")
ax.set_xlabel(r"MLAT, degrees ($^o$)")
fig.savefig("stats/plots/figures/jro_dist.png", bbox_inches="tight")
else: print(" Case not found:", case)
os.system("rm -rf *.log")
os.system("rm -rf __pycache__")
|
{"hexsha": "134c29e64b5827addb885302f9066d3c9ccc8d9e", "size": 15427, "ext": "py", "lang": "Python", "max_stars_repo_path": "code_rt_sd/expFlayer.py", "max_stars_repo_name": "shibaji7/Collaboration_NCAR", "max_stars_repo_head_hexsha": "c27e0ad8a1f0c6b2e66fa07e6cf57f98c4389899", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-12T14:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T14:40:49.000Z", "max_issues_repo_path": "code_rt_sd/expFlayer.py", "max_issues_repo_name": "shibaji7/Collaboration_NCAR", "max_issues_repo_head_hexsha": "c27e0ad8a1f0c6b2e66fa07e6cf57f98c4389899", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code_rt_sd/expFlayer.py", "max_forks_repo_name": "shibaji7/Collaboration_NCAR", "max_forks_repo_head_hexsha": "c27e0ad8a1f0c6b2e66fa07e6cf57f98c4389899", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.764516129, "max_line_length": 136, "alphanum_fraction": 0.5764568613, "include": true, "reason": "import numpy", "num_tokens": 5486}
|
# Import the version
from version import __version__
#
#
import os

# When the ASTROMODELS_DEBUG environment variable is set, skip the heavy
# source/function/model imports so the bare package can be imported quickly.
if os.environ.get("ASTROMODELS_DEBUG", None) is None:
    from .sources.point_source import PointSource
    from .sources.extended_source import ExtendedSource
    from .sources.particle_source import ParticleSource
    from .core.parameter import Parameter, IndependentVariable, SettingOutOfBounds
    from .functions.functions import *
    from .functions.priors import *
    from .functions.functions_2D import *
    from .functions.functions_3D import *
    from .functions.template_model import *
    from .functions. dark_matter.dm_models import *
    from .functions.function import list_functions, get_function_class
    from .core.model import Model
    # NOTE(review): Parameter is already imported above; this re-import is
    # redundant but harmless.
    from .core.parameter import Parameter
    from .core.spectral_component import SpectralComponent
    from .core.polarization import LinearPolarization, StokesPolarization
    from .core.model_parser import load_model, clone_model
    from .core.units import get_units
    from .core.memoization import use_astromodels_memoization
    from .core.serialization import *
    # Module-level handle to the package unit registry.
    astromodels_units = get_units()

import astropy.units as u
|
{"hexsha": "d37e2b3502126137a4f6f4ddeef9c3d9529edae2", "size": 1167, "ext": "py", "lang": "Python", "max_stars_repo_path": "astromodels/__init__.py", "max_stars_repo_name": "BjoernBiltzinger/astromodels", "max_stars_repo_head_hexsha": "d94a3d3bc607def2b5e3cd145c3922e0a00a7b15", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-05T18:36:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-05T18:36:59.000Z", "max_issues_repo_path": "astromodels/__init__.py", "max_issues_repo_name": "grburgess/astromodels", "max_issues_repo_head_hexsha": "a657411ca29de4a806838ba05f8a062f99fa1ab5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astromodels/__init__.py", "max_forks_repo_name": "grburgess/astromodels", "max_forks_repo_head_hexsha": "a657411ca29de4a806838ba05f8a062f99fa1ab5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3428571429, "max_line_length": 82, "alphanum_fraction": 0.7780634105, "include": true, "reason": "import astropy", "num_tokens": 254}
|
SUBROUTINE LA_TEST_SSPEVD( JOBS, UPLO, N, AP, W, Z, LDZ, WORK, LWORK, IWORK, LIWORK, INFO )
!
!  -- LAPACK95 interface driver routine (version 1.1) --
!     UNI-C, Denmark;
!     May 25, 1999
!
!  Purpose
!  =======
!  Test wrapper for LA_SPEVD (single-precision symmetric eigenproblem in
!  packed storage).  On entry, INFO encodes an error-injection request as
!  INFO = 100*I + J: the case index I selects one argument to corrupt
!  before calling LA_SPEVD, so that the interface's argument checking can
!  be exercised; the outcome is recorded via LA_AUX_AA01.
!
! .. Use Statements ..
   USE LA_PRECISION, ONLY: WP => SP
   USE F95_LAPACK, ONLY: LA_SPEVD
! .. Implicit Statement ..
   IMPLICIT NONE
! .. Scalar Arguments ..
   INTEGER, INTENT(IN) :: N, LDZ, LWORK, LIWORK
   INTEGER, INTENT(INOUT) :: INFO
   CHARACTER*1, INTENT(IN) :: JOBS, UPLO
! .. Array Arguments ..
   REAL(WP), INTENT(INOUT) :: AP(1: N*(N+1)/2)
   REAL(WP), INTENT(OUT):: W(N), Z(LDZ, N)
   INTEGER, INTENT(OUT) :: IWORK(1: LIWORK)
   REAL(WP) :: WORK(1:LWORK)
! .. Parameters ..
   CHARACTER(LEN=8), PARAMETER :: SRNAME = 'LA_SPEVD'
   CHARACTER(LEN=14), PARAMETER :: SRNAMT = 'LA_TEST_SSPEVD'
! .. Common blocks ..
   INTEGER :: INFOTC
   COMMON /LINFO95/ INFOTC
! .. Local Scalars ..
   INTEGER :: I, J, IAP, IW, IZ1, IZ2
   CHARACTER*1 :: IUPLO, IJOBS
! .. Local Arrays ..
   LOGICAL, SAVE :: CTEST = .TRUE., ETEST = .TRUE.
   LOGICAL LSAME
! .. Executable Statements ..
!  Default (valid) argument sizes and flags.
   IAP = N*(N+1)/2; IUPLO = UPLO; IW = N; IJOBS = JOBS
   IZ1 =MAX(1,N); IZ2 = N; IWORK = N
!  Decode the error-injection request: INFO = 100*I + J.
   I = INFO / 100; J = INFO - I*100
   SELECT CASE(I)
   CASE (1)
!     Case 1: truncate the packed matrix AP.
      IAP = IAP - 1
   CASE (2)
!     Case 2: truncate the eigenvalue array W.
      IW = IW - 1
   CASE (3)
!     Case 3: pass an invalid UPLO flag.
      IUPLO = 'T'
   CASE (4)
!     Case 4: request eigenvectors with a truncated Z.
      IZ2 = IZ2 - 1
      IJOBS = 'V'
   CASE(:-1,5:)
      CALL UESTOP(SRNAMT)
   END SELECT
   IF (LSAME(IJOBS, 'V')) THEN
      CALL LA_SPEVD(AP(1:IAP), W(1:IW), IUPLO, Z(1:IZ1, 1:IZ2),INFO )
   ELSE
      CALL LA_SPEVD(AP(1:IAP), W(1:IW), IUPLO, INFO=INFO )
   END IF
   CALL LA_AUX_AA01( I, CTEST, ETEST, SRNAMT )
END SUBROUTINE LA_TEST_SSPEVD
|
{"hexsha": "86e801c25209a88c388a2989012df22f6264d507", "size": 1725, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "TESTING/la_test_sspevd.f90", "max_stars_repo_name": "MattBurn/LAPACK95", "max_stars_repo_head_hexsha": "bcd9d4b706f4213a6a4c0ebb4521754ffeff3752", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-12-29T15:07:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T10:09:22.000Z", "max_issues_repo_path": "TESTING/la_test_sspevd.f90", "max_issues_repo_name": "MattBurn/LAPACK95", "max_issues_repo_head_hexsha": "bcd9d4b706f4213a6a4c0ebb4521754ffeff3752", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-30T15:38:47.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-31T06:45:11.000Z", "max_forks_repo_path": "TESTING/la_test_sspevd.f90", "max_forks_repo_name": "MattBurn/LAPACK95", "max_forks_repo_head_hexsha": "bcd9d4b706f4213a6a4c0ebb4521754ffeff3752", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-12-29T15:34:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-15T07:12:40.000Z", "avg_line_length": 29.2372881356, "max_line_length": 91, "alphanum_fraction": 0.5698550725, "num_tokens": 670}
|
import numpy as np
import torch
from sklearn.metrics import roc_auc_score, auc, precision_recall_curve
class Metric:
    r"""
    Base class for all metrics.

    Metrics measure the performance during the training and evaluation.

    Args:
        target (str): name of target property
        name (str): name used in logging for this metric. If set to `None`,
            the class name will be used (Default: None)
    """

    def __init__(self, target, name=None):
        self.target = target
        # Fall back to the concrete class name when no logging name is given.
        self.name = self.__class__.__name__ if name is None else name
        self.loss = 0.0
        self.n_entries = 0.0

    def reset(self):
        """Reset metric attributes after aggregation to collect new batches."""
        self.loss = 0.0
        self.n_entries = 0.0

    def add_batch(self, batch, results):
        """Accumulate one batch of targets/predictions into the metric."""
        reference = batch[self.target]
        prediction = results[self.target]
        self.loss += self.loss_fn(reference, prediction)
        self.n_entries += np.prod(reference.shape)

    def aggregate(self):
        """Aggregate metric over all previously added batches."""
        return self.loss / self.n_entries

    @staticmethod
    def loss_fn(y, yp):
        """Calculates loss function for y and yp."""
        raise NotImplementedError
class MeanSquaredError(Metric):
    r"""
    Metric for mean square error. For non-scalar quantities, the mean of all
    components is taken.

    Args:
        target (str): name of target property
        name (str): name used in logging for this metric. If set to `None`,
            `MSE_[target]` will be used (Default: None)
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="MSE_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        # Sum of squared residuals; predictions reshaped to the target shape.
        residual = (y - yp.view(y.shape)).view(-1)
        return torch.sum(residual ** 2).detach().cpu().data.numpy()
class RootMeanSquaredError(MeanSquaredError):
    r"""
    Metric for root mean square error. For non-scalar quantities, the mean of
    all components is taken.

    Args:
        target (str): name of target property
        name (str): name used in logging for this metric. If set to `None`,
            `RMSE_[target]` will be used (Default: None)
    """

    def __init__(self, target, name=None):
        if name is None:
            name = "RMSE_" + target
        super().__init__(target, name)

    def aggregate(self):
        """Aggregate metric over all previously added batches."""
        # Square root of the accumulated mean squared error.
        return np.sqrt(self.loss / self.n_entries)
class MeanAbsoluteError(Metric):
    r"""
    Metric for mean absolute error. For non-scalar quantities, the mean of all
    components is taken.

    Args:
        target (str): name of target property
        name (str): name used in logging for this metric. If set to `None`,
            `MAE_[target]` will be used (Default: None)
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="MAE_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        # Cast targets to float so integer labels subtract cleanly.
        y = y.to(torch.float)
        residual = (y - yp.view(y.shape)).view(-1)
        return torch.sum(torch.abs(residual)).detach().cpu().data.numpy()
class Classifier(Metric):
    """Metric base class for binary classification.

    Subclasses' ``loss_fn`` returns a ``(numerator, denominator)`` pair that
    is accumulated across batches; ``aggregate`` reports their ratio, or NaN
    when nothing was counted.
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="Classifier_" + target if name is None else name,
        )

    def add_batch(self, batch, results):
        """Add a batch to calculate the metric on."""
        numer, denom = self.loss_fn(batch[self.target], results[self.target])
        self.n_entries += denom
        self.loss += numer

    def non_nan(self):
        # NOTE(review): relies on `self.actual` / `self.pred`, which only the
        # RocAuc/PrAuc subclasses define — confirm this helper is never
        # called on count-based subclasses.
        actual = torch.Tensor(self.actual)
        pred = torch.Tensor(self.pred)
        keep = torch.bitwise_not(torch.isnan(pred))
        return pred[keep].numpy().tolist(), actual[keep].numpy().tolist()

    def aggregate(self):
        """Aggregate metric over all previously added batches."""
        if self.n_entries == 0:
            return float('nan')
        return self.loss / self.n_entries
class FalsePositives(Classifier):
    """
    Percentage of claimed positives that are actually wrong for a
    binary classifier.
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="FalsePositive_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        actual = y.detach().cpu().numpy().round().reshape(-1)
        pred = yp.detach().cpu().numpy().round().reshape(-1)
        # Count predicted positives, and those that disagree with the label.
        claimed = sum(1 for p in pred if p == 1)
        wrong = sum(1 for p, a in zip(pred, actual) if p == 1 and p != a)
        return wrong, claimed
class FalseNegatives(Classifier):
    """
    Percentage of claimed negatives that are actually wrong for a
    binary classifier.
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="FalseNegative_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        actual = y.detach().cpu().numpy().round().reshape(-1)
        pred = yp.detach().cpu().numpy().round().reshape(-1)
        # Count predicted negatives, and those that disagree with the label.
        claimed = sum(1 for p in pred if p == 0)
        wrong = sum(1 for p, a in zip(pred, actual) if p == 0 and p != a)
        return wrong, claimed
class TruePositives(Classifier):
    """
    Percentage of claimed positives that are actually right for a
    binary classifier.
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="TruePositive_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        actual = y.detach().cpu().numpy().round().reshape(-1)
        pred = yp.detach().cpu().numpy().round().reshape(-1)
        # Count predicted positives, and those confirmed by the label.
        claimed = sum(1 for p in pred if p == 1)
        correct = sum(1 for p, a in zip(pred, actual) if p == 1 and p == a)
        return correct, claimed
class TrueNegatives(Classifier):
    """
    Percentage of claimed negatives that are actually right for a
    binary classifier.
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="TrueNegative_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        actual = y.detach().cpu().numpy().round().reshape(-1)
        pred = yp.detach().cpu().numpy().round().reshape(-1)
        # Count predicted negatives, and those confirmed by the label.
        claimed = sum(1 for p in pred if p == 0)
        correct = sum(1 for p, a in zip(pred, actual) if p == 0 and p == a)
        return correct, claimed
class RocAuc(Classifier):
    """
    ROC-AUC metric (area under the true-positive vs. false-positive curve).
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="RocAuc_" + target if name is None else name,
        )
        # Running lists of ground-truth labels and predicted probabilities.
        self.actual = []
        self.pred = []

    def reset(self):
        """Reset metric attributes after aggregation to collect new batches."""
        self.actual = []
        self.pred = []

    def loss_fn(self, y, yp):
        """Not a real loss: flattens the tensors to plain lists so they can
        be accumulated; the AUC itself is computed in ``aggregate``."""
        actual = y.detach().cpu().reshape(-1).numpy().tolist()
        pred = yp.detach().cpu().reshape(-1).numpy().tolist()
        return actual, pred

    def add_batch(self, batch, results):
        """Add a batch to calculate the metric on."""
        actual, pred = self.loss_fn(batch[self.target], results[self.target])
        self.actual.extend(actual)
        self.pred.extend(pred)

    def aggregate(self):
        """Calculate the auc score from all the data."""
        pred, actual = self.non_nan()
        try:
            return roc_auc_score(y_true=actual, y_score=pred)
        except ValueError:
            # roc_auc_score raises when only one class is present.
            return float("nan")
class PrAuc(Classifier):
    """
    Precision-recall AUC metric (area under the precision vs. recall curve).
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="PrAuc_" + target if name is None else name,
        )
        # Running lists of ground-truth labels and predicted probabilities.
        self.actual = []
        self.pred = []

    def reset(self):
        """Reset metric attributes after aggregation to collect new batches."""
        self.actual = []
        self.pred = []

    def loss_fn(self, y, yp):
        """Not a real loss: flattens the tensors to plain lists so they can
        be accumulated; the AUC itself is computed in ``aggregate``."""
        actual = y.detach().cpu().reshape(-1).numpy().tolist()
        pred = yp.detach().cpu().reshape(-1).numpy().tolist()
        return actual, pred

    def add_batch(self, batch, results):
        """Add a batch to calculate the metric on."""
        actual, pred = self.loss_fn(batch[self.target], results[self.target])
        self.actual.extend(actual)
        self.pred.extend(pred)

    def aggregate(self):
        """Calculate the auc score from all the data."""
        pred, actual = self.non_nan()
        try:
            precision, recall, thresholds = precision_recall_curve(
                y_true=actual, probas_pred=pred)
            return auc(recall, precision)
        except ValueError:
            return float("nan")
class Accuracy(Classifier):
    """
    Overall accuracy of classifier.
    """

    def __init__(self, target, name=None):
        super().__init__(
            target=target,
            name="Accuracy_" + target if name is None else name,
        )

    @staticmethod
    def loss_fn(y, yp):
        actual = y.detach().cpu().numpy().round().reshape(-1)
        pred = yp.detach().cpu().numpy().round().reshape(-1)
        # Count exact matches over all predictions.
        total = len(actual)
        matched = sum(1 for a, p in zip(actual, pred) if a == p)
        return matched, total
|
{"hexsha": "765ad11eec6ef23132fa9958b1a1bd1879b1044e", "size": 12221, "ext": "py", "lang": "Python", "max_stars_repo_path": "nff/train/metrics.py", "max_stars_repo_name": "jkaraguesian/NeuralForceField", "max_stars_repo_head_hexsha": "4ca4f4c7edc0ed1f70952db9e42d8ef9bbe109d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nff/train/metrics.py", "max_issues_repo_name": "jkaraguesian/NeuralForceField", "max_issues_repo_head_hexsha": "4ca4f4c7edc0ed1f70952db9e42d8ef9bbe109d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nff/train/metrics.py", "max_forks_repo_name": "jkaraguesian/NeuralForceField", "max_forks_repo_head_hexsha": "4ca4f4c7edc0ed1f70952db9e42d8ef9bbe109d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7284210526, "max_line_length": 79, "alphanum_fraction": 0.568038622, "include": true, "reason": "import numpy", "num_tokens": 2736}
|
"""
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
    """Unit tests for A2DR paper experiments."""

    def setUp(self):
        # fixed seed so the generated problem instance is reproducible
        np.random.seed(1)
        self.eps_rel = 1e-8   # specify these in all examples?
        self.eps_abs = 1e-6
        self.MAX_ITER = 1000

    def test_sparse_inv_covariance(self, q, alpha_ratio):
        """Sparse inverse covariance estimation, solved with DRS then A2DR.

        minimize -log(det(S)) + trace(S*Q) + \\alpha*||S||_1
        subject to S symmetric PSD.

        Args:
            q: dimension of the covariance matrix (q x q).
            alpha_ratio: l1 penalty as a fraction of the largest
                off-diagonal |Q| entry.
        """
        # minimize -log(det(S)) + trace(S*Q) + \alpha*||S||_1 subject to S is symmetric PSD.
        # Problem data.
        # q: Dimension of matrix.
        p = 1000      # Number of samples.
        ratio = 0.9   # Fraction of zeros in S.

        # draw a sparse SPD ground-truth precision matrix and sample from it
        S_true = sparse.csc_matrix(make_sparse_spd_matrix(q, ratio))
        Sigma = sparse.linalg.inv(S_true).todense()
        # NOTE(review): sp.linalg is only available here because a submodule
        # import (scipy.sparse.linalg) loads it transitively — confirm, or
        # import scipy.linalg explicitly.
        z_sample = sp.linalg.sqrtm(Sigma).dot(np.random.randn(q,p))
        Q = np.cov(z_sample)

        # alpha is scaled off the largest off-diagonal entry of |Q|
        mask = np.ones(Q.shape, dtype=bool)
        np.fill_diagonal(mask, 0)
        alpha_max = np.max(np.abs(Q)[mask])
        alpha = alpha_ratio*alpha_max    # 0.001 for q = 100, 0.01 for q = 50

        # Convert problem to standard form.
        # f_1(S) = -log(det(S)) + trace(S*Q) on symmetric PSD matrices, f_2(S) = \alpha*||S||_1.
        # A_1 = I, A_2 = -I, b = 0.
        prox_list = [lambda v, t: prox_neg_log_det(v.reshape((q,q), order='C'), t, lin_term=t*Q).ravel(order='C'),
                     lambda v, t: prox_norm1(v, t*alpha)]
        A_list = [sparse.eye(q*q), -sparse.eye(q*q)]
        b = np.zeros(q*q)

        # Solve with DRS (anderson=False disables the acceleration).
        drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
        #drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
        #drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
        #drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
        print('Finished DRS.')

        # Solve with A2DR (Anderson-accelerated variant of the same problem).
        a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
        #a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, lam_accel=1e-12)
        # lam_accel = 0 seems to work well sometimes, although it oscillates a lot.
        a2dr_S = a2dr_result["x_vals"][-1].reshape((q,q), order='C')
        self.compare_total(drs_result, a2dr_result)
        print('Finished A2DR.')
        # report how sparse the recovered precision matrix is
        print('recovered sparsity = {}'.format(np.sum(a2dr_S != 0)*1.0/a2dr_S.shape[0]**2))
if __name__ == '__main__':
    # run the sparse inverse covariance experiment directly
    # (q=80 variables, alpha_ratio=0.001) without a test runner
    tests = TestPaper()
    tests.setUp()
    tests.test_sparse_inv_covariance(80, 0.001)
|
{"hexsha": "31f16b18befa5ef555d5afae200c3f69ce94d9b2", "size": 3718, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/paper_examples/sparse_inv_cov_est.py", "max_stars_repo_name": "anqif/a2dr", "max_stars_repo_head_hexsha": "b101b13c17448f43c5c9bb3ec6bcdf18aca73a66", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2019-08-20T21:31:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T12:14:56.000Z", "max_issues_repo_path": "examples/paper_examples/sparse_inv_cov_est.py", "max_issues_repo_name": "anqif/a2dr", "max_issues_repo_head_hexsha": "b101b13c17448f43c5c9bb3ec6bcdf18aca73a66", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-09-30T19:23:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-22T15:48:49.000Z", "max_forks_repo_path": "examples/paper_examples/sparse_inv_cov_est.py", "max_forks_repo_name": "anqif/a2dr", "max_forks_repo_head_hexsha": "b101b13c17448f43c5c9bb3ec6bcdf18aca73a66", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-01-09T09:19:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T00:38:41.000Z", "avg_line_length": 39.1368421053, "max_line_length": 133, "alphanum_fraction": 0.6710597095, "include": true, "reason": "import numpy,import scipy,from scipy,from cvxpy", "num_tokens": 1046}
|
'''
Created on May 12, 2019
@author: cef
'''
#===============================================================================
# IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref, random
import pandas as pd
import numpy as np
#===============================================================================
# shortcuts
#===============================================================================
from collections import OrderedDict
from hlpr.exceptions import Error
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from model.sofda.hp.basic import OrderedSet
from model.sofda.hp.pd import view
idx = pd.IndexSlice
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
#import hp.plot
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.dyno as hp_dyno
#import model.sofda.hp.data as hp_data
from model.sofda.fdmg.dfunc import Dfunc
import model.sofda.udev.scripts as udev_scripts
# logger setup -----------------------------------------------------------------------
# module-level logger; House instances create per-object children from it
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initialized')  # message typo fixed ('initilized')
class House(
udev_scripts.House_udev,
#hp.plot.Plot_o,
hp_dyno.Dyno_wrap,
hp_sim.Sim_o,
hp_oop.Parent, #building/asset objects
hp_oop.Child):
#===========================================================================
# program pars
#==========================================================================
geocode_list = ['area', 'per', 'height', 'inta'] #sufficxs of geometry attributes to search for (see set_geo)
finish_code_list = ['f', 'u', 't'] #code for finished or unfinished
#===========================================================================
# debugging
#===========================================================================
last_floodo = None
#===========================================================================
# user provided pars
#===========================================================================
dem_el = np.nan
"""changed to acode
hse_type = '' # Class + Type categorizing the house"""
acode_s = ''
acode_c = ''
anchor_el = np.nan # anchor elevation for house relative to datum (generally main floor el)
gis_area = np.nan #foot print area (generally from teh binv)
B_f_height = np.nan
bsmt_f = True
area_prot_lvl = 0 #level of area protection
asector =''
f1area =np.nan
f0area = np.nan
f1a_uf =np.nan
f0a_uf =np.nan
#needed for udev
parcel_area = np.nan
#defaults passed from model
"""While the ICS for these are typically uniform and broadcast down by the model,
these need to exist on the House, so we can spatially limit our changes"""
G_anchor_ht = None #default garage anchor height (chosen aribtrarily by IBI (2015)
joist_space = None #space between basement and mainfloor. used to set the
#set of expected attributes (and their types) for validty checking
exp_atts_d = {'parcel_area':float, 'acode_s':str, 'acode_c':str, 'anchor_el':float, 'gis_area':float,
'B_f_height':float, 'dem_el':float, 'asector':str}
#===========================================================================
# calculated pars
#===========================================================================
floodo = None #flood object flooding the house
# #geometry placeholders
#geo_dxcol_blank = None #blank dxcol for houes geometry
geo_dxcol = None
'keeping just this one for reporting and dynp'
boh_max_val = None #basement open height minimum value
# #anchoring
"""
Im keeping anchor heights separate from geometry attributes as these could still apply
even for static dmg_feats
"""
bsmt_opn_ht = 0.0 #height of lowest basement opening
damp_spill_ht = 0.0
vuln_el = 9999.0 #starter value
# personal property protection
bkflowv_f = False #flag indicating the presence of a backflow valve on this property
sumpump_f = False
genorat_f = False
bsmt_egrd = ''
#statistics
BS_ints = 0.0 #some statistic of the weighted depth/damage of the BS dfunc
max_dmg = 0.0 #max damage possible for this house
dummy_cnt = 0 #number of dummy dfuncs
kid_nm_t = tuple()
beg_hist = ''
#===========================================================================
# data containers
#===========================================================================
dd_df = None #df results of total depth damage
def __init__(self, *vars, **kwargs):
    """Build a House from the building inventory and raise its Dfunc children.

    All positional/keyword arguments are forwarded to the oop/sim base
    classes.  Side effects (in order): baseclass init, sibling-shared
    attribute setup, geometry/anchoring build via build_house(), child
    Dfunc construction via raise_dfuncs(), and dynamic-parameter init.
    """
    logger = mod_logger.getChild('House')
    logger.debug('start _init_')
    #=======================================================================
    # attach pre init atts
    #=======================================================================
    #self.model = self.parent.model #pass the Fdmg model down
    'put this here just to keep the order nice and avoid the unresolved import error'
    # attribute names to inherit from the parent during baseclass init
    self.inherit_parent_ans=set(['mind', 'model'])
    #=======================================================================
    # #initilzie teh baseclass
    #=======================================================================
    super(House, self).__init__(*vars, **kwargs)
    if self.db_f:
        if self.model is None: raise IOError
    #=======================================================================
    #common setup
    #=======================================================================
    if self.sib_cnt == 0:
        # first sibling: set attributes shared by all houses of this batch
        logger.debug("sib_cnt=0. setting atts")
        self.kid_class = Dfunc
        """noved this out to set_dfunc_df
        self.childmeta_df = self.model.house_childmeta_df #dfunc meta data"""
        self.joist_space = self.model.joist_space
        self.G_anchor_ht = self.model.G_anchor_ht
    #=======================================================================
    # unique se5tup
    #=======================================================================
    # building id comes from the mind (index) attribute named by the model
    self.bldg_id = int(getattr(self, self.mind ))
    # basement flag may arrive as a string from the inventory; coerce to bool
    self.bsmt_f = hp_basic.str_to_bool(self.bsmt_f, logger=self.logger)
    if not 'B' in self.model.place_codes:
        raise Error('not sure about this')
        # NOTE(review): unreachable after the raise above — was probably
        # meant to force bsmt_f off when 'B' is not modeled; confirm intent
        self.bsmt_f = False
    'these need to be unique. calculated during init_dyno()'
    self.post_upd_func_s = set([self.calc_statres_hse])
    logger.debug('building the house \n')
    self.build_house()
    logger.debug('raising my dfuncs \n')
    self.raise_dfuncs()
    logger.debug('init_dyno \n')
    self.init_dyno()
    #=======================================================================
    # cheking
    #=======================================================================
    if self.db_f: self.check_house()
    logger.debug('_init_ finished as %i \n'%self.bldg_id)
    return
def check_house(self):
    """Debug-mode validity checks on this House and its Dfunc children.

    Raises on a broken model proxy, invalid attributes, or a basement
    finish height below the session minimum; then cascades the check to
    every child Dfunc.
    """
    logger = self.logger.getChild('check_house')

    # the model proxy should resolve to the same object as the grandparent
    if self.model.__repr__() != self.parent.parent.__repr__():
        raise IOError

    # attribute type/validity checks
    self.check_atts()

    # basement logic: finish height must respect the session minimum
    if self.bsmt_f and self.B_f_height < self.session.bfh_min:
        raise Error('%s basement finish height (%.2f) is lower than the session minimum %.2f)'
                    %(self.name,self.B_f_height, self.session.bfh_min ))

    # cascade to each damage-function child
    for _, dfunc in self.kids_d.items():
        dfunc.check_dfunc()
    return
def build_house(self): #build yourself from the building inventory
    """Compute this house's geometry, anchoring and basement parameters.

    #=======================================================================
    # CALLS
    #=======================================================================
    binv.raise_children()
        spawn_child()
    """
    logger = self.logger.getChild('build_house')
    #=======================================================================
    # custom loader functions
    #=======================================================================
    #self.set_binv_legacy_atts() #compile data from legacy (rfda) inventory syntax
    logger.debug('set_geo_dxcol \n')
    self.set_geo_dxcol() #calculate the geometry (defaults) of each floor
    logger.debug('set_hse_anchor \n')
    self.set_hse_anchor()
    """ a bit redundant, but we need to set the bsmt egrade regardless for reporting consistency
    'these should be accessible regardless of dfeats as they only influence the depth calc'"""
    self.set_bsmt_egrd()
    # basement-only parameters (opening height, damp spill height)
    if self.bsmt_f:
        logger.debug('set_bsmt_opn_ht \n')
        self.set_bsmt_opn_ht()
        logger.debug('set_damp_spill_ht \n')
        self.set_damp_spill_ht()
    #=======================================================================
    # value
    #=======================================================================
    'need a better way to do this'
    """contents value scaling
    self.cont_val = self.value * self.model.cont_val_scale"""
    if self.db_f:
        # footprint area must fall inside the model's plausible bounds
        if self.gis_area < self.model.gis_area_min:
            raise IOError
        if self.gis_area > self.model.gis_area_max: raise IOError
    logger.debug('finished')
    return
def raise_dfuncs(self): #build dictionary with damage functions for each dmg_type
    """Construct the child Dfunc objects for this house.

    called by spawn_child and passing childmeta_df (from dfunc tab. see above)
    this allows each dfunc object to be called form the dictionary by dmg_type
    dfunc_df is sent as the childmeta_df (attached during __init__)
    #=======================================================================
    # INPUTS
    #=======================================================================
    dfunc_df:    df with headers:
        these are typically assigned from the 'dfunc' tab on the pars.xls

    Side effects: sets self.childmeta_df and self.dfunc_d.
    """
    #=======================================================================
    # #defautls
    #=======================================================================
    logger = self.logger.getChild('raise_dfuncs')
    'this is a slice from the dfunc tab made by Fdmg.load_pars_dfunc'
    #=======================================================================
    # get your dfunc pars
    #=======================================================================
    dfunc_pars_df = self.get_dfunc_df()
    #set this as yoru childmeta
    self.childmeta_df = dfunc_pars_df.copy()
    logger.debug('from %s'%str(dfunc_pars_df.shape))
    #=======================================================================
    # prechecks
    #=======================================================================
    if self.db_f:
        # dfuncs are built exactly once, at session init
        if not self.session.state=='init':
            raise Error('should only build these once')
        if not isinstance(dfunc_pars_df, pd.DataFrame):
            raise IOError
        if len(dfunc_pars_df) == 0:
            raise Error('%s got no dfunc_pars_df!'%self.name)
        if not self.kid_class == Dfunc:
            raise IOError
        if len(self.kids_d) > 0:
            raise IOError
    #=======================================================================
    # clean the dfunc pars
    #=======================================================================
    """I think we need placeholder dfuncs incase we rebuild this house with a basement later
    #drop basements
    if not self.bsmt_f:
        dfunc_pars_df = dfunc_pars_df_raw[dfunc_pars_df_raw['place_code']!='B']
    else:
        dfunc_pars_df = dfunc_pars_df_raw"""
    #slice out all the nones
    dfunc_pars_df1 = dfunc_pars_df[dfunc_pars_df['acode'] != 'none']
    #=======================================================================
    # compile for each damage type
    #=======================================================================
    #shortcut for ALL nones
    if len(dfunc_pars_df1) == 0:
        logger.debug('no real dfuncs. skipping construction')
        self.dfunc_d = dict()
    else:
        # build one Dfunc child per remaining row
        self.dfunc_d = self.raise_children_df(dfunc_pars_df1,
                                              kid_class = self.kid_class,
                                              dup_sibs_f = True)
    #=======================================================================
    # closeout and wrap up
    #=======================================================================
    logger.debug('built %i dfunc children: %s'%(len(self.dfunc_d), list(self.dfunc_d.keys())))
    #=======================================================================
    # post check
    #=======================================================================
    if self.db_f:
        self.check_house()
    return
def set_hse_anchor(self):
    """Recompute the house anchor elevation from dem_el + first-floor height.

    Kept as its own setter so dem_el updates can re-trigger it.
    Returns True; respects a frozen 'anchor_el'.
    """
    # frozen attributes are left untouched
    if self.is_frozen('anchor_el'):
        return True

    # anchor elevation = ground surface elevation plus first-floor height
    new_el = float(self.ff_height) + self.dem_el
    self.handle_upd('anchor_el', new_el, proxy(self), call_func = 'set_hse_anchor')
    return True
def set_bsmt_opn_ht(self): #set the basement opening height (from the basement floor)
    """Set ``bsmt_opn_ht``, the height of the lowest basement opening.

    bsmt_open_ht is used by dfuncs with bsmt_e_grd == 'damp' and
    damp_func_code == 'spill' for low water floods.

    Behavior depends on model.bsmt_opn_ht_code:
      * '*max(<v>)': min(basement height, distance to grade, <v>);
        0.0 for 'floating' basements anchored above grade.
      * plain float: use that value directly.

    Returns:
        True when set (or skipped: no basement / frozen);
        False when dependencies were out of date and the call was re-queued.
    """
    #=======================================================================
    # shortcuts
    #=======================================================================
    if not self.bsmt_f:
        return True
    #=======================================================================
    # check dependencies and frozen
    #=======================================================================
    if not self.session.state=='init':
        if self.is_frozen('bsmt_opn_ht'):
            return True
        dep_l = [([self], ['set_hse_anchor', 'set_geo_dxcol'])]
        if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_bsmt_opn_ht'):
            return False
    #=======================================================================
    # defaults
    #=======================================================================
    logger = self.logger.getChild('set_bsmt_opn_ht')
    #=======================================================================
    # from user provided minimum
    #=======================================================================
    if self.model.bsmt_opn_ht_code.startswith('*max'):
        #===================================================================
        # prechecks
        #===================================================================
        if self.db_f:
            bfh_chk = float(self.geo_dxcol.loc['height',('B','f')])
            if not round(self.B_f_height, 2) == round(bfh_chk, 2):
                raise Error('B_f_height mismatch attribute (%.2f) geo_dxcol (%.2f)'
                            %(self.B_f_height, bfh_chk))
            """lets let the basement be above grade"""
            if self.ff_height > (bfh_chk + self.joist_space):
                logger.warning('basement is above grade!')
        #get the maximum value from the code string, e.g. '*max(1.5)'
        if self.boh_max_val is None: #calculate and set
            'this means we are non dynamic'
            s_raw = self.model.bsmt_opn_ht_code
            s = re.sub(r'\)', '', s_raw[5:])  # strip the closing parenthesis
            self.boh_max_val = float(s) #pull the number out of the brackets
        max_val = self.boh_max_val
        # get the basement anchor el
        B_f_height = float(self.geo_dxcol.loc['height',('B','t')]) #pull from frame
        bsmt_anchor_el = self.anchor_el - B_f_height - self.joist_space #basement curve
        #get the distance to grade
        bsmt_to_dem = self.dem_el - bsmt_anchor_el
        if bsmt_to_dem < 0: #floating basements: no opening below grade
            bsmt_opn_ht = 0
        else:
            #take the min of all three
            bsmt_opn_ht = min(B_f_height, bsmt_to_dem, max_val)
        #===================================================================
        # wrap
        #===================================================================
        if self.db_f:
            #check basement anchor elevation logic
            if bsmt_anchor_el > self.anchor_el:
                raise Error('%s basement anchor el (%.2f) is above the main anchor el (%.2f)'
                            %(self.name, bsmt_anchor_el, self.anchor_el))
            """letting this happen for now"""
            if bsmt_to_dem < 0:
                logger.debug('\n    dem_el=%.2f, bsmt_anchor_el=%.2f, B_f_heigh=%.2f, anchor_el=%.2f'
                             %(self.dem_el, bsmt_anchor_el, B_f_height, self.anchor_el))
                logger.warning('%s bassement is above grade! bsmt_anchor_el(%.2f) > dem _el (%.2f) '
                               %(self.name, bsmt_anchor_el, self.dem_el))
            #detailed output: identify which candidate won the min()
            boolar = np.array([B_f_height, bsmt_to_dem, max_val, 0]) == bsmt_opn_ht
            selected = np.array(['B_f_height', 'bsmt_to_dem', 'max_val', 'zero'])[boolar]
            logger.debug('got bsmt_opn_ht = %.2f from \'%s\''%(bsmt_opn_ht, selected[0]))
        else:
            # bugfix: the format argument was missing, so the value was
            # never interpolated into the log message
            logger.debug('got bsmt_opn_ht = %.2f'%bsmt_opn_ht)
    #=======================================================================
    # from user provided float
    #=======================================================================
    else:
        bsmt_opn_ht = float(self.model.bsmt_opn_ht_code)
    #=======================================================================
    # post checks
    #=======================================================================
    if self.db_f:
        if not bsmt_opn_ht >= 0:
            # NOTE(review): bsmt_anchor_el/B_f_height only exist on the
            # '*max' branch; this error path would NameError for float codes
            logger.error('\n    dem_el=%.2f, bsmt_anchor_el=%.2f, B_f_heigh=%.2f, anchor_el=%.2f'
                         %(self.dem_el, bsmt_anchor_el, B_f_height, self.anchor_el))
            raise Error('%s got a negative bsmt_opn_ht (%.2f)'%(self.name, bsmt_opn_ht))
    #=======================================================================
    # wrap up
    #=======================================================================
    self.handle_upd('bsmt_opn_ht', bsmt_opn_ht, proxy(self), call_func = 'set_bsmt_opn_ht')
    return True
def set_damp_spill_ht(self):
    """Set the damp spill height as half the basement opening height."""
    spill_ht = 0.5 * self.bsmt_opn_ht
    self.handle_upd('damp_spill_ht', spill_ht, proxy(self), call_func = 'set_damp_spill_ht')
    return True
def set_bsmt_egrd(self): #calculate the basement exposure grade
    """Set ``bsmt_egrd`` ('dry'/'damp'/'wet'/'nobsmt') from the PLPM flags.

    bkflowv_f    sumpump_f   genorat_f

    There is also a globabl flag to indicate whether bsmt_egrd should be
    considered or not.  For the implementation of the bsmt_egrd in
    determining damages, see Dfunc.get_dmg_wsl().

    #=======================================================================
    # CALLS
    #=======================================================================
    this is now called during every get_dmgs_wsls()... as gpwr_f is a
    function of the Flood object.

    Returns:
        str: a condition string recording how the grade was derived
        ('frozen', 'plpm...', 'none', 'global', 'nobsmt').
    """
    #=======================================================================
    # shortcuts
    #=======================================================================
    if self.is_frozen('bsmt_egrd'):
        return 'frozen'
    #=======================================================================
    # defaults
    #=======================================================================
    logger = self.logger.getChild('set_bsmt_egrd')
    if self.bsmt_f:
        #=======================================================================
        # from plpms (personal-level protection measures)
        #=======================================================================
        if self.model.bsmt_egrd_code == 'plpm':
            #store the plpm status into the cond string (debug only)
            if self.db_f:
                cond = 'plpm.'
                for tag, flag in {'s':self.sumpump_f, 'g':self.genorat_f, 'b':self.bkflowv_f}.items():
                    if flag:
                        cond = '%s%s'%(cond, tag)
            else:
                cond = 'plpm'
            #=======================================================================
            # get the grid power state
            #=======================================================================
            # during init there is no flood object; take the model default
            if self.session.state == 'init':
                gpwr_f = self.model.gpwr_f
                cond = cond + '.init'
            else:
                gpwr_f = self.floodo.gpwr_f
                cond = '%s.%s'%(cond, self.floodo.ari)
            #=======================================================================
            # grid power is on: sump pump works without a generator
            #=======================================================================
            if gpwr_f:
                cond = cond + '.on'
                if self.bkflowv_f and self.sumpump_f:
                    bsmt_egrd = 'dry'
                elif self.bkflowv_f or self.sumpump_f:
                    bsmt_egrd = 'damp'
                else:
                    bsmt_egrd = 'wet'
            #=======================================================================
            # grid power is off: sump pump only helps with a generator
            #=======================================================================
            else:
                cond = cond + '.off'
                if self.bkflowv_f and self.sumpump_f and self.genorat_f:
                    bsmt_egrd = 'dry'
                elif self.bkflowv_f or (self.sumpump_f and self.genorat_f):
                    bsmt_egrd = 'damp'
                else: bsmt_egrd = 'wet'
            logger.debug('set bsmt_egrd = %s (from \'%s\') with grid_power_f = %s'%(bsmt_egrd,self.bsmt_egrd, gpwr_f))
        #=======================================================================
        # ignore bsmt_egrd
        #=======================================================================
        elif self.model.bsmt_egrd_code == 'none':
            cond = 'none'
            bsmt_egrd = 'wet'
            gpwr_f = self.model.gpwr_f
        #=======================================================================
        # allow the user to override all
        #=======================================================================
        elif self.model.bsmt_egrd_code in ['wet', 'damp', 'dry']:
            cond = 'global'
            bsmt_egrd = self.model.bsmt_egrd_code
            gpwr_f = self.model.gpwr_f
        else:
            raise IOError
    else:
        # no basement: placeholder grade for reporting consistency
        gpwr_f = self.model.gpwr_f
        cond = 'nobsmt'
        bsmt_egrd = 'nobsmt'
    #=======================================================================
    # wrap up
    #=======================================================================
    self.bsmt_egrd = bsmt_egrd
    self.gpwr_f = gpwr_f #set this
    """report/collect on the flood
    self.parent.childmeta_df.loc[self.dfloc,'bsmt_egrd'] = bsmt_egrd"""
    return cond
def set_geo_dxcol(self): #calculate the geometry of each floor based on the geo_build_code
    """Build the geometry dxcol (multi-column frame) for this house.

    builds a dxcol with all the geometry attributes of this house
    called by load_data when self.session.wdfeats_f = True

    #=======================================================================
    # KEY VARS
    #=======================================================================
    geo_build_code: code to indicate what geometry to use for the house. see the dfunc tab
        'defaults': see House.get_default_geo()
        'from_self': expect all geo atts from the binv.
        'any': take what you can from the binv, everything else use defaults.
        'legacy': use gis area for everything
    gbc_override: used to override the geo_build_code
    geo_dxcol: house geometry

    #=======================================================================
    # UDPATES
    #=======================================================================
    when a specific geometry attribute of the house is updated (i.e. B_f_height)
    this dxcol needs to be rebuilt
    and all the dfuncs need to run build_dd_ar()

    #=======================================================================
    # TODO
    #=======================================================================
    add some isolated updating?
        for when we only change one floor
        need to add some kwargs to the dynp_handles
    """
    logger = self.logger.getChild('set_geo_dxcol')
    if self.is_frozen('geo_dxcol', logger=logger):
        return True
    pars_dxcol = self.session.pars_df_d['hse_geo'] #pull the pars frame
    #=======================================================================
    # get default geometry for this house
    #=======================================================================
    self.defa = self.gis_area #default area
    if self.defa <=0:
        logger.error('got negative area = %.2f'%self.defa)
        raise IOError
    # default perimeter: assumes a square footprint (4 * sqrt(area))
    self.defp = 4*math.sqrt(self.defa)
    #=======================================================================
    # setup the geo_dxcol
    #=======================================================================
    dxcol = self.model.geo_dxcol_blank.copy() #get a copy of the blank one
    'I need to place the reference herer so that geometry attributes have access to each other'
    #self.geo_dxcol = dxcol
    # column level 0 = place codes (e.g. 'B', 'M'); level 1 = finish codes
    place_codes = dxcol.columns.get_level_values(0).unique().tolist()
    #finish_codes = dxcol.columns.get_level_values(1).unique().tolist()
    #geo_codes = dxcol.index
    logger.debug("from geo_dxcol_blank %s filling:"%(str(dxcol.shape)))
    #=======================================================================
    # #loop through each place code and compile the appropriate geometry
    #=======================================================================
    for place_code in place_codes:
        geo_df = dxcol[place_code] #geometry for just this place
        pars_df = pars_dxcol[place_code]
        #logger.debug('filling geo_df for place_code: \'%s\' '%(place_code))
        #===================================================================
        # #loop through and build the geometry by each geocode
        #===================================================================
        for geo_code, row in geo_df.iterrows():
            for finish_code, value in row.items():
                #===========================================================
                # total column: derived from the 'u' and 'f' entries
                #===========================================================
                if finish_code == 't':
                    uval = dxcol.loc[geo_code, (place_code, 'u')]
                    fval = dxcol.loc[geo_code, (place_code, 'f')]
                    if self.db_f:
                        if np.any(pd.isnull([uval, fval])):
                            raise IOError
                    if geo_code == 'height': #for height, take the maximum
                        att_val = max(uval, fval)
                    else: #for other geometry, take the total
                        att_val = uval + fval
                #===========================================================
                # finish/unfinished
                #===========================================================
                else:
                    #get the user passed par for this
                    gbc = pars_df.loc[geo_code, finish_code]
                    # numeric codes arrive as strings; try converting
                    try:gbc = float(gbc)
                    except: pass
                    #===========================================================
                    # #assemble per the geo_build_code
                    #===========================================================
                    #user specified code
                    if isinstance(gbc, str):
                        gbc = str(gbc)
                        if gbc == '*binv':
                            # pull the attribute straight off this House
                            att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
                            att_val = getattr(self, att_name) #get this attribute from self
                            """"
                            mostly using this key for the B_f_height
                            """
                        elif gbc == '*geo':
                            att_val = self.calc_secondary_geo(place_code, finish_code, geo_code, dxcol=dxcol) #calculate the default value
                        elif gbc.startswith('*tab'):
                            #get the pars from another parameter table
                            tabn = re.sub('\)',"",gbc[5:]) #remove the end parentheisis
                            df = self.session.pars_df_d[tabn]
                            att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
                            att_val = self.get_geo_from_other(df, att_name)
                        else:
                            # treat the code as the name of an attribute on self
                            att_val = getattr(self, gbc)
                    #user speciifed value
                    elif isinstance(gbc, float): #just use the default value provided in the pars
                        att_val = gbc
                    else: raise IOError
                    logger.debug('set %s.%s.%s = %.2f with gbc \'%s\''%(place_code,finish_code,geo_code, att_val, gbc))
                #===========================================================
                # value checks
                #===========================================================
                if self.db_f:
                    att_name = place_code +'_'+finish_code+'_'+ geo_code
                    if not 'float' in type(att_val).__name__:
                        raise Error('got unexpected type for \"%s\': %s'%(att_name, type(att_val)))
                    if pd.isnull(att_val):
                        raise IOError
                    if att_val < 0:
                        raise IOError
                #===========================================================
                # set the value
                #===========================================================
                dxcol.loc[geo_code, (place_code, finish_code)] = att_val
                #row[finish_code] = att_val #update the ser
                #logger.debug('set \'%s\' as \'%s\''%(att_name, att_val))
    #=======================================================================
    # rounding
    #=======================================================================
    dxcol = dxcol.round(decimals=2)
    #=======================================================================
    # special attribute setting
    #=======================================================================
    'need this as an attribute for reporting'
    B_f_height = float(dxcol.loc['height', ('B', 'f')]) #to set the type
    #===============================================================
    # POST
    #===============================================================
    """todo:
    add some checking that we are not changing any geometry attributes with a dynp
    that would be overwritten here
    """
    #logger.debug('built house_geo_dxcol %s'%str(dxcol.shape))
    self.handle_upd('geo_dxcol', dxcol, weakref.proxy(self), call_func = 'set_geo_dxcol')
    self.handle_upd('B_f_height', B_f_height, weakref.proxy(self), call_func = 'set_geo_dxcol')
    return True
def set_bfh(self):
    """Push the basement finish height (B_f_height) into the geo_dxcol.

    Houses without a basement have nothing to update, so we short-circuit
    and report success.
    """
    if self.bsmt_f:
        #delegate the actual write to the generic geometry updater
        return self.update_geo_dxcol(self.B_f_height, 'height', 'B', 'f')
    return True
def xxxset_ffh(self):
    """Derive the first-floor height as anchor_el minus dem_el.

    NOTE: disabled ('xxx' prefix) -- the anchor_el is normally derived FROM
    the ff_height, so recomputing ff_height here would be circular.
    """
    delta = self.anchor_el - self.dem_el
    self.ff_height = delta
    return True
def update_geo_dxcol(self,
                     nval_raw, #new value
                     geo_code, place_code, finish_code, #locations
                     ):
    """Set one geometry value in geo_dxcol and propagate the change.

    Rounds nval_raw to 2 decimals, skips the write when the (rounded) value
    is unchanged, recomputes the 't' (total = 'u' + 'f') entry for the row,
    then pushes the modified frame through handle_upd() so dependents update.

    nval_raw:    new (unrounded) value
    geo_code:    row label of geo_dxcol, e.g. 'height'
    place_code:  first level of the column MultiIndex, e.g. 'B'
    finish_code: second level of the column MultiIndex ('u' or 'f';
                 't' is rejected -- totals are derived, never set directly)

    Returns True on all non-exceptional paths.
    """
    log = self.logger.getChild('update_geo_dxcol')
    #=======================================================================
    # frozen check
    #=======================================================================
    #respect a frozen geo_dxcol (attribute held fixed for this run)
    if self.is_frozen('geo_dxcol', logger=log):
        return True
    #=======================================================================
    # defaults
    #=======================================================================
    nval = round(nval_raw, 2) #geometry is tracked at 2-decimal precision
    #=======================================================================
    # prechecks
    #=======================================================================
    if finish_code == 't':
        raise Error('not implemented') #the total is always recomputed below
    dxcol = self.geo_dxcol.copy() #get a copy of the original
    #=======================================================================
    # check if we had a change
    #=======================================================================
    oldv = float(dxcol.loc[geo_code, (place_code, finish_code)])
    if nval == round(oldv, 2): #no-op: avoid triggering downstream updates
        log.debug('for %s.%s.%s nval= %.2f has no change... skipping'%(geo_code, place_code, finish_code, nval))
        return True
    #=======================================================================
    # #set the new value
    #=======================================================================
    dxcol.loc[geo_code, (place_code, finish_code)] = nval
    if self.db_f:
        #verify the write landed (guards against silent alignment failures)
        if not nval == round(float(dxcol.loc[geo_code, (place_code, finish_code)]), 2):
            raise Error('value didnt set')
    """
    dxcol.loc[geo_code, (place_code, finish_code)] = 99.9
    """
    log.debug('for %s.%s.%s set %.2f'%(geo_code, place_code, finish_code, nval))
    #=======================================================================
    # set the total value
    #=======================================================================
    #'idx' is presumably pd.IndexSlice bound at module level -- TODO confirm
    dxcol.loc[geo_code, (place_code, 't')] = dxcol.loc[geo_code, idx[[place_code], ['u','f']]].sum()
    #=======================================================================
    # #handle the update
    #=======================================================================
    #weakref.proxy keeps handle_upd's bookkeeping from pinning this house alive
    self.handle_upd('geo_dxcol', dxcol, weakref.proxy(self), call_func = 'update_geo_dxcol')
    """
    for just hte basement, would be nice to only force updates on those that have changed
    """
    #=======================================================================
    # post checks
    #=======================================================================
    if self.db_f:
        #confirm handle_upd actually installed the new frame on self
        if not nval == round(float(self.geo_dxcol.loc[geo_code, (place_code, finish_code)]), 2):
            raise Error('value didnt set')
    return True
def get_dfunc_df(self): #pull your dfunc_df
    """Slice this house's dfunc rows out of the session master table.

    Returns a copy of the rows of model.dfunc_mstr_df whose 'acode' matches
    either the structural (acode_s) or the contents (acode_c) asset code.
    The master table is configured by scripts_fdmg.Fdmg.load_pars_dfunc().

    20190512: added this to provide for dfunc handling on all the different
    acodes; pars changes here flow through the dynp_handles and are reset
    automatically. Called from the dynp_handles (acode_s / acode_c changes).
    """
    log = self.logger.getChild('set_dfunc_df')
    master_df = self.model.dfunc_mstr_df.copy() #pull from the session
    #rows matching either the structural or the contents asset code
    hit_bool = (master_df['acode'] == self.acode_s) | (master_df['acode'] == self.acode_c)
    sliced_df = master_df.loc[hit_bool].copy()
    #=======================================================================
    # post checks
    #=======================================================================
    if self.db_f:
        #length check. adding garage curves / removing some dfuncs is fine,
        #but more than 6 rows indicates a bad acode pairing
        if len(sliced_df) > 6:
            raise Error('%s dfunc_df too long (%i) with acode_s=%s and acode_c=%s'
                        %(self.name, len(sliced_df), self.acode_s, self.acode_c))
    return sliced_df
def calc_secondary_geo(self,
                       place_code, finish_code, geo_code,
                       dxcol = None):
    """Compute a derived (secondary) geometry value from the primary geometry.

    'per'  (perimeter)     = 4 * sqrt(area)          (square footprint)
    'inta' (interior area) = area + height * perimeter

    dxcol defaults to self.geo_dxcol; any other geo_code raises IOError.
    """
    logger = self.logger.getChild('get_default_geo')
    if dxcol is None: dxcol = self.geo_dxcol
    #=======================================================================
    # get primary geometry from frame
    #=======================================================================
    loc_key = (place_code, finish_code)
    area = dxcol.loc['area', loc_key]
    height = dxcol.loc['height', loc_key]
    #=======================================================================
    # calculate the requested secondary geometry
    #=======================================================================
    if geo_code == 'per':
        per = 4*math.sqrt(area)
        att_value = float(per)
    elif geo_code == 'inta':
        per = dxcol.loc['per', loc_key]
        att_value = float(area + height * per)
    else:
        raise IOError
    logger.debug(" for \'%s\' found %.2f"%(geo_code, att_value))
    #=======================================================================
    # post checks
    #=======================================================================
    if self.db_f:
        #everything involved must be a non-null, non-negative float
        for v in (area, height, per, att_value):
            if not 'float' in type(v).__name__:
                raise IOError
            if pd.isnull(v):
                raise IOError
            if not v >= 0:
                raise IOError
    return att_value
def xxxrun_bsmt_egrd(self):
    #disabled stub ('xxx' prefix): only builds a child logger, no logic remains
    logger = self.logger.getChild('run_bsmt_egrd')
def get_geo_from_other(self, #set the garage area
                       df_raw, attn_search):
    """Cross-reference a geometry value from another parameter table.

    Column 0 of df_raw is the cross-reference key: we locate the row whose
    key equals our own attribute of the same name, then pull the value from
    the column named attn_search.

    We need this here to replicate the scaling done by the legacy curves
    on the garage dmg_feats.

    Raises IOError (when db_f) if the row or column match is not unique.
    """
    logger = self.logger.getChild('get_geo_from_other')
    #=======================================================================
    # find the cross reference row
    #=======================================================================
    cross_attn = df_raw.columns[0]
    cross_v = getattr(self, cross_attn) #get our value for this
    boolidx = df_raw.iloc[:,0] == cross_v #locate our cross reference
    #=======================================================================
    # find the search column
    #=======================================================================
    boolcol = df_raw.columns == attn_search
    #=======================================================================
    # prechecks (bugfix: run BEFORE indexing so a bad match raises the
    # intended IOError rather than an opaque IndexError from .iloc[0,0];
    # second check was a duplicate of the row check -- now validates the column)
    #=======================================================================
    if self.db_f:
        if not boolidx.sum() == 1: #row match must be unique
            raise IOError
        if not boolcol.sum() == 1: #column match must be unique
            raise IOError
    value_fnd = df_raw.loc[boolidx, boolcol].iloc[0,0] #just take the first
    return value_fnd
def run_hse(self, wsl, **kwargs):
    """Run one flood event against this house and return the damage series.

    wsl: water surface elevation of this flood (same datum as anchor_el).
    kwargs pass through to get_dmgs_wsls().

    Returns a pd.Series of damages keyed by dfunc name (all zeros when the
    flood depth is below the model's hse_skip_depth threshold).
    """
    'TODO: compile the total dfunc and use that instead?'
    logger = self.logger.getChild('run_hse')
    hse_depth = wsl - self.anchor_el #flood depth relative to the house anchor
    self.run_cnt += 1 #count of floods run against this house
    #=======================================================================
    # precheck
    #=======================================================================
    """todo: check that floods are increasing
    if self.db_f:
        if self.last_floodo is None:
            pass"""
    if self.db_f:
        #full check
        self.check_house()
        #make sure you dont have any updates qued
        if len(self.upd_cmd_od) > 0:
            raise IOError
    #=======================================================================
    # basement egrade reset check
    #=======================================================================
    """because the grid power changes on each flood, we need to re-calc this"""
    if self.model.bsmt_egrd_code == 'plpm':
        #always calc on the first time
        if self.run_cnt ==1:
            cond = self.set_bsmt_egrd()
        elif not self.bsmt_f: #no basement: nothing to (re)classify
            cond='nobsmt'
        #some change! re-run the calc
        elif not self.gpwr_f == self.floodo.gpwr_f:
            cond = self.set_bsmt_egrd()
        else:
            cond = 'nochng'
            logger.debug('no change in gpwr_f. keeping bsmt egrd = %s'%self.bsmt_egrd)
    else:
        cond = 'no_plpm'
    #===============================================================
    # write the beg histor y
    #===============================================================
    #record (egrd, cond) for this flood's ari in the model-level history frame
    if not self.model.beg_hist_df is None:
        self.model.beg_hist_df.loc[self.dfloc, (self.floodo.ari, 'egrd')] = self.bsmt_egrd
        self.model.beg_hist_df.loc[self.dfloc, (self.floodo.ari, 'cond')] = cond
    #=======================================================================
    # calculate the results
    #=======================================================================
    #check for tiny depths
    if hse_depth < self.model.hse_skip_depth:
        logger.debug('depth below hse_obj.vuln_el setting fdmg=0')
        dmg_ser = pd.Series(name = self.name, index = list(self.dfunc_d.keys()))
        dmg_ser.loc[:] = 0.0
    else:
        logger.debug('returning get_dmgs_wsls \n')
        dmg_ser = self.get_dmgs_wsls(wsl, **kwargs)
    #=======================================================================
    # wrap up
    #=======================================================================
    self.floodo = None #clear this flood reference before the next run
    return dmg_ser
def get_dmgs_wsls(self, #get damage at this depth from each Dfunc
                  wsl,
                  dmg_rat_f = False, #flat to include damage ratios in the outputs
                  ):
    """Query each child Dfunc at this wsl and collect the damage results.

    #=======================================================================
    # INPUTS
    #=======================================================================
    wsl:       water surface elevation handed to each dfunc.run_dfunc()
    dmg_rat_f: flag to also report raw damage values per dfunc (slow path;
               currently disabled by a guard raise below)
    res_ser: shortcut so that damage are added to this series

    Returns a pd.Series of damages keyed by dfunc name; the slow path would
    also add '<name>_rat' entries with the raw damages.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    logger = self.logger.getChild('get_dmgs_wsls')
    id_str = self.get_id()
    #=======================================================================
    # precheck
    #=======================================================================
    #=======================================================================
    # fast calc
    #=======================================================================
    if not dmg_rat_f:
        dmg_ser = pd.Series(name = self.name, index = list(self.dfunc_d.keys()))
        """
        logger.debug('\'%s\' at wsl= %.4f anchor_el = %.4f for %i dfuncs bsmt_egrd \'%s\'\n'
                     %(id_str, wsl, self.anchor_el, len(dmg_ser), self.bsmt_egrd))"""
        for dmg_type, dfunc in self.kids_d.items():
            logger.debug('getting damages for \'%s\' \n'%dmg_type)
            #get the damge -- run_dfunc returns (depth, dmg, dmg_raw)
            _, dmg_ser[dmg_type], _ = dfunc.run_dfunc(wsl)
            dfunc.get_results() #store these outputs if told
    #=======================================================================
    # full calc
    #=======================================================================
    else:
        raise IOError #check this -- slow path deliberately disabled for now
        dmg_df = pd.DataFrame(index = list(self.dfunc_d.keys()), columns = ['depth', 'dmg', 'dmg_raw'])
        dmg_ser = pd.Series()
        logger.debug('\'%s\' at wsl= %.4f anchor_el = %.4f for %i dfuncs bsmt_egrd \'%s\''
                     %(id_str, wsl, self.anchor_el, len(dmg_df), self.bsmt_egrd))
        for indx, row in dmg_df.iterrows():
            dfunc = self.kids_d[indx]
            row['depth'], row['dmg'], row['dmg_raw'] = dfunc.run_dfunc(wsl)
            dfunc.get_results() #store these outputs if told
            #enter into series
            dmg_ser[indx] = row['dmg']
            dmg_ser['%s_rat'%indx] = row['dmg_raw']
    #=======================================================================
    # post chekcs
    #=======================================================================
    #=======================================================================
    # wrap up
    #=======================================================================
    logger.debug('at %s finished with %i dfuncs queried and res_ser: \n %s \n'
                 %(self.model.tstep_o.name, len(self.kids_d), dmg_ser.values.tolist()))
    return dmg_ser
def raise_total_dfunc(self, #compile the total dd_df and raise it as a child
                      dmg_codes = None, place_codes = None):
    """Compile the total depth-damage curve and raise it as a child Dfunc.

    this is mostly used for debugging and comparing of curves form differnet methods

    dmg_codes / place_codes default to the model-wide code lists.

    #=======================================================================
    # todo
    #=======================================================================
    allow totaling by
    possible performance improvement;
        compile the total for all objects, then have Flood.get_dmg_set only run the totals
    """
    #=======================================================================
    # defaults
    #=======================================================================
    logger = self.logger.getChild('raise_total_dfunc')
    #bugfix: apply the None defaults BEFORE get_tot_name(), which iterates
    #dmg_codes (the old order raised TypeError when called with defaults)
    if dmg_codes is None: dmg_codes = self.model.dmg_codes
    if place_codes is None: place_codes = self.model.place_codes
    tot_name = self.get_tot_name(dmg_codes)
    #=======================================================================
    # get the metadata for the child
    #=======================================================================
    df_raw = self.session.pars_df_d['dfunc'] #start with the raw tab data
    #search by placecode
    boolidx1 = df_raw['place_code'] == 'total' #identify the 'total' entries
    #search by dmg_code where all strings in the list are a match
    boolidx2 = hp_pd.search_str_fr_list(df_raw['dmg_code'], dmg_codes, all_any='any') #find
    if boolidx2.sum() <1:
        logger.warning('unable to find a match in the dfunc tab for %s. using default'%tot_name)
        #bugfix: build an explicit all-True mask. the old
        #pd.Series(index=..., dtype=np.bool) yielded all-False (and np.bool
        #was removed from numpy), defeating the intended fallback
        boolidx2 = pd.Series(True, index = boolidx2.index) #all true
    'todo: add some logic for only finding one of the damage codes'
    #get this slice
    boolidx = np.logical_and(boolidx1, boolidx2)
    if not boolidx.sum() == 1:
        logger.error('childmeta search boolidx.sum() = %i'%boolidx.sum())
        raise IOError
    att_ser = df_raw[boolidx].iloc[0]
    'need ot add the name here as were not using the childname override'
    logger.debug('for place_code: \'total\' and dmg_code: \'%s\' found child meta from dfunc_df'%(dmg_codes))
    #=======================================================================
    # raise the child
    #=======================================================================
    #set the name
    child = self.spawn_child(att_ser = att_ser, childname = tot_name)
    #=======================================================================
    # #do custom edits for total
    #=======================================================================
    child.anchor_el = self.anchor_el
    #set the dd_ar
    dd_df = self.get_total_dd_df(dmg_codes, place_codes)
    depths = dd_df['depth'].values - child.anchor_el #convert back to no datum
    child.dd_ar = np.array([depths, dd_df['damage'].values])
    #add this to the dictionary
    self.kids_d[child.name] = child
    logger.debug('copied and edited a child for %s'%child.name)
    return child
def get_total_dd_df(self, dmg_codes, place_codes): #get the total dd_df (across all dmg_types)
    """Total depth-damage curve across the selected dfunc children.

    For each child whose dmg_code/place_code match, converts its cumulative
    dd_ar to incremental damages (with depths shifted to the datum), then
    rebuilds a cumulative total by summing all incrementals at-or-below each
    unique depth. Stores the result on self.dd_df and returns it.
    """
    logger = self.logger.getChild('get_total_dd_df')
    #=======================================================================
    # compile all the depth_damage entries
    #=======================================================================
    #seed frame keeps the expected columns even when no child matches
    frames = [pd.DataFrame(columns = ['depth', 'damage_cum', 'source'])]
    cnt = 0
    for datoname, dato in self.kids_d.items():
        if not dato.dmg_code in dmg_codes: continue #skip this one
        if not dato.place_code in place_codes: continue
        cnt+=1
        #===================================================================
        # get the adjusted dd
        #===================================================================
        df_dato = pd.DataFrame() #blank frame
        df_dato['depth'] = dato.dd_ar[0]+ dato.anchor_el #adjust the dd to the datum
        df_dato['damage_cum'] = dato.dd_ar[1]
        """the native format of the dmg_ar is cumulative damages
        to sum these, we need to back compute to incremental
        """
        df_dato['damage_inc'] = hp_pd.get_incremental(df_dato['damage_cum'], logger=logger)
        df_dato['source'] = datoname
        frames.append(df_dato)
    #bugfix: DataFrame.append() was removed in pandas 2.0; concat the
    #collected frames in one pass instead
    df_full = pd.concat(frames, ignore_index=True, sort=False)
    logger.debug('compiled all dd entries %s from %i dfuncs with dmg_clodes: %s'
                 %(str(df_full.shape), cnt, dmg_codes))
    df_full = df_full.sort_values('depth').reset_index(drop=True)
    #=======================================================================
    # harmonize this into a dd_ar
    #=======================================================================
    #get depths
    depths_list = df_full['depth'].sort_values().unique().tolist()
    #get starter frame
    dd_df = pd.DataFrame(columns = ['depth', 'damage'])
    dd_df['depth'] = depths_list #add in the depths
    #cumulative total: sum every incremental damage at or below each depth
    #(direct .loc writes replace the old fragile iterrows row mutation)
    for index in dd_df.index:
        boolidx = df_full['depth'] <= dd_df.loc[index, 'depth']
        dd_df.loc[index, 'damage'] = df_full.loc[boolidx, 'damage_inc'].sum()
    logger.debug('harmonized and compiled dd_df %s'%str(dd_df.shape))
    self.dd_df = dd_df
    return dd_df
def get_tot_name(self, dmg_codes): #return the equilvanet tot name
    """Name used for the 'total' dfunc: 'total_' plus the concatenated codes."""
    return 'total_' + ''.join(dmg_codes)
def calc_statres_hse(self): #calculate statistics for the house (outside of a run)
    """Compute per-house summary statistics for the configured output pars.

    Each statistic is only computed when its name appears in the session's
    outpars set for this class.

    #=======================================================================
    # CALLS
    #=======================================================================
    this is always called with mypost_update() executing each command in self.post_upd_func_s()
    mypost_update() is called:
        init_dyno()    #first call before setting the OG values
        session.post_update() #called at the end of all the update loops
    """
    logger = self.logger.getChild('calc_statres_hse')
    if self.acode_s == 'none':
        """
        ToDo:
        need to fix how we handle null assets:
            acode_s='none':
                this should be a place holder asset
                    only parcel attributs are read from the binv (parcel_area, asector)
                    all output attributes should be NULL
                When we transition a 'none' to a real,
                    we should have some check to make sure we have all hte attributes we need?
            acode_c='none'
                fine... only calc structural damages (empty asset).
        """
        raise Error('not sure how this manifests on the outputers')
    #set of output attribute names configured for this class
    s = self.session.outpars_d[self.__class__.__name__]
    #=======================================================================
    # BS_ints
    #=======================================================================
    if 'BS_ints' in s: #integration statistic from the 'BS' child
        'I dont like this as it requires updating the child as well'
        """rfda curves also have this stat
        if self.dfunc_type == 'dfeats':"""
        #update the kid, then copy its stat up onto the house
        if not self.kids_d['BS'].calc_intg_stat():
            raise IOError
        self.BS_ints = self.kids_d['BS'].intg_stat
        """this is handled by set_og_vals()
        if self.session.state == 'init':
            self.reset_d['BS_ints'] = self.BS_ints"""
        logger.debug('set BS_ints as %.4f'%self.BS_ints)
    if 'vuln_el' in s: #minimum vulnerability elevation
        self.set_vuln_el()
    if 'max_dmg' in s: #maximum possible damage (also mirrored into the binv)
        self.max_dmg = self.get_max_dmg()
        self.parent.childmeta_df.loc[self.dfloc, 'max_dmg'] = self.max_dmg #set into the binv_df
    if 'dummy_cnt' in s: #count of dummy (curve-less) dfunc children
        cnt = 0
        for dfunc in self.kids_d.values():
            if dfunc.dummy_f:
                cnt+=1
        self.dummy_cnt = cnt
    if 'kid_nm_t' in s: #tuple of child tags
        self.kid_nm_t = tuple([kid.get_tag() for kid in self.kids_d.values()])
    if 'max_dmg_nm' in s: #per-child max damage summary, stringified dict
        d = dict()
        for name, dfunc in self.kids_d.items():
            if dfunc.dummy_f:
                d[dfunc.get_tag()] = 'dummy'
            else:
                d[dfunc.get_tag()] = "{:,.1f}".format(max(dfunc.dd_ar[1]))
        self.max_dmg_nm = str(d)
    if 'beg_hist' in s and (not self.model.beg_hist_df is None): #this house's beg history row
        """view(self.model.beg_hist_df)"""
        self.beg_hist = str(self.model.beg_hist_df.loc[self.dfloc,:].dropna().to_dict())
    return True
def set_vuln_el(self): #calcualte the minimum vulnerability elevation
    """Set self.vuln_el to the lowest anchor_el among real (non-dummy) dfuncs.

    Falls back to np.nan when every child is a dummy.

    #=======================================================================
    # CALLS
    #=======================================================================
    TODO: consider including some logic for bsmt_egrade and spill type
    """
    logger = self.logger.getChild('set_vuln_el')
    """this is a stat, not a dynamic par
    if self.is_frozen('vuln_el', logger=logger): return True"""
    _SENTINEL = 99999 #flags 'no real dfunc found'
    vuln_el = _SENTINEL
    for dfunc in self.kids_d.values():
        if not dfunc.dummy_f: #dummies carry no curve; skip them
            vuln_el = min(dfunc.anchor_el, vuln_el)
    logger.debug('set vuln_el = %.2f from %i dfuncs'%(vuln_el, len(self.kids_d)))
    self.vuln_el = np.nan if vuln_el == _SENTINEL else vuln_el
    return True
def get_max_dmg(self): #calculate the maximum damage for this house
    """Sum of the maximum damage of every real (non-dummy) dfunc child.

    With db_f, first validates that each real child's dd_ar holds the
    expected (depths, damages) pair before summing.
    """
    #=======================================================================
    # precheck
    #=======================================================================
    if self.db_f:
        #every real (non-dummy) child must carry a 2-row dd_ar
        for dmg_type, dfunc in self.kids_d.items():
            if dfunc.dummy_f:
                continue
            if not len(dfunc.dd_ar)==2:
                raise Error('%s.%s is real but got unexpected dd_ar length: %i'
                            %(self.name, dfunc.name, len(dfunc.dd_ar)))
    #=======================================================================
    # calcs
    #=======================================================================
    return sum(dfunc.dd_ar[1].max()
               for dfunc in self.kids_d.values() if not dfunc.dummy_f)
def plot_dd_ars(self, #plot each dfunc on a single axis
                datum='house', place_codes = None, dmg_codes = None, plot_tot = False,
                annot=True, wtf=None, title=None, legon=False,
                ax=None,
                transparent = True, #flag to indicate whether the figure should have a transparent background
                **kwargs):
    """Plot the depth-damage curve of each matching dfunc on one axis.

    #=======================================================================
    # INPUTS
    #=======================================================================
    datum: code to indicate what datum to plot the depth series of each dd_ar
        None: raw depths (all start at zero)
        real: depths relative to the project datum
        house: depths relative to the hse_obj anchor (generally Main = 0)
    annot: True builds the standard annotation block; a string is used
        verbatim; None skips annotation
    wtf: write-to-file flag, defaults to the session's _write_figs
    plot_tot: also plot (building if needed) the 'total' dfunc

    Returns the matplotlib axis.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    logger = self.logger.getChild('plot_dd_ars')
    if wtf==None: wtf= self.session._write_figs #NOTE(review): 'is None' would be safer
    if dmg_codes is None: dmg_codes = self.model.dmg_codes
    if place_codes is None: place_codes = self.model.place_codes
    if title is None:
        title = 'plot_dd_ars on %s for %s and %s'%(self.name, dmg_codes, place_codes)
        if plot_tot: title = title + 'and T'
    'this should let the first plotter setup the axis '
    logger.debug('for \n dmg_codes: %s \n place_codes: %s'%(dmg_codes, place_codes))
    #=======================================================================
    # plot the dfuncs that fit the criteria
    #=======================================================================
    dfunc_nl = [] #list of dfunc names fitting criteria
    for datoname, dato in self.dfunc_d.items():
        if not dato.dmg_code in dmg_codes: continue
        if not dato.place_code in place_codes: continue
        ax = dato.plot_dd_ar(ax=ax, datum = datum, wtf=False, title = title, **kwargs)
        dfunc_nl.append(dato.name)
    #=======================================================================
    # add the total plot
    #=======================================================================
    if plot_tot:
        #get the dato (reuse a previously built total child if present)
        tot_name = self.get_tot_name(dmg_codes)
        if not tot_name in list(self.kids_d.keys()): #build it
            'name searches should still work'
            tot_dato = self.raise_total_dfunc(dmg_codes, place_codes)
        else:
            tot_dato = self.kids_d[tot_name]
        #plot the dato
        ax = tot_dato.plot_dd_ar(ax=ax, datum = datum, wtf=False, title = title, **kwargs)
    #=======================================================================
    # add annotation
    #=======================================================================
    if not annot is None:
        if annot:
            """WARNING: not all attributes are generated for the differnt dfunc types
            """
            B_f_height = float(self.geo_dxcol.loc['height',('B','f')]) #pull from frame
            annot_str = 'acode = %s\n'%self.acode +\
                        ' gis_area = %.2f m2\n'%self.gis_area +\
                        ' anchor_el = %.2f \n'%self.anchor_el +\
                        ' dem_el = %.2f\n'%self.dem_el +\
                        ' B_f_height = %.2f\n'%B_f_height +\
                        ' bsmt_egrd = %s\n'%self.bsmt_egrd +\
                        ' AYOC = %i\n \n'%self.ayoc
            #add info for each dfunc
            for dname in dfunc_nl:
                dfunc = self.dfunc_d[dname]
                annot_str = annot_str + annot_builder(dfunc)
        else: annot_str = annot #NOTE(review): annot=False reaches ax.text with a non-string -- confirm intended
        #=======================================================================
        # Add text string 'annot' to lower left of plot
        #=======================================================================
        xmin, xmax = ax.get_xlim()
        ymin, ymax = ax.get_ylim()
        x_text = xmin + (xmax - xmin)*.7 # 1/10 to the right of the left axis
        y_text = ymin + (ymax - ymin)*.01 #1/10 above the bottom axis
        anno_obj = ax.text(x_text, y_text, annot_str)
    #=======================================================================
    # save figure
    #=======================================================================
    if wtf:
        """
        self.outpath
        """
        fig = ax.figure
        flag = hp.plot.save_fig(self, fig, dpi = self.dpi, legon=legon, transparent = transparent)
        if not flag: raise IOError
    logger.debug('finished as %s'%title)
    return ax
def write_all_dd_dfs(self, tailpath = None): #write all tehchildrens dd_dfs
    """Write the dd_df of every 'dfeats'-type child to csv under tailpath.

    tailpath defaults to <outpath>/<name> and is created when missing.
    """
    if tailpath is None:
        tailpath = os.path.join(self.outpath, self.name)
    if not os.path.exists(tailpath):
        os.makedirs(tailpath)
    #only dfeats-type children carry a recompilable dd_df
    dfeats_kids = (c for c in self.kids_d.values() if c.dfunc_type == 'dfeats')
    for childo in dfeats_kids:
        out_fp = os.path.join(tailpath, childo.name + ' dd_df.csv')
        childo.recompile_dd_df(outpath = out_fp)
|
{"hexsha": "a0896b54518780f386e89004c184f10ae5a3e16b", "size": 71009, "ext": "py", "lang": "Python", "max_stars_repo_path": "canflood/model/sofda/fdmg/house.py", "max_stars_repo_name": "jdngibson/CanFlood", "max_stars_repo_head_hexsha": "37f738be6944ea6b68dfcffeee6b6ac6ff7eb8a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-02-24T17:40:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T21:15:48.000Z", "max_issues_repo_path": "canflood/model/sofda/fdmg/house.py", "max_issues_repo_name": "jdngibson/CanFlood", "max_issues_repo_head_hexsha": "37f738be6944ea6b68dfcffeee6b6ac6ff7eb8a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-02-24T19:22:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-09T21:03:42.000Z", "max_forks_repo_path": "canflood/model/sofda/fdmg/house.py", "max_forks_repo_name": "jdngibson/CanFlood", "max_forks_repo_head_hexsha": "37f738be6944ea6b68dfcffeee6b6ac6ff7eb8a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7454438566, "max_line_length": 142, "alphanum_fraction": 0.3857116704, "include": true, "reason": "import numpy", "num_tokens": 12838}
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Iterator, Tuple, Union, TYPE_CHECKING
import numpy as np
import sympy
from cirq import linalg, protocols, value
from cirq.ops import linear_combinations, pauli_string_phasor
if TYPE_CHECKING:
import cirq
def _all_pauli_strings_commute(pauli_sum: 'cirq.PauliSum') -> bool:
    """Returns True iff every pair of terms in pauli_sum commutes.

    Checks all ordered pairs, short-circuiting on the first failure.
    """
    return all(
        protocols.commutes(x, y) for x in pauli_sum for y in pauli_sum
    )
@value.value_equality(approximate=True)
class PauliSumExponential:
"""Represents an operator defined by the exponential of a PauliSum.
Given a hermitian/anti-hermitian PauliSum PS_1 + PS_2 + ... + PS_N, this
class returns an operation which is equivalent to
exp(j * exponent * (PS_1 + PS_2 + ... + PS_N)).
This class only supports commuting Pauli terms.
"""
def __init__(
self,
pauli_sum_like: 'cirq.PauliSumLike',
exponent: Union[int, float, sympy.Basic] = 1,
atol: float = 1e-8,
):
pauli_sum = linear_combinations.PauliSum.wrap(pauli_sum_like)
if not _all_pauli_strings_commute(pauli_sum):
raise ValueError("PauliSumExponential defined only for commuting pauli sums.")
self._multiplier = None
for pauli_string in pauli_sum:
coeff = pauli_string.coefficient
curr_multiplier = -1j if abs(coeff.imag) > atol else 1.0
if not self._multiplier:
self._multiplier = curr_multiplier
if (
abs(coeff.real) > atol and abs(coeff.imag) > atol
) or curr_multiplier != self._multiplier:
raise ValueError(
pauli_sum, "PauliSum should be either hermitian or anti-hermitian."
)
if not self._multiplier:
self._multiplier = 1.0
self._exponent = exponent
self._pauli_sum = pauli_sum
@property
def qubits(self) -> Tuple['cirq.Qid', ...]:
return self._pauli_sum.qubits
def _value_equality_values_(self) -> Any:
return (self._pauli_sum, self._exponent)
def with_qubits(self, *new_qubits: 'cirq.Qid') -> 'PauliSumExponential':
return PauliSumExponential(self._pauli_sum.with_qubits(*new_qubits), self._exponent)
def _resolve_parameters_(
self, resolver: 'cirq.ParamResolver', recursive: bool
) -> 'PauliSumExponential':
return PauliSumExponential(
self._pauli_sum,
exponent=protocols.resolve_parameters(self._exponent, resolver, recursive),
)
def __iter__(self) -> Iterator['cirq.PauliStringPhasor']:
for pauli_string in self._pauli_sum:
theta = pauli_string.coefficient * self._multiplier
theta *= self._exponent / np.pi
if isinstance(theta, complex):
theta = theta.real
yield pauli_string_phasor.PauliStringPhasor(
pauli_string.with_coefficient(1.0), exponent_neg=-theta, exponent_pos=theta
)
def matrix(self) -> np.ndarray:
"""Reconstructs matrix of self from underlying Pauli sum exponentials.
Raises:
ValueError: if exponent is parameterized.
"""
if protocols.is_parameterized(self._exponent):
raise ValueError("Exponent should not parameterized.")
ret = np.ones(1)
for pauli_string_exp in self:
ret = np.kron(ret, protocols.unitary(pauli_string_exp))
return ret
    def _has_unitary_(self) -> bool:
        """Reports whether the reconstructed matrix is unitary."""
        return linalg.is_unitary(self.matrix())
    def _unitary_(self) -> np.ndarray:
        """Unitary protocol implementation; delegates to matrix()."""
        return self.matrix()
    def __pow__(self, exponent: int) -> 'PauliSumExponential':
        """Raises the operator to a power by scaling its exponent."""
        return PauliSumExponential(self._pauli_sum, self._exponent * exponent)
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f'cirq.{class_name}({self._pauli_sum!r}, {self._exponent!r})'
def __str__(self) -> str:
if self._multiplier == 1:
return f'exp(j * {self._exponent!s} * ({self._pauli_sum!s}))'
else:
return f'exp({self._exponent!s} * ({self._pauli_sum!s}))'
|
{"hexsha": "60a314189f207d1b65bdf8617fe44774e9c15279", "size": 4746, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirq-core/cirq/ops/pauli_sum_exponential.py", "max_stars_repo_name": "LLcat1217/Cirq", "max_stars_repo_head_hexsha": "b88069f7b01457e592ad69d6b413642ef11a56b8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3326, "max_stars_repo_stars_event_min_datetime": "2018-07-18T23:17:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:28:24.000Z", "max_issues_repo_path": "cirq-core/cirq/ops/pauli_sum_exponential.py", "max_issues_repo_name": "bradyb/Cirq", "max_issues_repo_head_hexsha": "610b0d4ea3a7862169610797266734c844ddcc1f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3443, "max_issues_repo_issues_event_min_datetime": "2018-07-18T21:07:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:23:21.000Z", "max_forks_repo_path": "cirq-core/cirq/ops/pauli_sum_exponential.py", "max_forks_repo_name": "bradyb/Cirq", "max_forks_repo_head_hexsha": "610b0d4ea3a7862169610797266734c844ddcc1f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 865, "max_forks_repo_forks_event_min_datetime": "2018-07-18T23:30:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:43:23.000Z", "avg_line_length": 36.5076923077, "max_line_length": 92, "alphanum_fraction": 0.6540244416, "include": true, "reason": "import numpy,import sympy", "num_tokens": 1209}
|
#!/usr/bin/env python
"""Mixture of Gaussians, with block Gibbs for inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time

import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from edward.models import Dirichlet, Categorical, InverseGamma, ParamMixture, \
    Normal

plt.style.use('ggplot')

# Generate data from a known 3-component mixture so the inferred parameters
# can be compared against ground truth at the end.
true_mu = np.array([-1.0, 0.0, 1.0], np.float32) * 10
true_sigmasq = np.array([1.0**2, 2.0**2, 3.0**2], np.float32)
true_pi = np.array([0.2, 0.3, 0.5], np.float32)
N = 10000
K = len(true_mu)
true_z = np.random.choice(np.arange(K), size=N, p=true_pi)
x_data = true_mu[true_z] + np.random.randn(N) * np.sqrt(true_sigmasq[true_z])

# Prior hyperparameters.
pi_alpha = np.ones(K, dtype=np.float32)
mu_sigma = np.std(true_mu)
sigmasq_alpha = 1.0
sigmasq_beta = 2.0

# Model: Dirichlet mixture weights, Normal component means, InverseGamma
# component variances, observed x as a ParamMixture of Normals.
pi = Dirichlet(pi_alpha)
mu = Normal(0.0, mu_sigma, sample_shape=K)
sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
x = ParamMixture(pi, {'loc': mu, 'scale': tf.sqrt(sigmasq)}, Normal,
                 sample_shape=N)
z = x.cat

# Complete conditionals used for the block Gibbs updates.
mu_cond = ed.complete_conditional(mu)
sigmasq_cond = ed.complete_conditional(sigmasq)
pi_cond = ed.complete_conditional(pi)
z_cond = ed.complete_conditional(z)

sess = ed.get_session()

# Initialize the chain with a random draw from the prior.
pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])
print('Initial parameters:')
print('pi:', pi_est)
print('mu:', mu_est)
print('sigmasq:', sigmasq_est)
print()

# Gibbs sampler: alternately resample each block from its complete
# conditional, feeding the latest values of every other block.
cond_dict = {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est, z: z_est, x: x_data}
t0 = time()
T = 500
for t in range(T):
    z_est = sess.run(z_cond, cond_dict)
    cond_dict[z] = z_est
    pi_est, mu_est = sess.run([pi_cond, mu_cond], cond_dict)
    cond_dict[pi] = pi_est
    cond_dict[mu] = mu_est
    sigmasq_est = sess.run(sigmasq_cond, cond_dict)
    cond_dict[sigmasq] = sigmasq_est
print('took %.3f seconds to run %d iterations' % (time() - t0, T))
print()
print('Final sample for parameters::')
print('pi:', pi_est)
print('mu:', mu_est)
print('sigmasq:', sigmasq_est)
print()
print()
print('True parameters:')
print('pi:', true_pi)
print('mu:', true_mu)
print('sigmasq:', true_sigmasq)
print()

plt.figure(figsize=[10, 10])
plt.subplot(2, 1, 1)
plt.hist(x_data, 50)
plt.title('Empirical Distribution of $x$')
plt.xlabel('$x$')
plt.ylabel('frequency')
xl = plt.xlim()
plt.subplot(2, 1, 2)
plt.hist(sess.run(x, {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est}), 50)
# Raw strings keep the TeX backslashes (\mid, \pi, \sigma, ...) from being
# parsed as (invalid) Python escape sequences, which raise SyntaxWarning on
# modern Python; the string contents are byte-identical to the original.
plt.title(r"Predictive distribution $p(x \mid \mathrm{inferred }\ "
          r"\pi, \mu, \sigma^2)$")
plt.xlabel('$x$')
plt.ylabel('frequency')
plt.xlim(xl)
plt.show()
|
{"hexsha": "f31e6cb0ac11a8400970bf832983575873533b17", "size": 2705, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mixture_gaussian_gibbs.py", "max_stars_repo_name": "xiangze/edward", "max_stars_repo_head_hexsha": "6419751d1d849c84c502e5ff3f7249b9bbc7b3aa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-11T03:33:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T03:33:36.000Z", "max_issues_repo_path": "examples/mixture_gaussian_gibbs.py", "max_issues_repo_name": "xiangze/edward", "max_issues_repo_head_hexsha": "6419751d1d849c84c502e5ff3f7249b9bbc7b3aa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/mixture_gaussian_gibbs.py", "max_forks_repo_name": "xiangze/edward", "max_forks_repo_head_hexsha": "6419751d1d849c84c502e5ff3f7249b9bbc7b3aa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-12-22T08:21:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-16T02:45:04.000Z", "avg_line_length": 26.2621359223, "max_line_length": 79, "alphanum_fraction": 0.7053604436, "include": true, "reason": "import numpy", "num_tokens": 866}
|
import pickle
import cv2
import numpy as np
from sklearn.cluster import KMeans
from Board import Board
from util import *
from copy import deepcopy
def preprocess_frame (frame):
    """Return the bottom half of *frame* (rows from height/2 downward).

    Uses floor division (``//``) so the slice index is an int under both
    Python 2 and Python 3; the original ``/`` produces a float index on
    Python 3 and fails.
    """
    return frame[(frame.shape[0] // 2):, :]
def annotate_image (image, km):
    """Return a color-coded copy of *image* based on per-pixel cluster labels.

    Each pixel is replaced by a fixed BGR color according to the label that
    ``km.predict`` assigns it: label 0 -> red, 1 -> green, 2 -> blue (BGR
    order); any other label stays black.

    Fixes vs. original: uses the Python-3-compatible ``print()`` form, and
    calls ``predict`` once per pixel instead of up to three times.

    :param image: H x W x 3 array of pixels.
    :param km: clustering model exposing ``predict(pixel)``.
    :return: H x W x 3 float array of annotation colors.
    """
    km_image = np.zeros(image.shape)
    print(km_image.shape)
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            # Single predict per pixel; the branches are mutually exclusive,
            # so elif preserves the original if-chain's behavior.
            label = km.predict(image[i][j])
            if label == 0:
                km_image[i][j] = (0, 0, 255)
            elif label == 1:
                km_image[i][j] = (0, 255, 0)
            elif label == 2:
                km_image[i][j] = (255, 0, 0)
    return km_image
if __name__ == '__main__':
    #=====[ Step 1: load test images ]=====
    img_empty = cv2.imread('../data/videos/0.jpg')
    img_1 = cv2.imread('../data/videos/1.jpg')
    img_2 = cv2.imread('../data/videos/2.jpg')

    #=====[ Step 2: initialize board ]=====
    corner_classifier_filename = '../data/classifiers/corner_classifier.clf'
    # 'rb' (not 'r') is required to unpickle on Python 3 and is harmless on
    # Python 2; the context manager also closes the handle, which the
    # original leaked.
    with open(corner_classifier_filename, 'rb') as clf_file:
        corner_classifier = pickle.load(clf_file)  # more data
    board = Board(corner_classifier=corner_classifier)

    #=====[ Step 3: add first frame ]=====
    board.add_frame(img_empty)

    #####[ DEBUG: verify BIH is correct ]#####
    # img = board.draw_vertices(img_empty)
    # cv2.imshow ('BIH MARKED', img)
    # key = 0
    # while not key in [27, ord('Q'), ord('q')]:
    #     key = cv2.waitKey (30)

    #=====[ Step 4: add second frame ]=====
    board.add_frame(img_1)
    board.get_occlusion_changes()

    #=====[ Step 5: get square representations ]=====
    s_reg = [s.image_region for s in board.iter_squares()]
    s_norm = [s.image_region_normalized for s in board.iter_squares()]
    s_hsv = [cv2.cvtColor(s.image_region, cv2.COLOR_BGR2HSV) for s in board.iter_squares()]

    #=====[ Step 6: flatten all square pixels into one (n, 3) array ]=====
    reshaped = [s.reshape((s.shape[0] * s.shape[1], 3)) for s in s_reg]
    data = np.concatenate(reshaped, 0)
    # print() form is valid on both Python 2 and Python 3 (the original used
    # the Python-2-only print statement).
    print(data.shape)

    #=====[ Step 7: fit kmeans ]=====
    km = KMeans(n_clusters=4)
    km.fit(data)
    print(km.cluster_centers_)

    #=====[ Step 8: annotate some images ]=====
    a0 = annotate_image(s_reg[0], km)
    a30 = annotate_image(s_reg[30], km)
    a50 = annotate_image(s_reg[50], km)
    cv2.imshow('s0', s_reg[0])
    cv2.imshow('a0', a0)
    cv2.imshow('s30', s_reg[30])
    cv2.imshow('a30', a30)
    cv2.imshow('s50', s_reg[50])
    cv2.imshow('a50', a50)
{"hexsha": "a5eee1a80d3b14dc016b2d738eb5309fe5f3b7f1", "size": 2303, "ext": "py", "lang": "Python", "max_stars_repo_path": "perception/Old/CVChess-master/src/kmeans_test.py", "max_stars_repo_name": "gabrieledamone/DE3-ROB1-CHESS", "max_stars_repo_head_hexsha": "19ec74f10317d27683817989e729cacd6fe55a3f", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-03-28T09:46:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T02:51:46.000Z", "max_issues_repo_path": "perception/Old/CVChess-master/src/kmeans_test.py", "max_issues_repo_name": "gabrieledamone/DE3-ROB1-CHESS", "max_issues_repo_head_hexsha": "19ec74f10317d27683817989e729cacd6fe55a3f", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-02-25T13:14:46.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-15T09:24:09.000Z", "max_forks_repo_path": "perception/Old/CVChess-master/src/kmeans_test.py", "max_forks_repo_name": "gabrieledamone/DE3-ROB1-CHESS", "max_forks_repo_head_hexsha": "19ec74f10317d27683817989e729cacd6fe55a3f", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2018-02-06T23:09:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-24T16:58:59.000Z", "avg_line_length": 27.7469879518, "max_line_length": 88, "alphanum_fraction": 0.6435084672, "include": true, "reason": "import numpy", "num_tokens": 730}
|
# !-*- coding: utf-8 -*-
# SimBERT similarity-task test
# Based on the LCQMC corpus
import numpy as np
from collections import Counter
from bert4keras.backend import keras, K
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import sequence_padding
from bert4keras.snippets import uniout, open
from keras.models import Model
maxlen = 32
# BERT configuration: paths to the pretrained SimBERT checkpoint files.
config_path = './models/chinese_simbert_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'models/chinese_simbert_L-12_H-768_A-12/bert_model.ckpt'
dict_path = './models/chinese_simbert_L-12_H-768_A-12/vocab.txt'
# Build the tokenizer.
tokenizer = Tokenizer(dict_path, do_lower_case=True)  # build the tokenizer
# Build the model and load pretrained weights.
bert = build_transformer_model(
    config_path,
    checkpoint_path,
    with_pool='linear',
    application='unilm',
    return_keras_model=False,
)
# NOTE(review): outputs[0] is presumably the pooled sentence vector given
# with_pool='linear' — confirm against the bert4keras documentation.
encoder = keras.models.Model(bert.model.inputs, bert.model.outputs[0])
def load_data(filename):
    """Load tab-separated similarity data.

    Each line of *filename* holds two texts and a label, separated by tabs.

    :param filename: path to a utf-8 data file.
    :return: list of (text1, text2, int label) tuples, one per line.
    """
    samples = []
    with open(filename, encoding='utf-8') as f:
        for line in f:
            first, second, tag = line.strip().split('\t')
            samples.append((first, second, int(tag)))
    return samples
# Load the dataset.
# train_data = load_data('datasets/lcqmc/lcqmc.train.data')
valid_data = load_data('./data/lcqmc_data/test.txt')  # text1,text2,label
# test_data = load_data('datasets/lcqmc/lcqmc.test.data')
# Similarity-accuracy evaluation (kept commented out for reference):
# data = valid_data
# a_token_ids, b_token_ids, labels = [], [], []
# texts = []
# for d in data:  # (text1,text2,label)
#     token_ids = tokenizer.encode(d[0], max_length=maxlen)[0]
#     a_token_ids.append(token_ids)
#     token_ids = tokenizer.encode(d[1], max_length=maxlen)[0]
#     b_token_ids.append(token_ids)
#     labels.append(d[2])
#     texts.extend(d[:2])
#
# a_token_ids = sequence_padding(a_token_ids)
# b_token_ids = sequence_padding(b_token_ids)
# a_vecs = encoder.predict([a_token_ids, np.zeros_like(a_token_ids)],
#                          verbose=True)
# b_vecs = encoder.predict([b_token_ids, np.zeros_like(b_token_ids)],
#                          verbose=True)
# labels = np.array(labels)
# a_vecs = a_vecs / (a_vecs**2).sum(axis=1, keepdims=True)**0.5
# b_vecs = b_vecs / (b_vecs**2).sum(axis=1, keepdims=True)**0.5
# sims = (a_vecs * b_vecs).sum(axis=1)
#
# # With a threshold of 0.9, accuracy is 79.82%.
# print('acc:', ((sims > 0.9) == labels.astype('bool')).mean())
# Full-corpus retrieval support (kept commented out for reference):
# vecs = np.concatenate([a_vecs, b_vecs], axis=1).reshape(-1, 768)
def most_similar(text, topn=10):
    """Retrieve the *topn* sentences most similar to *text*.

    NOTE(review): the retrieval part (dot product against the corpus vectors)
    is commented out below, so currently this only encodes *text*, prints
    debug output, and returns None.
    """
    token_ids, segment_ids = tokenizer.encode(text, max_length=maxlen)
    # print("token_ids={},segment_ids={}".format(token_ids, segment_ids))
    vec = encoder.predict([[token_ids], [segment_ids]])[0]
    print(vec)
    # print('vec size={}, element={}'.format(len(vec), vec[-1]))
    # print('===========>之前vec={}'.format(vec[:5]))
    # vec /= (vec**2).sum()**0.5
    # print('===========>之后vec={}'.format(vec[:5]))
    try:
        print('vec size={}, element={}'.format(len(vec), vec[-1]))
    except Exception as e:
        print(e)
    # sims = np.dot(vecs, vec)
    # return [(texts[i], sims[i]) for i in sims.argsort()[::-1][:topn]]
def get_vec_by_query(text):
    """Return the SimBERT embedding of *text* as a Python list.
    """
    token_ids, segment_ids = tokenizer.encode(text, max_length=maxlen)
    print(text)
    print(token_ids, segment_ids)
    vec = encoder.predict([[token_ids], [segment_ids]])[0]
    return list(vec)
def save_query(infile, outfile):
    '''
    Encode every non-empty line of *infile* and write one
    "text<TAB>vector" line per query to *outfile*.

    :param infile: path to the input query file (utf-8, one query per line).
    :param outfile: path of the output file to (over)write.
    :return: None
    '''
    # Context managers guarantee both files are closed even if encoding a
    # query raises (the original left the output handle open on error).
    with open(outfile, encoding='utf-8', mode='w') as outfile_fp:
        with open(infile, 'r', encoding='utf-8') as fp:
            for line in fp:
                text = line.strip()
                if text == '' or len(text) == 0:
                    continue
                # BUGFIX: the original had ``text = A + B`` here, which raised
                # NameError (A and B are undefined anywhere in the file); the
                # stripped line itself is the query to encode.
                vec = get_vec_by_query(text)
                outfile_fp.write("{}\t{}".format(text, vec))
                outfile_fp.write("\n")
    print('写入文件:{}'.format(outfile))
if __name__ == '__main__':
    # Encode one click-log file and dump "text<TAB>vector" lines.
    name = 'click.data.03'
    infile = 'C:/Users/daijitao/Desktop/upload2/data/click.data/' + name
    outfile = 'data/query_data/out.' + name + '.txt'
    save_query(infile, outfile)
    # fp = open(outfile, 'r',encoding='utf-8')
    # for i in fp:
    #     print(i.split("\t")[-1])
"""
>>> most_similar(u'怎么开初婚未育证明', 20)
[
(u'开初婚未育证明怎么弄?', 0.9728098),
(u'初婚未育情况证明怎么开?', 0.9612292),
(u'到哪里开初婚未育证明?', 0.94987774),
(u'初婚未育证明在哪里开?', 0.9476072),
(u'男方也要开初婚证明吗?', 0.7712214),
(u'初婚证明除了村里开,单位可以开吗?', 0.63224965),
(u'生孩子怎么发', 0.40672967),
(u'是需要您到当地公安局开具变更证明的', 0.39978087),
(u'淘宝开店认证未通过怎么办', 0.39477515),
(u'您好,是需要当地公安局开具的变更证明的', 0.39288986),
(u'没有工作证明,怎么办信用卡', 0.37745982),
(u'未成年小孩还没办身份证怎么买高铁车票', 0.36504325),
(u'烟草证不给办,应该怎么办呢?', 0.35596085),
(u'怎么生孩子', 0.3493368),
(u'怎么开福利彩票站', 0.34158638),
(u'沈阳烟草证怎么办?好办不?', 0.33718678),
(u'男性不孕不育有哪些特征', 0.33530876),
(u'结婚证丢了一本怎么办离婚', 0.33166665),
(u'怎样到地税局开发票?', 0.33079252),
(u'男性不孕不育检查要注意什么?', 0.3274408)
]
"""
|
{"hexsha": "0a04e9eeb8388b07e22e9255d9c6c9d8d180502e", "size": 5066, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_retrieval_test_03.py", "max_stars_repo_name": "DaiJitao/simbert", "max_stars_repo_head_hexsha": "6b562985db4004768613833c08d664a69a8a5294", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-01T08:05:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-01T08:05:02.000Z", "max_issues_repo_path": "my_retrieval_test_03.py", "max_issues_repo_name": "DaiJitao/simbert", "max_issues_repo_head_hexsha": "6b562985db4004768613833c08d664a69a8a5294", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_retrieval_test_03.py", "max_forks_repo_name": "DaiJitao/simbert", "max_forks_repo_head_hexsha": "6b562985db4004768613833c08d664a69a8a5294", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1149425287, "max_line_length": 75, "alphanum_fraction": 0.6259376234, "include": true, "reason": "import numpy", "num_tokens": 1864}
|
import numpy as np
# Unseeded generator: output differs on every run.
rng = np.random.default_rng()
# Number of nearest points to select.
k = 3
mu = 1
sigma = 1
# Draw 10 samples from the normal distribution N(mu, sigma^2).
arr = rng.normal(mu, sigma, 10)
target = 0
# Absolute distance of each sample from the target value.
distances = abs(arr - target)
# argpartition guarantees the indices of the k smallest distances occupy the
# first k positions (in unspecified order) without fully sorting.
indices = np.argpartition(distances, k)
partitioned_by_distance = arr[indices]
# The k data points closest to the target.
k_nearest = partitioned_by_distance[:k]
if __name__ == '__main__':
    print('Data:\n', arr)
    print('\nDistances from target value:\n', distances)
    print('\nIndices of partitioned data:\n', indices)
    print('\nPartitioned data:\n', partitioned_by_distance)
    print('\nK=3 nearest data points\n', k_nearest)
{"hexsha": "170c676d63d8d7d6bfe53d9ba8f9ba24a3211c72", "size": 544, "ext": "py", "lang": "Python", "max_stars_repo_path": "NumPy/Transposing Sorting Concatenating/Partial Sort/task.py", "max_stars_repo_name": "jetbrains-academy/Python-Libraries-NumPy", "max_stars_repo_head_hexsha": "7ce0f2d08f87502d5d97bbc6921f0566184d4ebb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NumPy/Transposing Sorting Concatenating/Partial Sort/task.py", "max_issues_repo_name": "jetbrains-academy/Python-Libraries-NumPy", "max_issues_repo_head_hexsha": "7ce0f2d08f87502d5d97bbc6921f0566184d4ebb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-01-14T10:40:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T13:01:13.000Z", "max_forks_repo_path": "NumPy/Transposing Sorting Concatenating/Partial Sort/task.py", "max_forks_repo_name": "jetbrains-academy/Python-Libraries-NumPy", "max_forks_repo_head_hexsha": "7ce0f2d08f87502d5d97bbc6921f0566184d4ebb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9047619048, "max_line_length": 59, "alphanum_fraction": 0.7040441176, "include": true, "reason": "import numpy", "num_tokens": 157}
|
!> \file radlw_main.f
!! This file contains NCEP's modifications of the rrtmg-lw radiation
!! code from AER.
!!!!! ============================================================== !!!!!
!!!!! lw-rrtm3 radiation package description !!!!!
!!!!! ============================================================== !!!!!
! !
! this package includes ncep's modifications of the rrtm-lw radiation !
! code from aer inc. !
! !
! the lw-rrtm3 package includes these parts: !
! !
! 'radlw_rrtm3_param.f' !
! 'radlw_rrtm3_datatb.f' !
! 'radlw_rrtm3_main.f' !
! !
! the 'radlw_rrtm3_param.f' contains: !
! !
! 'module_radlw_parameters' -- band parameters set up !
! !
! the 'radlw_rrtm3_datatb.f' contains: !
! !
! 'module_radlw_avplank' -- plank flux data !
! 'module_radlw_ref' -- reference temperature and pressure !
! 'module_radlw_cldprlw' -- cloud property coefficients !
! 'module_radlw_kgbnn' -- absorption coeffients for 16 !
! bands, where nn = 01-16 !
! !
! the 'radlw_rrtm3_main.f' contains: !
! !
! 'rrtmg_lw' -- main lw radiation transfer !
! !
! in the main module 'rrtmg_lw' there are only two !
! externally callable subroutines: !
! !
! !
! 'lwrad' -- main lw radiation routine !
! inputs: !
! (plyr,plvl,tlyr,tlvl,qlyr,olyr,gasvmr, !
! clouds,icseed,aerosols,sfemis,sfgtmp, !
! dzlyr,delpin,de_lgth, !
! npts, nlay, nlp1, lprnt, !
! outputs: !
! hlwc,topflx,sfcflx,cldtau, !
!! optional outputs: !
! HLW0,HLWB,FLXPRF) !
! !
! 'rlwinit' -- initialization routine !
! inputs: !
! ( me ) !
! outputs: !
! (none) !
! !
! all the lw radiation subprograms become contained subprograms !
! in module 'rrtmg_lw' and many of them are not directly !
!      accessable from places outside the module.  ("accessible")        !
! !
! derived data type constructs used: !
! !
! 1. radiation flux at toa: (from module 'module_radlw_parameters') !
! topflw_type - derived data type for toa rad fluxes !
! upfxc total sky upward flux at toa !
! upfx0 clear sky upward flux at toa !
! !
! 2. radiation flux at sfc: (from module 'module_radlw_parameters') !
! sfcflw_type - derived data type for sfc rad fluxes !
! upfxc total sky upward flux at sfc !
! upfx0 clear sky upward flux at sfc !
! dnfxc total sky downward flux at sfc !
! dnfx0 clear sky downward flux at sfc !
! !
! 3. radiation flux profiles(from module 'module_radlw_parameters') !
! proflw_type - derived data type for rad vertical prof !
! upfxc level upward flux for total sky !
! dnfxc level downward flux for total sky !
! upfx0 level upward flux for clear sky !
! dnfx0 level downward flux for clear sky !
! !
! external modules referenced: !
! !
! 'module physparam' !
! 'module physcons' !
! 'mersenne_twister' !
! !
! compilation sequence is: !
! !
! 'radlw_rrtm3_param.f' !
! 'radlw_rrtm3_datatb.f' !
! 'radlw_rrtm3_main.f' !
! !
! and all should be put in front of routines that use lw modules !
! !
!==========================================================================!
! !
! the original aer's program declarations: !
! !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! |
! Copyright 2002-2007, Atmospheric & Environmental Research, Inc. (AER). |
! This software may be used, copied, or redistributed as long as it is |
! not sold and this copyright notice is reproduced on each copy made. |
! This model is provided as is without any express or implied warranties. |
! (http://www.rtweb.aer.com/) |
! |
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! !
! ************************************************************************ !
! !
! rrtmg_lw !
! !
! !
! a rapid radiative transfer model !
! for the longwave region !
! for application to general circulation models !
! !
! !
! atmospheric and environmental research, inc. !
! 131 hartwell avenue !
! lexington, ma 02421 !
! !
! eli j. mlawer !
! jennifer s. delamere !
! michael j. iacono !
! shepard a. clough !
! !
! !
! email: miacono@aer.com !
! email: emlawer@aer.com !
! email: jdelamer@aer.com !
! !
! the authors wish to acknowledge the contributions of the !
! following people: steven j. taubman, karen cady-pereira, !
! patrick d. brown, ronald e. farren, luke chen, robert bergstrom. !
! !
! ************************************************************************ !
! !
! references: !
! (rrtm_lw/rrtmg_lw): !
! clough, s.A., m.w. shephard, e.j. mlawer, j.s. delamere, !
! m.j. iacono, k. cady-pereira, s. boukabara, and p.d. brown: !
! atmospheric radiative transfer modeling: a summary of the aer !
! codes, j. quant. spectrosc. radiat. transfer, 91, 233-244, 2005. !
! !
! mlawer, e.j., s.j. taubman, p.d. brown, m.j. iacono, and s.a. !
! clough: radiative transfer for inhomogeneous atmospheres: rrtm, !
! a validated correlated-k model for the longwave. j. geophys. res., !
! 102, 16663-16682, 1997. !
! !
! (mcica): !
! pincus, r., h. w. barker, and j.-j. morcrette: a fast, flexible, !
! approximation technique for computing radiative transfer in !
! inhomogeneous cloud fields, j. geophys. res., 108(d13), 4376, !
! doi:10.1029/2002JD003322, 2003. !
! !
! ************************************************************************ !
! !
! aer's revision history: !
! this version of rrtmg_lw has been modified from rrtm_lw to use a !
! reduced set of g-points for application to gcms. !
! !
! -- original version (derived from rrtm_lw), reduction of g-points, !
! other revisions for use with gcms. !
! 1999: m. j. iacono, aer, inc. !
! -- adapted for use with ncar/cam3. !
! may 2004: m. j. iacono, aer, inc. !
! -- revised to add mcica capability. !
! nov 2005: m. j. iacono, aer, inc. !
! -- conversion to f90 formatting for consistency with rrtmg_sw. !
! feb 2007: m. j. iacono, aer, inc. !
! -- modifications to formatting to use assumed-shape arrays. !
! aug 2007: m. j. iacono, aer, inc. !
! !
! ************************************************************************ !
! !
! ncep modifications history log: !
! !
! nov 1999, ken campana -- received the original code from !
! aer (1998 ncar ccm version), updated to link up with !
! ncep mrf model !
! jun 2000, ken campana -- added option to switch random and !
! maximum/random cloud overlap !
! 2001, shrinivas moorthi -- further updates for mrf model !
! may 2001, yu-tai hou -- updated on trace gases and cloud !
! property based on rrtm_v3.0 codes. !
! dec 2001, yu-tai hou -- rewritten code into fortran 90 std !
! set ncep radiation structure standard that contains !
!          three plug-in compatible fortran program files:              !
! 'radlw_param.f', 'radlw_datatb.f', 'radlw_main.f' !
! fixed bugs in subprograms taugb14, taugb2, etc. added !
! out-of-bounds protections. (a detailed note of !
! up_to_date modifications/corrections by ncep was sent !
! to aer in 2002) !
! jun 2004, yu-tai hou -- added mike iacono's apr 2004 !
! modification of variable diffusivity angles. !
! apr 2005, yu-tai hou -- minor modifications on module !
! structures include rain/snow effect (this version of !
! code was given back to aer in jun 2006) !
! mar 2007, yu-tai hou -- added aerosol effect for ncep !
! models using the generallized aerosol optical property!
! scheme for gfs model. !
! apr 2007, yu-tai hou -- added spectral band heating as an !
! optional output to support the 500 km gfs model's !
! upper stratospheric radiation calculations. and !
! restructure optional outputs for easy access by !
! different models. !
! oct 2008, yu-tai hou -- modified to include new features !
! from aer's newer release v4.4-v4.7, including the !
! mcica sub-grid cloud option. add rain/snow optical !
! properties support to cloudy sky calculations. !
! correct errors in mcica cloud optical properties for !
! ebert & curry scheme (ilwcice=1) that needs band !
! index conversion. simplified and unified sw and lw !
! sub-column cloud subroutines into one module by using !
! optional parameters. !
! mar 2009, yu-tai hou -- replaced the original random number!
! generator coming from the original code with ncep w3 !
! library to simplify the program and moved sub-column !
! cloud subroutines inside the main module. added !
! option of user provided permutation seeds that could !
! be randomly generated from forecast time stamp. !
! oct 2009, yu-tai hou -- modified subrtines "cldprop" and !
! "rlwinit" according updats from aer's rrtmg_lw v4.8. !
! nov 2009, yu-tai hou -- modified subrtine "taumol" according
! updats from aer's rrtmg_lw version 4.82. notice the !
! cloud ice/liquid are assumed as in-cloud quantities, !
! not as grid averaged quantities. !
! jun 2010, yu-tai hou -- optimized code to improve efficiency
! apr 2012, b. ferrier and y. hou -- added conversion factor to fu's!
! cloud-snow optical property scheme. !
! nov 2012, yu-tai hou -- modified control parameters thru !
! module 'physparam'. !
! FEB 2017 A.Cheng - add odpth output, effective radius input !
! jun 2018, h-m lin/y-t hou -- added new option of cloud overlap !
! method 'de-correlation-length' for mcica application !
! !
!!!!! ============================================================== !!!!!
!!!!! end descriptions !!!!!
!!!!! ============================================================== !!!!!
!> This module contains the CCPP-compliant NCEP's modifications of the
!! rrtm-lw radiation code from aer inc.
module rrtmg_lw
!
use physparam, only : ilwrate, ilwrgas, ilwcliq, ilwcice, &
& isubclw, icldflg, iovrlw, ivflip, &
& kind_phys
use physcons, only : con_g, con_cp, con_avgd, con_amd, &
& con_amw, con_amo3
use mersenne_twister, only : random_setseed, random_number, &
& random_stat
use module_radlw_parameters
!
use module_radlw_avplank, only : totplnk
use module_radlw_ref, only : preflog, tref, chi_mls
!
implicit none
!
private
!
! ... version tag and last revision date
character(40), parameter :: &
& VTAGLW='NCEP LW v5.1 Nov 2012 -RRTMG-LW v4.82 '
! & VTAGLW='NCEP LW v5.0 Aug 2012 -RRTMG-LW v4.82 '
! & VTAGLW='RRTMG-LW v4.82 Nov 2009 '
! & VTAGLW='RRTMG-LW v4.8 Oct 2009 '
! & VTAGLW='RRTMG-LW v4.71 Mar 2009 '
! & VTAGLW='RRTMG-LW v4.4 Oct 2008 '
! & VTAGLW='RRTM-LW v2.3g Mar 2007 '
! & VTAGLW='RRTM-LW v2.3g Apr 2004 '
! --- constant values
real (kind=kind_phys), parameter :: eps = 1.0e-6
real (kind=kind_phys), parameter :: oneminus= 1.0-eps
real (kind=kind_phys), parameter :: cldmin = tiny(cldmin)
real (kind=kind_phys), parameter :: bpade = 1.0/0.278 ! pade approx constant
real (kind=kind_phys), parameter :: stpfac = 296.0/1013.0
real (kind=kind_phys), parameter :: wtdiff = 0.5 ! weight for radiance to flux conversion
real (kind=kind_phys), parameter :: tblint = ntbl ! lookup table conversion factor
real (kind=kind_phys), parameter :: f_zero = 0.0
real (kind=kind_phys), parameter :: f_one = 1.0
! ... atomic weights for conversion from mass to volume mixing ratios
real (kind=kind_phys), parameter :: amdw = con_amd/con_amw
real (kind=kind_phys), parameter :: amdo3 = con_amd/con_amo3
! ... band indices
integer, dimension(nbands) :: nspa, nspb
data nspa / 1, 1, 9, 9, 9, 1, 9, 1, 9, 1, 1, 9, 9, 1, 9, 9 /
data nspb / 1, 1, 5, 5, 5, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0 /
! ... band wavenumber intervals
! real (kind=kind_phys) :: wavenum1(nbands), wavenum2(nbands)
! data wavenum1/ &
! & 10., 350., 500., 630., 700., 820., 980., 1080., &
!err & 1180., 1390., 1480., 1800., 2080., 2250., 2390., 2600. /
! & 1180., 1390., 1480., 1800., 2080., 2250., 2380., 2600. /
! data wavenum2/ &
! & 350., 500., 630., 700., 820., 980., 1080., 1180., &
!err & 1390., 1480., 1800., 2080., 2250., 2390., 2600., 3250. /
! & 1390., 1480., 1800., 2080., 2250., 2380., 2600., 3250. /
! real (kind=kind_phys) :: delwave(nbands)
! data delwave / 340., 150., 130., 70., 120., 160., 100., 100., &
! & 210., 90., 320., 280., 170., 130., 220., 650. /
! --- reset diffusivity angle for Bands 2-3 and 5-9 to vary (between 1.50
! and 1.80) as a function of total column water vapor. the function
! has been defined to minimize flux and cooling rate errors in these bands
! over a wide range of precipitable water values.
real (kind=kind_phys), dimension(nbands) :: a0, a1, a2
data a0 / 1.66, 1.55, 1.58, 1.66, 1.54, 1.454, 1.89, 1.33, &
& 1.668, 1.66, 1.66, 1.66, 1.66, 1.66, 1.66, 1.66 /
data a1 / 0.00, 0.25, 0.22, 0.00, 0.13, 0.446, -0.10, 0.40, &
& -0.006, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 /
data a2 / 0.00, -12.0, -11.7, 0.00, -0.72,-0.243, 0.19,-0.062, &
& 0.414, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 /
!! --- logical flags for optional output fields
logical :: lhlwb = .false.
logical :: lhlw0 = .false.
logical :: lflxprf= .false.
! --- those data will be set up only once by "rlwinit"
! ... fluxfac, heatfac are factors for fluxes (in w/m**2) and heating
! rates (in k/day, or k/sec set by subroutine 'rlwinit')
! semiss0 are default surface emissivity for each bands
real (kind=kind_phys) :: fluxfac, heatfac, semiss0(nbands)
data semiss0(:) / nbands*1.0 /
real (kind=kind_phys) :: tau_tbl(0:ntbl) !< clr-sky opt dep (for cldy transfer)
real (kind=kind_phys) :: exp_tbl(0:ntbl) !< transmittance lookup table
real (kind=kind_phys) :: tfn_tbl(0:ntbl) !< tau transition function; i.e. the
!< transition of planck func from mean lyr
!< temp to lyr boundary temp as a func of
!< opt dep. "linear in tau" method is used.
! --- the following variables are used for sub-column cloud scheme
integer, parameter :: ipsdlw0 = ngptlw ! initial permutation seed
! --- public accessable subprograms
public rrtmg_lw_init, rrtmg_lw_run, rrtmg_lw_finalize, rlwinit
! ================
contains
! ================
subroutine rrtmg_lw_init ()
end subroutine rrtmg_lw_init
!> \defgroup module_radlw_main GFS RRTMG Longwave Module
!! \brief This module includes NCEP's modifications of the RRTMG-LW radiation
!! code from AER.
!!
!! The RRTM-LW package includes three files:
!! - radlw_param.f, which contains:
!! - module_radlw_parameters: band parameters set up
!! - radlw_datatb.f, which contains modules:
!! - module_radlw_avplank: plank flux data
!! - module_radlw_ref: reference temperature and pressure
!! - module_radlw_cldprlw: cloud property coefficients
!! - module_radlw_kgbnn: absorption coefficients for 16 bands, where nn = 01-16
!! - radlw_main.f, which contains:
!! - rrtmg_lw_run(): the main LW radiation routine
!! - rlwinit(): the initialization routine
!!
!!\version NCEP LW v5.1 Nov 2012 -RRTMG-LW v4.82
!!
!!\copyright 2002-2007, Atmospheric & Environmental Research, Inc. (AER).
!! This software may be used, copied, or redistributed as long as it is
!! not sold and this copyright notice is reproduced on each copy made.
!! This model is provided as is without any express or implied warranties.
!! (http://www.rtweb.aer.com/)
!! \section arg_table_rrtmg_lw_run Argument Table
!! \htmlinclude rrtmg_lw_run.html
!!
!> \section gen_lwrad RRTMG Longwave Radiation Scheme General Algorithm
!> @{
subroutine rrtmg_lw_run &
& ( plyr,plvl,tlyr,tlvl,qlyr,olyr,gasvmr_co2, gasvmr_n2o, & ! --- inputs
& gasvmr_ch4, gasvmr_o2, gasvmr_co, gasvmr_cfc11, &
& gasvmr_cfc12, gasvmr_cfc22, gasvmr_ccl4, &
& icseed,aeraod,aerssa,sfemis,sfgtmp, &
& dzlyr,delpin,de_lgth, &
& npts, nlay, nlp1, lprnt, cld_cf, lslwr, &
& hlwc,topflx,sfcflx,cldtau, & ! --- outputs
& HLW0,HLWB,FLXPRF, & ! --- optional
& cld_lwp, cld_ref_liq, cld_iwp, cld_ref_ice, &
& cld_rwp,cld_ref_rain, cld_swp, cld_ref_snow, &
& cld_od, errmsg, errflg &
& )
! ==================== defination of variables ==================== !
! !
! input variables: !
! plyr (npts,nlay) : layer mean pressures (mb) !
! plvl (npts,nlp1) : interface pressures (mb) !
! tlyr (npts,nlay) : layer mean temperature (k) !
! tlvl (npts,nlp1) : interface temperatures (k) !
! qlyr (npts,nlay) : layer specific humidity (gm/gm) *see inside !
! olyr (npts,nlay) : layer ozone concentration (gm/gm) *see inside !
! gasvmr(npts,nlay,:): atmospheric gases amount: !
! (check module_radiation_gases for definition) !
! gasvmr(:,:,1) - co2 volume mixing ratio !
! gasvmr(:,:,2) - n2o volume mixing ratio !
! gasvmr(:,:,3) - ch4 volume mixing ratio !
! gasvmr(:,:,4) - o2 volume mixing ratio !
! gasvmr(:,:,5) - co volume mixing ratio !
! gasvmr(:,:,6) - cfc11 volume mixing ratio !
! gasvmr(:,:,7) - cfc12 volume mixing ratio !
! gasvmr(:,:,8) - cfc22 volume mixing ratio !
! gasvmr(:,:,9) - ccl4 volume mixing ratio !
! clouds(npts,nlay,:): layer cloud profiles: !
! (check module_radiation_clouds for definition) !
! clouds(:,:,1) - layer total cloud fraction !
! clouds(:,:,2) - layer in-cloud liq water path (g/m**2) !
! clouds(:,:,3) - mean eff radius for liq cloud (micron) !
! clouds(:,:,4) - layer in-cloud ice water path (g/m**2) !
! clouds(:,:,5) - mean eff radius for ice cloud (micron) !
! clouds(:,:,6) - layer rain drop water path (g/m**2) !
! clouds(:,:,7) - mean eff radius for rain drop (micron) !
! clouds(:,:,8) - layer snow flake water path (g/m**2) !
! clouds(:,:,9) - mean eff radius for snow flake (micron) !
! icseed(npts) : auxiliary special cloud related array !
! when module variable isubclw=2, it provides !
! permutation seed for each column profile that !
! are used for generating random numbers. !
! when isubclw /=2, it will not be used. !
! aerosols(npts,nlay,nbands,:) : aerosol optical properties !
! (check module_radiation_aerosols for definition)!
! (:,:,:,1) - optical depth !
! (:,:,:,2) - single scattering albedo !
! (:,:,:,3) - asymmetry parameter !
! sfemis (npts) : surface emissivity !
! sfgtmp (npts) : surface ground temperature (k) !
! dzlyr(npts,nlay) : layer thickness (km) !
! delpin(npts,nlay): layer pressure thickness (mb) !
! de_lgth(npts) : cloud decorrelation length (km) !
! npts : total number of horizontal points !
! nlay, nlp1 : total number of vertical layers, levels !
! lprnt : cntl flag for diagnostic print out !
! !
! output variables: !
! hlwc (npts,nlay): total sky heating rate (k/day or k/sec) !
! topflx(npts) : radiation fluxes at top, component: !
! (check module_radlw_paramters for definition) !
! upfxc - total sky upward flux at top (w/m2) !
! upfx0 - clear sky upward flux at top (w/m2) !
! sfcflx(npts) : radiation fluxes at sfc, component: !
! (check module_radlw_paramters for definition) !
! upfxc - total sky upward flux at sfc (w/m2) !
! upfx0 - clear sky upward flux at sfc (w/m2) !
! dnfxc - total sky downward flux at sfc (w/m2) !
! dnfx0 - clear sky downward flux at sfc (w/m2) !
! cldtau(npts,nlay): approx 10mu band layer cloud optical depth !
! !
!! optional output variables: !
! hlwb(npts,nlay,nbands): spectral band total sky heating rates !
! hlw0 (npts,nlay): clear sky heating rate (k/day or k/sec) !
! flxprf(npts,nlp1): level radiative fluxes (w/m2), components: !
! (check module_radlw_paramters for definition) !
! upfxc - total sky upward flux !
! dnfxc - total sky dnward flux !
! upfx0 - clear sky upward flux !
! dnfx0 - clear sky dnward flux !
! !
! external module variables: (in physparam) !
! ilwrgas - control flag for rare gases (ch4,n2o,o2,cfcs, etc.) !
! =0: do not include rare gases !
! >0: include all rare gases !
! ilwcliq - control flag for liq-cloud optical properties !
! =1: input cld liqp & reliq, hu & stamnes (1993) !
! =2: not used !
! ilwcice - control flag for ice-cloud optical properties !
! =1: input cld icep & reice, ebert & curry (1997) !
! =2: input cld icep & reice, streamer (1996) !
! =3: input cld icep & reice, fu (1998) !
! isubclw - sub-column cloud approximation control flag !
! =0: no sub-col cld treatment, use grid-mean cld quantities !
! =1: mcica sub-col, prescribed seeds to get random numbers !
! =2: mcica sub-col, providing array icseed for random numbers!
! iovrlw - cloud overlapping control flag !
! =0: random overlapping clouds !
! =1: maximum/random overlapping clouds !
! =2: maximum overlap cloud (used for isubclw>0 only) !
! =3: decorrelation-length overlap (for isubclw>0 only) !
! ivflip - control flag for vertical index direction !
! =0: vertical index from toa to surface !
! =1: vertical index from surface to toa !
! !
! module parameters, control variables: !
! nbands - number of longwave spectral bands !
! maxgas - maximum number of absorbing gaseous !
! maxxsec - maximum number of cross-sections !
! ngptlw - total number of g-point subintervals !
! ng## - number of g-points in band (##=1-16) !
! ngb(ngptlw) - band indices for each g-point !
! bpade - pade approximation constant (1/0.278) !
! nspa,nspb(nbands)- number of lower/upper ref atm's per band !
! delwave(nbands) - longwave band width (wavenumbers) !
! ipsdlw0 - permutation seed for mcica sub-col clds !
! !
! major local variables: !
! pavel (nlay) - layer pressures (mb) !
! delp (nlay) - layer pressure thickness (mb) !
! tavel (nlay) - layer temperatures (k) !
! tz (0:nlay) - level (interface) temperatures (k) !
! semiss (nbands) - surface emissivity for each band !
! wx (nlay,maxxsec) - cross-section molecules concentration !
! coldry (nlay) - dry air column amount !
! (1.e-20*molecules/cm**2) !
! cldfrc (0:nlp1) - layer cloud fraction !
! taucld (nbands,nlay) - layer cloud optical depth for each band !
! cldfmc (ngptlw,nlay) - layer cloud fraction for each g-point !
! tauaer (nbands,nlay) - aerosol optical depths !
! fracs (ngptlw,nlay) - planck fractions !
! tautot (ngptlw,nlay) - total optical depths (gaseous+aerosols) !
! colamt (nlay,maxgas) - column amounts of absorbing gases !
! 1-maxgas are for watervapor, carbon !
! dioxide, ozone, nitrous oxide, methane, !
! oxigen, carbon monoxide, respectively !
! (molecules/cm**2) !
! pwvcm - column precipitable water vapor (cm) !
! secdiff(nbands) - variable diffusivity angle defined as !
! an exponential function of the column !
! water amount in bands 2-3 and 5-9. !
! this reduces the bias of several w/m2 in !
! downward surface flux in high water !
! profiles caused by using the constant !
! diffusivity angle of 1.66. (mji) !
! facij (nlay) - indicator of interpolation factors !
! =0/1: indicate lower/higher temp & height !
! selffac(nlay) - scale factor for self-continuum, equals !
! (w.v. density)/(atm density at 296K,1013 mb) !
! selffrac(nlay) - factor for temp interpolation of ref !
! self-continuum data !
! indself(nlay) - index of the lower two appropriate ref !
! temp for the self-continuum interpolation !
! forfac (nlay) - scale factor for w.v. foreign-continuum !
! forfrac(nlay) - factor for temp interpolation of ref !
! w.v. foreign-continuum data !
! indfor (nlay) - index of the lower two appropriate ref !
! temp for the foreign-continuum interp !
! laytrop - tropopause layer index at which switch is !
! made from one conbination kew species to !
! another. !
! jp(nlay),jt(nlay),jt1(nlay) !
! - lookup table indexes !
! totuflux(0:nlay) - total-sky upward longwave flux (w/m2) !
! totdflux(0:nlay) - total-sky downward longwave flux (w/m2) !
! htr(nlay) - total-sky heating rate (k/day or k/sec) !
! totuclfl(0:nlay) - clear-sky upward longwave flux (w/m2) !
! totdclfl(0:nlay) - clear-sky downward longwave flux (w/m2) !
! htrcl(nlay) - clear-sky heating rate (k/day or k/sec) !
! fnet (0:nlay) - net longwave flux (w/m2) !
! fnetc (0:nlay) - clear-sky net longwave flux (w/m2) !
! !
! !
! ====================== end of definitions =================== !
! --- inputs:
integer, intent(in) :: npts, nlay, nlp1
integer, intent(in) :: icseed(npts)
logical, intent(in) :: lprnt
real (kind=kind_phys), dimension(npts,nlp1), intent(in) :: plvl, &
& tlvl
real (kind=kind_phys), dimension(npts,nlay), intent(in) :: plyr, &
& tlyr, qlyr, olyr, dzlyr, delpin
real (kind=kind_phys),dimension(npts,nlay),intent(in)::gasvmr_co2,&
& gasvmr_n2o, gasvmr_ch4, gasvmr_o2, gasvmr_co, gasvmr_cfc11, &
& gasvmr_cfc12, gasvmr_cfc22, gasvmr_ccl4
real (kind=kind_phys), dimension(npts,nlay),intent(in):: cld_cf
real (kind=kind_phys), dimension(npts,nlay),intent(in),optional:: &
& cld_lwp, cld_ref_liq, cld_iwp, cld_ref_ice, &
& cld_rwp, cld_ref_rain, cld_swp, cld_ref_snow, &
& cld_od
real (kind=kind_phys), dimension(npts), intent(in) :: sfemis, &
& sfgtmp, de_lgth
real (kind=kind_phys), dimension(npts,nlay,nbands),intent(in):: &
& aeraod, aerssa
! --- outputs:
real (kind=kind_phys), dimension(npts,nlay), intent(inout) :: hlwc
real (kind=kind_phys), dimension(npts,nlay), intent(inout) :: &
& cldtau
type (topflw_type), dimension(npts), intent(inout) :: topflx
type (sfcflw_type), dimension(npts), intent(inout) :: sfcflx
character(len=*), intent(out) :: errmsg
integer, intent(out) :: errflg
!! --- optional outputs:
real (kind=kind_phys), dimension(npts,nlay,nbands),optional, &
& intent(inout) :: hlwb
real (kind=kind_phys), dimension(npts,nlay), optional, &
& intent(inout) :: hlw0
type (proflw_type), dimension(npts,nlp1), optional, &
& intent(inout) :: flxprf
logical, intent(in) :: lslwr
! --- locals:
real (kind=kind_phys), dimension(0:nlp1) :: cldfrc
real (kind=kind_phys), dimension(0:nlay) :: totuflux, totdflux, &
& totuclfl, totdclfl, tz
real (kind=kind_phys), dimension(nlay) :: htr, htrcl
real (kind=kind_phys), dimension(nlay) :: pavel, tavel, delp, &
& clwp, ciwp, relw, reiw, cda1, cda2, cda3, cda4, &
& coldry, colbrd, h2ovmr, o3vmr, fac00, fac01, fac10, fac11, &
& selffac, selffrac, forfac, forfrac, minorfrac, scaleminor, &
& scaleminorn2, temcol, dz
real (kind=kind_phys), dimension(nbands,0:nlay) :: pklev, pklay
real (kind=kind_phys), dimension(nlay,nbands) :: htrb
real (kind=kind_phys), dimension(nbands,nlay) :: taucld, tauaer
real (kind=kind_phys), dimension(ngptlw,nlay) :: fracs, tautot, &
& cldfmc
real (kind=kind_phys), dimension(nbands) :: semiss, secdiff
! --- column amount of absorbing gases:
! (:,m) m = 1-h2o, 2-co2, 3-o3, 4-n2o, 5-ch4, 6-o2, 7-co
real (kind=kind_phys) :: colamt(nlay,maxgas)
! --- column cfc cross-section amounts:
! (:,m) m = 1-ccl4, 2-cfc11, 3-cfc12, 4-cfc22
real (kind=kind_phys) :: wx(nlay,maxxsec)
! --- reference ratios of binary species parameter in lower atmosphere:
! (:,m,:) m = 1-h2o/co2, 2-h2o/o3, 3-h2o/n2o, 4-h2o/ch4, 5-n2o/co2, 6-o3/co2
real (kind=kind_phys) :: rfrate(nlay,nrates,2)
real (kind=kind_phys) :: tem0, tem1, tem2, pwvcm, summol, stemp, &
& delgth
integer, dimension(npts) :: ipseed
integer, dimension(nlay) :: jp, jt, jt1, indself, indfor, indminor
integer :: laytrop, iplon, i, j, k, k1
logical :: lcf1
!
!===> ... begin here
!
! Initialize CCPP error handling variables
errmsg = ''
errflg = 0
!
if (.not. lslwr) return
! --- ... initialization
lhlwb = present ( hlwb )
lhlw0 = present ( hlw0 )
lflxprf= present ( flxprf )
colamt(:,:) = f_zero
cldtau(:,:) = f_zero
!! --- check for optional input arguments, depending on cloud method
if (ilwcliq > 0) then ! use prognostic cloud method
if ( .not.present(cld_lwp) .or. .not.present(cld_ref_liq) .or. &
& .not.present(cld_iwp) .or. .not.present(cld_ref_ice) .or. &
& .not.present(cld_rwp) .or. .not.present(cld_ref_rain) .or. &
& .not.present(cld_swp) .or. .not.present(cld_ref_snow)) then
write(errmsg,'(*(a))') &
& 'Logic error: ilwcliq>0 requires the following', &
& ' optional arguments to be present:', &
& ' cld_lwp, cld_ref_liq, cld_iwp, cld_ref_ice,', &
& ' cld_rwp, cld_ref_rain, cld_swp, cld_ref_snow'
errflg = 1
return
end if
else ! use diagnostic cloud method
if ( .not.present(cld_od) ) then
write(errmsg,'(*(a))') &
& 'Logic error: ilwcliq<=0 requires the following', &
& ' optional argument to be present: cld_od'
errflg = 1
return
end if
endif ! end if_ilwcliq
!> -# Change random number seed value for each radiation invocation
!! (isubclw =1 or 2).
if ( isubclw == 1 ) then ! advance prescribed permutation seed
do i = 1, npts
ipseed(i) = ipsdlw0 + i
enddo
elseif ( isubclw == 2 ) then ! use input array of permutaion seeds
do i = 1, npts
ipseed(i) = icseed(i)
enddo
endif
! if ( lprnt ) then
! print *,' In rrtmg_lw, isubclw, ipsdlw0,ipseed =', &
! & isubclw, ipsdlw0, ipseed
! endif
! --- ... loop over horizontal npts profiles
lab_do_iplon : do iplon = 1, npts
!> -# Read surface emissivity.
if (sfemis(iplon) > eps .and. sfemis(iplon) <= 1.0) then ! input surface emissivity
do j = 1, nbands
semiss(j) = sfemis(iplon)
enddo
else ! use default values
do j = 1, nbands
semiss(j) = semiss0(j)
enddo
endif
stemp = sfgtmp(iplon) ! surface ground temp
if (iovrlw == 3) delgth= de_lgth(iplon) ! clouds decorr-length
!> -# Prepare atmospheric profile for use in rrtm.
! the vertical index of internal array is from surface to top
! --- ... molecular amounts are input or converted to volume mixing ratio
! and later then converted to molecular amount (molec/cm2) by the
! dry air column coldry (in molec/cm2) which is calculated from the
! layer pressure thickness (in mb), based on the hydrostatic equation
! --- ... and includes a correction to account for h2o in the layer.
if (ivflip == 0) then ! input from toa to sfc
tem1 = 100.0 * con_g
tem2 = 1.0e-20 * 1.0e3 * con_avgd
tz(0) = tlvl(iplon,nlp1)
do k = 1, nlay
k1 = nlp1 - k
pavel(k)= plyr(iplon,k1)
delp(k) = delpin(iplon,k1)
tavel(k)= tlyr(iplon,k1)
tz(k) = tlvl(iplon,k1)
dz(k) = dzlyr(iplon,k1)
!> -# Set absorber amount for h2o, co2, and o3.
!test use
! h2ovmr(k)= max(f_zero,qlyr(iplon,k1)*amdw) ! input mass mixing ratio
! h2ovmr(k)= max(f_zero,qlyr(iplon,k1)) ! input vol mixing ratio
! o3vmr (k)= max(f_zero,olyr(iplon,k1)) ! input vol mixing ratio
!ncep model use
h2ovmr(k)= max(f_zero,qlyr(iplon,k1) &
& *amdw/(f_one-qlyr(iplon,k1))) ! input specific humidity
o3vmr (k)= max(f_zero,olyr(iplon,k1)*amdo3) ! input mass mixing ratio
! --- ... tem0 is the molecular weight of moist air
tem0 = (f_one - h2ovmr(k))*con_amd + h2ovmr(k)*con_amw
coldry(k) = tem2*delp(k) / (tem1*tem0*(f_one+h2ovmr(k)))
temcol(k) = 1.0e-12 * coldry(k)
colamt(k,1) = max(f_zero, coldry(k)*h2ovmr(k)) ! h2o
colamt(k,2) = max(temcol(k), coldry(k)*gasvmr_co2(iplon,k1)) ! co2
colamt(k,3) = max(temcol(k), coldry(k)*o3vmr(k)) ! o3
enddo
!> -# Set up column amount for rare gases n2o,ch4,o2,co,ccl4,cf11,cf12,
!! cf22, convert from volume mixing ratio to molec/cm2 based on
!! coldry (scaled to 1.0e-20).
if (ilwrgas > 0) then
do k = 1, nlay
k1 = nlp1 - k
colamt(k,4)=max(temcol(k), coldry(k)*gasvmr_n2o(iplon,k1)) ! n2o
colamt(k,5)=max(temcol(k), coldry(k)*gasvmr_ch4(iplon,k1)) ! ch4
colamt(k,6)=max(f_zero, coldry(k)*gasvmr_o2(iplon,k1)) ! o2
colamt(k,7)=max(f_zero, coldry(k)*gasvmr_co(iplon,k1)) ! co
wx(k,1) = max( f_zero, coldry(k)*gasvmr_ccl4(iplon,k1) ) ! ccl4
wx(k,2) = max( f_zero, coldry(k)*gasvmr_cfc11(iplon,k1) ) ! cf11
wx(k,3) = max( f_zero, coldry(k)*gasvmr_cfc12(iplon,k1) ) ! cf12
wx(k,4) = max( f_zero, coldry(k)*gasvmr_cfc22(iplon,k1) ) ! cf22
enddo
else
do k = 1, nlay
colamt(k,4) = f_zero ! n2o
colamt(k,5) = f_zero ! ch4
colamt(k,6) = f_zero ! o2
colamt(k,7) = f_zero ! co
wx(k,1) = f_zero
wx(k,2) = f_zero
wx(k,3) = f_zero
wx(k,4) = f_zero
enddo
endif
!> -# Set aerosol optical properties.
do k = 1, nlay
k1 = nlp1 - k
do j = 1, nbands
tauaer(j,k) = aeraod(iplon,k1,j) &
& * (f_one - aerssa(iplon,k1,j))
enddo
enddo
!> -# Read cloud optical properties.
if (ilwcliq > 0) then ! use prognostic cloud method
do k = 1, nlay
k1 = nlp1 - k
cldfrc(k)= cld_cf(iplon,k1)
clwp(k) = cld_lwp(iplon,k1)
relw(k) = cld_ref_liq(iplon,k1)
ciwp(k) = cld_iwp(iplon,k1)
reiw(k) = cld_ref_ice(iplon,k1)
cda1(k) = cld_rwp(iplon,k1)
cda2(k) = cld_ref_rain(iplon,k1)
cda3(k) = cld_swp(iplon,k1)
cda4(k) = cld_ref_snow(iplon,k1)
enddo
else ! use diagnostic cloud method
do k = 1, nlay
k1 = nlp1 - k
cldfrc(k)= cld_cf(iplon,k1)
cda1(k) = cld_od(iplon,k1)
enddo
endif ! end if_ilwcliq
cldfrc(0) = f_one ! padding value only
cldfrc(nlp1) = f_zero ! padding value only
!> -# Compute precipitable water vapor for diffusivity angle adjustments.
tem1 = f_zero
tem2 = f_zero
do k = 1, nlay
tem1 = tem1 + coldry(k) + colamt(k,1)
tem2 = tem2 + colamt(k,1)
enddo
tem0 = 10.0 * tem2 / (amdw * tem1 * con_g)
pwvcm = tem0 * plvl(iplon,nlp1)
else ! input from sfc to toa
tem1 = 100.0 * con_g
tem2 = 1.0e-20 * 1.0e3 * con_avgd
tz(0) = tlvl(iplon,1)
do k = 1, nlay
pavel(k)= plyr(iplon,k)
delp(k) = delpin(iplon,k)
tavel(k)= tlyr(iplon,k)
tz(k) = tlvl(iplon,k+1)
dz(k) = dzlyr(iplon,k)
! --- ... set absorber amount
!test use
! h2ovmr(k)= max(f_zero,qlyr(iplon,k)*amdw) ! input mass mixing ratio
! h2ovmr(k)= max(f_zero,qlyr(iplon,k)) ! input vol mixing ratio
! o3vmr (k)= max(f_zero,olyr(iplon,k)) ! input vol mixing ratio
!ncep model use
h2ovmr(k)= max(f_zero,qlyr(iplon,k) &
& *amdw/(f_one-qlyr(iplon,k))) ! input specific humidity
o3vmr (k)= max(f_zero,olyr(iplon,k)*amdo3) ! input mass mixing ratio
! --- ... tem0 is the molecular weight of moist air
tem0 = (f_one - h2ovmr(k))*con_amd + h2ovmr(k)*con_amw
coldry(k) = tem2*delp(k) / (tem1*tem0*(f_one+h2ovmr(k)))
temcol(k) = 1.0e-12 * coldry(k)
colamt(k,1) = max(f_zero, coldry(k)*h2ovmr(k)) ! h2o
colamt(k,2) = max(temcol(k), coldry(k)*gasvmr_co2(iplon,k))! co2
colamt(k,3) = max(temcol(k), coldry(k)*o3vmr(k)) ! o3
enddo
! --- ... set up col amount for rare gases, convert from volume mixing ratio
! to molec/cm2 based on coldry (scaled to 1.0e-20)
if (ilwrgas > 0) then
do k = 1, nlay
colamt(k,4)=max(temcol(k), coldry(k)*gasvmr_n2o(iplon,k)) ! n2o
colamt(k,5)=max(temcol(k), coldry(k)*gasvmr_ch4(iplon,k)) ! ch4
colamt(k,6)=max(f_zero, coldry(k)*gasvmr_o2(iplon,k)) ! o2
colamt(k,7)=max(f_zero, coldry(k)*gasvmr_co(iplon,k)) ! co
wx(k,1) = max( f_zero, coldry(k)*gasvmr_ccl4(iplon,k) ) ! ccl4
wx(k,2) = max( f_zero, coldry(k)*gasvmr_cfc11(iplon,k) ) ! cf11
wx(k,3) = max( f_zero, coldry(k)*gasvmr_cfc12(iplon,k) ) ! cf12
wx(k,4) = max( f_zero, coldry(k)*gasvmr_cfc22(iplon,k) ) ! cf22
enddo
else
do k = 1, nlay
colamt(k,4) = f_zero ! n2o
colamt(k,5) = f_zero ! ch4
colamt(k,6) = f_zero ! o2
colamt(k,7) = f_zero ! co
wx(k,1) = f_zero
wx(k,2) = f_zero
wx(k,3) = f_zero
wx(k,4) = f_zero
enddo
endif
! --- ... set aerosol optical properties
do j = 1, nbands
do k = 1, nlay
tauaer(j,k) = aeraod(iplon,k,j) &
& * (f_one - aerssa(iplon,k,j))
enddo
enddo
if (ilwcliq > 0) then ! use prognostic cloud method
do k = 1, nlay
cldfrc(k)= cld_cf(iplon,k)
clwp(k) = cld_lwp(iplon,k)
relw(k) = cld_ref_liq(iplon,k)
ciwp(k) = cld_iwp(iplon,k)
reiw(k) = cld_ref_ice(iplon,k)
cda1(k) = cld_rwp(iplon,k)
cda2(k) = cld_ref_rain(iplon,k)
cda3(k) = cld_swp(iplon,k)
cda4(k) = cld_ref_snow(iplon,k)
enddo
else ! use diagnostic cloud method
do k = 1, nlay
cldfrc(k)= cld_cf(iplon,k)
cda1(k) = cld_od(iplon,k)
enddo
endif ! end if_ilwcliq
cldfrc(0) = f_one ! padding value only
cldfrc(nlp1) = f_zero ! padding value only
! --- ... compute precipitable water vapor for diffusivity angle adjustments
tem1 = f_zero
tem2 = f_zero
do k = 1, nlay
tem1 = tem1 + coldry(k) + colamt(k,1)
tem2 = tem2 + colamt(k,1)
enddo
tem0 = 10.0 * tem2 / (amdw * tem1 * con_g)
pwvcm = tem0 * plvl(iplon,1)
endif ! if_ivflip
!> -# Compute column amount for broadening gases.
do k = 1, nlay
summol = f_zero
do i = 2, maxgas
summol = summol + colamt(k,i)
enddo
colbrd(k) = coldry(k) - summol
enddo
!> -# Compute diffusivity angle adjustments.
tem1 = 1.80
tem2 = 1.50
do j = 1, nbands
if (j==1 .or. j==4 .or. j==10) then
secdiff(j) = 1.66
else
secdiff(j) = min( tem1, max( tem2, &
& a0(j)+a1(j)*exp(a2(j)*pwvcm) ))
endif
enddo
! if (lprnt) then
! print *,' coldry',coldry
! print *,' wx(*,1) ',(wx(k,1),k=1,NLAY)
! print *,' wx(*,2) ',(wx(k,2),k=1,NLAY)
! print *,' wx(*,3) ',(wx(k,3),k=1,NLAY)
! print *,' wx(*,4) ',(wx(k,4),k=1,NLAY)
! print *,' iplon ',iplon
! print *,' pavel ',pavel
! print *,' delp ',delp
! print *,' tavel ',tavel
! print *,' tz ',tz
! print *,' h2ovmr ',h2ovmr
! print *,' o3vmr ',o3vmr
! endif
!> -# For cloudy atmosphere, call cldprop() to set cloud optical
!! properties.
lcf1 = .false.
lab_do_k0 : do k = 1, nlay
if ( cldfrc(k) > eps ) then
lcf1 = .true.
exit lab_do_k0
endif
enddo lab_do_k0
if ( lcf1 ) then
call cldprop &
! --- inputs:
& ( cldfrc,clwp,relw,ciwp,reiw,cda1,cda2,cda3,cda4, &
& nlay, nlp1, ipseed(iplon), dz, delgth, &
! --- outputs:
& cldfmc, taucld &
& )
! --- ... save computed layer cloud optical depth for output
! rrtm band-7 is apprx 10mu channel (or use spectral mean of bands 6-8)
if (ivflip == 0) then ! input from toa to sfc
do k = 1, nlay
k1 = nlp1 - k
cldtau(iplon,k1) = taucld( 7,k)
enddo
else ! input from sfc to toa
do k = 1, nlay
cldtau(iplon,k) = taucld( 7,k)
enddo
endif ! end if_ivflip_block
else
cldfmc = f_zero
taucld = f_zero
endif
! if (lprnt) then
! print *,' after cldprop'
! print *,' clwp',clwp
! print *,' ciwp',ciwp
! print *,' relw',relw
! print *,' reiw',reiw
! print *,' taucl',cda1
! print *,' cldfrac',cldfrc
! endif
!> -# Calling setcoef() to compute various coefficients needed in
!! radiative transfer calculations.
call setcoef &
! --- inputs:
& ( pavel,tavel,tz,stemp,h2ovmr,colamt,coldry,colbrd, &
& nlay, nlp1, &
! --- outputs:
& laytrop,pklay,pklev,jp,jt,jt1, &
& rfrate,fac00,fac01,fac10,fac11, &
& selffac,selffrac,indself,forfac,forfrac,indfor, &
& minorfrac,scaleminor,scaleminorn2,indminor &
& )
! if (lprnt) then
! print *,'laytrop',laytrop
! print *,'colh2o',(colamt(k,1),k=1,NLAY)
! print *,'colco2',(colamt(k,2),k=1,NLAY)
! print *,'colo3', (colamt(k,3),k=1,NLAY)
! print *,'coln2o',(colamt(k,4),k=1,NLAY)
! print *,'colch4',(colamt(k,5),k=1,NLAY)
! print *,'fac00',fac00
! print *,'fac01',fac01
! print *,'fac10',fac10
! print *,'fac11',fac11
! print *,'jp',jp
! print *,'jt',jt
! print *,'jt1',jt1
! print *,'selffac',selffac
! print *,'selffrac',selffrac
! print *,'indself',indself
! print *,'forfac',forfac
! print *,'forfrac',forfrac
! print *,'indfor',indfor
! endif
!> -# Call taumol() to calculte the gaseous optical depths and Plank
!! fractions for each longwave spectral band.
call taumol &
! --- inputs:
& ( laytrop,pavel,coldry,colamt,colbrd,wx,tauaer, &
& rfrate,fac00,fac01,fac10,fac11,jp,jt,jt1, &
& selffac,selffrac,indself,forfac,forfrac,indfor, &
& minorfrac,scaleminor,scaleminorn2,indminor, &
& nlay, &
! --- outputs:
& fracs, tautot &
& )
! if (lprnt) then
! print *,' after taumol'
! do k = 1, nlay
! write(6,121) k
!121 format(' k =',i3,5x,'FRACS')
! write(6,122) (fracs(j,k),j=1,ngptlw)
!122 format(10e14.7)
! write(6,123) k
!123 format(' k =',i3,5x,'TAUTOT')
! write(6,122) (tautot(j,k),j=1,ngptlw)
! enddo
! endif
!> -# Call the radiative transfer routine based on cloud scheme
!! selection. Compute the upward/downward radiative fluxes, and
!! heating rates for both clear or cloudy atmosphere.
!!\n - call rtrn(): clouds are assumed as randomly overlaping in a
!! vertical column
!!\n - call rtrnmr(): clouds are assumed as in maximum-randomly
!! overlaping in a vertical column;
!!\n - call rtrnmc(): clouds are treated with the mcica stochastic
!! approach.
if (isubclw <= 0) then
if (iovrlw <= 0) then
call rtrn &
! --- inputs:
& ( semiss,delp,cldfrc,taucld,tautot,pklay,pklev, &
& fracs,secdiff,nlay,nlp1, &
! --- outputs:
& totuflux,totdflux,htr, totuclfl,totdclfl,htrcl, htrb &
& )
else
call rtrnmr &
! --- inputs:
& ( semiss,delp,cldfrc,taucld,tautot,pklay,pklev, &
& fracs,secdiff,nlay,nlp1, &
! --- outputs:
& totuflux,totdflux,htr, totuclfl,totdclfl,htrcl, htrb &
& )
endif ! end if_iovrlw_block
else
call rtrnmc &
! --- inputs:
& ( semiss,delp,cldfmc,taucld,tautot,pklay,pklev, &
& fracs,secdiff,nlay,nlp1, &
! --- outputs:
& totuflux,totdflux,htr, totuclfl,totdclfl,htrcl, htrb &
& )
endif ! end if_isubclw_block
!> -# Save outputs.
topflx(iplon)%upfxc = totuflux(nlay)
topflx(iplon)%upfx0 = totuclfl(nlay)
sfcflx(iplon)%upfxc = totuflux(0)
sfcflx(iplon)%upfx0 = totuclfl(0)
sfcflx(iplon)%dnfxc = totdflux(0)
sfcflx(iplon)%dnfx0 = totdclfl(0)
if (ivflip == 0) then ! output from toa to sfc
!! --- ... optional fluxes
if ( lflxprf ) then
do k = 0, nlay
k1 = nlp1 - k
flxprf(iplon,k1)%upfxc = totuflux(k)
flxprf(iplon,k1)%dnfxc = totdflux(k)
flxprf(iplon,k1)%upfx0 = totuclfl(k)
flxprf(iplon,k1)%dnfx0 = totdclfl(k)
enddo
endif
do k = 1, nlay
k1 = nlp1 - k
hlwc(iplon,k1) = htr(k)
enddo
!! --- ... optional clear sky heating rate
if ( lhlw0 ) then
do k = 1, nlay
k1 = nlp1 - k
hlw0(iplon,k1) = htrcl(k)
enddo
endif
!! --- ... optional spectral band heating rate
if ( lhlwb ) then
do j = 1, nbands
do k = 1, nlay
k1 = nlp1 - k
hlwb(iplon,k1,j) = htrb(k,j)
enddo
enddo
endif
else ! output from sfc to toa
!! --- ... optional fluxes
if ( lflxprf ) then
do k = 0, nlay
flxprf(iplon,k+1)%upfxc = totuflux(k)
flxprf(iplon,k+1)%dnfxc = totdflux(k)
flxprf(iplon,k+1)%upfx0 = totuclfl(k)
flxprf(iplon,k+1)%dnfx0 = totdclfl(k)
enddo
endif
do k = 1, nlay
hlwc(iplon,k) = htr(k)
enddo
!! --- ... optional clear sky heating rate
if ( lhlw0 ) then
do k = 1, nlay
hlw0(iplon,k) = htrcl(k)
enddo
endif
!! --- ... optional spectral band heating rate
if ( lhlwb ) then
do j = 1, nbands
do k = 1, nlay
hlwb(iplon,k,j) = htrb(k,j)
enddo
enddo
endif
endif ! if_ivflip
enddo lab_do_iplon
!...................................
end subroutine rrtmg_lw_run
!-----------------------------------
!> @}
subroutine rrtmg_lw_finalize ()
end subroutine rrtmg_lw_finalize
!> \ingroup module_radlw_main
!> \brief This subroutine performs calculations necessary for the initialization
!! of the longwave model, which includes non-varying model variables, conversion
!! factors, and look-up tables
!!
!! Lookup tables are computed for use in the lw
!! radiative transfer, and input absorption coefficient data for each
!! spectral band are reduced from 256 g-point intervals to 140.
!!\param me print control for parallel process
!!\section rlwinit_gen rlwinit General Algorithm
!! @{
subroutine rlwinit &
& ( me ) ! --- inputs
! --- outputs: (none)
! =================== program usage description =================== !
! !
! purpose: initialize non-varying module variables, conversion factors,!
! and look-up tables. !
! !
! subprograms called: none !
! !
! ==================== defination of variables ==================== !
! !
! inputs: !
! me - print control for parallel process !
! !
! outputs: (none) !
! !
! external module variables: (in physparam) !
! ilwrate - heating rate unit selections !
! =1: output in k/day !
! =2: output in k/second !
! ilwrgas - control flag for rare gases (ch4,n2o,o2,cfcs, etc.) !
! =0: do not include rare gases !
! >0: include all rare gases !
! ilwcliq - liquid cloud optical properties contrl flag !
! =0: input cloud opt depth from diagnostic scheme !
! >0: input cwp,rew, and other cloud content parameters !
! isubclw - sub-column cloud approximation control flag !
! =0: no sub-col cld treatment, use grid-mean cld quantities !
! =1: mcica sub-col, prescribed seeds to get random numbers !
! =2: mcica sub-col, providing array icseed for random numbers!
! icldflg - cloud scheme control flag !
! =0: diagnostic scheme gives cloud tau, omiga, and g. !
! =1: prognostic scheme gives cloud liq/ice path, etc. !
! iovrlw - clouds vertical overlapping control flag !
! =0: random overlapping clouds !
! =1: maximum/random overlapping clouds !
! =2: maximum overlap cloud (isubcol>0 only) !
! =3: decorrelation-length overlap (for isubclw>0 only) !
! !
! ******************************************************************* !
! original code description !
! !
! original version: michael j. iacono; july, 1998 !
! first revision for ncar ccm: september, 1998 !
! second revision for rrtm_v3.0: september, 2002 !
! !
! this subroutine performs calculations necessary for the initialization
! of the longwave model. lookup tables are computed for use in the lw !
! radiative transfer, and input absorption coefficient data for each !
! spectral band are reduced from 256 g-point intervals to 140. !
! !
! ******************************************************************* !
! !
! definitions: !
! arrays for 10000-point look-up tables: !
! tau_tbl - clear-sky optical depth (used in cloudy radiative transfer!
! exp_tbl - exponential lookup table for tansmittance !
! tfn_tbl - tau transition function; i.e. the transition of the Planck!
! function from that for the mean layer temperature to that !
! for the layer boundary temperature as a function of optical
! depth. the "linear in tau" method is used to make the table
! !
! ******************************************************************* !
! !
! ====================== end of description block ================= !
! --- inputs:
integer, intent(in) :: me
! --- outputs: none
! --- locals:
real (kind=kind_phys), parameter :: expeps = 1.e-20
real (kind=kind_phys) :: tfn, pival, explimit
integer :: i
!
!===> ... begin here
!
if ( iovrlw<0 .or. iovrlw>3 ) then
print *,' *** Error in specification of cloud overlap flag', &
& ' IOVRLW=',iovrlw,' in RLWINIT !!'
stop
elseif ( iovrlw>=2 .and. isubclw==0 ) then
if (me == 0) then
print *,' *** IOVRLW=',iovrlw,' is not available for', &
& ' ISUBCLW=0 setting!!'
print *,' The program uses maximum/random overlap', &
& ' instead.'
endif
iovrlw = 1
endif
if (me == 0) then
print *,' - Using AER Longwave Radiation, Version: ', VTAGLW
if (ilwrgas > 0) then
print *,' --- Include rare gases N2O, CH4, O2, CFCs ', &
& 'absorptions in LW'
else
print *,' --- Rare gases effect is NOT included in LW'
endif
if ( isubclw == 0 ) then
print *,' --- Using standard grid average clouds, no ', &
& 'sub-column clouds approximation applied'
elseif ( isubclw == 1 ) then
print *,' --- Using MCICA sub-colum clouds approximation ', &
& 'with a prescribed sequence of permutaion seeds'
elseif ( isubclw == 2 ) then
print *,' --- Using MCICA sub-colum clouds approximation ', &
& 'with provided input array of permutation seeds'
else
print *,' *** Error in specification of sub-column cloud ', &
& ' control flag isubclw =',isubclw,' !!'
stop
endif
endif
!> -# Check cloud flags for consistency.
if ((icldflg == 0 .and. ilwcliq /= 0) .or. &
& (icldflg == 1 .and. ilwcliq == 0)) then
print *,' *** Model cloud scheme inconsistent with LW', &
& ' radiation cloud radiative property setup !!'
stop
endif
!> -# Setup default surface emissivity for each band.
semiss0(:) = f_one
!> -# Setup constant factors for flux and heating rate
!! the 1.0e-2 is to convert pressure from mb to \f$N/m^2\f$.
pival = 2.0 * asin(f_one)
fluxfac = pival * 2.0d4
! fluxfac = 62831.85307179586 ! = 2 * pi * 1.0e4
if (ilwrate == 1) then
! heatfac = 8.4391
! heatfac = con_g * 86400. * 1.0e-2 / con_cp ! (in k/day)
heatfac = con_g * 864.0 / con_cp ! (in k/day)
else
heatfac = con_g * 1.0e-2 / con_cp ! (in k/second)
endif
!> -# Compute lookup tables for transmittance, tau transition
!! function, and clear sky tau (for the cloudy sky radiative
!! transfer). tau is computed as a function of the tau
!! transition function, transmittance is calculated as a
!! function of tau, and the tau transition function is
!! calculated using the linear in tau formulation at values of
!! tau above 0.01. tf is approximated as tau/6 for tau < 0.01.
!! all tables are computed at intervals of 0.001. the inverse
!! of the constant used in the pade approximation to the tau
!! transition function is set to b.
tau_tbl(0) = f_zero
exp_tbl(0) = f_one
tfn_tbl(0) = f_zero
tau_tbl(ntbl) = 1.e10
exp_tbl(ntbl) = expeps
tfn_tbl(ntbl) = f_one
explimit = aint( -log(tiny(exp_tbl(0))) )
do i = 1, ntbl-1
!org tfn = float(i) / float(ntbl)
!org tau_tbl(i) = bpade * tfn / (f_one - tfn)
tfn = real(i, kind_phys) / real(ntbl-i, kind_phys)
tau_tbl(i) = bpade * tfn
if (tau_tbl(i) >= explimit) then
exp_tbl(i) = expeps
else
exp_tbl(i) = exp( -tau_tbl(i) )
endif
if (tau_tbl(i) < 0.06) then
tfn_tbl(i) = tau_tbl(i) / 6.0
else
tfn_tbl(i) = f_one - 2.0*( (f_one / tau_tbl(i)) &
& - ( exp_tbl(i) / (f_one - exp_tbl(i)) ) )
endif
enddo
!...................................
end subroutine rlwinit
!! @}
!-----------------------------------
!>\ingroup module_radlw_main
!> \brief This subroutine computes the cloud optical depth(s) for each cloudy
!! layer and g-point interval.
!!\param cfrac layer cloud fraction
!!\n --- for ilwcliq > 0 (prognostic cloud scheme) - - -
!!\param cliqp layer in-cloud liq water path (\f$g/m^2\f$)
!!\param reliq mean eff radius for liq cloud (micron)
!!\param cicep layer in-cloud ice water path (\f$g/m^2\f$)
!!\param reice mean eff radius for ice cloud (micron)
!!\param cdat1 layer rain drop water path (\f$g/m^2\f$)
!!\param cdat2 effective radius for rain drop (micron)
!!\param cdat3 layer snow flake water path(\f$g/m^2\f$)
!!\param cdat4 mean effective radius for snow flake(micron)
!!\n --- for ilwcliq = 0 (diagnostic cloud scheme) - - -
!!\param cliqp not used
!!\param cicep not used
!!\param reliq not used
!!\param reice not used
!!\param cdat1 layer cloud optical depth
!!\param cdat2 layer cloud single scattering albedo
!!\param cdat3 layer cloud asymmetry factor
!!\param cdat4 optional use
!!\param nlay number of layer number
!!\param nlp1 number of vertical levels
!!\param ipseed permutation seed for generating random numbers (isubclw>0)
!!\param dz layer thickness (km)
!!\param de_lgth layer cloud decorrelation length (km)
!!\param cldfmc cloud fraction for each sub-column
!!\param taucld cloud optical depth for bands (non-mcica)
!!\section gen_cldprop cldprop General Algorithm
!> @{
subroutine cldprop &
& ( cfrac,cliqp,reliq,cicep,reice,cdat1,cdat2,cdat3,cdat4, & ! --- inputs
& nlay, nlp1, ipseed, dz, de_lgth, &
& cldfmc, taucld & ! --- outputs
& )
! =================== program usage description =================== !
! !
! purpose: compute the cloud optical depth(s) for each cloudy layer !
! and g-point interval. !
! !
! subprograms called: none !
! !
! ==================== definition of variables ==================== !
! !
! inputs: -size- !
! cfrac - real, layer cloud fraction 0:nlp1 !
! ..... for ilwcliq > 0 (prognostic cloud scheme) - - - !
! cliqp - real, layer in-cloud liq water path (g/m**2) nlay !
! reliq - real, mean eff radius for liq cloud (micron) nlay !
! cicep - real, layer in-cloud ice water path (g/m**2) nlay !
! reice - real, mean eff radius for ice cloud (micron) nlay !
! cdat1 - real, layer rain drop water path (g/m**2) nlay !
! cdat2 - real, effective radius for rain drop (microm) nlay !
! cdat3 - real, layer snow flake water path (g/m**2) nlay !
! cdat4 - real, effective radius for snow flakes (micron) nlay !
! ..... for ilwcliq = 0 (diagnostic cloud scheme) - - - !
! cdat1 - real, input cloud optical depth nlay !
! cdat2 - real, layer cloud single scattering albedo nlay !
! cdat3 - real, layer cloud asymmetry factor nlay !
! cdat4 - real, optional use nlay !
! cliqp - not used nlay !
! reliq - not used nlay !
! cicep - not used nlay !
! reice - not used nlay !
! !
! dz - real, layer thickness (km) nlay !
! de_lgth- real, layer cloud decorrelation length (km) 1 !
! nlay - integer, number of vertical layers 1 !
! nlp1 - integer, number of vertical levels 1 !
! ipseed- permutation seed for generating random numbers (isubclw>0) !
! !
! outputs: !
! cldfmc - real, cloud fraction for each sub-column ngptlw*nlay!
! taucld - real, cld opt depth for bands (non-mcica) nbands*nlay!
! !
! explanation of the method for each value of ilwcliq, and ilwcice. !
! set up in module "module_radlw_cntr_para" !
! !
! ilwcliq=0 : input cloud optical property (tau, ssa, asy). !
! (used for diagnostic cloud method) !
! ilwcliq>0 : input cloud liq/ice path and effective radius, also !
! require the use of 'ilwcice' to specify the method !
! used to compute absorption due to water/ice parts. !
! ................................................................... !
! !
! ilwcliq=1: the water droplet effective radius (microns) is input!
! and the opt depths due to water clouds are computed !
! as in hu and stamnes, j., clim., 6, 728-742, (1993). !
! the values for absorption coefficients appropriate for
! the spectral bands in rrtm have been obtained for a !
! range of effective radii by an averaging procedure !
! based on the work of j. pinto (private communication).
! linear interpolation is used to get the absorption !
! coefficients for the input effective radius. !
! !
! ilwcice=1: the cloud ice path (g/m2) and ice effective radius !
! (microns) are input and the optical depths due to ice!
! clouds are computed as in ebert and curry, jgr, 97, !
! 3831-3836 (1992). the spectral regions in this work !
! have been matched with the spectral bands in rrtm to !
! as great an extent as possible: !
! e&c 1 ib = 5 rrtm bands 9-16 !
! e&c 2 ib = 4 rrtm bands 6-8 !
! e&c 3 ib = 3 rrtm bands 3-5 !
! e&c 4 ib = 2 rrtm band 2 !
! e&c 5 ib = 1 rrtm band 1 !
! ilwcice=2: the cloud ice path (g/m2) and ice effective radius !
! (microns) are input and the optical depths due to ice!
! clouds are computed as in rt code, streamer v3.0 !
! (ref: key j., streamer user's guide, cooperative !
! institute for meteorological satellite studies, 2001,!
! 96 pp.) valid range of values for re are between 5.0 !
! and 131.0 micron. !
! ilwcice=3: the ice generalized effective size (dge) is input and!
! the optical properties, are calculated as in q. fu, !
! j. climate, (1998). q. fu provided high resolution !
! tales which were appropriately averaged for the bands!
! in rrtm_lw. linear interpolation is used to get the !
! coeff from the stored tables. valid range of values !
! for deg are between 5.0 and 140.0 micron. !
! !
! other cloud control module variables: !
! isubclw =0: standard cloud scheme, no sub-col cloud approximation !
! >0: mcica sub-col cloud scheme using ipseed as permutation!
! seed for generating random numbers !
! !
! ====================== end of description block ================= !
!
use module_radlw_cldprlw
! --- inputs:
integer, intent(in) :: nlay, nlp1, ipseed
real (kind=kind_phys), dimension(0:nlp1), intent(in) :: cfrac
real (kind=kind_phys), dimension(nlay), intent(in) :: cliqp, &
& reliq, cicep, reice, cdat1, cdat2, cdat3, cdat4, dz
real (kind=kind_phys), intent(in) :: de_lgth
! --- outputs:
real (kind=kind_phys), dimension(ngptlw,nlay),intent(out):: cldfmc
real (kind=kind_phys), dimension(nbands,nlay),intent(out):: taucld
! --- locals:
real (kind=kind_phys), dimension(nbands) :: tauliq, tauice
real (kind=kind_phys), dimension(nlay) :: cldf
real (kind=kind_phys) :: dgeice, factor, fint, tauran, tausnw, &
& cldliq, refliq, cldice, refice
logical :: lcloudy(ngptlw,nlay)
integer :: ia, ib, ig, k, index
!
!===> ... begin here
!
! --- ... initialize both output arrays to zero (clear sky default)
do k = 1, nlay
do ib = 1, nbands
taucld(ib,k) = f_zero
enddo
enddo
do k = 1, nlay
do ig = 1, ngptlw
cldfmc(ig,k) = f_zero
enddo
enddo
!> -# Compute cloud radiative properties for a cloudy column:
!!\n - Compute cloud radiative properties for rain and snow (tauran,tausnw)
!!\n - Calculation of absorption coefficients due to water clouds(tauliq)
!!\n - Calculation of absorption coefficients due to ice clouds (tauice).
!!\n - For prognostic cloud scheme: sum up the cloud optical property:
!!\n \f$ taucld=tauice+tauliq+tauran+tausnw \f$
! --- ... compute cloud radiative properties for a cloudy column
lab_if_ilwcliq : if (ilwcliq > 0) then
lab_do_k : do k = 1, nlay
lab_if_cld : if (cfrac(k) > cldmin) then
tauran = absrain * cdat1(k) ! ncar formula
!! tausnw = abssnow1 * cdat3(k) ! ncar formula
! --- if use fu's formula it needs to be normalized by snow density
! !not use snow density = 0.1 g/cm**3 = 0.1 g/(mu * m**2)
! use ice density = 0.9167 g/cm**3 = 0.9167 g/(mu * m**2)
! factor 1.5396=8/(3*sqrt(3)) converts reff to generalized ice particle size
! use newer factor value 1.0315
! 1/(0.9167*1.0315) = 1.05756
if (cdat3(k)>f_zero .and. cdat4(k)>10.0_kind_phys) then
tausnw = abssnow0*1.05756*cdat3(k)/cdat4(k) ! fu's formula
else
tausnw = f_zero
endif
cldliq = cliqp(k)
cldice = cicep(k)
! refliq = max(2.5e0, min(60.0e0, reliq(k) ))
! refice = max(5.0e0, reice(k) )
refliq = reliq(k)
refice = reice(k)
! --- ... calculation of absorption coefficients due to water clouds.
if ( cldliq <= f_zero ) then
do ib = 1, nbands
tauliq(ib) = f_zero
enddo
else
! note: ilwcliq=1 (hu and stamnes 1993) is the only liquid option coded below
if ( ilwcliq == 1 ) then
factor = refliq - 1.5
index = max( 1, min( 57, int( factor ) ))
fint = factor - float(index)
do ib = 1, nbands
tauliq(ib) = max(f_zero, cldliq*(absliq1(index,ib) &
& + fint*(absliq1(index+1,ib)-absliq1(index,ib)) ))
enddo
endif ! end if_ilwcliq_block
endif ! end if_cldliq_block
! --- ... calculation of absorption coefficients due to ice clouds.
if ( cldice <= f_zero ) then
do ib = 1, nbands
tauice(ib) = f_zero
enddo
else
! --- ... ebert and curry approach for all particle sizes though somewhat
! unjustified for large ice particles
if ( ilwcice == 1 ) then
! note: refice clamped to the 13-130 micron range of the e&c coefficients
refice = min(130.0, max(13.0, real(refice) ))
do ib = 1, nbands
ia = ipat(ib) ! eb_&_c band index for ice cloud coeff
tauice(ib) = max(f_zero, cldice*(absice1(1,ia) &
& + absice1(2,ia)/refice) )
enddo
! --- ... streamer approach for ice effective radius between 5.0 and 131.0 microns
! and ebert and curry approach for ice eff radius greater than 131.0 microns.
! no smoothing between the transition of the two methods.
elseif ( ilwcice == 2 ) then
factor = (refice - 2.0) / 3.0
index = max( 1, min( 42, int( factor ) ))
fint = factor - float(index)
do ib = 1, nbands
tauice(ib) = max(f_zero, cldice*(absice2(index,ib) &
& + fint*(absice2(index+1,ib) - absice2(index,ib)) ))
enddo
! --- ... fu's approach for ice effective radius between 4.8 and 135 microns
! (generalized effective size from 5 to 140 microns)
elseif ( ilwcice == 3 ) then
! dgeice = max(5.0, 1.5396*refice) ! v4.4 value
dgeice = max(5.0, 1.0315*refice) ! v4.71 value
factor = (dgeice - 2.0) / 3.0
index = max( 1, min( 45, int( factor ) ))
fint = factor - float(index)
do ib = 1, nbands
tauice(ib) = max(f_zero, cldice*(absice3(index,ib) &
& + fint*(absice3(index+1,ib) - absice3(index,ib)) ))
enddo
endif ! end if_ilwcice_block
endif ! end if_cldice_block
do ib = 1, nbands
taucld(ib,k) = tauice(ib) + tauliq(ib) + tauran + tausnw
enddo
endif lab_if_cld
enddo lab_do_k
else lab_if_ilwcliq
! --- ... diagnostic scheme: cdat1 already holds the cloud optical depth
do k = 1, nlay
if (cfrac(k) > cldmin) then
do ib = 1, nbands
taucld(ib,k) = cdat1(k)
enddo
endif
enddo
endif lab_if_ilwcliq
!> -# if physparam::isubclw > 0, call mcica_subcol() to distribute
!! cloud properties to each g-point.
if ( isubclw > 0 ) then ! mcica sub-col clouds approx
do k = 1, nlay
if ( cfrac(k) < cldmin ) then
cldf(k) = f_zero
else
cldf(k) = cfrac(k)
endif
enddo
! --- ... call sub-column cloud generator
call mcica_subcol &
! --- inputs:
& ( cldf, nlay, ipseed, dz, de_lgth, &
! --- output:
& lcloudy &
& )
! --- ... binary (0/1) sub-column cloud fractions from the generator flags
do k = 1, nlay
do ig = 1, ngptlw
if ( lcloudy(ig,k) ) then
cldfmc(ig,k) = f_one
else
cldfmc(ig,k) = f_zero
endif
enddo
enddo
endif ! end if_isubclw_block
return
! ..................................
end subroutine cldprop
! ----------------------------------
!> @}
!>\ingroup module_radlw_main
!>\brief This subroutine computes sub-column cloud profile flag array.
!!\param cldf layer cloud fraction
!!\param nlay number of model vertical layers
!!\param ipseed permute seed for random num generator
!!\param dz layer thickness
!!\param de_lgth layer cloud decorrelation length (km)
!!\param lcloudy sub-colum cloud profile flag array
!!\section mcica_subcol_gen mcica_subcol General Algorithm
!! @{
subroutine mcica_subcol &
& ( cldf, nlay, ipseed, dz, de_lgth, & ! --- inputs
& lcloudy & ! --- outputs
& )
! ==================== definition of variables ==================== !
! !
! input variables: size !
! cldf - real, layer cloud fraction nlay !
! nlay - integer, number of model vertical layers 1 !
! ipseed - integer, permute seed for random num generator 1 !
! ** note : if the cloud generator is called multiple times, need !
! to permute the seed between each call; if between calls !
! for lw and sw, use values differ by the number of g-pts. !
! dz - real, layer thickness (km) nlay !
! de_lgth - real, layer cloud decorrelation length (km) 1 !
! !
! output variables: !
! lcloudy - logical, sub-column cloud profile flag array ngptlw*nlay!
! !
! other control flags from module variables: !
! iovrlw : control flag for cloud overlapping method !
! =0:random; =1:maximum/random: =2:maximum; =3:decorr !
! !
! ===================== end of definitions ==================== !
implicit none
! --- inputs:
integer, intent(in) :: nlay, ipseed
real (kind=kind_phys), dimension(nlay), intent(in) :: cldf, dz
real (kind=kind_phys), intent(in) :: de_lgth
! --- outputs:
logical, dimension(ngptlw,nlay), intent(out) :: lcloudy
! --- locals:
real (kind=kind_phys) :: cdfunc(ngptlw,nlay), rand1d(ngptlw), &
& rand2d(nlay*ngptlw), tem1, fac_lcf(nlay), &
& cdfun2(ngptlw,nlay)
type (random_stat) :: stat ! for thread safe random generator
integer :: k, n, k1
!
!===> ... begin here
!
!> -# Call random_setseed() to advance random number generator by ipseed values.
call random_setseed &
! --- inputs:
& ( ipseed, &
! --- outputs:
& stat &
& )
!> -# Sub-column set up according to overlapping assumption:
!! - For random overlap, pick a random value at every level
!! - For max-random overlap, pick a random value at every level
!! - For maximum overlap, pick same random number at every level
select case ( iovrlw )
case( 0 ) ! random overlap, pick a random value at every level
call random_number &
! --- inputs: ( none )
! --- outputs:
& ( rand2d, stat )
k1 = 0
do n = 1, ngptlw
do k = 1, nlay
k1 = k1 + 1
cdfunc(n,k) = rand2d(k1)
enddo
enddo
case( 1 ) ! max-ran overlap
call random_number &
! --- inputs: ( none )
! --- outputs:
& ( rand2d, stat )
k1 = 0
do n = 1, ngptlw
do k = 1, nlay
k1 = k1 + 1
cdfunc(n,k) = rand2d(k1)
enddo
enddo
! --- first pick a random number for bottom (or top) layer.
! then walk up the column: (aer's code)
! if layer below is cloudy, use the same rand num in the layer below
! if layer below is clear, use a new random number
! --- from bottom up
do k = 2, nlay
k1 = k - 1
tem1 = f_one - cldf(k1)
do n = 1, ngptlw
if ( cdfunc(n,k1) > tem1 ) then
cdfunc(n,k) = cdfunc(n,k1)
else
cdfunc(n,k) = cdfunc(n,k) * tem1
endif
enddo
enddo
! --- or walk down the column: (if use original author's method)
! if layer above is cloudy, use the same rand num in the layer above
! if layer above is clear, use a new random number
! --- from top down
! do k = nlay-1, 1, -1
! k1 = k + 1
! tem1 = f_one - cldf(k1)
! do n = 1, ngptlw
! if ( cdfunc(n,k1) > tem1 ) then
! cdfunc(n,k) = cdfunc(n,k1)
! else
! cdfunc(n,k) = cdfunc(n,k) * tem1
! endif
! enddo
! enddo
case( 2 ) !< - For maximum overlap, pick same random number at every level
call random_number &
! --- inputs: ( none )
! --- outputs:
& ( rand1d, stat )
do n = 1, ngptlw
tem1 = rand1d(n)
do k = 1, nlay
cdfunc(n,k) = tem1
enddo
enddo
case( 3 ) ! decorrelation length overlap
! --- compute overlapping factors based on layer midpoint distances
! and decorrelation depths
! note(review): de_lgth is only used here - assumed nonzero for iovrlw=3; confirm caller guarantees
do k = nlay, 2, -1
fac_lcf(k) = exp( -0.5 * (dz(k)+dz(k-1)) / de_lgth )
enddo
! --- setup 2 sets of random numbers
call random_number ( rand2d, stat )
k1 = 0
do k = 1, nlay
do n = 1, ngptlw
k1 = k1 + 1
cdfunc(n,k) = rand2d(k1)
enddo
enddo
call random_number ( rand2d, stat )
k1 = 0
do k = 1, nlay
do n = 1, ngptlw
k1 = k1 + 1
cdfun2(n,k) = rand2d(k1)
enddo
enddo
! --- then working from the top down:
! if a random number (from an independent set -cdfun2) is smaller then the
! scale factor: use the upper layer's number, otherwise use a new random
! number (keep the original assigned one).
do k = nlay-1, 1, -1
k1 = k + 1
do n = 1, ngptlw
if ( cdfun2(n,k) <= fac_lcf(k1) ) then
cdfunc(n,k) = cdfunc(n,k1)
endif
enddo
enddo
end select
!> -# Generate subcolumns for homogeneous clouds.
! --- a g-point is flagged cloudy when its random deviate falls within
! the cloud fraction, i.e. is >= the clear-sky fraction (1-cldf)
do k = 1, nlay
tem1 = f_one - cldf(k)
do n = 1, ngptlw
lcloudy(n,k) = cdfunc(n,k) >= tem1
enddo
enddo
return
! ..................................
end subroutine mcica_subcol
!! @}
! ----------------------------------
!>\ingroup module_radlw_main
!> This subroutine computes various coefficients needed in radiative
!! transfer calculations.
!!\param pavel layer pressure (mb)
!!\param tavel layer temperature (K)
!!\param tz level(interface) temperatures (K)
!!\param stemp surface ground temperature (K)
!!\param h2ovmr layer w.v. volume mixing ratio (kg/kg)
!!\param colamt column amounts of absorbing gases.
!! 2nd indices range: 1-maxgas, for watervapor,carbon dioxide, ozone,
!! nitrous oxide, methane,oxygen, carbon monoxide,etc. \f$(mol/cm^2)\f$
!!\param coldry dry air column amount
!!\param colbrd column amount of broadening gases
!!\param nlay total number of vertical layers
!!\param nlp1 total number of vertical levels
!!\param laytrop tropopause layer index (unitless)
!!\param pklay integrated planck func at lay temp
!!\param pklev integrated planck func at lev temp
!!\param jp indices of lower reference pressure
!!\param jt, jt1 indices of lower reference temperatures
!!\param rfrate ref ratios of binary species param
!!\n (:,m,:)m=1-h2o/co2,2-h2o/o3,3-h2o/n2o,
!! 4-h2o/ch4,5-n2o/co2,6-o3/co2
!!\n (:,:,n)n=1,2: the rates of ref press at
!! the 2 sides of the layer
!!\param fac00,fac01,fac10,fac11 factors multiply the reference ks, i,j=0/1 for
!! lower/higher of the 2 appropriate temperatures
!! and altitudes.
!!\param selffac scale factor for w. v. self-continuum equals
!! (w. v. density)/(atmospheric density at 296k and 1013 mb)
!!\param selffrac factor for temperature interpolation of
!! reference w. v. self-continuum data
!!\param indself index of lower ref temp for selffac
!!\param forfac scale factor for w. v. foreign-continuum
!!\param forfrac factor for temperature interpolation of
!! reference w.v. foreign-continuum data
!!\param indfor index of lower ref temp for forfac
!!\param minorfrac factor for minor gases
!!\param scaleminor,scaleminorn2 scale factors for minor gases
!!\param indminor index of lower ref temp for minor gases
!>\section setcoef_gen setcoef General Algorithm
!> @{
subroutine setcoef &
& ( pavel,tavel,tz,stemp,h2ovmr,colamt,coldry,colbrd, & ! --- inputs:
& nlay, nlp1, &
& laytrop,pklay,pklev,jp,jt,jt1, & ! --- outputs:
& rfrate,fac00,fac01,fac10,fac11, &
& selffac,selffrac,indself,forfac,forfrac,indfor, &
& minorfrac,scaleminor,scaleminorn2,indminor &
& )
! =================== program usage description =================== !
! !
! purpose: compute various coefficients needed in radiative transfer !
! calculations. !
! !
! subprograms called: none !
! !
! ==================== definition of variables ==================== !
! !
! inputs: -size- !
! pavel - real, layer pressures (mb) nlay !
! tavel - real, layer temperatures (k) nlay !
! tz - real, level (interface) temperatures (k) 0:nlay !
! stemp - real, surface ground temperature (k) 1 !
! h2ovmr - real, layer w.v. volume mixing ratio (kg/kg) nlay !
! colamt - real, column amounts of absorbing gases nlay*maxgas!
! 2nd indices range: 1-maxgas, for watervapor, !
! carbon dioxide, ozone, nitrous oxide, methane, !
! oxygen, carbon monoxide,etc. (molecules/cm**2) !
! coldry - real, dry air column amount nlay !
! colbrd - real, column amount of broadening gases nlay !
! nlay/nlp1 - integer, total number of vertical layers, levels 1 !
! !
! outputs: !
! laytrop - integer, tropopause layer index (unitless) 1 !
! pklay - real, integrated planck func at lay temp nbands*0:nlay!
! pklev - real, integrated planck func at lev temp nbands*0:nlay!
! jp - real, indices of lower reference pressure nlay !
! jt, jt1 - real, indices of lower reference temperatures nlay !
! rfrate - real, ref ratios of binary species param nlay*nrates*2!
! (:,m,:)m=1-h2o/co2,2-h2o/o3,3-h2o/n2o,4-h2o/ch4,5-n2o/co2,6-o3/co2!
! (:,:,n)n=1,2: the rates of ref press at the 2 sides of the layer !
! facij - real, factors multiply the reference ks, nlay !
! i,j=0/1 for lower/higher of the 2 appropriate !
! temperatures and altitudes. !
! selffac - real, scale factor for w. v. self-continuum nlay !
! equals (w. v. density)/(atmospheric density !
! at 296k and 1013 mb) !
! selffrac - real, factor for temperature interpolation of nlay !
! reference w. v. self-continuum data !
! indself - integer, index of lower ref temp for selffac nlay !
! forfac - real, scale factor for w. v. foreign-continuum nlay !
! forfrac - real, factor for temperature interpolation of nlay !
! reference w.v. foreign-continuum data !
! indfor - integer, index of lower ref temp for forfac nlay !
! minorfrac - real, factor for minor gases nlay !
! scaleminor,scaleminorn2 !
! - real, scale factors for minor gases nlay !
! indminor - integer, index of lower ref temp for minor gases nlay !
! !
! ====================== end of definitions =================== !
! --- inputs:
integer, intent(in) :: nlay, nlp1
real (kind=kind_phys), dimension(nlay,maxgas),intent(in):: colamt
real (kind=kind_phys), dimension(0:nlay), intent(in):: tz
real (kind=kind_phys), dimension(nlay), intent(in) :: pavel, &
& tavel, h2ovmr, coldry, colbrd
real (kind=kind_phys), intent(in) :: stemp
! --- outputs:
integer, dimension(nlay), intent(out) :: jp, jt, jt1, indself, &
& indfor, indminor
integer, intent(out) :: laytrop
real (kind=kind_phys), dimension(nlay,nrates,2), intent(out) :: &
& rfrate
real (kind=kind_phys), dimension(nbands,0:nlay), intent(out) :: &
& pklev, pklay
real (kind=kind_phys), dimension(nlay), intent(out) :: &
& fac00, fac01, fac10, fac11, selffac, selffrac, forfac, &
& forfrac, minorfrac, scaleminor, scaleminorn2
! --- locals:
real (kind=kind_phys) :: tlvlfr, tlyrfr, plog, fp, ft, ft1, &
& tem1, tem2
integer :: i, k, jp1, indlev, indlay
!
!===> ... begin here
!
!> -# Calculate information needed by the radiative transfer routine
!! that is specific to this atmosphere, especially some of the
!! coefficients and indices needed to compute the optical depths
!! by interpolating data from stored reference atmospheres.
! note(review): the index mapping below assumes the totplnk table is
! tabulated at 1 k intervals starting near 160 k - confirm table dims
indlay = min(180, max(1, int(stemp-159.0) ))
indlev = min(180, max(1, int(tz(0)-159.0) ))
tlyrfr = stemp - int(stemp)
tlvlfr = tz(0) - int(tz(0))
do i = 1, nbands
tem1 = totplnk(indlay+1,i) - totplnk(indlay,i)
tem2 = totplnk(indlev+1,i) - totplnk(indlev,i)
pklay(i,0) = delwave(i) * (totplnk(indlay,i) + tlyrfr*tem1)
pklev(i,0) = delwave(i) * (totplnk(indlev,i) + tlvlfr*tem2)
enddo
! --- ... begin layer loop
!> -# Calculate the integrated Planck functions for each band at the
!! surface, level, and layer temperatures.
laytrop = 0
do k = 1, nlay
indlay = min(180, max(1, int(tavel(k)-159.0) ))
tlyrfr = tavel(k) - int(tavel(k))
indlev = min(180, max(1, int(tz(k)-159.0) ))
tlvlfr = tz(k) - int(tz(k))
! --- ... begin spectral band loop
do i = 1, nbands
pklay(i,k) = delwave(i) * (totplnk(indlay,i) + tlyrfr &
& * (totplnk(indlay+1,i) - totplnk(indlay,i)) )
pklev(i,k) = delwave(i) * (totplnk(indlev,i) + tlvlfr &
& * (totplnk(indlev+1,i) - totplnk(indlev,i)) )
enddo
!> -# Find the two reference pressures on either side of the
!! layer pressure. store them in jp and jp1. store in fp the
!! fraction of the difference (in ln(pressure)) between these
!! two values that the layer pressure lies.
plog = log(pavel(k))
jp(k)= max(1, min(58, int(36.0 - 5.0*(plog+0.04)) ))
jp1 = jp(k) + 1
! --- ... limit pressure extrapolation at the top
fp = max(f_zero, min(f_one, 5.0*(preflog(jp(k))-plog) ))
!org fp = 5.0 * (preflog(jp(k)) - plog)
!> -# Determine, for each reference pressure (jp and jp1), which
!! reference temperature (these are different for each
!! reference pressure) is nearest the layer temperature but does
!! not exceed it. store these indices in jt and jt1, resp.
!! store in ft (resp. ft1) the fraction of the way between jt
!! (jt1) and the next highest reference temperature that the
!! layer temperature falls.
tem1 = (tavel(k)-tref(jp(k))) / 15.0
tem2 = (tavel(k)-tref(jp1 )) / 15.0
jt (k) = max(1, min(4, int(3.0 + tem1) ))
jt1(k) = max(1, min(4, int(3.0 + tem2) ))
! --- ... restrict extrapolation ranges by limiting abs(det t) < 37.5 deg
ft = max(-0.5, min(1.5, tem1 - float(jt (k) - 3) ))
ft1 = max(-0.5, min(1.5, tem2 - float(jt1(k) - 3) ))
!org ft = tem1 - float(jt (k) - 3)
!org ft1 = tem2 - float(jt1(k) - 3)
!> -# We have now isolated the layer ln pressure and temperature,
!! between two reference pressures and two reference temperatures
!!(for each reference pressure). we multiply the pressure
!! fraction fp with the appropriate temperature fractions to get
!! the factors that will be needed for the interpolation that yields
!! the optical depths (performed in routines taugbn for band n).
tem1 = f_one - fp
fac10(k) = tem1 * ft
fac00(k) = tem1 * (f_one - ft)
fac11(k) = fp * ft1
fac01(k) = fp * (f_one - ft1)
forfac(k) = pavel(k)*stpfac / (tavel(k)*(1.0 + h2ovmr(k)))
selffac(k) = h2ovmr(k) * forfac(k)
!> -# Set up factors needed to separately include the minor gases
!! in the calculation of absorption coefficient.
scaleminor(k) = pavel(k) / tavel(k)
scaleminorn2(k) = (pavel(k) / tavel(k)) &
& * (colbrd(k)/(coldry(k) + colamt(k,1)))
tem1 = (tavel(k) - 180.8) / 7.2
indminor(k) = min(18, max(1, int(tem1)))
minorfrac(k) = tem1 - float(indminor(k))
!> -# If the pressure is less than ~100mb, perform a different
!! set of species interpolations.
if (plog > 4.56) then
! --- lower atmosphere (troposphere): count the layer into laytrop
laytrop = laytrop + 1
tem1 = (332.0 - tavel(k)) / 36.0
indfor(k) = min(2, max(1, int(tem1)))
forfrac(k) = tem1 - float(indfor(k))
!> -# Set up factors needed to separately include the water vapor
!! self-continuum in the calculation of absorption coefficient.
tem1 = (tavel(k) - 188.0) / 7.2
indself(k) = min(9, max(1, int(tem1)-7))
selffrac(k) = tem1 - float(indself(k) + 7)
!> -# Setup reference ratio to be used in calculation of binary
!! species parameter in lower atmosphere.
rfrate(k,1,1) = chi_mls(1,jp(k)) / chi_mls(2,jp(k))
rfrate(k,1,2) = chi_mls(1,jp(k)+1) / chi_mls(2,jp(k)+1)
rfrate(k,2,1) = chi_mls(1,jp(k)) / chi_mls(3,jp(k))
rfrate(k,2,2) = chi_mls(1,jp(k)+1) / chi_mls(3,jp(k)+1)
rfrate(k,3,1) = chi_mls(1,jp(k)) / chi_mls(4,jp(k))
rfrate(k,3,2) = chi_mls(1,jp(k)+1) / chi_mls(4,jp(k)+1)
rfrate(k,4,1) = chi_mls(1,jp(k)) / chi_mls(6,jp(k))
rfrate(k,4,2) = chi_mls(1,jp(k)+1) / chi_mls(6,jp(k)+1)
rfrate(k,5,1) = chi_mls(4,jp(k)) / chi_mls(2,jp(k))
rfrate(k,5,2) = chi_mls(4,jp(k)+1) / chi_mls(2,jp(k)+1)
else
! --- upper atmosphere: fixed foreign-continuum index, no self-continuum
tem1 = (tavel(k) - 188.0) / 36.0
indfor(k) = 3
forfrac(k) = tem1 - f_one
indself(k) = 0
selffrac(k) = f_zero
!> -# Setup reference ratio to be used in calculation of binary
!! species parameter in upper atmosphere.
rfrate(k,1,1) = chi_mls(1,jp(k)) / chi_mls(2,jp(k))
rfrate(k,1,2) = chi_mls(1,jp(k)+1) / chi_mls(2,jp(k)+1)
rfrate(k,6,1) = chi_mls(3,jp(k)) / chi_mls(2,jp(k))
rfrate(k,6,2) = chi_mls(3,jp(k)+1) / chi_mls(2,jp(k)+1)
endif
!> -# Rescale \a selffac and \a forfac for use in taumol.
selffac(k) = colamt(k,1) * selffac(k)
forfac(k) = colamt(k,1) * forfac(k)
enddo ! end do_k layer loop
return
! ..................................
end subroutine setcoef
!> @}
! ----------------------------------
!>\ingroup module_radlw_main
!> This subroutine computes the upward/downward radiative fluxes, and
!! heating rates for both clear or cloudy atmosphere. Clouds assumed as
!! randomly overlapping in a vertical column.
!!\brief Original Code Description: this program calculates the upward
!! fluxes, downward fluxes, and heating rates for an arbitrary clear or
!! cloudy atmosphere. The input to this program is the atmospheric
!! profile, all Planck function information, and the cloud fraction by
!! layer. A variable diffusivity angle (secdif) is used for the angle
!! integration. Bands 2-3 and 5-9 use a value for secdif that varies
!! from 1.50 to 1.80 as a function of the column water vapor, and other
!! bands use a value of 1.66. The gaussian weight appropriate to this
!! angle (wtdiff =0.5) is applied here. Note that use of the emissivity
!! angle for the flux integration can cause errors of 1 to 4 \f$W/m^2\f$
!! within cloudy layers. Clouds are treated with a random cloud overlap
!! method.
!!\param semiss lw surface emissivity
!!\param delp layer pressure thickness (mb)
!!\param cldfrc layer cloud fraction
!!\param taucld layer cloud opt depth
!!\param tautot total optical depth (gas+aerosols)
!!\param pklay integrated planck function at lay temp
!!\param pklev integrated planck func at lev temp
!!\param fracs planck fractions
!!\param secdif secant of diffusivity angle
!!\param nlay number of vertical layers
!!\param nlp1 number of vertical levels (interfaces)
!!\param totuflux total sky upward flux \f$(w/m^2)\f$
!!\param totdflux total sky downward flux \f$(w/m^2)\f$
!!\param htr total sky heating rate (k/sec or k/day)
!!\param totuclfl clear sky upward flux \f$(w/m^2)\f$
!!\param totdclfl clear sky downward flux \f$(w/m^2)\f$
!!\param htrcl clear sky heating rate (k/sec or k/day)
!!\param htrb spectral band lw heating rate (k/day)
!>\section gen_rtrn rtrn General Algorithm
!! @{
! ----------------------------------
subroutine rtrn &
& ( semiss,delp,cldfrc,taucld,tautot,pklay,pklev, & ! --- inputs
& fracs,secdif, nlay,nlp1, &
& totuflux,totdflux,htr, totuclfl,totdclfl,htrcl, htrb & ! --- outputs
& )
! =================== program usage description =================== !
! !
! purpose: compute the upward/downward radiative fluxes, and heating !
! rates for both clear or cloudy atmosphere. clouds are assumed as !
! randomly overlapping in a vertical column. !
! !
! subprograms called: none !
! !
! ==================== defination of variables ==================== !
! !
! inputs: -size- !
! semiss - real, lw surface emissivity nbands!
! delp - real, layer pressure thickness (mb) nlay !
! cldfrc - real, layer cloud fraction 0:nlp1 !
! taucld - real, layer cloud opt depth nbands,nlay!
! tautot - real, total optical depth (gas+aerosols) ngptlw,nlay!
! pklay - real, integrated planck func at lay temp nbands*0:nlay!
! pklev - real, integrated planck func at lev temp nbands*0:nlay!
! fracs - real, planck fractions ngptlw,nlay!
! secdif - real, secant of diffusivity angle nbands!
! nlay - integer, number of vertical layers 1 !
! nlp1 - integer, number of vertical levels (interfaces) 1 !
! !
! outputs: !
! totuflux- real, total sky upward flux (w/m2) 0:nlay !
! totdflux- real, total sky downward flux (w/m2) 0:nlay !
! htr - real, total sky heating rate (k/sec or k/day) nlay !
! totuclfl- real, clear sky upward flux (w/m2) 0:nlay !
! totdclfl- real, clear sky downward flux (w/m2) 0:nlay !
! htrcl - real, clear sky heating rate (k/sec or k/day) nlay !
! htrb - real, spectral band lw heating rate (k/day) nlay*nbands!
! !
! module veriables: !
! ngb - integer, band index for each g-value ngptlw!
! fluxfac - real, conversion factor for fluxes (pi*2.e4) 1 !
! heatfac - real, conversion factor for heating rates (g/cp*1e-2) 1 !
! tblint - real, conversion factor for look-up tbl (float(ntbl) 1 !
! bpade - real, pade approx constant (1/0.278) 1 !
! wtdiff - real, weight for radiance to flux conversion 1 !
! ntbl - integer, dimension of look-up tables 1 !
! tau_tbl - real, clr-sky opt dep lookup table 0:ntbl !
! exp_tbl - real, transmittance lookup table 0:ntbl !
! tfn_tbl - real, tau transition function 0:ntbl !
! !
! local variables: !
! itgas - integer, index for gases contribution look-up table 1 !
! ittot - integer, index for gases plus clouds look-up table 1 !
! reflct - real, surface reflectance 1 !
! atrgas - real, gaseous absorptivity 1 !
! atrtot - real, gaseous and cloud absorptivity 1 !
! odcld - real, cloud optical depth 1 !
! efclrfr- real, effective clear sky fraction (1-efcldfr) nlay !
! odepth - real, optical depth of gaseous only 1 !
! odtot - real, optical depth of gas and cloud 1 !
! gasfac - real, gas-only pade factor, used for planck fn 1 !
! totfac - real, gas+cld pade factor, used for planck fn 1 !
! bbdgas - real, gas-only planck function for downward rt 1 !
! bbugas - real, gas-only planck function for upward rt 1 !
! bbdtot - real, gas and cloud planck function for downward rt 1 !
! bbutot - real, gas and cloud planck function for upward rt 1 !
! gassrcu- real, upwd source radiance due to gas only nlay!
! totsrcu- real, upwd source radiance due to gas+cld nlay!
! gassrcd- real, dnwd source radiance due to gas only 1 !
! totsrcd- real, dnwd source radiance due to gas+cld 1 !
! radtotu- real, spectrally summed total sky upwd radiance 1 !
! radclru- real, spectrally summed clear sky upwd radiance 1 !
! radtotd- real, spectrally summed total sky dnwd radiance 1 !
! radclrd- real, spectrally summed clear sky dnwd radiance 1 !
! toturad- real, total sky upward radiance by layer 0:nlay*nbands!
! clrurad- real, clear sky upward radiance by layer 0:nlay*nbands!
! totdrad- real, total sky downward radiance by layer 0:nlay*nbands!
! clrdrad- real, clear sky downward radiance by layer 0:nlay*nbands!
! fnet - real, net longwave flux (w/m2) 0:nlay !
! fnetc - real, clear sky net longwave flux (w/m2) 0:nlay !
! !
! !
! ******************************************************************* !
! original code description !
! !
! original version: e. j. mlawer, et al. rrtm_v3.0 !
! revision for gcms: michael j. iacono; october, 2002 !
! revision for f90: michael j. iacono; june, 2006 !
! !
! this program calculates the upward fluxes, downward fluxes, and !
! heating rates for an arbitrary clear or cloudy atmosphere. the input !
! to this program is the atmospheric profile, all Planck function !
! information, and the cloud fraction by layer. a variable diffusivity!
! angle (secdif) is used for the angle integration. bands 2-3 and 5-9 !
! use a value for secdif that varies from 1.50 to 1.80 as a function !
! of the column water vapor, and other bands use a value of 1.66. the !
! gaussian weight appropriate to this angle (wtdiff=0.5) is applied !
! here. note that use of the emissivity angle for the flux integration!
! can cause errors of 1 to 4 W/m2 within cloudy layers. !
! clouds are treated with a random cloud overlap method. !
! !
! ******************************************************************* !
! ====================== end of description block ================= !
! --- inputs:
integer, intent(in) :: nlay, nlp1
real (kind=kind_phys), dimension(0:nlp1), intent(in) :: cldfrc
real (kind=kind_phys), dimension(nbands), intent(in) :: semiss, &
& secdif
real (kind=kind_phys), dimension(nlay), intent(in) :: delp
real (kind=kind_phys), dimension(nbands,nlay),intent(in):: taucld
real (kind=kind_phys), dimension(ngptlw,nlay),intent(in):: fracs, &
& tautot
real (kind=kind_phys), dimension(nbands,0:nlay), intent(in) :: &
& pklev, pklay
! --- outputs:
real (kind=kind_phys), dimension(nlay), intent(out) :: htr, htrcl
real (kind=kind_phys), dimension(nlay,nbands),intent(out) :: htrb
real (kind=kind_phys), dimension(0:nlay), intent(out) :: &
& totuflux, totdflux, totuclfl, totdclfl
! --- locals:
real (kind=kind_phys), parameter :: rec_6 = 0.166667
real (kind=kind_phys), dimension(0:nlay,nbands) :: clrurad, &
& clrdrad, toturad, totdrad
real (kind=kind_phys), dimension(nlay) :: gassrcu, totsrcu, &
& trngas, efclrfr, rfdelp
real (kind=kind_phys), dimension(0:nlay) :: fnet, fnetc
real (kind=kind_phys) :: totsrcd, gassrcd, tblind, odepth, odtot, &
& odcld, atrtot, atrgas, reflct, totfac, gasfac, flxfac, &
& plfrac, blay, bbdgas, bbdtot, bbugas, bbutot, dplnku, &
& dplnkd, radtotu, radclru, radtotd, radclrd, rad0, &
& clfr, trng, gasu
integer :: ittot, itgas, ib, ig, k
!
!===> ... begin here
!
do ib = 1, NBANDS
do k = 0, NLAY
toturad(k,ib) = f_zero
totdrad(k,ib) = f_zero
clrurad(k,ib) = f_zero
clrdrad(k,ib) = f_zero
enddo
enddo
do k = 0, nlay
totuflux(k) = f_zero
totdflux(k) = f_zero
totuclfl(k) = f_zero
totdclfl(k) = f_zero
enddo
! --- ... loop over all g-points
do ig = 1, ngptlw
ib = ngb(ig)
radtotd = f_zero
radclrd = f_zero
!> -# Downward radiative transfer loop.
do k = nlay, 1, -1
!!\n - clear sky, gases contribution
odepth = max( f_zero, secdif(ib)*tautot(ig,k) )
if (odepth <= 0.06) then
atrgas = odepth - 0.5*odepth*odepth
trng = f_one - atrgas
gasfac = rec_6 * odepth
else
tblind = odepth / (bpade + odepth)
itgas = tblint*tblind + 0.5
trng = exp_tbl(itgas)
atrgas = f_one - trng
gasfac = tfn_tbl(itgas)
odepth = tau_tbl(itgas)
endif
plfrac = fracs(ig,k)
blay = pklay(ib,k)
dplnku = pklev(ib,k ) - blay
dplnkd = pklev(ib,k-1) - blay
bbdgas = plfrac * (blay + dplnkd*gasfac)
bbugas = plfrac * (blay + dplnku*gasfac)
gassrcd= bbdgas * atrgas
gassrcu(k)= bbugas * atrgas
trngas(k) = trng
!!\n - total sky, gases+clouds contribution
clfr = cldfrc(k)
if (clfr >= eps) then
!!\n - cloudy layer
odcld = secdif(ib) * taucld(ib,k)
efclrfr(k) = f_one-(f_one - exp(-odcld))*clfr
odtot = odepth + odcld
if (odtot < 0.06) then
totfac = rec_6 * odtot
atrtot = odtot - 0.5*odtot*odtot
else
tblind = odtot / (bpade + odtot)
ittot = tblint*tblind + 0.5
totfac = tfn_tbl(ittot)
atrtot = f_one - exp_tbl(ittot)
endif
bbdtot = plfrac * (blay + dplnkd*totfac)
bbutot = plfrac * (blay + dplnku*totfac)
totsrcd= bbdtot * atrtot
totsrcu(k)= bbutot * atrtot
! --- ... total sky radiance
radtotd = radtotd*trng*efclrfr(k) + gassrcd &
& + clfr*(totsrcd - gassrcd)
totdrad(k-1,ib) = totdrad(k-1,ib) + radtotd
! --- ... clear sky radiance
radclrd = radclrd*trng + gassrcd
clrdrad(k-1,ib) = clrdrad(k-1,ib) + radclrd
else
! --- ... clear layer
! --- ... total sky radiance
radtotd = radtotd*trng + gassrcd
totdrad(k-1,ib) = totdrad(k-1,ib) + radtotd
! --- ... clear sky radiance
radclrd = radclrd*trng + gassrcd
clrdrad(k-1,ib) = clrdrad(k-1,ib) + radclrd
endif ! end if_clfr_block
enddo ! end do_k_loop
!> -# Compute spectral emissivity & reflectance, include the
!! contribution of spectrally varying longwave emissivity and
!! reflection from the surface to the upward radiative transfer.
! note: spectral and Lambertian reflection are identical for the
! diffusivity angle flux integration used here.
reflct = f_one - semiss(ib)
rad0 = semiss(ib) * fracs(ig,1) * pklay(ib,0)
!> -# Compute total sky radiance.
radtotu = rad0 + reflct*radtotd
toturad(0,ib) = toturad(0,ib) + radtotu
!> -# Compute clear sky radiance
radclru = rad0 + reflct*radclrd
clrurad(0,ib) = clrurad(0,ib) + radclru
!> -# Upward radiative transfer loop.
do k = 1, nlay
clfr = cldfrc(k)
trng = trngas(k)
gasu = gassrcu(k)
if (clfr >= eps) then
! --- ... cloudy layer
! --- ... total sky radiance
radtotu = radtotu*trng*efclrfr(k) + gasu &
& + clfr*(totsrcu(k) - gasu)
toturad(k,ib) = toturad(k,ib) + radtotu
! --- ... clear sky radiance
radclru = radclru*trng + gasu
clrurad(k,ib) = clrurad(k,ib) + radclru
else
! --- ... clear layer
! --- ... total sky radiance
radtotu = radtotu*trng + gasu
toturad(k,ib) = toturad(k,ib) + radtotu
! --- ... clear sky radiance
radclru = radclru*trng + gasu
clrurad(k,ib) = clrurad(k,ib) + radclru
endif ! end if_clfr_block
enddo ! end do_k_loop
enddo ! end do_ig_loop
!> -# Process longwave output from band for total and clear streams.
!! Calculate upward, downward, and net flux.
flxfac = wtdiff * fluxfac
do k = 0, nlay
do ib = 1, nbands
totuflux(k) = totuflux(k) + toturad(k,ib)
totdflux(k) = totdflux(k) + totdrad(k,ib)
totuclfl(k) = totuclfl(k) + clrurad(k,ib)
totdclfl(k) = totdclfl(k) + clrdrad(k,ib)
enddo
totuflux(k) = totuflux(k) * flxfac
totdflux(k) = totdflux(k) * flxfac
totuclfl(k) = totuclfl(k) * flxfac
totdclfl(k) = totdclfl(k) * flxfac
enddo
! --- ... calculate net fluxes and heating rates
fnet(0) = totuflux(0) - totdflux(0)
do k = 1, nlay
rfdelp(k) = heatfac / delp(k)
fnet(k) = totuflux(k) - totdflux(k)
htr (k) = (fnet(k-1) - fnet(k)) * rfdelp(k)
enddo
!! --- ... optional clear sky heating rates
if ( lhlw0 ) then
fnetc(0) = totuclfl(0) - totdclfl(0)
do k = 1, nlay
fnetc(k) = totuclfl(k) - totdclfl(k)
htrcl(k) = (fnetc(k-1) - fnetc(k)) * rfdelp(k)
enddo
endif
!! --- ... optional spectral band heating rates
if ( lhlwb ) then
do ib = 1, nbands
fnet(0) = (toturad(0,ib) - totdrad(0,ib)) * flxfac
do k = 1, nlay
fnet(k) = (toturad(k,ib) - totdrad(k,ib)) * flxfac
htrb(k,ib) = (fnet(k-1) - fnet(k)) * rfdelp(k)
enddo
enddo
endif
! ..................................
end subroutine rtrn
!! @}
! ----------------------------------
!>\ingroup module_radlw_main
!> This subroutine computes the upward/downward radiative fluxes, and
!! heating rates for both clear or cloudy atmosphere. Clouds are
!! assumed to be maximum-randomly overlapping in a vertical column.
!!\param semiss lw surface emissivity
!!\param delp layer pressure thickness (mb)
!!\param cldfrc layer cloud fraction
!!\param taucld layer cloud opt depth
!!\param tautot total optical depth (gas+aerosols)
!!\param pklay integrated planck func at lay temp
!!\param pklev integrated planck func at lev temp
!!\param fracs planck fractions
!!\param secdif secant of diffusivity angle
!!\param nlay number of vertical layers
!!\param nlp1 number of vertical levels (interfaces)
!!\param totuflux total sky upward flux (\f$w/m^2\f$)
!!\param totdflux total sky downward flux (\f$w/m^2\f$)
!!\param htr total sky heating rate (k/sec or k/day)
!!\param totuclfl clear sky upward flux (\f$w/m^2\f$)
!!\param totdclfl clear sky downward flux (\f$w/m^2\f$)
!!\param htrcl clear sky heating rate (k/sec or k/day)
!!\param htrb spectral band lw heating rate (k/day)
!!\section gen_rtrnmr rtrnmr General Algorithm
!> @{
! ----------------------------------
      subroutine rtrnmr                                               &
     &     ( semiss,delp,cldfrc,taucld,tautot,pklay,pklev,            &   !  ---  inputs
     &       fracs,secdif, nlay,nlp1,                                 &
     &       totuflux,totdflux,htr, totuclfl,totdclfl,htrcl, htrb     &   !  ---  outputs:
     &     )

!  ===================  program usage description  =================== !
!                                                                      !
! purpose:  compute the upward/downward radiative fluxes, and heating  !
! rates for both clear or cloudy atmosphere.  clouds are assumed as in !
! maximum-randomly overlapping in a vertical column.                   !
!                                                                      !
! subprograms called:  none                                            !
!                                                                      !
!  ====================  definition of variables  ==================== !
!                                                                      !
!  inputs:                                                     -size-  !
!   semiss  - real, lw surface emissivity                         nbands!
!   delp    - real, layer pressure thickness (mb)                  nlay !
!   cldfrc  - real, layer cloud fraction                         0:nlp1 !
!   taucld  - real, layer cloud opt depth                    nbands,nlay!
!   tautot  - real, total optical depth (gas+aerosols)       ngptlw,nlay!
!   pklay   - real, integrated planck func at lay temp     nbands*0:nlay!
!   pklev   - real, integrated planck func at lev temp     nbands*0:nlay!
!   fracs   - real, planck fractions                         ngptlw,nlay!
!   secdif  - real, secant of diffusivity angle                   nbands!
!   nlay    - integer, number of vertical layers                     1  !
!   nlp1    - integer, number of vertical levels (interfaces)        1  !
!                                                                      !
!  outputs:                                                            !
!   totuflux- real, total sky upward flux (w/m2)                 0:nlay !
!   totdflux- real, total sky downward flux (w/m2)               0:nlay !
!   htr     - real, total sky heating rate (k/sec or k/day)        nlay !
!   totuclfl- real, clear sky upward flux (w/m2)                 0:nlay !
!   totdclfl- real, clear sky downward flux (w/m2)               0:nlay !
!   htrcl   - real, clear sky heating rate (k/sec or k/day)        nlay !
!   htrb    - real, spectral band lw heating rate (k/day)   nlay*nbands !
!                                                                      !
!  module variables:                                                   !
!   ngb     - integer, band index for each g-value               ngptlw!
!   fluxfac - real, conversion factor for fluxes (pi*2.e4)           1 !
!   heatfac - real, conversion factor for heating rates (g/cp*1e-2)  1 !
!   tblint  - real, conversion factor for look-up tbl (float(ntbl)   1 !
!   bpade   - real, pade approx constant (1/0.278)                   1 !
!   wtdiff  - real, weight for radiance to flux conversion           1 !
!   ntbl    - integer, dimension of look-up tables                   1 !
!   tau_tbl - real, clr-sky opt dep lookup table                 0:ntbl !
!   exp_tbl - real, transmittance lookup table                   0:ntbl !
!   tfn_tbl - real, tau transition function                      0:ntbl !
!                                                                      !
!  local variables:                                                    !
!    itgas  - integer, index for gases contribution look-up table    1 !
!    ittot  - integer, index for gases plus clouds  look-up table    1 !
!    reflct - real, surface reflectance                              1 !
!    atrgas - real, gaseous absorptivity                             1 !
!    atrtot - real, gaseous and cloud absorptivity                   1 !
!    odcld  - real, cloud optical depth                              1 !
!    odepth - real, optical depth of gaseous only                    1 !
!    odtot  - real, optical depth of gas and cloud                   1 !
!    gasfac - real, gas-only pade factor, used for planck fn         1 !
!    totfac - real, gas+cld pade factor, used for planck fn          1 !
!    bbdgas - real, gas-only planck function for downward rt         1 !
!    bbugas - real, gas-only planck function for upward rt           1 !
!    bbdtot - real, gas and cloud planck function for downward rt    1 !
!    bbutot - real, gas and cloud planck function for upward rt      1 !
!    gassrcu- real, upwd source radiance due to gas only            nlay!
!    totsrcu- real, upwd source radiance due to gas + cld           nlay!
!    gassrcd- real, dnwd source radiance due to gas only             1 !
!    totsrcd- real, dnwd source radiance due to gas + cld            1 !
!    radtotu- real, spectrally summed total sky upwd radiance        1 !
!    radclru- real, spectrally summed clear sky upwd radiance        1 !
!    radtotd- real, spectrally summed total sky dnwd radiance        1 !
!    radclrd- real, spectrally summed clear sky dnwd radiance        1 !
!    toturad- real, total sky upward radiance by layer     0:nlay*nbands!
!    clrurad- real, clear sky upward radiance by layer     0:nlay*nbands!
!    totdrad- real, total sky downward radiance by layer   0:nlay*nbands!
!    clrdrad- real, clear sky downward radiance by layer   0:nlay*nbands!
!    fnet   - real, net longwave flux (w/m2)                     0:nlay !
!    fnetc  - real, clear sky net longwave flux (w/m2)           0:nlay !
!                                                                      !
!                                                                      !
!  *******************************************************************  !
!  original code description                                            !
!                                                                       !
!  original version:   e. j. mlawer, et al. rrtm_v3.0                   !
!  revision for gcms:  michael j. iacono; october, 2002                 !
!  revision for f90:   michael j. iacono; june, 2006                    !
!                                                                       !
!  this program calculates the upward fluxes, downward fluxes, and      !
!  heating rates for an arbitrary clear or cloudy atmosphere. the input !
!  to this program is the atmospheric profile, all Planck function      !
!  information, and the cloud fraction by layer.  a variable diffusivity!
!  angle (secdif) is used for the angle integration. bands 2-3 and 5-9  !
!  use a value for secdif that varies from 1.50 to 1.80 as a function   !
!  of the column water vapor, and other bands use a value of 1.66.  the !
!  gaussian weight appropriate to this angle (wtdiff=0.5) is applied    !
!  here.  note that use of the emissivity angle for the flux integration!
!  can cause errors of 1 to 4 W/m2 within cloudy layers.                !
!  clouds are treated with a maximum-random cloud overlap method.       !
!                                                                       !
!  *******************************************************************  !
!  ======================  end of description block  =================  !

!  ---  inputs:
      integer, intent(in) :: nlay, nlp1

      real (kind=kind_phys), dimension(0:nlp1), intent(in) :: cldfrc

      real (kind=kind_phys), dimension(nbands), intent(in) :: semiss,  &
     &       secdif

      real (kind=kind_phys), dimension(nlay),   intent(in) :: delp

      real (kind=kind_phys), dimension(nbands,nlay),intent(in):: taucld
      real (kind=kind_phys), dimension(ngptlw,nlay),intent(in):: fracs, &
     &       tautot

      real (kind=kind_phys), dimension(nbands,0:nlay), intent(in) ::   &
     &       pklev, pklay

!  ---  outputs:
      real (kind=kind_phys), dimension(nlay), intent(out) :: htr, htrcl

      real (kind=kind_phys), dimension(nlay,nbands),intent(out) :: htrb

      real (kind=kind_phys), dimension(0:nlay), intent(out) ::         &
     &       totuflux, totdflux, totuclfl, totdclfl

!  ---  locals:
      real (kind=kind_phys), parameter :: rec_6 = 0.166667

      real (kind=kind_phys), dimension(0:nlay,nbands) :: clrurad,      &
     &       clrdrad, toturad, totdrad

      real (kind=kind_phys), dimension(nlay)   :: gassrcu, totsrcu,    &
     &       trngas, trntot, rfdelp

      real (kind=kind_phys), dimension(0:nlay) :: fnet, fnetc

      real (kind=kind_phys) :: totsrcd, gassrcd, tblind, odepth, odtot, &
     &       odcld, atrtot, atrgas, reflct, totfac, gasfac, flxfac,    &
     &       plfrac, blay, bbdgas, bbdtot, bbugas, bbutot, dplnku,     &
     &       dplnkd, radtotu, radclru, radtotd, radclrd, rad0, rad,    &
     &       totradd, clrradd, totradu, clrradu, fmax, fmin, rat1, rat2,&
     &       radmod, clfr, trng, trnt, gasu, totu

      integer :: ittot, itgas, ib, ig, k

!  dimensions for cloud overlap adjustment
      real (kind=kind_phys), dimension(nlp1)   :: faccld1u, faccld2u,  &
     &       facclr1u, facclr2u, faccmb1u, faccmb2u
      real (kind=kind_phys), dimension(0:nlay) :: faccld1d, faccld2d,  &
     &       facclr1d, facclr2d, faccmb1d, faccmb2d

      logical :: lstcldu(nlay), lstcldd(nlay)
!
!===> ...  begin here
!
!  --- ...  initialize the upward-pass maximum/random overlap
!           adjustment factors

      do k = 1, nlp1
        faccld1u(k) = f_zero
        faccld2u(k) = f_zero
        facclr1u(k) = f_zero
        facclr2u(k) = f_zero
        faccmb1u(k) = f_zero
        faccmb2u(k) = f_zero
      enddo

!  --- ...  lstcldu(k) flags the lowest layer of each contiguous cloud
!           block as seen by the upward pass

      lstcldu(1) = cldfrc(1) > eps
      rat1 = f_zero
      rat2 = f_zero

      do k = 1, nlay-1

        lstcldu(k+1) = cldfrc(k+1)>eps .and. cldfrc(k)<=eps

        if (cldfrc(k) > eps) then
!> -# Setup maximum/random cloud overlap.

          if (cldfrc(k+1) >= cldfrc(k)) then
            if (lstcldu(k)) then
              if (cldfrc(k) < f_one) then
                facclr2u(k+1) = (cldfrc(k+1) - cldfrc(k))              &
     &                        / (f_one - cldfrc(k))
              endif
              facclr2u(k) = f_zero
              faccld2u(k) = f_zero
            else
              fmax = max(cldfrc(k), cldfrc(k-1))
              if (cldfrc(k+1) > fmax) then
                facclr1u(k+1) = rat2
                facclr2u(k+1) = (cldfrc(k+1) - fmax)/(f_one - fmax)
              elseif (cldfrc(k+1) < fmax) then
                facclr1u(k+1) = (cldfrc(k+1) - cldfrc(k))              &
     &                        / (cldfrc(k-1) - cldfrc(k))
              else
                facclr1u(k+1) = rat2
              endif
            endif

!  --- ...  rat1/rat2 remember which factor family fired last and are
!           carried into the next iteration

            if (facclr1u(k+1)>f_zero .or. facclr2u(k+1)>f_zero) then
              rat1 = f_one
              rat2 = f_zero
            else
              rat1 = f_zero
              rat2 = f_zero
            endif
          else
            if (lstcldu(k)) then
              faccld2u(k+1) = (cldfrc(k) - cldfrc(k+1)) / cldfrc(k)
              facclr2u(k) = f_zero
              faccld2u(k) = f_zero
            else
              fmin = min(cldfrc(k), cldfrc(k-1))
              if (cldfrc(k+1) <= fmin) then
                faccld1u(k+1) = rat1
                faccld2u(k+1) = (fmin - cldfrc(k+1)) / fmin
              else
                faccld1u(k+1) = (cldfrc(k) - cldfrc(k+1))              &
     &                        / (cldfrc(k) - fmin)
              endif
            endif

            if (faccld1u(k+1)>f_zero .or. faccld2u(k+1)>f_zero) then
              rat1 = f_zero
              rat2 = f_one
            else
              rat1 = f_zero
              rat2 = f_zero
            endif
          endif

          faccmb1u(k+1) = facclr1u(k+1) * faccld2u(k) * cldfrc(k-1)
          faccmb2u(k+1) = faccld1u(k+1) * facclr2u(k)                  &
     &                  * (f_one - cldfrc(k-1))
        endif

      enddo

!  --- ...  initialize the downward-pass maximum/random overlap
!           adjustment factors (mirror of the upward setup above)

      do k = 0, nlay
        faccld1d(k) = f_zero
        faccld2d(k) = f_zero
        facclr1d(k) = f_zero
        facclr2d(k) = f_zero
        faccmb1d(k) = f_zero
        faccmb2d(k) = f_zero
      enddo

      lstcldd(nlay) = cldfrc(nlay) > eps
      rat1 = f_zero
      rat2 = f_zero

      do k = nlay, 2, -1

        lstcldd(k-1) = cldfrc(k-1) > eps .and. cldfrc(k)<=eps

        if (cldfrc(k) > eps) then

          if (cldfrc(k-1) >= cldfrc(k)) then
            if (lstcldd(k)) then
              if (cldfrc(k) < f_one) then
                facclr2d(k-1) = (cldfrc(k-1) - cldfrc(k))              &
     &                        / (f_one - cldfrc(k))
              endif

              facclr2d(k) = f_zero
              faccld2d(k) = f_zero
            else
              fmax = max(cldfrc(k), cldfrc(k+1))

              if (cldfrc(k-1) > fmax) then
                facclr1d(k-1) = rat2
                facclr2d(k-1) = (cldfrc(k-1) - fmax) / (f_one - fmax)
              elseif (cldfrc(k-1) < fmax) then
                facclr1d(k-1) = (cldfrc(k-1) - cldfrc(k))              &
     &                        / (cldfrc(k+1) - cldfrc(k))
              else
                facclr1d(k-1) = rat2
              endif
            endif

            if (facclr1d(k-1)>f_zero .or. facclr2d(k-1)>f_zero) then
              rat1 = f_one
              rat2 = f_zero
            else
              rat1 = f_zero
              rat2 = f_zero
            endif
          else
            if (lstcldd(k)) then
              faccld2d(k-1) = (cldfrc(k) - cldfrc(k-1)) / cldfrc(k)
              facclr2d(k) = f_zero
              faccld2d(k) = f_zero
            else
              fmin = min(cldfrc(k), cldfrc(k+1))

              if (cldfrc(k-1) <= fmin) then
                faccld1d(k-1) = rat1
                faccld2d(k-1) = (fmin - cldfrc(k-1)) / fmin
              else
                faccld1d(k-1) = (cldfrc(k) - cldfrc(k-1))              &
     &                        / (cldfrc(k) - fmin)
              endif
            endif

            if (faccld1d(k-1)>f_zero .or. faccld2d(k-1)>f_zero) then
              rat1 = f_zero
              rat2 = f_one
            else
              rat1 = f_zero
              rat2 = f_zero
            endif
          endif

          faccmb1d(k-1) = facclr1d(k-1) * faccld2d(k) * cldfrc(k+1)
          faccmb2d(k-1) = faccld1d(k-1) * facclr2d(k)                  &
     &                  * (f_one - cldfrc(k+1))
        endif

      enddo

!> -# Initialize for radiative transfer

      do ib = 1, NBANDS
        do k = 0, NLAY
          toturad(k,ib) = f_zero
          totdrad(k,ib) = f_zero
          clrurad(k,ib) = f_zero
          clrdrad(k,ib) = f_zero
        enddo
      enddo

      do k = 0, nlay
        totuflux(k) = f_zero
        totdflux(k) = f_zero
        totuclfl(k) = f_zero
        totdclfl(k) = f_zero
      enddo

!  --- ...  loop over all g-points

      do ig = 1, ngptlw
        ib = ngb(ig)

        radtotd = f_zero
        radclrd = f_zero

!> -# Downward radiative transfer loop:

        do k = nlay, 1, -1

!  --- ...  clear sky, gases contribution

          odepth = max( f_zero, secdif(ib)*tautot(ig,k) )
          if (odepth <= 0.06) then
!  --- ...  small optical depth: 2nd-order series for absorptivity
            atrgas = odepth - 0.5*odepth*odepth
            trng   = f_one - atrgas
            gasfac = rec_6 * odepth
          else
            tblind = odepth / (bpade + odepth)
!  --- ...  nearest-integer table index (add 0.5, truncate on int assign)
            itgas = tblint*tblind + 0.5
            trng  = exp_tbl(itgas)
            atrgas = f_one - trng
            gasfac = tfn_tbl(itgas)
            odepth = tau_tbl(itgas)
          endif

          plfrac = fracs(ig,k)
          blay = pklay(ib,k)

          dplnku = pklev(ib,k  ) - blay
          dplnkd = pklev(ib,k-1) - blay
          bbdgas = plfrac * (blay + dplnkd*gasfac)
          bbugas = plfrac * (blay + dplnku*gasfac)
          gassrcd   = bbdgas * atrgas
          gassrcu(k)= bbugas * atrgas
          trngas(k) = trng

!  --- ...  total sky, gases+clouds contribution

          clfr = cldfrc(k)
          if (lstcldd(k)) then
!  --- ...  at the top of each cloud block, split the running radiance
!           into its cloudy (totradd) and clear (clrradd) portions
            totradd = clfr * radtotd
            clrradd = radtotd - totradd
            rad = f_zero
          endif

          if (clfr >= eps) then
!> - cloudy layer

            odcld = secdif(ib) * taucld(ib,k)
            odtot = odepth + odcld
            if (odtot < 0.06) then
              totfac = rec_6 * odtot
              atrtot = odtot - 0.5*odtot*odtot
              trnt   = f_one - atrtot
            else
              tblind = odtot / (bpade + odtot)
              ittot  = tblint*tblind + 0.5
              totfac = tfn_tbl(ittot)
              trnt   = exp_tbl(ittot)
              atrtot = f_one - trnt
            endif

            bbdtot = plfrac * (blay + dplnkd*totfac)
            bbutot = plfrac * (blay + dplnku*totfac)
            totsrcd   = bbdtot * atrtot
            totsrcu(k)= bbutot * atrtot
            trntot(k) = trnt

            totradd = totradd*trnt + clfr*totsrcd
            clrradd = clrradd*trng + (f_one - clfr)*gassrcd

!> - total sky radiance
            radtotd = totradd + clrradd
            totdrad(k-1,ib) = totdrad(k-1,ib) + radtotd

!> - clear sky radiance
            radclrd = radclrd*trng + gassrcd
            clrdrad(k-1,ib) = clrdrad(k-1,ib) + radclrd

!  --- ...  shift radiance between the cloudy and clear streams per
!           the precomputed overlap factors

            radmod = rad*(facclr1d(k-1)*trng + faccld1d(k-1)*trnt)     &
     &             - faccmb1d(k-1)*gassrcd + faccmb2d(k-1)*totsrcd

            rad = -radmod + facclr2d(k-1)*(clrradd + radmod)           &
     &          - faccld2d(k-1)*(totradd - radmod)
            totradd = totradd + rad
            clrradd = clrradd - rad

          else
!  --- ...  clear layer

!  --- ...  total sky radiance
            radtotd = radtotd*trng + gassrcd
            totdrad(k-1,ib) = totdrad(k-1,ib) + radtotd

!  --- ...  clear sky radiance
            radclrd = radclrd*trng + gassrcd
            clrdrad(k-1,ib) = clrdrad(k-1,ib) + radclrd

          endif   ! end if_clfr_block

        enddo   ! end do_k_loop

!> -# Compute spectral emissivity & reflectance, include the
!!    contribution of spectrally varying longwave emissivity and
!!    reflection from the surface to the upward radiative transfer.

!     note: spectral and Lambertian reflection are identical for the
!           diffusivity angle flux integration used here.

        reflct = f_one - semiss(ib)
        rad0 = semiss(ib) * fracs(ig,1) * pklay(ib,0)

!> -# Compute total sky radiance.
        radtotu = rad0 + reflct*radtotd
        toturad(0,ib) = toturad(0,ib) + radtotu

!> -# Compute clear sky radiance.
        radclru = rad0 + reflct*radclrd
        clrurad(0,ib) = clrurad(0,ib) + radclru

!> -# Upward radiative transfer loop:

        do k = 1, nlay
          clfr = cldfrc(k)
          trng = trngas(k)
          gasu = gassrcu(k)

          if (lstcldu(k)) then
!  --- ...  at the base of each cloud block, split the running radiance
!           into its cloudy (totradu) and clear (clrradu) portions
            totradu = clfr * radtotu
            clrradu = radtotu - totradu
            rad = f_zero
          endif

          if (clfr >= eps) then
!> - cloudy layer radiance
            trnt = trntot(k)
            totu = totsrcu(k)
            totradu = totradu*trnt + clfr*totu
            clrradu = clrradu*trng + (f_one - clfr)*gasu

!> - total sky radiance
            radtotu = totradu + clrradu
            toturad(k,ib) = toturad(k,ib) + radtotu

!> - clear sky radiance
            radclru = radclru*trng + gasu
            clrurad(k,ib) = clrurad(k,ib) + radclru

!  --- ...  shift radiance between the cloudy and clear streams per
!           the precomputed overlap factors

            radmod = rad*(facclr1u(k+1)*trng + faccld1u(k+1)*trnt)     &
     &             - faccmb1u(k+1)*gasu + faccmb2u(k+1)*totu

            rad = -radmod + facclr2u(k+1)*(clrradu + radmod)           &
     &          - faccld2u(k+1)*(totradu - radmod)
            totradu = totradu + rad
            clrradu = clrradu - rad

          else
!  --- ...  clear layer

!  --- ...  total sky radiance
            radtotu = radtotu*trng + gasu
            toturad(k,ib) = toturad(k,ib) + radtotu

!  --- ...  clear sky radiance
            radclru = radclru*trng + gasu
            clrurad(k,ib) = clrurad(k,ib) + radclru

          endif   ! end if_clfr_block

        enddo   ! end do_k_loop

      enddo   ! end do_ig_loop

!> -# Process longwave output from band for total and clear streams.
!!    calculate upward, downward, and net flux.

      flxfac = wtdiff * fluxfac

      do k = 0, nlay
        do ib = 1, nbands
          totuflux(k) = totuflux(k) + toturad(k,ib)
          totdflux(k) = totdflux(k) + totdrad(k,ib)
          totuclfl(k) = totuclfl(k) + clrurad(k,ib)
          totdclfl(k) = totdclfl(k) + clrdrad(k,ib)
        enddo

        totuflux(k) = totuflux(k) * flxfac
        totdflux(k) = totdflux(k) * flxfac
        totuclfl(k) = totuclfl(k) * flxfac
        totdclfl(k) = totdclfl(k) * flxfac
      enddo

!  --- ...  calculate net fluxes and heating rates (flux divergence
!           across each layer scaled by heatfac/delp)

      fnet(0) = totuflux(0) - totdflux(0)

      do k = 1, nlay
        rfdelp(k) = heatfac / delp(k)
        fnet(k) = totuflux(k) - totdflux(k)
        htr (k) = (fnet(k-1) - fnet(k)) * rfdelp(k)
      enddo

!! --- ...  optional clear sky heating rates
      if ( lhlw0 ) then
        fnetc(0) = totuclfl(0) - totdclfl(0)

        do k = 1, nlay
          fnetc(k) = totuclfl(k) - totdclfl(k)
          htrcl(k) = (fnetc(k-1) - fnetc(k)) * rfdelp(k)
        enddo
      endif

!! --- ...  optional spectral band heating rates
      if ( lhlwb ) then
        do ib = 1, nbands
          fnet(0) = (toturad(0,ib) - totdrad(0,ib)) * flxfac

          do k = 1, nlay
            fnet(k) = (toturad(k,ib) - totdrad(k,ib)) * flxfac
            htrb(k,ib) = (fnet(k-1) - fnet(k)) * rfdelp(k)
          enddo
        enddo
      endif

! .................................
      end subroutine rtrnmr
! ---------------------------------
!> @}
!>\ingroup module_radlw_main
!> \brief This subroutine computes the upward/downward radiative fluxes, and
!! heating rates for both clear or cloudy atmosphere. Clouds are treated
!! with the McICA stochastic approach.
!!
!!\param semiss lw surface emissivity
!!\param delp layer pressure thickness (mb)
!!\param cldfmc layer cloud fraction (sub-column)
!!\param taucld layer cloud opt depth
!!\param tautot total optical depth (gas+aerosols)
!!\param pklay integrated planck func at lay temp
!!\param pklev integrated planck func at lev temp
!!\param fracs planck fractions
!!\param secdif secant of diffusivity angle
!!\param nlay number of vertical layers
!!\param nlp1 number of vertical levels (interfaces)
!!\param totuflux total sky upward flux \f$(w/m^2)\f$
!!\param totdflux total sky downward flux \f$(w/m^2)\f$
!!\param htr total sky heating rate (k/sec or k/day)
!!\param totuclfl clear sky upward flux \f$(w/m^2)\f$
!!\param totdclfl clear sky downward flux \f$(w/m^2)\f$
!!\param htrcl clear sky heating rate (k/sec or k/day)
!!\param htrb spectral band lw heating rate (k/day)
!!\section gen_rtrnmc rtrnmc General Algorithm
!> @{
! ---------------------------------
subroutine rtrnmc &
& ( semiss,delp,cldfmc,taucld,tautot,pklay,pklev, & ! --- inputs:
& fracs,secdif, nlay,nlp1, &
& totuflux,totdflux,htr, totuclfl,totdclfl,htrcl, htrb & ! --- outputs:
& )
! =================== program usage description =================== !
! !
! purpose: compute the upward/downward radiative fluxes, and heating !
! rates for both clear or cloudy atmosphere. clouds are treated with !
! the mcica stochastic approach. !
! !
! subprograms called: none !
! !
! ==================== defination of variables ==================== !
! !
! inputs: -size- !
! semiss - real, lw surface emissivity nbands!
! delp - real, layer pressure thickness (mb) nlay !
! cldfmc - real, layer cloud fraction (sub-column) ngptlw*nlay!
! taucld - real, layer cloud opt depth nbands*nlay!
! tautot - real, total optical depth (gas+aerosols) ngptlw*nlay!
! pklay - real, integrated planck func at lay temp nbands*0:nlay!
! pklev - real, integrated planck func at lev temp nbands*0:nlay!
! fracs - real, planck fractions ngptlw*nlay!
! secdif - real, secant of diffusivity angle nbands!
! nlay - integer, number of vertical layers 1 !
! nlp1 - integer, number of vertical levels (interfaces) 1 !
! !
! outputs: !
! totuflux- real, total sky upward flux (w/m2) 0:nlay !
! totdflux- real, total sky downward flux (w/m2) 0:nlay !
! htr - real, total sky heating rate (k/sec or k/day) nlay !
! totuclfl- real, clear sky upward flux (w/m2) 0:nlay !
! totdclfl- real, clear sky downward flux (w/m2) 0:nlay !
! htrcl - real, clear sky heating rate (k/sec or k/day) nlay !
! htrb - real, spectral band lw heating rate (k/day) nlay*nbands!
! !
! module veriables: !
! ngb - integer, band index for each g-value ngptlw!
! fluxfac - real, conversion factor for fluxes (pi*2.e4) 1 !
! heatfac - real, conversion factor for heating rates (g/cp*1e-2) 1 !
! tblint - real, conversion factor for look-up tbl (float(ntbl) 1 !
! bpade - real, pade approx constant (1/0.278) 1 !
! wtdiff - real, weight for radiance to flux conversion 1 !
! ntbl - integer, dimension of look-up tables 1 !
! tau_tbl - real, clr-sky opt dep lookup table 0:ntbl !
! exp_tbl - real, transmittance lookup table 0:ntbl !
! tfn_tbl - real, tau transition function 0:ntbl !
! !
! local variables: !
! itgas - integer, index for gases contribution look-up table 1 !
! ittot - integer, index for gases plus clouds look-up table 1 !
! reflct - real, surface reflectance 1 !
! atrgas - real, gaseous absorptivity 1 !
! atrtot - real, gaseous and cloud absorptivity 1 !
! odcld - real, cloud optical depth 1 !
! efclrfr- real, effective clear sky fraction (1-efcldfr) nlay!
! odepth - real, optical depth of gaseous only 1 !
! odtot - real, optical depth of gas and cloud 1 !
! gasfac - real, gas-only pade factor, used for planck function 1 !
! totfac - real, gas and cloud pade factor, used for planck fn 1 !
! bbdgas - real, gas-only planck function for downward rt 1 !
! bbugas - real, gas-only planck function for upward rt 1 !
! bbdtot - real, gas and cloud planck function for downward rt 1 !
! bbutot - real, gas and cloud planck function for upward rt 1 !
! gassrcu- real, upwd source radiance due to gas nlay!
! totsrcu- real, upwd source radiance due to gas+cld nlay!
! gassrcd- real, dnwd source radiance due to gas 1 !
! totsrcd- real, dnwd source radiance due to gas+cld 1 !
! radtotu- real, spectrally summed total sky upwd radiance 1 !
! radclru- real, spectrally summed clear sky upwd radiance 1 !
! radtotd- real, spectrally summed total sky dnwd radiance 1 !
! radclrd- real, spectrally summed clear sky dnwd radiance 1 !
! toturad- real, total sky upward radiance by layer 0:nlay*nbands!
! clrurad- real, clear sky upward radiance by layer 0:nlay*nbands!
! totdrad- real, total sky downward radiance by layer 0:nlay*nbands!
! clrdrad- real, clear sky downward radiance by layer 0:nlay*nbands!
! fnet - real, net longwave flux (w/m2) 0:nlay !
! fnetc - real, clear sky net longwave flux (w/m2) 0:nlay !
! !
! !
! ******************************************************************* !
! original code description !
! !
! original version: e. j. mlawer, et al. rrtm_v3.0 !
! revision for gcms: michael j. iacono; october, 2002 !
! revision for f90: michael j. iacono; june, 2006 !
! !
! this program calculates the upward fluxes, downward fluxes, and !
! heating rates for an arbitrary clear or cloudy atmosphere. the input !
! to this program is the atmospheric profile, all Planck function !
! information, and the cloud fraction by layer. a variable diffusivity!
! angle (secdif) is used for the angle integration. bands 2-3 and 5-9 !
! use a value for secdif that varies from 1.50 to 1.80 as a function !
! of the column water vapor, and other bands use a value of 1.66. the !
! gaussian weight appropriate to this angle (wtdiff=0.5) is applied !
! here. note that use of the emissivity angle for the flux integration!
! can cause errors of 1 to 4 W/m2 within cloudy layers. !
! clouds are treated with the mcica stochastic approach and !
! maximum-random cloud overlap. !
! !
! ******************************************************************* !
! ====================== end of description block ================= !
! --- inputs:
integer, intent(in) :: nlay, nlp1
real (kind=kind_phys), dimension(nbands), intent(in) :: semiss, &
& secdif
real (kind=kind_phys), dimension(nlay), intent(in) :: delp
real (kind=kind_phys), dimension(nbands,nlay),intent(in):: taucld
real (kind=kind_phys), dimension(ngptlw,nlay),intent(in):: fracs, &
& tautot, cldfmc
real (kind=kind_phys), dimension(nbands,0:nlay), intent(in) :: &
& pklev, pklay
! --- outputs:
real (kind=kind_phys), dimension(nlay), intent(out) :: htr, htrcl
real (kind=kind_phys), dimension(nlay,nbands),intent(out) :: htrb
real (kind=kind_phys), dimension(0:nlay), intent(out) :: &
& totuflux, totdflux, totuclfl, totdclfl
! --- locals:
real (kind=kind_phys), parameter :: rec_6 = 0.166667
real (kind=kind_phys), dimension(0:nlay,nbands) :: clrurad, &
& clrdrad, toturad, totdrad
real (kind=kind_phys), dimension(nlay) :: gassrcu, totsrcu, &
& trngas, efclrfr, rfdelp
real (kind=kind_phys), dimension(0:nlay) :: fnet, fnetc
real (kind=kind_phys) :: totsrcd, gassrcd, tblind, odepth, odtot, &
& odcld, atrtot, atrgas, reflct, totfac, gasfac, flxfac, &
& plfrac, blay, bbdgas, bbdtot, bbugas, bbutot, dplnku, &
& dplnkd, radtotu, radclru, radtotd, radclrd, rad0, &
& clfm, trng, gasu
integer :: ittot, itgas, ib, ig, k
!
!===> ... begin here
!
do ib = 1, NBANDS
do k = 0, NLAY
toturad(k,ib) = f_zero
totdrad(k,ib) = f_zero
clrurad(k,ib) = f_zero
clrdrad(k,ib) = f_zero
enddo
enddo
do k = 0, nlay
totuflux(k) = f_zero
totdflux(k) = f_zero
totuclfl(k) = f_zero
totdclfl(k) = f_zero
enddo
! --- ... loop over all g-points
do ig = 1, ngptlw
ib = ngb(ig)
radtotd = f_zero
radclrd = f_zero
!> -# Downward radiative transfer loop.
!!\n - Clear sky, gases contribution
!!\n - Total sky, gases+clouds contribution
!!\n - Cloudy layer
!!\n - Total sky radiance
!!\n - Clear sky radiance
do k = nlay, 1, -1
! --- ... clear sky, gases contribution
odepth = max( f_zero, secdif(ib)*tautot(ig,k) )
if (odepth <= 0.06) then
atrgas = odepth - 0.5*odepth*odepth
trng = f_one - atrgas
gasfac = rec_6 * odepth
else
tblind = odepth / (bpade + odepth)
itgas = tblint*tblind + 0.5
trng = exp_tbl(itgas)
atrgas = f_one - trng
gasfac = tfn_tbl(itgas)
odepth = tau_tbl(itgas)
endif
plfrac = fracs(ig,k)
blay = pklay(ib,k)
dplnku = pklev(ib,k ) - blay
dplnkd = pklev(ib,k-1) - blay
bbdgas = plfrac * (blay + dplnkd*gasfac)
bbugas = plfrac * (blay + dplnku*gasfac)
gassrcd= bbdgas * atrgas
gassrcu(k)= bbugas * atrgas
trngas(k) = trng
! --- ... total sky, gases+clouds contribution
clfm = cldfmc(ig,k)
if (clfm >= eps) then
! --- ... cloudy layer
odcld = secdif(ib) * taucld(ib,k)
efclrfr(k) = f_one - (f_one - exp(-odcld))*clfm
odtot = odepth + odcld
if (odtot < 0.06) then
totfac = rec_6 * odtot
atrtot = odtot - 0.5*odtot*odtot
else
tblind = odtot / (bpade + odtot)
ittot = tblint*tblind + 0.5
totfac = tfn_tbl(ittot)
atrtot = f_one - exp_tbl(ittot)
endif
bbdtot = plfrac * (blay + dplnkd*totfac)
bbutot = plfrac * (blay + dplnku*totfac)
totsrcd= bbdtot * atrtot
totsrcu(k)= bbutot * atrtot
! --- ... total sky radiance
radtotd = radtotd*trng*efclrfr(k) + gassrcd &
& + clfm*(totsrcd - gassrcd)
totdrad(k-1,ib) = totdrad(k-1,ib) + radtotd
! --- ... clear sky radiance
radclrd = radclrd*trng + gassrcd
clrdrad(k-1,ib) = clrdrad(k-1,ib) + radclrd
else
! --- ... clear layer
! --- ... total sky radiance
radtotd = radtotd*trng + gassrcd
totdrad(k-1,ib) = totdrad(k-1,ib) + radtotd
! --- ... clear sky radiance
radclrd = radclrd*trng + gassrcd
clrdrad(k-1,ib) = clrdrad(k-1,ib) + radclrd
endif ! end if_clfm_block
enddo ! end do_k_loop
!> -# Compute spectral emissivity & reflectance, include the
!! contribution of spectrally varying longwave emissivity and
!! reflection from the surface to the upward radiative transfer.
! note: spectral and Lambertian reflection are identical for the
! diffusivity angle flux integration used here.
reflct = f_one - semiss(ib)
rad0 = semiss(ib) * fracs(ig,1) * pklay(ib,0)
!> -# Compute total sky radiance.
radtotu = rad0 + reflct*radtotd
toturad(0,ib) = toturad(0,ib) + radtotu
!> -# Compute clear sky radiance.
radclru = rad0 + reflct*radclrd
clrurad(0,ib) = clrurad(0,ib) + radclru
!> -# Upward radiative transfer loop.
!!\n - Compute total sky radiance
!!\n - Compute clear sky radiance
! toturad holds summed radiance for total sky stream
! clrurad holds summed radiance for clear sky stream
do k = 1, nlay
clfm = cldfmc(ig,k)
trng = trngas(k)
gasu = gassrcu(k)
if (clfm > eps) then
! --- ... cloudy layer
! --- ... total sky radiance
radtotu = radtotu*trng*efclrfr(k) + gasu &
& + clfm*(totsrcu(k) - gasu)
toturad(k,ib) = toturad(k,ib) + radtotu
! --- ... clear sky radiance
radclru = radclru*trng + gasu
clrurad(k,ib) = clrurad(k,ib) + radclru
else
! --- ... clear layer
! --- ... total sky radiance
radtotu = radtotu*trng + gasu
toturad(k,ib) = toturad(k,ib) + radtotu
! --- ... clear sky radiance
radclru = radclru*trng + gasu
clrurad(k,ib) = clrurad(k,ib) + radclru
endif ! end if_clfm_block
enddo ! end do_k_loop
enddo ! end do_ig_loop
!> -# Process longwave output from band for total and clear streams.
!! Calculate upward, downward, and net flux.
flxfac = wtdiff * fluxfac
do k = 0, nlay
do ib = 1, nbands
totuflux(k) = totuflux(k) + toturad(k,ib)
totdflux(k) = totdflux(k) + totdrad(k,ib)
totuclfl(k) = totuclfl(k) + clrurad(k,ib)
totdclfl(k) = totdclfl(k) + clrdrad(k,ib)
enddo
totuflux(k) = totuflux(k) * flxfac
totdflux(k) = totdflux(k) * flxfac
totuclfl(k) = totuclfl(k) * flxfac
totdclfl(k) = totdclfl(k) * flxfac
enddo
!> -# Calculate net fluxes and heating rates.
fnet(0) = totuflux(0) - totdflux(0)
do k = 1, nlay
rfdelp(k) = heatfac / delp(k)
fnet(k) = totuflux(k) - totdflux(k)
htr (k) = (fnet(k-1) - fnet(k)) * rfdelp(k)
enddo
!> -# Optional clear sky heating rates.
if ( lhlw0 ) then
fnetc(0) = totuclfl(0) - totdclfl(0)
do k = 1, nlay
fnetc(k) = totuclfl(k) - totdclfl(k)
htrcl(k) = (fnetc(k-1) - fnetc(k)) * rfdelp(k)
enddo
endif
!> -# Optional spectral band heating rates.
if ( lhlwb ) then
do ib = 1, nbands
fnet(0) = (toturad(0,ib) - totdrad(0,ib)) * flxfac
do k = 1, nlay
fnet(k) = (toturad(k,ib) - totdrad(k,ib)) * flxfac
htrb(k,ib) = (fnet(k-1) - fnet(k)) * rfdelp(k)
enddo
enddo
endif
! ..................................
end subroutine rtrnmc
! ----------------------------------
!> @}
!>\ingroup module_radlw_main
!>\brief This subroutine contains optical depths developed for the rapid
!! radiative transfer model.
!!
!! It contains the subroutines \a taugbn (where n goes from
!! 1 to 16). \a taugbn calculates the optical depths and planck fractions
!! per g-value and layer for band n.
!!\param laytrop tropopause layer index (unitless) layer at
!! which switch is made for key species
!!\param pavel layer pressures (mb)
!!\param coldry column amount for dry air \f$(mol/cm^2)\f$
!!\param colamt column amounts of h2o, co2, o3, n2o, ch4,o2,
!! co \f$(mol/cm^2)\f$
!!\param colbrd column amount of broadening gases
!!\param wx cross-section amounts \f$(mol/cm^2)\f$
!!\param tauaer aerosol optical depth
!!\param rfrate reference ratios of binary species parameter
!!\n (:,m,:)m=1-h2o/co2,2-h2o/o3,3-h2o/n2o,4-h2o/ch4,
!! 5-n2o/co2,6-o3/co2
!!\n (:,:,n)n=1,2: the rates of ref press at the 2
!! sides of the layer
!!\param fac00,fac01,fac10,fac11 factors multiply the reference ks, i,j of 0/1
!! for lower/higher of the 2 appropriate
!! temperatures and altitudes
!!\param jp index of lower reference pressure
!!\param jt, jt1 indices of lower reference temperatures for
!! pressure levels jp and jp+1, respectively
!!\param selffac scale factor for water vapor self-continuum
!! equals (water vapor density)/(atmospheric
!! density at 296k and 1013 mb)
!!\param selffrac factor for temperature interpolation of
!! reference water vapor self-continuum data
!!\param indself index of lower reference temperature for the
!! self-continuum interpolation
!!\param forfac scale factor for w. v. foreign-continuum
!!\param forfrac factor for temperature interpolation of
!! reference w.v. foreign-continuum data
!!\param indfor index of lower reference temperature for the
!! foreign-continuum interpolation
!!\param minorfrac factor for minor gases
!!\param scaleminor,scaleminorn2 scale factors for minor gases
!!\param indminor index of lower reference temperature for
!! minor gases
!!\param nlay total number of layers
!!\param fracs planck fractions
!!\param tautot total optical depth (gas+aerosols)
!>\section taumol_gen taumol General Algorithm
!! @{
!! subprograms called: taugb## (## = 01 -16)
subroutine taumol &
& ( laytrop,pavel,coldry,colamt,colbrd,wx,tauaer, & ! --- inputs
& rfrate,fac00,fac01,fac10,fac11,jp,jt,jt1, &
& selffac,selffrac,indself,forfac,forfrac,indfor, &
& minorfrac,scaleminor,scaleminorn2,indminor, &
& nlay, &
& fracs, tautot & ! --- outputs
& )
! ************ original subprogram description *************** !
! !
! optical depths developed for the !
! !
! rapid radiative transfer model (rrtm) !
! !
! atmospheric and environmental research, inc. !
! 131 hartwell avenue !
! lexington, ma 02421 !
! !
! eli j. mlawer !
! jennifer delamere !
! steven j. taubman !
! shepard a. clough !
! !
! email: mlawer@aer.com !
! email: jdelamer@aer.com !
! !
! the authors wish to acknowledge the contributions of the !
! following people: karen cady-pereira, patrick d. brown, !
! michael j. iacono, ronald e. farren, luke chen, !
! robert bergstrom. !
! !
! revision for g-point reduction: michael j. iacono; aer, inc. !
! !
! taumol !
! !
! this file contains the subroutines taugbn (where n goes from !
! 1 to 16). taugbn calculates the optical depths and planck !
! fractions per g-value and layer for band n. !
! !
! ******************************************************************* !
! ================== program usage description ================== !
! !
! call taumol !
! inputs: !
! ( laytrop,pavel,coldry,colamt,colbrd,wx,tauaer, !
! rfrate,fac00,fac01,fac10,fac11,jp,jt,jt1, !
! selffac,selffrac,indself,forfac,forfrac,indfor, !
! minorfrac,scaleminor,scaleminorn2,indminor, !
! nlay, !
! outputs: !
! fracs, tautot ) !
! !
! subprograms called: taugb## (## = 01 -16) !
! !
! !
! ====================  definition of variables  ==================== !
! !
! inputs: size !
! laytrop - integer, tropopause layer index (unitless) 1 !
! layer at which switch is made for key species !
! pavel - real, layer pressures (mb) nlay !
! coldry - real, column amount for dry air (mol/cm2) nlay !
! colamt - real, column amounts of h2o, co2, o3, n2o, ch4, !
! o2, co (mol/cm**2) nlay*maxgas!
! colbrd - real, column amount of broadening gases nlay !
! wx - real, cross-section amounts(mol/cm2) nlay*maxxsec!
! tauaer - real, aerosol optical depth nbands*nlay !
! rfrate - real, reference ratios of binary species parameter !
! (:,m,:)m=1-h2o/co2,2-h2o/o3,3-h2o/n2o,4-h2o/ch4,5-n2o/co2,6-o3/co2!
! (:,:,n)n=1,2: the rates of ref press at the 2 sides of the layer !
! nlay*nrates*2!
! facij - real, factors multiply the reference ks, i,j of 0/1 !
! for lower/higher of the 2 appropriate temperatures !
! and altitudes nlay !
! jp - real, index of lower reference pressure nlay !
! jt, jt1 - real, indices of lower reference temperatures nlay !
! for pressure levels jp and jp+1, respectively !
! selffac - real, scale factor for water vapor self-continuum !
! equals (water vapor density)/(atmospheric density !
! at 296k and 1013 mb) nlay !
! selffrac - real, factor for temperature interpolation of !
! reference water vapor self-continuum data nlay !
! indself - integer, index of lower reference temperature for !
! the self-continuum interpolation nlay !
! forfac - real, scale factor for w. v. foreign-continuum nlay !
! forfrac - real, factor for temperature interpolation of !
! reference w.v. foreign-continuum data nlay !
! indfor - integer, index of lower reference temperature for !
! the foreign-continuum interpolation nlay !
! minorfrac - real, factor for minor gases nlay !
! scaleminor,scaleminorn2 !
! - real, scale factors for minor gases nlay !
! indminor - integer, index of lower reference temperature for !
! minor gases nlay !
! nlay - integer, total number of layers 1 !
! !
! outputs: !
! fracs - real, planck fractions ngptlw,nlay!
! tautot - real, total optical depth (gas+aerosols) ngptlw,nlay!
! !
! internal variables: !
! ng## - integer, number of g-values in band ## (##=01-16) 1 !
! nspa - integer, for lower atmosphere, the number of ref !
! atmos, each has different relative amounts of the !
! key species for the band nbands!
! nspb - integer, same but for upper atmosphere nbands!
! absa - real, k-values for lower ref atmospheres (no w.v. !
! self-continuum) (cm**2/molecule) nspa(##)*5*13*ng##!
! absb - real, k-values for high ref atmospheres (all sources) !
! (cm**2/molecule) nspb(##)*5*13:59*ng##!
! ka_m'mgas'- real, k-values for low ref atmospheres minor species !
! (cm**2/molecule) mmn##*ng##!
! kb_m'mgas'- real, k-values for high ref atmospheres minor species !
! (cm**2/molecule) mmn##*ng##!
! selfref - real, k-values for w.v. self-continuum for ref atmos !
! used below laytrop (cm**2/mol) 10*ng##!
! forref - real, k-values for w.v. foreign-continuum for ref atmos
! used below/above laytrop (cm**2/mol) 4*ng##!
! !
! ****************************************************************** !
! --- inputs:
integer, intent(in) :: nlay, laytrop
integer, dimension(nlay), intent(in) :: jp, jt, jt1, indself, &
& indfor, indminor
real (kind=kind_phys), dimension(nlay), intent(in) :: pavel, &
& coldry, colbrd, fac00, fac01, fac10, fac11, selffac, &
& selffrac, forfac, forfrac, minorfrac, scaleminor, &
& scaleminorn2
real (kind=kind_phys), dimension(nlay,maxgas), intent(in):: colamt
real (kind=kind_phys), dimension(nlay,maxxsec),intent(in):: wx
real (kind=kind_phys), dimension(nbands,nlay), intent(in):: tauaer
real (kind=kind_phys), dimension(nlay,nrates,2), intent(in) :: &
& rfrate
! --- outputs:
real (kind=kind_phys), dimension(ngptlw,nlay), intent(out) :: &
& fracs, tautot
! --- locals
real (kind=kind_phys), dimension(ngptlw,nlay) :: taug
integer :: ib, ig, k
!
!===> ... begin here
!
call taugb01
call taugb02
call taugb03
call taugb04
call taugb05
call taugb06
call taugb07
call taugb08
call taugb09
call taugb10
call taugb11
call taugb12
call taugb13
call taugb14
call taugb15
call taugb16
! --- combine gaseous and aerosol optical depths
do ig = 1, ngptlw
ib = ngb(ig)
do k = 1, nlay
tautot(ig,k) = taug(ig,k) + tauaer(ib,k)
enddo
enddo
! =================
contains
! =================
!>\ingroup module_radlw_main
!> band 1: 10-350 cm-1 (low key - h2o; low minor - n2);
!! (high key - h2o; high minor - n2)
! ----------------------------------
      subroutine taugb01
! ..................................
! ------------------------------------------------------------------ !
! written by eli j. mlawer, atmospheric & environmental research. !
! revised by michael j. iacono, atmospheric & environmental research. !
! !
! band 1: 10-350 cm-1 (low key - h2o; low minor - n2) !
! (high key - h2o; high minor - n2) !
! !
! compute the optical depth by interpolating in ln(pressure) and !
! temperature. below laytrop, the water vapor self-continuum and !
! foreign continuum is interpolated (in temperature) separately. !
! ------------------------------------------------------------------ !
      use module_radlw_kgb01
! --- locals:
      integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
     &       indm, indmp, ig
      real (kind=kind_phys) :: pp, corradj, scalen2, tauself, taufor, &
     &       taun2
!
!===> ... begin here
!
! note: the layer inputs (jp, jt, jt1, pavel, colamt, colbrd, the
!       continuum/minor-gas factors) and the outputs taug and fracs are
!       host-associated from the parent subroutine taumol; the k-value
!       tables (absa, absb, selfref, forref, ka_mn2, kb_mn2, fracrefa,
!       fracrefb) come from module_radlw_kgb01.
! --- minor gas mapping levels:
! lower - n2, p = 142.5490 mbar, t = 215.70 k
! upper - n2, p = 142.5490 mbar, t = 215.70 k
! --- ... lower atmosphere loop
      do k = 1, laytrop
! --- ... ind0/ind1 index the reference k-value table at the bracketing
!         pressure/temperature grid points; inds/indf/indm index the
!         self-continuum, foreign-continuum and minor-gas temperature
!         tables; the *p variants are the next (upper) table entries
        ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(1) + 1
        ind1 = ( jp(k) *5 + (jt1(k)-1)) * nspa(1) + 1
        inds = indself(k)
        indf = indfor(k)
        indm = indminor(k)
        ind0p = ind0 + 1
        ind1p = ind1 + 1
        indsp = inds + 1
        indfp = indf + 1
        indmp = indm + 1
        pp = pavel(k)
        scalen2 = colbrd(k) * scaleminorn2(k)
! --- ... pressure-dependent correction factor, reduced below 250 mb
        if (pp < 250.0) then
          corradj = f_one - 0.15 * (250.0-pp) / 154.4
        else
          corradj = f_one
        endif
        do ig = 1, ng01
          tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          taun2 = scalen2 * (ka_mn2(ig,indm) + minorfrac(k) &
     &            * (ka_mn2(ig,indmp) - ka_mn2(ig,indm)))
! --- ... gaseous optical depth: bilinear p/T interpolation of the h2o
!         k-values plus self/foreign continuum and the n2 minor gas
          taug(ig,k) = corradj * (colamt(k,1) &
     &       * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
     &       + fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p)) &
     &       + tauself + taufor + taun2)
          fracs(ig,k) = fracrefa(ig)
        enddo
      enddo
! --- ... upper atmosphere loop (no water-vapor self-continuum term here)
      do k = laytrop+1, nlay
        ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(1) + 1
        ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(1) + 1
        indf = indfor(k)
        indm = indminor(k)
        ind0p = ind0 + 1
        ind1p = ind1 + 1
        indfp = indf + 1
        indmp = indm + 1
        scalen2 = colbrd(k) * scaleminorn2(k)
! --- ... pressure-dependent correction factor for the upper atmosphere
        corradj = f_one - 0.15 * (pavel(k) / 95.6)
        do ig = 1, ng01
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          taun2 = scalen2 * (kb_mn2(ig,indm) + minorfrac(k) &
     &            * (kb_mn2(ig,indmp) - kb_mn2(ig,indm)))
          taug(ig,k) = corradj * (colamt(k,1) &
     &       * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
     &       + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
     &       + taufor + taun2)
          fracs(ig,k) = fracrefb(ig)
        enddo
      enddo
! ..................................
      end subroutine taugb01
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 2: 350-500 cm-1 (low key - h2o; high key - h2o)
! ----------------------------------
      subroutine taugb02
! ..................................
! ------------------------------------------------------------------ !
! band 2: 350-500 cm-1 (low key - h2o; high key - h2o) !
! ------------------------------------------------------------------ !
      use module_radlw_kgb02
! --- locals:
      integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
     &       ig
      real (kind=kind_phys) :: corradj, tauself, taufor
!
!===> ... begin here
!
! note: layer inputs and the outputs taug/fracs are host-associated
!       from the parent subroutine taumol; ns02 offsets this band's
!       g-points within the full ngptlw range.
! --- ... lower atmosphere loop
      do k = 1, laytrop
! --- ... ind0/ind1 index the reference k-value table at the bracketing
!         pressure/temperature grid points; inds/indf index the self-
!         and foreign-continuum temperature tables
        ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(2) + 1
        ind1 = ( jp(k) *5 + (jt1(k)-1)) * nspa(2) + 1
        inds = indself(k)
        indf = indfor(k)
        ind0p = ind0 + 1
        ind1p = ind1 + 1
        indsp = inds + 1
        indfp = indf + 1
! --- ... pressure-dependent correction factor (lower atmosphere only;
!         the upper-atmosphere loop below applies no correction)
        corradj = f_one - 0.05 * (pavel(k) - 100.0) / 900.0
        do ig = 1, ng02
          tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          taug(ns02+ig,k) = corradj * (colamt(k,1) &
     &       * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
     &       + fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p)) &
     &       + tauself + taufor)
          fracs(ns02+ig,k) = fracrefa(ig)
        enddo
      enddo
! --- ... upper atmosphere loop (no self-continuum, no correction factor)
      do k = laytrop+1, nlay
        ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(2) + 1
        ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(2) + 1
        indf = indfor(k)
        ind0p = ind0 + 1
        ind1p = ind1 + 1
        indfp = indf + 1
        do ig = 1, ng02
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          taug(ns02+ig,k) = colamt(k,1) &
     &       * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
     &       + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
     &       + taufor
          fracs(ns02+ig,k) = fracrefb(ig)
        enddo
      enddo
! ..................................
      end subroutine taugb02
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 3: 500-630 cm-1 (low key - h2o,co2; low minor - n2o);
!! (high key - h2o,co2; high minor - n2o)
! ----------------------------------
      subroutine taugb03
! ..................................
! ------------------------------------------------------------------ !
! band 3: 500-630 cm-1 (low key - h2o,co2; low minor - n2o) !
! (high key - h2o,co2; high minor - n2o) !
! ------------------------------------------------------------------ !
      use module_radlw_kgb03
! --- locals:
      integer :: k, ind0, ind1, inds, indsp, indf, indfp, indm, indmp, &
     &       id000, id010, id100, id110, id200, id210, jmn2o, jmn2op, &
     &       id001, id011, id101, id111, id201, id211, jpl, jplp, &
     &       ig, js, js1
      real (kind=kind_phys) :: absn2o, ratn2o, adjfac, adjcoln2o, &
     &       speccomb, specparm, specmult, fs, &
     &       speccomb1, specparm1, specmult1, fs1, &
     &       speccomb_mn2o, specparm_mn2o, specmult_mn2o, fmn2o, &
     &       speccomb_planck,specparm_planck,specmult_planck,fpl, &
     &       refrat_planck_a, refrat_planck_b, refrat_m_a, refrat_m_b, &
     &       fac000, fac100, fac200, fac010, fac110, fac210, &
     &       fac001, fac101, fac201, fac011, fac111, fac211, &
     &       tau_major, tau_major1, tauself, taufor, n2om1, n2om2, &
     &       p, p4, fk0, fk1, fk2
!
!===> ... begin here
!
! note: layer inputs and the outputs taug/fracs are host-associated
!       from the parent subroutine taumol.
! --- ... minor gas mapping levels:
! lower - n2o, p = 706.272 mbar, t = 278.94 k
! upper - n2o, p = 95.58 mbar, t = 215.7 k
! --- ... reference h2o/co2 ratios used in the Planck-fraction and
!         minor-gas interpolations (chi_mls rows 1,2,4 = h2o, co2, n2o)
      refrat_planck_a = chi_mls(1,9)/chi_mls(2,9) ! P = 212.725 mb
      refrat_planck_b = chi_mls(1,13)/chi_mls(2,13) ! P = 95.58 mb
      refrat_m_a = chi_mls(1,3)/chi_mls(2,3) ! P = 706.270 mb
      refrat_m_b = chi_mls(1,13)/chi_mls(2,13) ! P = 95.58 mb
! --- ... lower atmosphere loop
      do k = 1, laytrop
! --- ... binary-species parameter (h2o vs h2o+co2) and its 9-point
!         mapping: js/fs are the table column and interpolation fraction
        speccomb = colamt(k,1) + rfrate(k,1,1)*colamt(k,2)
        specparm = colamt(k,1) / speccomb
        specmult = 8.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(3) + js
        speccomb1 = colamt(k,1) + rfrate(k,1,2)*colamt(k,2)
        specparm1 = colamt(k,1) / speccomb1
        specmult1 = 8.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(3) + js1
        speccomb_mn2o = colamt(k,1) + refrat_m_a*colamt(k,2)
        specparm_mn2o = colamt(k,1) / speccomb_mn2o
        specmult_mn2o = 8.0 * min(specparm_mn2o, oneminus)
        jmn2o = 1 + int(specmult_mn2o)
        fmn2o = mod(specmult_mn2o, f_one)
        speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,2)
        specparm_planck = colamt(k,1) / speccomb_planck
        specmult_planck = 8.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)
        inds = indself(k)
        indf = indfor(k)
        indm = indminor(k)
        indsp = inds + 1
        indfp = indf + 1
        indmp = indm + 1
        jmn2op= jmn2o+ 1
        jplp = jpl + 1
! --- ... in atmospheres where the amount of n2o is too great to be considered
! a minor species, adjust the column amount of n2o by an empirical factor
! to obtain the proper contribution.
        p = coldry(k) * chi_mls(4,jp(k)+1)
        ratn2o = colamt(k,4) / p
        if (ratn2o > 1.5) then
          adjfac = 0.5 + (ratn2o - 0.5)**0.65
          adjcoln2o = adjfac * p
        else
          adjcoln2o = colamt(k,4)
        endif
! --- ... choose the interpolation stencil and weights for the first
!         (lower-temperature) table index: cubic-like end corrections
!         when specparm is near 0 or 1, plain linear in between
        if (specparm < 0.125) then
          p = fs - f_one
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0 + 2
          id210 = ind0 +11
        else if (specparm > 0.875) then
          p = -fs
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id000 = ind0 + 1
          id010 = ind0 +10
          id100 = ind0
          id110 = ind0 + 9
          id200 = ind0 - 1
          id210 = ind0 + 8
        else
          fk0 = f_one - fs
          fk1 = fs
          fk2 = f_zero
          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0
          id210 = ind0
        endif
        fac000 = fk0*fac00(k)
        fac100 = fk1*fac00(k)
        fac200 = fk2*fac00(k)
        fac010 = fk0*fac10(k)
        fac110 = fk1*fac10(k)
        fac210 = fk2*fac10(k)
! --- ... same stencil selection for the second (upper-temperature) index
        if (specparm1 < 0.125) then
          p = fs1 - f_one
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1 + 2
          id211 = ind1 +11
        elseif (specparm1 > 0.875) then
          p = -fs1
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id001 = ind1 + 1
          id011 = ind1 +10
          id101 = ind1
          id111 = ind1 + 9
          id201 = ind1 - 1
          id211 = ind1 + 8
        else
          fk0 = f_one - fs1
          fk1 = fs1
          fk2 = f_zero
          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1
          id211 = ind1
        endif
        fac001 = fk0*fac01(k)
        fac101 = fk1*fac01(k)
        fac201 = fk2*fac01(k)
        fac011 = fk0*fac11(k)
        fac111 = fk1*fac11(k)
        fac211 = fk2*fac11(k)
        do ig = 1, ng03
          tauself = selffac(k)* (selfref(ig,inds) + selffrac(k) &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          n2om1 = ka_mn2o(ig,jmn2o,indm) + fmn2o &
     &            * (ka_mn2o(ig,jmn2op,indm) - ka_mn2o(ig,jmn2o,indm))
          n2om2 = ka_mn2o(ig,jmn2o,indmp) + fmn2o &
     &            * (ka_mn2o(ig,jmn2op,indmp) - ka_mn2o(ig,jmn2o,indmp))
          absn2o = n2om1 + minorfrac(k) * (n2om2 - n2om1)
          tau_major = speccomb &
     &       * (fac000*absa(ig,id000) + fac010*absa(ig,id010) &
     &       + fac100*absa(ig,id100) + fac110*absa(ig,id110) &
     &       + fac200*absa(ig,id200) + fac210*absa(ig,id210))
          tau_major1 = speccomb1 &
     &       * (fac001*absa(ig,id001) + fac011*absa(ig,id011) &
     &       + fac101*absa(ig,id101) + fac111*absa(ig,id111) &
     &       + fac201*absa(ig,id201) + fac211*absa(ig,id211))
          taug(ns03+ig,k) = tau_major + tau_major1 &
     &       + tauself + taufor + adjcoln2o*absn2o
          fracs(ns03+ig,k) = fracrefa(ig,jpl) + fpl &
     &       * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
        enddo ! end do_ig_loop
      enddo ! end do_k_loop
! --- ... upper atmosphere loop
      do k = laytrop+1, nlay
        speccomb = colamt(k,1) + rfrate(k,1,1)*colamt(k,2)
        specparm = colamt(k,1) / speccomb
        specmult = 4.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-13)*5 + (jt(k)-1)) * nspb(3) + js
        speccomb1 = colamt(k,1) + rfrate(k,1,2)*colamt(k,2)
        specparm1 = colamt(k,1) / speccomb1
        specmult1 = 4.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(3) + js1
        speccomb_mn2o = colamt(k,1) + refrat_m_b*colamt(k,2)
        specparm_mn2o = colamt(k,1) / speccomb_mn2o
        specmult_mn2o = 4.0 * min(specparm_mn2o, oneminus)
        jmn2o = 1 + int(specmult_mn2o)
        fmn2o = mod(specmult_mn2o, f_one)
        speccomb_planck = colamt(k,1) + refrat_planck_b*colamt(k,2)
        specparm_planck = colamt(k,1) / speccomb_planck
        specmult_planck = 4.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)
        indf = indfor(k)
        indm = indminor(k)
        indfp = indf + 1
        indmp = indm + 1
        jmn2op= jmn2o+ 1
        jplp = jpl + 1
! --- ... upper atmosphere uses a 5-column table and plain linear
!         interpolation (no end-correction branches)
        id000 = ind0
        id010 = ind0 + 5
        id100 = ind0 + 1
        id110 = ind0 + 6
        id001 = ind1
        id011 = ind1 + 5
        id101 = ind1 + 1
        id111 = ind1 + 6
! --- ... in atmospheres where the amount of n2o is too great to be considered
! a minor species, adjust the column amount of n2o by an empirical factor
! to obtain the proper contribution.
        p = coldry(k) * chi_mls(4,jp(k)+1)
        ratn2o = colamt(k,4) / p
        if (ratn2o > 1.5) then
          adjfac = 0.5 + (ratn2o - 0.5)**0.65
          adjcoln2o = adjfac * p
        else
          adjcoln2o = colamt(k,4)
        endif
        fk0 = f_one - fs
        fk1 = fs
        fac000 = fk0*fac00(k)
        fac010 = fk0*fac10(k)
        fac100 = fk1*fac00(k)
        fac110 = fk1*fac10(k)
        fk0 = f_one - fs1
        fk1 = fs1
        fac001 = fk0*fac01(k)
        fac011 = fk0*fac11(k)
        fac101 = fk1*fac01(k)
        fac111 = fk1*fac11(k)
        do ig = 1, ng03
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          n2om1 = kb_mn2o(ig,jmn2o,indm) + fmn2o &
     &            * (kb_mn2o(ig,jmn2op,indm) - kb_mn2o(ig,jmn2o,indm))
          n2om2 = kb_mn2o(ig,jmn2o,indmp) + fmn2o &
     &            * (kb_mn2o(ig,jmn2op,indmp) - kb_mn2o(ig,jmn2o,indmp))
          absn2o = n2om1 + minorfrac(k) * (n2om2 - n2om1)
          tau_major = speccomb &
     &       * (fac000*absb(ig,id000) + fac010*absb(ig,id010) &
     &       + fac100*absb(ig,id100) + fac110*absb(ig,id110))
          tau_major1 = speccomb1 &
     &       * (fac001*absb(ig,id001) + fac011*absb(ig,id011) &
     &       + fac101*absb(ig,id101) + fac111*absb(ig,id111))
          taug(ns03+ig,k) = tau_major + tau_major1 &
     &       + taufor + adjcoln2o*absn2o
          fracs(ns03+ig,k) = fracrefb(ig,jpl) + fpl &
     &       * (fracrefb(ig,jplp) - fracrefb(ig,jpl))
        enddo
      enddo
! ..................................
      end subroutine taugb03
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 4: 630-700 cm-1 (low key - h2o,co2; high key - o3,co2)
! ----------------------------------
      subroutine taugb04
! ..................................
! ------------------------------------------------------------------ !
! band 4: 630-700 cm-1 (low key - h2o,co2; high key - o3,co2) !
! ------------------------------------------------------------------ !
      use module_radlw_kgb04
! --- locals:
      integer :: k, ind0, ind1, inds, indsp, indf, indfp, jpl, jplp, &
     &       id000, id010, id100, id110, id200, id210, ig, js, js1, &
     &       id001, id011, id101, id111, id201, id211
      real (kind=kind_phys) :: tauself, taufor, p, p4, fk0, fk1, fk2, &
     &       speccomb, specparm, specmult, fs, &
     &       speccomb1, specparm1, specmult1, fs1, &
     &       speccomb_planck,specparm_planck,specmult_planck,fpl, &
     &       fac000, fac100, fac200, fac010, fac110, fac210, &
     &       fac001, fac101, fac201, fac011, fac111, fac211, &
     &       refrat_planck_a, refrat_planck_b, tau_major, tau_major1
!
!===> ... begin here
!
! note: layer inputs and the outputs taug/fracs are host-associated
!       from the parent subroutine taumol.  no minor-gas contribution
!       in this band; key species are h2o/co2 below laytrop and o3/co2
!       above.
      refrat_planck_a = chi_mls(1,11)/chi_mls(2,11) ! P = 142.5940 mb
      refrat_planck_b = chi_mls(3,13)/chi_mls(2,13) ! P = 95.58350 mb
! --- ... lower atmosphere loop
      do k = 1, laytrop
! --- ... binary-species parameter (h2o vs h2o+co2) and its 9-point
!         mapping: js/fs are the table column and interpolation fraction
        speccomb = colamt(k,1) + rfrate(k,1,1)*colamt(k,2)
        specparm = colamt(k,1) / speccomb
        specmult = 8.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(4) + js
        speccomb1 = colamt(k,1) + rfrate(k,1,2)*colamt(k,2)
        specparm1 = colamt(k,1) / speccomb1
        specmult1 = 8.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = ( jp(k)*5 + (jt1(k)-1)) * nspa(4) + js1
        speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,2)
        specparm_planck = colamt(k,1) / speccomb_planck
        specmult_planck = 8.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
! NOTE(review): literal 1.0 here where sibling bands use f_one -- same
!               value numerically, cosmetic inconsistency only.
        fpl = mod(specmult_planck, 1.0)
        inds = indself(k)
        indf = indfor(k)
        indsp = inds + 1
        indfp = indf + 1
        jplp = jpl + 1
! --- ... choose the interpolation stencil and weights for the first
!         (lower-temperature) table index: end corrections when specparm
!         is near 0 or 1, plain linear in between
        if (specparm < 0.125) then
          p = fs - f_one
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0 + 2
          id210 = ind0 +11
        elseif (specparm > 0.875) then
          p = -fs
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id000 = ind0 + 1
          id010 = ind0 +10
          id100 = ind0
          id110 = ind0 + 9
          id200 = ind0 - 1
          id210 = ind0 + 8
        else
          fk0 = f_one - fs
          fk1 = fs
          fk2 = f_zero
          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0
          id210 = ind0
        endif
        fac000 = fk0*fac00(k)
        fac100 = fk1*fac00(k)
        fac200 = fk2*fac00(k)
        fac010 = fk0*fac10(k)
        fac110 = fk1*fac10(k)
        fac210 = fk2*fac10(k)
! --- ... same stencil selection for the second (upper-temperature) index
        if (specparm1 < 0.125) then
          p = fs1 - f_one
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1 + 2
          id211 = ind1 +11
        elseif (specparm1 > 0.875) then
          p = -fs1
          p4 = p**4
          fk0 = p4
          fk1 = f_one - p - 2.0*p4
          fk2 = p + p4
          id001 = ind1 + 1
          id011 = ind1 +10
          id101 = ind1
          id111 = ind1 + 9
          id201 = ind1 - 1
          id211 = ind1 + 8
        else
          fk0 = f_one - fs1
          fk1 = fs1
          fk2 = f_zero
          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1
          id211 = ind1
        endif
        fac001 = fk0*fac01(k)
        fac101 = fk1*fac01(k)
        fac201 = fk2*fac01(k)
        fac011 = fk0*fac11(k)
        fac111 = fk1*fac11(k)
        fac211 = fk2*fac11(k)
        do ig = 1, ng04
          tauself = selffac(k)* (selfref(ig,inds) + selffrac(k) &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          tau_major = speccomb &
     &       * (fac000*absa(ig,id000) + fac010*absa(ig,id010) &
     &       + fac100*absa(ig,id100) + fac110*absa(ig,id110) &
     &       + fac200*absa(ig,id200) + fac210*absa(ig,id210))
          tau_major1 = speccomb1 &
     &       * (fac001*absa(ig,id001) + fac011*absa(ig,id011) &
     &       + fac101*absa(ig,id101) + fac111*absa(ig,id111) &
     &       + fac201*absa(ig,id201) + fac211*absa(ig,id211))
          taug(ns04+ig,k) = tau_major + tau_major1 + tauself + taufor
          fracs(ns04+ig,k) = fracrefa(ig,jpl) + fpl &
     &       * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
        enddo ! end do_ig_loop
      enddo ! end do_k_loop
! --- ... upper atmosphere loop (key species o3/co2; no continuum terms)
      do k = laytrop+1, nlay
        speccomb = colamt(k,3) + rfrate(k,6,1)*colamt(k,2)
        specparm = colamt(k,3) / speccomb
        specmult = 4.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-13)*5 + (jt(k)-1)) * nspb(4) + js
        speccomb1 = colamt(k,3) + rfrate(k,6,2)*colamt(k,2)
        specparm1 = colamt(k,3) / speccomb1
        specmult1 = 4.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(4) + js1
        speccomb_planck = colamt(k,3) + refrat_planck_b*colamt(k,2)
        specparm_planck = colamt(k,3) / speccomb_planck
        specmult_planck = 4.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)
        jplp = jpl + 1
! --- ... upper atmosphere uses a 5-column table and plain linear
!         interpolation (no end-correction branches)
        id000 = ind0
        id010 = ind0 + 5
        id100 = ind0 + 1
        id110 = ind0 + 6
        id001 = ind1
        id011 = ind1 + 5
        id101 = ind1 + 1
        id111 = ind1 + 6
        fk0 = f_one - fs
        fk1 = fs
        fac000 = fk0*fac00(k)
        fac010 = fk0*fac10(k)
        fac100 = fk1*fac00(k)
        fac110 = fk1*fac10(k)
        fk0 = f_one - fs1
        fk1 = fs1
        fac001 = fk0*fac01(k)
        fac011 = fk0*fac11(k)
        fac101 = fk1*fac01(k)
        fac111 = fk1*fac11(k)
        do ig = 1, ng04
          tau_major = speccomb &
     &       * (fac000*absb(ig,id000) + fac010*absb(ig,id010) &
     &       + fac100*absb(ig,id100) + fac110*absb(ig,id110))
          tau_major1 = speccomb1 &
     &       * (fac001*absb(ig,id001) + fac011*absb(ig,id011) &
     &       + fac101*absb(ig,id101) + fac111*absb(ig,id111))
          taug(ns04+ig,k) = tau_major + tau_major1
          fracs(ns04+ig,k) = fracrefb(ig,jpl) + fpl &
     &       * (fracrefb(ig,jplp) - fracrefb(ig,jpl))
        enddo
! --- ... empirical modification to code to improve stratospheric cooling rates
! for co2. revised to apply weighting for g-point reduction in this band.
        taug(ns04+ 8,k) = taug(ns04+ 8,k) * 0.92
        taug(ns04+ 9,k) = taug(ns04+ 9,k) * 0.88
        taug(ns04+10,k) = taug(ns04+10,k) * 1.07
        taug(ns04+11,k) = taug(ns04+11,k) * 1.1
        taug(ns04+12,k) = taug(ns04+12,k) * 0.99
        taug(ns04+13,k) = taug(ns04+13,k) * 0.88
        taug(ns04+14,k) = taug(ns04+14,k) * 0.943
      enddo
! ..................................
      end subroutine taugb04
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 5: 700-820 cm-1 (low key - h2o,co2; low minor - o3, ccl4)
!! (high key - o3,co2)
! ----------------------------------
subroutine taugb05
! ..................................
! ------------------------------------------------------------------ !
! band 5: 700-820 cm-1 (low key - h2o,co2; low minor - o3, ccl4) !
! (high key - o3,co2) !
! ------------------------------------------------------------------ !
!
! Computes optical depth taug(ns05+1:ns05+ng05,1:nlay) and Planck
! fraction fracs(ns05+1:ns05+ng05,1:nlay) for band 5 by interpolating
! the absorption tables of module_radlw_kgb05 (absa, absb, selfref,
! forref, ka_mo3, ccl4, fracrefa, fracrefb).
! All other quantities (colamt, jp, jt, jt1, fac00..fac11, selffac,
! forfac, minorfrac, index arrays, laytrop, nlay, rfrate, wx, chi_mls)
! are host-associated inputs; only taug and fracs are written.
use module_radlw_kgb05
! --- locals:
integer :: k, ind0, ind1, inds, indsp, indf, indfp, indm, indmp, &
& id000, id010, id100, id110, id200, id210, jmo3, jmo3p, &
& id001, id011, id101, id111, id201, id211, jpl, jplp, &
& ig, js, js1
real (kind=kind_phys) :: tauself, taufor, o3m1, o3m2, abso3, &
& speccomb, specparm, specmult, fs, &
& speccomb1, specparm1, specmult1, fs1, &
& speccomb_mo3, specparm_mo3, specmult_mo3, fmo3, &
& speccomb_planck,specparm_planck,specmult_planck,fpl, &
& refrat_planck_a, refrat_planck_b, refrat_m_a, &
& fac000, fac100, fac200, fac010, fac110, fac210, &
& fac001, fac101, fac201, fac011, fac111, fac211, &
& p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21
!
!===> ... begin here
!
! --- ... minor gas mapping level :
! lower - o3, p = 317.34 mbar, t = 240.77 k
! lower - ccl4
! --- ... calculate reference ratio to be used in calculation of Planck
! fraction in lower/upper atmosphere.
refrat_planck_a = chi_mls(1,5)/chi_mls(2,5) ! P = 473.420 mb
refrat_planck_b = chi_mls(3,43)/chi_mls(2,43) ! P = 0.2369 mb
refrat_m_a = chi_mls(1,7)/chi_mls(2,7) ! P = 317.348 mb
! --- ... lower atmosphere loop
do k = 1, laytrop
! --- ... binary species parameter (h2o vs co2) and spectral index js/fs
! for each of the two temperature interpolation levels
speccomb = colamt(k,1) + rfrate(k,1,1)*colamt(k,2)
specparm = colamt(k,1) / speccomb
specmult = 8.0 * min(specparm, oneminus)
js = 1 + int(specmult)
fs = mod(specmult, f_one)
ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(5) + js
speccomb1 = colamt(k,1) + rfrate(k,1,2)*colamt(k,2)
specparm1 = colamt(k,1) / speccomb1
specmult1 = 8.0 * min(specparm1, oneminus)
js1 = 1 + int(specmult1)
fs1 = mod(specmult1, f_one)
ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(5) + js1
! --- ... separate spectral indices for the o3 minor gas and for the
! Planck-fraction interpolation
speccomb_mo3 = colamt(k,1) + refrat_m_a*colamt(k,2)
specparm_mo3 = colamt(k,1) / speccomb_mo3
specmult_mo3 = 8.0 * min(specparm_mo3, oneminus)
jmo3 = 1 + int(specmult_mo3)
fmo3 = mod(specmult_mo3, f_one)
speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,2)
specparm_planck = colamt(k,1) / speccomb_planck
specmult_planck = 8.0 * min(specparm_planck, oneminus)
jpl = 1 + int(specmult_planck)
fpl = mod(specmult_planck, f_one)
inds = indself(k)
indf = indfor(k)
indm = indminor(k)
indsp = inds + 1
indfp = indf + 1
indmp = indm + 1
jplp = jpl + 1
jmo3p = jmo3 + 1
! --- ... interpolation weights in the species dimension: near the
! pure-species limits (specparm < 0.125 or > 0.875) a three-point
! scheme with quartic terms (p**4) is used; otherwise plain linear
! two-point weights with the third point disabled (fk20 = 0).
if (specparm < 0.125) then
p0 = fs - f_one
p40 = p0**4
fk00 = p40
fk10 = f_one - p0 - 2.0*p40
fk20 = p0 + p40
id000 = ind0
id010 = ind0 + 9
id100 = ind0 + 1
id110 = ind0 +10
id200 = ind0 + 2
id210 = ind0 +11
elseif (specparm > 0.875) then
p0 = -fs
p40 = p0**4
fk00 = p40
fk10 = f_one - p0 - 2.0*p40
fk20 = p0 + p40
id000 = ind0 + 1
id010 = ind0 +10
id100 = ind0
id110 = ind0 + 9
id200 = ind0 - 1
id210 = ind0 + 8
else
fk00 = f_one - fs
fk10 = fs
fk20 = f_zero
id000 = ind0
id010 = ind0 + 9
id100 = ind0 + 1
id110 = ind0 +10
id200 = ind0
id210 = ind0
endif
fac000 = fk00 * fac00(k)
fac100 = fk10 * fac00(k)
fac200 = fk20 * fac00(k)
fac010 = fk00 * fac10(k)
fac110 = fk10 * fac10(k)
fac210 = fk20 * fac10(k)
! --- ... same weight construction at the second temperature level
if (specparm1 < 0.125) then
p1 = fs1 - f_one
p41 = p1**4
fk01 = p41
fk11 = f_one - p1 - 2.0*p41
fk21 = p1 + p41
id001 = ind1
id011 = ind1 + 9
id101 = ind1 + 1
id111 = ind1 +10
id201 = ind1 + 2
id211 = ind1 +11
elseif (specparm1 > 0.875) then
p1 = -fs1
p41 = p1**4
fk01 = p41
fk11 = f_one - p1 - 2.0*p41
fk21 = p1 + p41
id001 = ind1 + 1
id011 = ind1 +10
id101 = ind1
id111 = ind1 + 9
id201 = ind1 - 1
id211 = ind1 + 8
else
fk01 = f_one - fs1
fk11 = fs1
fk21 = f_zero
id001 = ind1
id011 = ind1 + 9
id101 = ind1 + 1
id111 = ind1 +10
id201 = ind1
id211 = ind1
endif
fac001 = fk01 * fac01(k)
fac101 = fk11 * fac01(k)
fac201 = fk21 * fac01(k)
fac011 = fk01 * fac11(k)
fac111 = fk11 * fac11(k)
fac211 = fk21 * fac11(k)
! --- ... per-g-point total: key-species table interpolation plus
! water-vapor self/foreign continuum, o3 minor gas and the
! ccl4 cross-section (wx(k,1))
do ig = 1, ng05
tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
o3m1 = ka_mo3(ig,jmo3,indm) + fmo3 &
& * (ka_mo3(ig,jmo3p,indm) - ka_mo3(ig,jmo3,indm))
o3m2 = ka_mo3(ig,jmo3,indmp) + fmo3 &
& * (ka_mo3(ig,jmo3p,indmp) - ka_mo3(ig,jmo3,indmp))
abso3 = o3m1 + minorfrac(k)*(o3m2 - o3m1)
taug(ns05+ig,k) = speccomb &
& * (fac000*absa(ig,id000) + fac010*absa(ig,id010) &
& + fac100*absa(ig,id100) + fac110*absa(ig,id110) &
& + fac200*absa(ig,id200) + fac210*absa(ig,id210)) &
& + speccomb1 &
& * (fac001*absa(ig,id001) + fac011*absa(ig,id011) &
& + fac101*absa(ig,id101) + fac111*absa(ig,id111) &
& + fac201*absa(ig,id201) + fac211*absa(ig,id211)) &
& + tauself + taufor+abso3*colamt(k,3)+wx(k,1)*ccl4(ig)
fracs(ns05+ig,k) = fracrefa(ig,jpl) + fpl &
& * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
enddo
enddo
! --- ... upper atmosphere loop
! key species are o3/co2 here; only linear two-point species weights
! are used (no continuum or o3 minor terms above laytrop)
do k = laytrop+1, nlay
speccomb = colamt(k,3) + rfrate(k,6,1)*colamt(k,2)
specparm = colamt(k,3) / speccomb
specmult = 4.0 * min(specparm, oneminus)
js = 1 + int(specmult)
fs = mod(specmult, f_one)
ind0 = ((jp(k)-13)*5 + (jt(k)-1)) * nspb(5) + js
speccomb1 = colamt(k,3) + rfrate(k,6,2)*colamt(k,2)
specparm1 = colamt(k,3) / speccomb1
specmult1 = 4.0 * min(specparm1, oneminus)
js1 = 1 + int(specmult1)
fs1 = mod(specmult1, f_one)
ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(5) + js1
speccomb_planck = colamt(k,3) + refrat_planck_b*colamt(k,2)
specparm_planck = colamt(k,3) / speccomb_planck
specmult_planck = 4.0 * min(specparm_planck, oneminus)
jpl = 1 + int(specmult_planck)
fpl = mod(specmult_planck, f_one)
jplp= jpl + 1
id000 = ind0
id010 = ind0 + 5
id100 = ind0 + 1
id110 = ind0 + 6
id001 = ind1
id011 = ind1 + 5
id101 = ind1 + 1
id111 = ind1 + 6
fk00 = f_one - fs
fk10 = fs
fk01 = f_one - fs1
fk11 = fs1
fac000 = fk00 * fac00(k)
fac010 = fk00 * fac10(k)
fac100 = fk10 * fac00(k)
fac110 = fk10 * fac10(k)
fac001 = fk01 * fac01(k)
fac011 = fk01 * fac11(k)
fac101 = fk11 * fac01(k)
fac111 = fk11 * fac11(k)
do ig = 1, ng05
taug(ns05+ig,k) = speccomb &
& * (fac000*absb(ig,id000) + fac010*absb(ig,id010) &
& + fac100*absb(ig,id100) + fac110*absb(ig,id110)) &
& + speccomb1 &
& * (fac001*absb(ig,id001) + fac011*absb(ig,id011) &
& + fac101*absb(ig,id101) + fac111*absb(ig,id111)) &
& + wx(k,1) * ccl4(ig)
fracs(ns05+ig,k) = fracrefb(ig,jpl) + fpl &
& * (fracrefb(ig,jplp) - fracrefb(ig,jpl))
enddo
enddo
! ..................................
end subroutine taugb05
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 6: 820-980 cm-1 (low key - h2o; low minor - co2)
!! (high key - none; high minor - cfc11, cfc12)
! ----------------------------------
subroutine taugb06
! ..................................
! ------------------------------------------------------------------ !
! band 6: 820-980 cm-1 (low key - h2o; low minor - co2) !
! (high key - none; high minor - cfc11, cfc12) !
! ------------------------------------------------------------------ !
!
! Computes taug(ns06+1:ns06+ng06,1:nlay) and fracs(...) for band 6
! from the tables of module_radlw_kgb06 (absa, selfref, forref,
! ka_mco2, cfc11adj, cfc12, fracrefa). Inputs are host-associated;
! only taug and fracs are written. There is no key species above
! laytrop: only the cfc cross-sections contribute there.
use module_radlw_kgb06
! --- locals:
integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
& indm, indmp, ig
real (kind=kind_phys) :: ratco2, adjfac, adjcolco2, tauself, &
& taufor, absco2, temp
!
!===> ... begin here
!
! --- ... minor gas mapping level:
! lower - co2, p = 706.2720 mb, t = 294.2 k
! upper - cfc11, cfc12
! --- ... lower atmosphere loop
do k = 1, laytrop
ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(6) + 1
ind1 = ( jp(k) *5 + (jt1(k)-1)) * nspa(6) + 1
inds = indself(k)
indf = indfor(k)
indm = indminor(k)
indsp = inds + 1
indfp = indf + 1
indmp = indm + 1
ind0p = ind0 + 1
ind1p = ind1 + 1
! --- ... in atmospheres where the amount of co2 is too great to be considered
! a minor species, adjust the column amount of co2 by an empirical factor
! to obtain the proper contribution.
temp = coldry(k) * chi_mls(2,jp(k)+1)
ratco2 = colamt(k,2) / temp
if (ratco2 > 3.0) then
adjfac = 2.0 + (ratco2-2.0)**0.77
adjcolco2 = adjfac * temp
else
adjcolco2 = colamt(k,2)
endif
! --- ... per-g-point total: h2o key-species interpolation plus
! self/foreign continuum, adjusted co2 minor gas and the
! cfc11/cfc12 cross-sections (wx(k,2), wx(k,3))
do ig = 1, ng06
tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
absco2 = ka_mco2(ig,indm) + minorfrac(k) &
& * (ka_mco2(ig,indmp) - ka_mco2(ig,indm))
taug(ns06+ig,k) = colamt(k,1) &
& * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
& + fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p)) &
& + tauself + taufor + adjcolco2*absco2 &
& + wx(k,2)*cfc11adj(ig) + wx(k,3)*cfc12(ig)
fracs(ns06+ig,k) = fracrefa(ig)
enddo
enddo
! --- ... upper atmosphere loop
! nothing important goes on above laytrop in this band.
do k = laytrop+1, nlay
do ig = 1, ng06
taug(ns06+ig,k) = wx(k,2)*cfc11adj(ig) + wx(k,3)*cfc12(ig)
fracs(ns06+ig,k) = fracrefa(ig)
enddo
enddo
! ..................................
end subroutine taugb06
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 7: 980-1080 cm-1 (low key - h2o,o3; low minor - co2)
!! (high key - o3; high minor - co2)
! ----------------------------------
subroutine taugb07
! ..................................
! ------------------------------------------------------------------ !
! band 7: 980-1080 cm-1 (low key - h2o,o3; low minor - co2) !
! (high key - o3; high minor - co2) !
! ------------------------------------------------------------------ !
!
! Computes taug(ns07+1:ns07+ng07,1:nlay) and fracs(...) for band 7
! from the tables of module_radlw_kgb07 (absa, absb, selfref, forref,
! ka_mco2, kb_mco2, fracrefa, fracrefb). Inputs are host-associated;
! only taug and fracs are written.
use module_radlw_kgb07
! --- locals:
integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
& id000, id010, id100, id110, id200, id210, indm, indmp, &
& id001, id011, id101, id111, id201, id211, jmco2, jmco2p, &
& jpl, jplp, ig, js, js1
real (kind=kind_phys) :: tauself, taufor, co2m1, co2m2, absco2, &
& speccomb, specparm, specmult, fs, &
& speccomb1, specparm1, specmult1, fs1, &
& speccomb_mco2, specparm_mco2, specmult_mco2, fmco2, &
& speccomb_planck,specparm_planck,specmult_planck,fpl, &
& refrat_planck_a, refrat_m_a, ratco2, adjfac, adjcolco2, &
& fac000, fac100, fac200, fac010, fac110, fac210, &
& fac001, fac101, fac201, fac011, fac111, fac211, &
& p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21, temp
!
!===> ... begin here
!
! --- ... minor gas mapping level :
! lower - co2, p = 706.2620 mbar, t= 278.94 k
! upper - co2, p = 12.9350 mbar, t = 234.01 k
! --- ... calculate reference ratio to be used in calculation of Planck
! fraction in lower atmosphere.
refrat_planck_a = chi_mls(1,3)/chi_mls(3,3) ! P = 706.2620 mb
refrat_m_a = chi_mls(1,3)/chi_mls(3,3) ! P = 706.2720 mb
! --- ... lower atmosphere loop
do k = 1, laytrop
! --- ... binary species parameter (h2o vs o3) and spectral index js/fs
! for each of the two temperature interpolation levels
speccomb = colamt(k,1) + rfrate(k,2,1)*colamt(k,3)
specparm = colamt(k,1) / speccomb
specmult = 8.0 * min(specparm, oneminus)
js = 1 + int(specmult)
fs = mod(specmult, f_one)
ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(7) + js
speccomb1 = colamt(k,1) + rfrate(k,2,2)*colamt(k,3)
specparm1 = colamt(k,1) / speccomb1
specmult1 = 8.0 * min(specparm1, oneminus)
js1 = 1 + int(specmult1)
fs1 = mod(specmult1, f_one)
ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(7) + js1
! --- ... separate spectral indices for the co2 minor gas and for the
! Planck-fraction interpolation
speccomb_mco2 = colamt(k,1) + refrat_m_a*colamt(k,3)
specparm_mco2 = colamt(k,1) / speccomb_mco2
specmult_mco2 = 8.0 * min(specparm_mco2, oneminus)
jmco2 = 1 + int(specmult_mco2)
fmco2 = mod(specmult_mco2, f_one)
speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,3)
specparm_planck = colamt(k,1) / speccomb_planck
specmult_planck = 8.0 * min(specparm_planck, oneminus)
jpl = 1 + int(specmult_planck)
fpl = mod(specmult_planck, f_one)
inds = indself(k)
indf = indfor(k)
indm = indminor(k)
indsp = inds + 1
indfp = indf + 1
indmp = indm + 1
jplp = jpl + 1
jmco2p= jmco2+ 1
ind0p = ind0 + 1
ind1p = ind1 + 1
! --- ... in atmospheres where the amount of CO2 is too great to be considered
! a minor species, adjust the column amount of CO2 by an empirical factor
! to obtain the proper contribution.
! (note: the lower loop uses 3.0-based constants, the upper loop
! 2.0-based constants; this asymmetry is in the original scheme)
temp = coldry(k) * chi_mls(2,jp(k)+1)
ratco2 = colamt(k,2) / temp
if (ratco2 > 3.0) then
adjfac = 3.0 + (ratco2-3.0)**0.79
adjcolco2 = adjfac * temp
else
adjcolco2 = colamt(k,2)
endif
! --- ... interpolation weights in the species dimension: near the
! pure-species limits (specparm < 0.125 or > 0.875) a three-point
! scheme with quartic terms (p**4) is used; otherwise plain linear
! two-point weights with the third point disabled (fk20 = 0).
if (specparm < 0.125) then
p0 = fs - f_one
p40 = p0**4
fk00 = p40
fk10 = f_one - p0 - 2.0*p40
fk20 = p0 + p40
id000 = ind0
id010 = ind0 + 9
id100 = ind0 + 1
id110 = ind0 +10
id200 = ind0 + 2
id210 = ind0 +11
elseif (specparm > 0.875) then
p0 = -fs
p40 = p0**4
fk00 = p40
fk10 = f_one - p0 - 2.0*p40
fk20 = p0 + p40
id000 = ind0 + 1
id010 = ind0 +10
id100 = ind0
id110 = ind0 + 9
id200 = ind0 - 1
id210 = ind0 + 8
else
fk00 = f_one - fs
fk10 = fs
fk20 = f_zero
id000 = ind0
id010 = ind0 + 9
id100 = ind0 + 1
id110 = ind0 +10
id200 = ind0
id210 = ind0
endif
fac000 = fk00 * fac00(k)
fac100 = fk10 * fac00(k)
fac200 = fk20 * fac00(k)
fac010 = fk00 * fac10(k)
fac110 = fk10 * fac10(k)
fac210 = fk20 * fac10(k)
! --- ... same weight construction at the second temperature level
if (specparm1 < 0.125) then
p1 = fs1 - f_one
p41 = p1**4
fk01 = p41
fk11 = f_one - p1 - 2.0*p41
fk21 = p1 + p41
id001 = ind1
id011 = ind1 + 9
id101 = ind1 + 1
id111 = ind1 +10
id201 = ind1 + 2
id211 = ind1 +11
elseif (specparm1 > 0.875) then
p1 = -fs1
p41 = p1**4
fk01 = p41
fk11 = f_one - p1 - 2.0*p41
fk21 = p1 + p41
id001 = ind1 + 1
id011 = ind1 +10
id101 = ind1
id111 = ind1 + 9
id201 = ind1 - 1
id211 = ind1 + 8
else
fk01 = f_one - fs1
fk11 = fs1
fk21 = f_zero
id001 = ind1
id011 = ind1 + 9
id101 = ind1 + 1
id111 = ind1 +10
id201 = ind1
id211 = ind1
endif
fac001 = fk01 * fac01(k)
fac101 = fk11 * fac01(k)
fac201 = fk21 * fac01(k)
fac011 = fk01 * fac11(k)
fac111 = fk11 * fac11(k)
fac211 = fk21 * fac11(k)
! --- ... per-g-point total: key-species table interpolation plus
! self/foreign continuum and the adjusted co2 minor gas
do ig = 1, ng07
tauself = selffac(k)* (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
co2m1 = ka_mco2(ig,jmco2,indm) + fmco2 &
& * (ka_mco2(ig,jmco2p,indm) - ka_mco2(ig,jmco2,indm))
co2m2 = ka_mco2(ig,jmco2,indmp) + fmco2 &
& * (ka_mco2(ig,jmco2p,indmp) - ka_mco2(ig,jmco2,indmp))
absco2 = co2m1 + minorfrac(k) * (co2m2 - co2m1)
taug(ns07+ig,k) = speccomb &
& * (fac000*absa(ig,id000) + fac010*absa(ig,id010) &
& + fac100*absa(ig,id100) + fac110*absa(ig,id110) &
& + fac200*absa(ig,id200) + fac210*absa(ig,id210)) &
& + speccomb1 &
& * (fac001*absa(ig,id001) + fac011*absa(ig,id011) &
& + fac101*absa(ig,id101) + fac111*absa(ig,id111) &
& + fac201*absa(ig,id201) + fac211*absa(ig,id211)) &
& + tauself + taufor + adjcolco2*absco2
fracs(ns07+ig,k) = fracrefa(ig,jpl) + fpl &
& * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
enddo
enddo
! --- ... upper atmosphere loop
! --- ... in atmospheres where the amount of co2 is too great to be considered
! a minor species, adjust the column amount of co2 by an empirical factor
! to obtain the proper contribution.
do k = laytrop+1, nlay
temp = coldry(k) * chi_mls(2,jp(k)+1)
ratco2 = colamt(k,2) / temp
if (ratco2 > 3.0) then
adjfac = 2.0 + (ratco2-2.0)**0.79
adjcolco2 = adjfac * temp
else
adjcolco2 = colamt(k,2)
endif
ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(7) + 1
ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(7) + 1
indm = indminor(k)
indmp = indm + 1
ind0p = ind0 + 1
ind1p = ind1 + 1
do ig = 1, ng07
absco2 = kb_mco2(ig,indm) + minorfrac(k) &
& * (kb_mco2(ig,indmp) - kb_mco2(ig,indm))
taug(ns07+ig,k) = colamt(k,3) &
& * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
& + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
& + adjcolco2 * absco2
fracs(ns07+ig,k) = fracrefb(ig)
enddo
! --- ... empirical modification to code to improve stratospheric cooling rates
! for o3. revised to apply weighting for g-point reduction in this band.
taug(ns07+ 6,k) = taug(ns07+ 6,k) * 0.92
taug(ns07+ 7,k) = taug(ns07+ 7,k) * 0.88
taug(ns07+ 8,k) = taug(ns07+ 8,k) * 1.07
taug(ns07+ 9,k) = taug(ns07+ 9,k) * 1.1
taug(ns07+10,k) = taug(ns07+10,k) * 0.99
taug(ns07+11,k) = taug(ns07+11,k) * 0.855
enddo
! ..................................
end subroutine taugb07
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 8: 1080-1180 cm-1 (low key - h2o; low minor - co2,o3,n2o)
!! (high key - o3; high minor - co2, n2o)
! ----------------------------------
subroutine taugb08
! ..................................
! ------------------------------------------------------------------ !
! band 8: 1080-1180 cm-1 (low key - h2o; low minor - co2,o3,n2o) !
! (high key - o3; high minor - co2, n2o) !
! ------------------------------------------------------------------ !
!
! Computes taug(ns08+1:ns08+ng08,1:nlay) and fracs(...) for band 8
! from the tables of module_radlw_kgb08 (absa, absb, selfref, forref,
! ka_mco2/mo3/mn2o, kb_mco2/mn2o, cfc12, cfc22adj, fracrefa,
! fracrefb). Inputs are host-associated; only taug and fracs are
! written. No binary species parameter in this band: single key
! species per region, so only plain temperature/pressure
! interpolation (ind0/ind1) is needed.
use module_radlw_kgb08
! --- locals:
integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
& indm, indmp, ig
real (kind=kind_phys) :: tauself, taufor, absco2, abso3, absn2o, &
& ratco2, adjfac, adjcolco2, temp
!
!===> ... begin here
!
! --- ... minor gas mapping level:
! lower - co2, p = 1053.63 mb, t = 294.2 k
! lower - o3, p = 317.348 mb, t = 240.77 k
! lower - n2o, p = 706.2720 mb, t= 278.94 k
! lower - cfc12,cfc11
! upper - co2, p = 35.1632 mb, t = 223.28 k
! upper - n2o, p = 8.716e-2 mb, t = 226.03 k
! --- ... lower atmosphere loop
do k = 1, laytrop
ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(8) + 1
ind1 = ( jp(k) *5 + (jt1(k)-1)) * nspa(8) + 1
inds = indself(k)
indf = indfor(k)
indm = indminor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indsp = inds + 1
indfp = indf + 1
indmp = indm + 1
! --- ... in atmospheres where the amount of co2 is too great to be considered
! a minor species, adjust the column amount of co2 by an empirical factor
! to obtain the proper contribution.
temp = coldry(k) * chi_mls(2,jp(k)+1)
ratco2 = colamt(k,2) / temp
if (ratco2 > 3.0) then
adjfac = 2.0 + (ratco2-2.0)**0.65
adjcolco2 = adjfac * temp
else
adjcolco2 = colamt(k,2)
endif
! --- ... per-g-point total: h2o key species plus self/foreign
! continuum, co2/o3/n2o minor gases and the cfc12/cfc22
! cross-sections (wx(k,3), wx(k,4))
do ig = 1, ng08
tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
absco2 = (ka_mco2(ig,indm) + minorfrac(k) &
& * (ka_mco2(ig,indmp) - ka_mco2(ig,indm)))
abso3 = (ka_mo3(ig,indm) + minorfrac(k) &
& * (ka_mo3(ig,indmp) - ka_mo3(ig,indm)))
absn2o = (ka_mn2o(ig,indm) + minorfrac(k) &
& * (ka_mn2o(ig,indmp) - ka_mn2o(ig,indm)))
taug(ns08+ig,k) = colamt(k,1) &
& * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
& + fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p)) &
& + tauself+taufor + adjcolco2*absco2 &
& + colamt(k,3)*abso3 + colamt(k,4)*absn2o &
& + wx(k,3)*cfc12(ig) + wx(k,4)*cfc22adj(ig)
fracs(ns08+ig,k) = fracrefa(ig)
enddo
enddo
! --- ... upper atmosphere loop
do k = laytrop+1, nlay
ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(8) + 1
ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(8) + 1
indm = indminor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indmp = indm + 1
! --- ... in atmospheres where the amount of co2 is too great to be considered
! a minor species, adjust the column amount of co2 by an empirical factor
! to obtain the proper contribution.
temp = coldry(k) * chi_mls(2,jp(k)+1)
ratco2 = colamt(k,2) / temp
if (ratco2 > 3.0) then
adjfac = 2.0 + (ratco2-2.0)**0.65
adjcolco2 = adjfac * temp
else
adjcolco2 = colamt(k,2)
endif
! --- ... o3 key species plus co2/n2o minor gases and cfc cross-sections
do ig = 1, ng08
absco2 = (kb_mco2(ig,indm) + minorfrac(k) &
& * (kb_mco2(ig,indmp) - kb_mco2(ig,indm)))
absn2o = (kb_mn2o(ig,indm) + minorfrac(k) &
& * (kb_mn2o(ig,indmp) - kb_mn2o(ig,indm)))
taug(ns08+ig,k) = colamt(k,3) &
& * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
& + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
& + adjcolco2*absco2 + colamt(k,4)*absn2o &
& + wx(k,3)*cfc12(ig) + wx(k,4)*cfc22adj(ig)
fracs(ns08+ig,k) = fracrefb(ig)
enddo
enddo
! ..................................
end subroutine taugb08
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 9: 1180-1390 cm-1 (low key - h2o,ch4; low minor - n2o)
!! (high key - ch4; high minor - n2o)
! ----------------------------------
subroutine taugb09
! ..................................
! ------------------------------------------------------------------ !
! band 9: 1180-1390 cm-1 (low key - h2o,ch4; low minor - n2o) !
! (high key - ch4; high minor - n2o) !
! ------------------------------------------------------------------ !
!
! Computes taug(ns09+1:ns09+ng09,1:nlay) and fracs(...) for band 9
! from the tables of module_radlw_kgb09 (absa, absb, selfref, forref,
! ka_mn2o, kb_mn2o, fracrefa, fracrefb). Inputs are host-associated;
! only taug and fracs are written.
use module_radlw_kgb09
! --- locals:
integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
& id000, id010, id100, id110, id200, id210, indm, indmp, &
& id001, id011, id101, id111, id201, id211, jmn2o, jmn2op, &
& jpl, jplp, ig, js, js1
real (kind=kind_phys) :: tauself, taufor, n2om1, n2om2, absn2o, &
& speccomb, specparm, specmult, fs, &
& speccomb1, specparm1, specmult1, fs1, &
& speccomb_mn2o, specparm_mn2o, specmult_mn2o, fmn2o, &
& speccomb_planck,specparm_planck,specmult_planck,fpl, &
& refrat_planck_a, refrat_m_a, ratn2o, adjfac, adjcoln2o, &
& fac000, fac100, fac200, fac010, fac110, fac210, &
& fac001, fac101, fac201, fac011, fac111, fac211, &
& p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21, temp
!
!===> ... begin here
!
! --- ... minor gas mapping level :
! lower - n2o, p = 706.272 mbar, t = 278.94 k
! upper - n2o, p = 95.58 mbar, t = 215.7 k
! --- ... calculate reference ratio to be used in calculation of Planck
! fraction in lower/upper atmosphere.
refrat_planck_a = chi_mls(1,9)/chi_mls(6,9) ! P = 212 mb
refrat_m_a = chi_mls(1,3)/chi_mls(6,3) ! P = 706.272 mb
! --- ... lower atmosphere loop
do k = 1, laytrop
! --- ... binary species parameter (h2o vs ch4) and spectral index js/fs
! for each of the two temperature interpolation levels
speccomb = colamt(k,1) + rfrate(k,4,1)*colamt(k,5)
specparm = colamt(k,1) / speccomb
specmult = 8.0 * min(specparm, oneminus)
js = 1 + int(specmult)
fs = mod(specmult, f_one)
ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(9) + js
speccomb1 = colamt(k,1) + rfrate(k,4,2)*colamt(k,5)
specparm1 = colamt(k,1) / speccomb1
specmult1 = 8.0 * min(specparm1, oneminus)
js1 = 1 + int(specmult1)
fs1 = mod(specmult1, f_one)
ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(9) + js1
! --- ... separate spectral indices for the n2o minor gas and for the
! Planck-fraction interpolation
speccomb_mn2o = colamt(k,1) + refrat_m_a*colamt(k,5)
specparm_mn2o = colamt(k,1) / speccomb_mn2o
specmult_mn2o = 8.0 * min(specparm_mn2o, oneminus)
jmn2o = 1 + int(specmult_mn2o)
fmn2o = mod(specmult_mn2o, f_one)
speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,5)
specparm_planck = colamt(k,1) / speccomb_planck
specmult_planck = 8.0 * min(specparm_planck, oneminus)
jpl = 1 + int(specmult_planck)
fpl = mod(specmult_planck, f_one)
inds = indself(k)
indf = indfor(k)
indm = indminor(k)
indsp = inds + 1
indfp = indf + 1
indmp = indm + 1
jplp = jpl + 1
jmn2op= jmn2o+ 1
! --- ... in atmospheres where the amount of n2o is too great to be considered
! a minor species, adjust the column amount of n2o by an empirical factor
! to obtain the proper contribution.
temp = coldry(k) * chi_mls(4,jp(k)+1)
ratn2o = colamt(k,4) / temp
if (ratn2o > 1.5) then
adjfac = 0.5 + (ratn2o-0.5)**0.65
adjcoln2o = adjfac * temp
else
adjcoln2o = colamt(k,4)
endif
! --- ... interpolation weights in the species dimension: near the
! pure-species limits (specparm < 0.125 or > 0.875) a three-point
! scheme with quartic terms (p**4) is used; otherwise plain linear
! two-point weights with the third point disabled (fk20 = 0).
if (specparm < 0.125) then
p0 = fs - f_one
p40 = p0**4
fk00 = p40
fk10 = f_one - p0 - 2.0*p40
fk20 = p0 + p40
id000 = ind0
id010 = ind0 + 9
id100 = ind0 + 1
id110 = ind0 +10
id200 = ind0 + 2
id210 = ind0 +11
elseif (specparm > 0.875) then
p0 = -fs
p40 = p0**4
fk00 = p40
fk10 = f_one - p0 - 2.0*p40
fk20 = p0 + p40
id000 = ind0 + 1
id010 = ind0 +10
id100 = ind0
id110 = ind0 + 9
id200 = ind0 - 1
id210 = ind0 + 8
else
fk00 = f_one - fs
fk10 = fs
fk20 = f_zero
id000 = ind0
id010 = ind0 + 9
id100 = ind0 + 1
id110 = ind0 +10
id200 = ind0
id210 = ind0
endif
fac000 = fk00 * fac00(k)
fac100 = fk10 * fac00(k)
fac200 = fk20 * fac00(k)
fac010 = fk00 * fac10(k)
fac110 = fk10 * fac10(k)
fac210 = fk20 * fac10(k)
! --- ... same weight construction at the second temperature level
if (specparm1 < 0.125) then
p1 = fs1 - f_one
p41 = p1**4
fk01 = p41
fk11 = f_one - p1 - 2.0*p41
fk21 = p1 + p41
id001 = ind1
id011 = ind1 + 9
id101 = ind1 + 1
id111 = ind1 +10
id201 = ind1 + 2
id211 = ind1 +11
elseif (specparm1 > 0.875) then
p1 = -fs1
p41 = p1**4
fk01 = p41
fk11 = f_one - p1 - 2.0*p41
fk21 = p1 + p41
id001 = ind1 + 1
id011 = ind1 +10
id101 = ind1
id111 = ind1 + 9
id201 = ind1 - 1
id211 = ind1 + 8
else
fk01 = f_one - fs1
fk11 = fs1
fk21 = f_zero
id001 = ind1
id011 = ind1 + 9
id101 = ind1 + 1
id111 = ind1 +10
id201 = ind1
id211 = ind1
endif
fac001 = fk01 * fac01(k)
fac101 = fk11 * fac01(k)
fac201 = fk21 * fac01(k)
fac011 = fk01 * fac11(k)
fac111 = fk11 * fac11(k)
fac211 = fk21 * fac11(k)
! --- ... per-g-point total: key-species table interpolation plus
! self/foreign continuum and the adjusted n2o minor gas
do ig = 1, ng09
tauself = selffac(k)* (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
n2om1 = ka_mn2o(ig,jmn2o,indm) + fmn2o &
& * (ka_mn2o(ig,jmn2op,indm) - ka_mn2o(ig,jmn2o,indm))
n2om2 = ka_mn2o(ig,jmn2o,indmp) + fmn2o &
& * (ka_mn2o(ig,jmn2op,indmp) - ka_mn2o(ig,jmn2o,indmp))
absn2o = n2om1 + minorfrac(k) * (n2om2 - n2om1)
taug(ns09+ig,k) = speccomb &
& * (fac000*absa(ig,id000) + fac010*absa(ig,id010) &
& + fac100*absa(ig,id100) + fac110*absa(ig,id110) &
& + fac200*absa(ig,id200) + fac210*absa(ig,id210)) &
& + speccomb1 &
& * (fac001*absa(ig,id001) + fac011*absa(ig,id011) &
& + fac101*absa(ig,id101) + fac111*absa(ig,id111) &
& + fac201*absa(ig,id201) + fac211*absa(ig,id211)) &
& + tauself + taufor + adjcoln2o*absn2o
fracs(ns09+ig,k) = fracrefa(ig,jpl) + fpl &
& * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
enddo
enddo
! --- ... upper atmosphere loop
! single key species (ch4) above laytrop: plain interpolation only
do k = laytrop+1, nlay
ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(9) + 1
ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(9) + 1
indm = indminor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indmp = indm + 1
! --- ... in atmospheres where the amount of n2o is too great to be considered
! a minor species, adjust the column amount of n2o by an empirical factor
! to obtain the proper contribution.
temp = coldry(k) * chi_mls(4,jp(k)+1)
ratn2o = colamt(k,4) / temp
if (ratn2o > 1.5) then
adjfac = 0.5 + (ratn2o - 0.5)**0.65
adjcoln2o = adjfac * temp
else
adjcoln2o = colamt(k,4)
endif
do ig = 1, ng09
absn2o = kb_mn2o(ig,indm) + minorfrac(k) &
& * (kb_mn2o(ig,indmp) - kb_mn2o(ig,indm))
taug(ns09+ig,k) = colamt(k,5) &
& * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
& + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
& + adjcoln2o*absn2o
fracs(ns09+ig,k) = fracrefb(ig)
enddo
enddo
! ..................................
end subroutine taugb09
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 10: 1390-1480 cm-1 (low key - h2o; high key - h2o)
! ----------------------------------
subroutine taugb10
! ..................................
! ------------------------------------------------------------------ !
! band 10: 1390-1480 cm-1 (low key - h2o; high key - h2o) !
! ------------------------------------------------------------------ !
!
! Computes taug(ns10+1:ns10+ng10,1:nlay) and fracs(...) for band 10
! from the tables of module_radlw_kgb10 (absa, absb, selfref, forref,
! fracrefa, fracrefb). Inputs are host-associated; only taug and
! fracs are written. Simplest band in this set: single key species
! (h2o) throughout, no minor gases; the self continuum is applied
! below laytrop only, the foreign continuum everywhere.
use module_radlw_kgb10
! --- locals:
integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
& ig
real (kind=kind_phys) :: tauself, taufor
!
!===> ... begin here
!
! --- ... lower atmosphere loop
do k = 1, laytrop
ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(10) + 1
ind1 = ( jp(k) *5 + (jt1(k)-1)) * nspa(10) + 1
inds = indself(k)
indf = indfor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indsp = inds + 1
indfp = indf + 1
do ig = 1, ng10
tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
taug(ns10+ig,k) = colamt(k,1) &
& * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
& + fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p)) &
& + tauself + taufor
fracs(ns10+ig,k) = fracrefa(ig)
enddo
enddo
! --- ... upper atmosphere loop
do k = laytrop+1, nlay
ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(10) + 1
ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(10) + 1
indf = indfor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indfp = indf + 1
do ig = 1, ng10
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
taug(ns10+ig,k) = colamt(k,1) &
& * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
& + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
& + taufor
fracs(ns10+ig,k) = fracrefb(ig)
enddo
enddo
! ..................................
end subroutine taugb10
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 11: 1480-1800 cm-1 (low - h2o; low minor - o2)
!! (high key - h2o; high minor - o2)
! ----------------------------------
subroutine taugb11
! ..................................
! ------------------------------------------------------------------ !
! band 11: 1480-1800 cm-1 (low - h2o; low minor - o2) !
! (high key - h2o; high minor - o2) !
! ------------------------------------------------------------------ !
!
! Computes taug(ns11+1:ns11+ng11,1:nlay) and fracs(...) for band 11
! from the tables of module_radlw_kgb11 (absa, absb, selfref, forref,
! ka_mo2, kb_mo2, fracrefa, fracrefb). Inputs are host-associated;
! only taug and fracs are written. h2o is the key species in both
! regions, with an o2 minor-gas contribution scaled by
! colamt(k,6)*scaleminor(k) in both loops.
use module_radlw_kgb11
! --- locals:
integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf, indfp, &
& indm, indmp, ig
real (kind=kind_phys) :: scaleo2, tauself, taufor, tauo2
!
!===> ... begin here
!
! --- ... minor gas mapping level :
! lower - o2, p = 706.2720 mbar, t = 278.94 k
! upper - o2, p = 4.758820 mbar, t = 250.85 k
! --- ... lower atmosphere loop
do k = 1, laytrop
ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(11) + 1
ind1 = ( jp(k) *5 + (jt1(k)-1)) * nspa(11) + 1
inds = indself(k)
indf = indfor(k)
indm = indminor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indsp = inds + 1
indfp = indf + 1
indmp = indm + 1
scaleo2 = colamt(k,6) * scaleminor(k)
do ig = 1, ng11
tauself = selffac(k) * (selfref(ig,inds) + selffrac(k) &
& * (selfref(ig,indsp) - selfref(ig,inds)))
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
tauo2 = scaleo2 * (ka_mo2(ig,indm) + minorfrac(k) &
& * (ka_mo2(ig,indmp) - ka_mo2(ig,indm)))
taug(ns11+ig,k) = colamt(k,1) &
& * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
& + fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p)) &
& + tauself + taufor + tauo2
fracs(ns11+ig,k) = fracrefa(ig)
enddo
enddo
! --- ... upper atmosphere loop
! same structure as the lower loop without the self continuum
do k = laytrop+1, nlay
ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(11) + 1
ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(11) + 1
indf = indfor(k)
indm = indminor(k)
ind0p = ind0 + 1
ind1p = ind1 + 1
indfp = indf + 1
indmp = indm + 1
scaleo2 = colamt(k,6) * scaleminor(k)
do ig = 1, ng11
taufor = forfac(k) * (forref(ig,indf) + forfrac(k) &
& * (forref(ig,indfp) - forref(ig,indf)))
tauo2 = scaleo2 * (kb_mo2(ig,indm) + minorfrac(k) &
& * (kb_mo2(ig,indmp) - kb_mo2(ig,indm)))
taug(ns11+ig,k) = colamt(k,1) &
& * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
& + fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p)) &
& + taufor + tauo2
fracs(ns11+ig,k) = fracrefb(ig)
enddo
enddo
! ..................................
end subroutine taugb11
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 12: 1800-2080 cm-1 (low - h2o,co2; high - nothing)
! ----------------------------------
      subroutine taugb12
! ..................................

!  ------------------------------------------------------------------ !
!  band 12:  1800-2080 cm-1 (low - h2o,co2; high - nothing)           !
!  ------------------------------------------------------------------ !
!  computes the band-12 layer optical depths and planck fractions.    !
!  outputs (by host association from the enclosing taumol scope):     !
!     taug (ns12+1:ns12+ng12, 1:nlay), fracs(ns12+1:ns12+ng12,1:nlay) !
!  inputs come from the enclosing taumol scope (jp, jt, jt1, colamt,  !
!  rfrate, selffac/selffrac, forfac/forfrac, indself/indfor,          !
!  fac00..fac11, laytrop, nlay) and from module_radlw_kgb12 (absa,    !
!  selfref, forref, fracrefa, chi_mls, nspa, ns12, ng12).             !
!  band 12 has no upper-atmosphere contribution, so layers above      !
!  laytrop are simply zeroed.                                         !

      use module_radlw_kgb12

!  ---  locals:
      integer :: k, ind0, ind1, inds, indsp, indf, indfp, jpl, jplp,  &
     &       id000, id010, id100, id110, id200, id210, ig, js, js1,   &
     &       id001, id011, id101, id111, id201, id211

      real (kind=kind_phys) :: tauself, taufor, refrat_planck_a,      &
     &       speccomb, specparm, specmult, fs,                        &
     &       speccomb1, specparm1, specmult1, fs1,                    &
     &       speccomb_planck,specparm_planck,specmult_planck,fpl,     &
     &       fac000, fac100, fac200, fac010, fac110, fac210,          &
     &       fac001, fac101, fac201, fac011, fac111, fac211,          &
     &       p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21
!
!===> ...  begin here
!
!  --- ...  calculate reference ratio to be used in calculation of Planck
!           fraction in lower/upper atmosphere.

      refrat_planck_a = chi_mls(1,10)/chi_mls(2,10)   ! P = 174.164 mb

!  --- ...  lower atmosphere loop

      do k = 1, laytrop

!  --- ...  js/fs: table column index and interpolation fraction derived
!           from the binary species ratio (specparm) at the lower of the
!           two bracketing reference levels; js1/fs1 at the upper level.

        speccomb = colamt(k,1) + rfrate(k,1,1)*colamt(k,2)
        specparm = colamt(k,1) / speccomb
        specmult = 8.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(12) + js

        speccomb1 = colamt(k,1) + rfrate(k,1,2)*colamt(k,2)
        specparm1 = colamt(k,1) / speccomb1
        specmult1 = 8.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(12) + js1

!  --- ...  jpl/fpl: index and fraction for interpolating the planck
!           fraction table (note: clamped to oneminus, not min()'d as in
!           some sibling bands).

        speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,2)
        specparm_planck = colamt(k,1) / speccomb_planck
        if (specparm_planck >= oneminus) specparm_planck=oneminus
        specmult_planck = 8.0 * specparm_planck
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)

        inds = indself(k)
        indf = indfor(k)
        indsp = inds + 1
        indfp = indf + 1
        jplp  = jpl  + 1

!  --- ...  three-point one-sided stencils (fk00/fk10/fk20 and the id*
!           offsets) are used when specparm is near either table edge
!           (< 0.125 or > 0.875); otherwise a two-point linear stencil
!           is used (fk20 = 0 and id200/id210 are dummies).

        if (specparm < 0.125) then
          p0 = fs - f_one
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0 + 2
          id210 = ind0 +11
        elseif (specparm > 0.875) then
          p0 = -fs
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0 + 1
          id010 = ind0 +10
          id100 = ind0
          id110 = ind0 + 9
          id200 = ind0 - 1
          id210 = ind0 + 8
        else
          fk00 = f_one - fs
          fk10 = fs
          fk20 = f_zero

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0
          id210 = ind0
        endif

        fac000 = fk00 * fac00(k)
        fac100 = fk10 * fac00(k)
        fac200 = fk20 * fac00(k)
        fac010 = fk00 * fac10(k)
        fac110 = fk10 * fac10(k)
        fac210 = fk20 * fac10(k)

        if (specparm1 < 0.125) then
          p1 = fs1 - f_one
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1 + 2
          id211 = ind1 +11
        elseif (specparm1 > 0.875) then
          p1 = -fs1
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1 + 1
          id011 = ind1 +10
          id101 = ind1
          id111 = ind1 + 9
          id201 = ind1 - 1
          id211 = ind1 + 8
        else
          fk01 = f_one - fs1
          fk11 = fs1
          fk21 = f_zero

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1
          id211 = ind1
        endif

        fac001 = fk01 * fac01(k)
        fac101 = fk11 * fac01(k)
        fac201 = fk21 * fac01(k)
        fac011 = fk01 * fac11(k)
        fac111 = fk11 * fac11(k)
        fac211 = fk21 * fac11(k)

!  --- ...  per-g-point: self/foreign water-vapor continuum terms plus the
!           bilinear (p,T) x species interpolation of the absa table.

        do ig = 1, ng12
          tauself = selffac(k)* (selfref(ig,inds) + selffrac(k)       &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor  = forfac(k) * (forref(ig,indf) + forfrac(k)         &
     &            * (forref(ig,indfp) - forref(ig,indf)))

          taug(ns12+ig,k) = speccomb                                  &
     &            * (fac000*absa(ig,id000) + fac010*absa(ig,id010)    &
     &            +  fac100*absa(ig,id100) + fac110*absa(ig,id110)    &
     &            +  fac200*absa(ig,id200) + fac210*absa(ig,id210))   &
     &            + speccomb1                                         &
     &            * (fac001*absa(ig,id001) + fac011*absa(ig,id011)    &
     &            +  fac101*absa(ig,id101) + fac111*absa(ig,id111)    &
     &            +  fac201*absa(ig,id201) + fac211*absa(ig,id211))   &
     &            + tauself + taufor

          fracs(ns12+ig,k) = fracrefa(ig,jpl) + fpl                   &
     &            *(fracrefa(ig,jplp) - fracrefa(ig,jpl))
        enddo
      enddo

!  --- ...  upper atmosphere loop: band 12 is transparent above laytrop.

      do k = laytrop+1, nlay
        do ig = 1, ng12
          taug(ns12+ig,k) = f_zero
          fracs(ns12+ig,k) = f_zero
        enddo
      enddo

! ..................................
      end subroutine taugb12
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 13: 2080-2250 cm-1 (low key-h2o,n2o; high minor-o3 minor)
! ----------------------------------
      subroutine taugb13
! ..................................

!  ------------------------------------------------------------------ !
!  band 13:  2080-2250 cm-1 (low key-h2o,n2o; high minor-o3 minor)    !
!  ------------------------------------------------------------------ !
!  computes the band-13 layer optical depths and planck fractions.    !
!  outputs (by host association): taug(ns13+1:ns13+ng13,1:nlay) and   !
!  fracs(ns13+1:ns13+ng13,1:nlay).  inputs come from the enclosing    !
!  taumol scope (jp, jt, jt1, colamt, coldry, rfrate, selffac,        !
!  selffrac, forfac, forfrac, minorfrac, indself/indfor/indminor,     !
!  fac00..fac11, laytrop, nlay) and module_radlw_kgb13 (absa,         !
!  selfref, forref, fracrefa/b, ka_mco2, ka_mco, kb_mo3, chi_mls).    !

      use module_radlw_kgb13

!  ---  locals:
      integer :: k, ind0, ind1, inds, indsp, indf, indfp, indm, indmp,&
     &       id000, id010, id100, id110, id200, id210, jmco2, jpl,    &
     &       id001, id011, id101, id111, id201, id211, jmco2p, jplp,  &
     &       jmco, jmcop, ig, js, js1

      real (kind=kind_phys) :: tauself, taufor, co2m1, co2m2, absco2, &
     &       speccomb, specparm, specmult, fs,                        &
     &       speccomb1, specparm1, specmult1, fs1,                    &
     &       speccomb_mco2, specparm_mco2, specmult_mco2, fmco2,      &
     &       speccomb_mco, specparm_mco, specmult_mco, fmco,          &
     &       speccomb_planck,specparm_planck,specmult_planck,fpl,     &
     &       refrat_planck_a, refrat_m_a, refrat_m_a3, ratco2,        &
     &       adjfac, adjcolco2, com1, com2, absco, abso3,             &
     &       fac000, fac100, fac200, fac010, fac110, fac210,          &
     &       fac001, fac101, fac201, fac011, fac111, fac211,          &
     &       p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21,    &
     &       temp
!
!===> ...  begin here
!
!  --- ...  minor gas mapping levels :
!     lower - co2, p = 1053.63 mb, t = 294.2 k
!     lower - co, p = 706 mb, t = 278.94 k
!     upper - o3, p = 95.5835 mb, t = 215.7 k

!  --- ...  calculate reference ratio to be used in calculation of Planck
!           fraction in lower/upper atmosphere.

      refrat_planck_a = chi_mls(1,5)/chi_mls(4,5)    ! P = 473.420 mb (Level 5)
      refrat_m_a = chi_mls(1,1)/chi_mls(4,1)         ! P = 1053. (Level 1)
      refrat_m_a3 = chi_mls(1,3)/chi_mls(4,3)        ! P = 706. (Level 3)

!  --- ...  lower atmosphere loop

      do k = 1, laytrop

!  --- ...  js/fs (js1/fs1): table column index and interpolation fraction
!           from the binary species ratio at the two bracketing levels.

        speccomb = colamt(k,1) + rfrate(k,3,1)*colamt(k,4)
        specparm = colamt(k,1) / speccomb
        specmult = 8.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(13) + js

        speccomb1 = colamt(k,1) + rfrate(k,3,2)*colamt(k,4)
        specparm1 = colamt(k,1) / speccomb1
        specmult1 = 8.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(13) + js1

!  --- ...  indices/fractions for the co2 and co minor-gas tables.

        speccomb_mco2 = colamt(k,1) + refrat_m_a*colamt(k,4)
        specparm_mco2 = colamt(k,1) / speccomb_mco2
        specmult_mco2 = 8.0 * min(specparm_mco2, oneminus)
        jmco2 = 1 + int(specmult_mco2)
        fmco2 = mod(specmult_mco2, f_one)

        speccomb_mco = colamt(k,1) + refrat_m_a3*colamt(k,4)
        specparm_mco = colamt(k,1) / speccomb_mco
        specmult_mco = 8.0 * min(specparm_mco, oneminus)
        jmco = 1 + int(specmult_mco)
        fmco = mod(specmult_mco, f_one)

!  --- ...  index/fraction for interpolating the planck fraction table.

        speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,4)
        specparm_planck = colamt(k,1) / speccomb_planck
        specmult_planck = 8.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)

        inds = indself(k)
        indf = indfor(k)
        indm = indminor(k)
        indsp = inds + 1
        indfp = indf + 1
        indmp = indm + 1
        jplp  = jpl  + 1
        jmco2p= jmco2+ 1
        jmcop = jmco + 1

!  --- ...  in atmospheres where the amount of co2 is too great to be
!           considered a minor species, adjust the column amount of co2 by
!           an empirical factor to obtain the proper contribution.
!           (temp = 3.55e-4 * coldry is the reference co2 column.)

        temp   = coldry(k) * 3.55e-4
        ratco2 = colamt(k,2) / temp
        if (ratco2 > 3.0) then
          adjfac = 2.0 + (ratco2-2.0)**0.68
          adjcolco2 = adjfac * temp
        else
          adjcolco2 = colamt(k,2)
        endif

!  --- ...  one-sided 3-point stencils near the table edges, else a
!           2-point linear stencil (fk20 = 0); see sibling bands.

        if (specparm < 0.125) then
          p0 = fs - f_one
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0 + 2
          id210 = ind0 +11
        elseif (specparm > 0.875) then
          p0 = -fs
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0 + 1
          id010 = ind0 +10
          id100 = ind0
          id110 = ind0 + 9
          id200 = ind0 - 1
          id210 = ind0 + 8
        else
          fk00 = f_one - fs
          fk10 = fs
          fk20 = f_zero

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0
          id210 = ind0
        endif

        fac000 = fk00 * fac00(k)
        fac100 = fk10 * fac00(k)
        fac200 = fk20 * fac00(k)
        fac010 = fk00 * fac10(k)
        fac110 = fk10 * fac10(k)
        fac210 = fk20 * fac10(k)

        if (specparm1 < 0.125) then
          p1 = fs1 - f_one
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1 + 2
          id211 = ind1 +11
        elseif (specparm1 > 0.875) then
          p1 = -fs1
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1 + 1
          id011 = ind1 +10
          id101 = ind1
          id111 = ind1 + 9
          id201 = ind1 - 1
          id211 = ind1 + 8
        else
          fk01 = f_one - fs1
          fk11 = fs1
          fk21 = f_zero

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1
          id211 = ind1
        endif

        fac001 = fk01 * fac01(k)
        fac101 = fk11 * fac01(k)
        fac201 = fk21 * fac01(k)
        fac011 = fk01 * fac11(k)
        fac111 = fk11 * fac11(k)
        fac211 = fk21 * fac11(k)

!  --- ...  per-g-point: continuum terms, co2 and co minor-gas absorption
!           (each bilinearly interpolated in species index and minorfrac),
!           and the main absa table interpolation.

        do ig = 1, ng13
          tauself = selffac(k)* (selfref(ig,inds) + selffrac(k)       &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor  = forfac(k) * (forref(ig,indf) + forfrac(k)         &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          co2m1   = ka_mco2(ig,jmco2,indm) + fmco2                    &
     &            * (ka_mco2(ig,jmco2p,indm) - ka_mco2(ig,jmco2,indm))
          co2m2   = ka_mco2(ig,jmco2,indmp) + fmco2                   &
     &            * (ka_mco2(ig,jmco2p,indmp) - ka_mco2(ig,jmco2,indmp))
          absco2  = co2m1 + minorfrac(k) * (co2m2 - co2m1)
          com1    = ka_mco(ig,jmco,indm) + fmco                       &
     &            * (ka_mco(ig,jmcop,indm) - ka_mco(ig,jmco,indm))
          com2    = ka_mco(ig,jmco,indmp) + fmco                      &
     &            * (ka_mco(ig,jmcop,indmp) - ka_mco(ig,jmco,indmp))
          absco   = com1 + minorfrac(k) * (com2 - com1)

          taug(ns13+ig,k) = speccomb                                  &
     &            * (fac000*absa(ig,id000) + fac010*absa(ig,id010)    &
     &            +  fac100*absa(ig,id100) + fac110*absa(ig,id110)    &
     &            +  fac200*absa(ig,id200) + fac210*absa(ig,id210))   &
     &            + speccomb1                                         &
     &            * (fac001*absa(ig,id001) + fac011*absa(ig,id011)    &
     &            +  fac101*absa(ig,id101) + fac111*absa(ig,id111)    &
     &            +  fac201*absa(ig,id201) + fac211*absa(ig,id211))   &
     &            + tauself + taufor + adjcolco2*absco2               &
     &            + colamt(k,7)*absco

          fracs(ns13+ig,k) = fracrefa(ig,jpl) + fpl                   &
     &            * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
        enddo
      enddo

!  --- ...  upper atmosphere loop: only the o3 minor-gas contribution.

      do k = laytrop+1, nlay
        indm = indminor(k)
        indmp = indm + 1

        do ig = 1, ng13
          abso3 = kb_mo3(ig,indm) + minorfrac(k)                      &
     &          * (kb_mo3(ig,indmp) - kb_mo3(ig,indm))

          taug(ns13+ig,k) = colamt(k,3)*abso3

          fracs(ns13+ig,k) = fracrefb(ig)
        enddo
      enddo

! ..................................
      end subroutine taugb13
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 14: 2250-2380 cm-1 (low - co2; high - co2)
! ----------------------------------
      subroutine taugb14
! ..................................

!  ------------------------------------------------------------------ !
!  band 14:  2250-2380 cm-1 (low - co2; high - co2)                   !
!  ------------------------------------------------------------------ !
!  computes the band-14 layer optical depths and planck fractions.    !
!  outputs (by host association): taug(ns14+1:ns14+ng14,1:nlay) and   !
!  fracs(ns14+1:ns14+ng14,1:nlay).  inputs come from the enclosing    !
!  taumol scope (jp, jt, jt1, colamt, selffac/selffrac, forfac/       !
!  forfrac, indself/indfor, fac00..fac11, laytrop, nlay) and from     !
!  module_radlw_kgb14 (absa, absb, selfref, forref, fracrefa/b).      !
!  single key species (co2, colamt(:,2)), so only a simple 4-point    !
!  (p,T) interpolation is needed — no species-ratio stencils.         !

      use module_radlw_kgb14

!  ---  locals:
      integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf,      &
     &       indfp, ig

      real (kind=kind_phys) :: tauself, taufor
!
!===> ...  begin here
!
!  --- ...  lower atmosphere loop

      do k = 1, laytrop
!  --- ...  ind0/ind1: table rows at the two bracketing (p,T) reference
!           points; the "+1"/"p" variants address the adjacent column.
        ind0 = ((jp(k)-1)*5 + (jt (k)-1)) * nspa(14) + 1
        ind1 = ( jp(k)    *5 + (jt1(k)-1)) * nspa(14) + 1

        inds = indself(k)
        indf = indfor(k)
        ind0p = ind0 + 1
        ind1p = ind1 + 1
        indsp = inds + 1
        indfp = indf + 1

        do ig = 1, ng14
          tauself = selffac(k) * (selfref(ig,inds) + selffrac(k)      &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor  = forfac(k) * (forref(ig,indf) + forfrac(k)         &
     &            * (forref(ig,indfp) - forref(ig,indf)))

          taug(ns14+ig,k) = colamt(k,2)                               &
     &            * (fac00(k)*absa(ig,ind0) + fac10(k)*absa(ig,ind0p) &
     &            +  fac01(k)*absa(ig,ind1) + fac11(k)*absa(ig,ind1p))&
     &            + tauself + taufor

          fracs(ns14+ig,k) = fracrefa(ig)
        enddo
      enddo

!  --- ...  upper atmosphere loop: no continuum terms above laytrop.

      do k = laytrop+1, nlay
        ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(14) + 1
        ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(14) + 1

        ind0p = ind0 + 1
        ind1p = ind1 + 1

        do ig = 1, ng14
          taug(ns14+ig,k) = colamt(k,2)                               &
     &            * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
     &            +  fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p))

          fracs(ns14+ig,k) = fracrefb(ig)
        enddo
      enddo

! ..................................
      end subroutine taugb14
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 15: 2380-2600 cm-1 (low - n2o,co2; low minor - n2)
!! (high - nothing)
! ----------------------------------
      subroutine taugb15
! ..................................

!  ------------------------------------------------------------------ !
!  band 15:  2380-2600 cm-1 (low - n2o,co2; low minor - n2)           !
!                           (high - nothing)                          !
!  ------------------------------------------------------------------ !
!  computes the band-15 layer optical depths and planck fractions.    !
!  outputs (by host association): taug(ns15+1:ns15+ng15,1:nlay) and   !
!  fracs(ns15+1:ns15+ng15,1:nlay).  inputs come from the enclosing    !
!  taumol scope (jp, jt, jt1, colamt, colbrd, rfrate, scaleminor,     !
!  minorfrac, selffac/selffrac, forfac/forfrac, indself/indfor/       !
!  indminor, fac00..fac11, laytrop, nlay) and module_radlw_kgb15      !
!  (absa, selfref, forref, fracrefa, ka_mn2, chi_mls).                !
!  band 15 has no upper-atmosphere contribution.                      !

      use module_radlw_kgb15

!  ---  locals:
      integer :: k, ind0, ind1, inds, indsp, indf, indfp, indm, indmp,&
     &       id000, id010, id100, id110, id200, id210, jpl, jplp,     &
     &       id001, id011, id101, id111, id201, id211, jmn2, jmn2p,   &
     &       ig, js, js1

      real (kind=kind_phys) :: scalen2, tauself, taufor,              &
     &       speccomb, specparm, specmult, fs,                        &
     &       speccomb1, specparm1, specmult1, fs1,                    &
     &       speccomb_mn2, specparm_mn2, specmult_mn2, fmn2,          &
     &       speccomb_planck,specparm_planck,specmult_planck,fpl,     &
     &       refrat_planck_a, refrat_m_a, n2m1, n2m2, taun2,          &
     &       fac000, fac100, fac200, fac010, fac110, fac210,          &
     &       fac001, fac101, fac201, fac011, fac111, fac211,          &
     &       p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21
!
!===> ...  begin here
!
!  --- ...  minor gas mapping level :
!     lower - nitrogen continuum, P = 1053., T = 294.

!  --- ...  calculate reference ratio to be used in calculation of Planck
!           fraction in lower atmosphere.

      refrat_planck_a = chi_mls(4,1)/chi_mls(2,1)    ! P = 1053. mb (Level 1)
      refrat_m_a = chi_mls(4,1)/chi_mls(2,1)         ! P = 1053. mb

!  --- ...  lower atmosphere loop

      do k = 1, laytrop

!  --- ...  js/fs (js1/fs1): table column index and interpolation fraction
!           from the binary species ratio at the two bracketing levels.

        speccomb = colamt(k,4) + rfrate(k,5,1)*colamt(k,2)
        specparm = colamt(k,4) / speccomb
        specmult = 8.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(15) + js

        speccomb1 = colamt(k,4) + rfrate(k,5,2)*colamt(k,2)
        specparm1 = colamt(k,4) / speccomb1
        specmult1 = 8.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(15) + js1

!  --- ...  index/fraction for the n2 minor-gas table.

        speccomb_mn2 = colamt(k,4) + refrat_m_a*colamt(k,2)
        specparm_mn2 = colamt(k,4) / speccomb_mn2
        specmult_mn2 = 8.0 * min(specparm_mn2, oneminus)
        jmn2 = 1 + int(specmult_mn2)
        fmn2 = mod(specmult_mn2, f_one)

!  --- ...  index/fraction for interpolating the planck fraction table.

        speccomb_planck = colamt(k,4) + refrat_planck_a*colamt(k,2)
        specparm_planck = colamt(k,4) / speccomb_planck
        specmult_planck = 8.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)

        scalen2 = colbrd(k) * scaleminor(k)

        inds = indself(k)
        indf = indfor(k)
        indm = indminor(k)
        indsp = inds + 1
        indfp = indf + 1
        indmp = indm + 1
        jplp  = jpl  + 1
        jmn2p = jmn2 + 1

!  --- ...  one-sided 3-point stencils near the table edges, else a
!           2-point linear stencil (fk20 = 0); see sibling bands.

        if (specparm < 0.125) then
          p0 = fs - f_one
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0 + 2
          id210 = ind0 +11
        elseif (specparm > 0.875) then
          p0 = -fs
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0 + 1
          id010 = ind0 +10
          id100 = ind0
          id110 = ind0 + 9
          id200 = ind0 - 1
          id210 = ind0 + 8
        else
          fk00 = f_one - fs
          fk10 = fs
          fk20 = f_zero

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0
          id210 = ind0
        endif

        fac000 = fk00 * fac00(k)
        fac100 = fk10 * fac00(k)
        fac200 = fk20 * fac00(k)
        fac010 = fk00 * fac10(k)
        fac110 = fk10 * fac10(k)
        fac210 = fk20 * fac10(k)

        if (specparm1 < 0.125) then
          p1 = fs1 - f_one
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1 + 2
          id211 = ind1 +11
        elseif (specparm1 > 0.875) then
          p1 = -fs1
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1 + 1
          id011 = ind1 +10
          id101 = ind1
          id111 = ind1 + 9
          id201 = ind1 - 1
          id211 = ind1 + 8
        else
          fk01 = f_one - fs1
          fk11 = fs1
          fk21 = f_zero

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1
          id211 = ind1
        endif

        fac001 = fk01 * fac01(k)
        fac101 = fk11 * fac01(k)
        fac201 = fk21 * fac01(k)
        fac011 = fk01 * fac11(k)
        fac111 = fk11 * fac11(k)
        fac211 = fk21 * fac11(k)

!  --- ...  per-g-point: continuum terms, n2 continuum (taun2, scaled by
!           colbrd*scaleminor), and the main absa table interpolation.

        do ig = 1, ng15
          tauself = selffac(k)* (selfref(ig,inds) + selffrac(k)       &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor  = forfac(k) * (forref(ig,indf) + forfrac(k)         &
     &            * (forref(ig,indfp) - forref(ig,indf)))
          n2m1    = ka_mn2(ig,jmn2,indm) + fmn2                       &
     &            * (ka_mn2(ig,jmn2p,indm) - ka_mn2(ig,jmn2,indm))
          n2m2    = ka_mn2(ig,jmn2,indmp) + fmn2                      &
     &            * (ka_mn2(ig,jmn2p,indmp) - ka_mn2(ig,jmn2,indmp))
          taun2   = scalen2 * (n2m1 + minorfrac(k) * (n2m2 - n2m1))

          taug(ns15+ig,k) = speccomb                                  &
     &            * (fac000*absa(ig,id000) + fac010*absa(ig,id010)    &
     &            +  fac100*absa(ig,id100) + fac110*absa(ig,id110)    &
     &            +  fac200*absa(ig,id200) + fac210*absa(ig,id210))   &
     &            + speccomb1                                         &
     &            * (fac001*absa(ig,id001) + fac011*absa(ig,id011)    &
     &            +  fac101*absa(ig,id101) + fac111*absa(ig,id111)    &
     &            +  fac201*absa(ig,id201) + fac211*absa(ig,id211))   &
     &            + tauself + taufor + taun2

          fracs(ns15+ig,k) = fracrefa(ig,jpl) + fpl                   &
     &            * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
        enddo
      enddo

!  --- ...  upper atmosphere loop: band 15 is transparent above laytrop.

      do k = laytrop+1, nlay
        do ig = 1, ng15
          taug(ns15+ig,k) = f_zero
          fracs(ns15+ig,k) = f_zero
        enddo
      enddo

! ..................................
      end subroutine taugb15
! ----------------------------------
!>\ingroup module_radlw_main
!> Band 16: 2600-3250 cm-1 (low key- h2o,ch4; high key - ch4)
! ----------------------------------
      subroutine taugb16
! ..................................

!  ------------------------------------------------------------------ !
!  band 16:  2600-3250 cm-1 (low key- h2o,ch4; high key - ch4)        !
!  ------------------------------------------------------------------ !
!  computes the band-16 layer optical depths and planck fractions.    !
!  outputs (by host association): taug(ns16+1:ns16+ng16,1:nlay) and   !
!  fracs(ns16+1:ns16+ng16,1:nlay).  inputs come from the enclosing    !
!  taumol scope (jp, jt, jt1, colamt, rfrate, selffac/selffrac,       !
!  forfac/forfrac, indself/indfor, fac00..fac11, laytrop, nlay) and   !
!  module_radlw_kgb16 (absa, absb, selfref, forref, fracrefa/b,       !
!  chi_mls).  above laytrop only ch4 (colamt(:,5)) absorbs.           !

      use module_radlw_kgb16

!  ---  locals:
      integer :: k, ind0, ind0p, ind1, ind1p, inds, indsp, indf,      &
     &       indfp,                                                   &
     &       id000, id010, id100, id110, id200, id210, jpl, jplp,     &
     &       id001, id011, id101, id111, id201, id211, ig, js, js1

      real (kind=kind_phys) :: tauself, taufor, refrat_planck_a,      &
     &       speccomb, specparm, specmult, fs,                        &
     &       speccomb1, specparm1, specmult1, fs1,                    &
     &       speccomb_planck,specparm_planck,specmult_planck,fpl,     &
     &       fac000, fac100, fac200, fac010, fac110, fac210,          &
     &       fac001, fac101, fac201, fac011, fac111, fac211,          &
     &       p0, p40, fk00, fk10, fk20, p1, p41, fk01, fk11, fk21
!
!===> ...  begin here
!
!  --- ...  calculate reference ratio to be used in calculation of Planck
!           fraction in lower atmosphere.

      refrat_planck_a = chi_mls(1,6)/chi_mls(6,6)    ! P = 387. mb (Level 6)

!  --- ...  lower atmosphere loop

      do k = 1, laytrop

!  --- ...  js/fs (js1/fs1): table column index and interpolation fraction
!           from the binary species ratio at the two bracketing levels.

        speccomb = colamt(k,1) + rfrate(k,4,1)*colamt(k,5)
        specparm = colamt(k,1) / speccomb
        specmult = 8.0 * min(specparm, oneminus)
        js = 1 + int(specmult)
        fs = mod(specmult, f_one)
        ind0 = ((jp(k)-1)*5 + (jt(k)-1)) * nspa(16) + js

        speccomb1 = colamt(k,1) + rfrate(k,4,2)*colamt(k,5)
        specparm1 = colamt(k,1) / speccomb1
        specmult1 = 8.0 * min(specparm1, oneminus)
        js1 = 1 + int(specmult1)
        fs1 = mod(specmult1, f_one)
        ind1 = (jp(k)*5 + (jt1(k)-1)) * nspa(16) + js1

!  --- ...  index/fraction for interpolating the planck fraction table.

        speccomb_planck = colamt(k,1) + refrat_planck_a*colamt(k,5)
        specparm_planck = colamt(k,1) / speccomb_planck
        specmult_planck = 8.0 * min(specparm_planck, oneminus)
        jpl = 1 + int(specmult_planck)
        fpl = mod(specmult_planck, f_one)

        inds = indself(k)
        indf = indfor(k)
        indsp = inds + 1
        indfp = indf + 1
        jplp  = jpl  + 1

!  --- ...  one-sided 3-point stencils near the table edges, else a
!           2-point linear stencil (fk20 = 0); see sibling bands.

        if (specparm < 0.125) then
          p0 = fs - f_one
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0 + 2
          id210 = ind0 +11
        elseif (specparm > 0.875) then
          p0 = -fs
          p40 = p0**4
          fk00 = p40
          fk10 = f_one - p0 - 2.0*p40
          fk20 = p0 + p40

          id000 = ind0 + 1
          id010 = ind0 +10
          id100 = ind0
          id110 = ind0 + 9
          id200 = ind0 - 1
          id210 = ind0 + 8
        else
          fk00 = f_one - fs
          fk10 = fs
          fk20 = f_zero

          id000 = ind0
          id010 = ind0 + 9
          id100 = ind0 + 1
          id110 = ind0 +10
          id200 = ind0
          id210 = ind0
        endif

        fac000 = fk00 * fac00(k)
        fac100 = fk10 * fac00(k)
        fac200 = fk20 * fac00(k)
        fac010 = fk00 * fac10(k)
        fac110 = fk10 * fac10(k)
        fac210 = fk20 * fac10(k)

        if (specparm1 < 0.125) then
          p1 = fs1 - f_one
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1 + 2
          id211 = ind1 +11
        elseif (specparm1 > 0.875) then
          p1 = -fs1
          p41 = p1**4
          fk01 = p41
          fk11 = f_one - p1 - 2.0*p41
          fk21 = p1 + p41

          id001 = ind1 + 1
          id011 = ind1 +10
          id101 = ind1
          id111 = ind1 + 9
          id201 = ind1 - 1
          id211 = ind1 + 8
        else
          fk01 = f_one - fs1
          fk11 = fs1
          fk21 = f_zero

          id001 = ind1
          id011 = ind1 + 9
          id101 = ind1 + 1
          id111 = ind1 +10
          id201 = ind1
          id211 = ind1
        endif

        fac001 = fk01 * fac01(k)
        fac101 = fk11 * fac01(k)
        fac201 = fk21 * fac01(k)
        fac011 = fk01 * fac11(k)
        fac111 = fk11 * fac11(k)
        fac211 = fk21 * fac11(k)

!  --- ...  per-g-point: water-vapor continuum terms plus the main absa
!           table interpolation.

        do ig = 1, ng16
          tauself = selffac(k)* (selfref(ig,inds) + selffrac(k)       &
     &            * (selfref(ig,indsp) - selfref(ig,inds)))
          taufor  = forfac(k) * (forref(ig,indf) + forfrac(k)         &
     &            * (forref(ig,indfp) - forref(ig,indf)))

          taug(ns16+ig,k) = speccomb                                  &
     &            * (fac000*absa(ig,id000) + fac010*absa(ig,id010)    &
     &            +  fac100*absa(ig,id100) + fac110*absa(ig,id110)    &
     &            +  fac200*absa(ig,id200) + fac210*absa(ig,id210))   &
     &            + speccomb1                                         &
     &            * (fac001*absa(ig,id001) + fac011*absa(ig,id011)    &
     &            +  fac101*absa(ig,id101) + fac111*absa(ig,id111)    &
     &            +  fac201*absa(ig,id201) + fac211*absa(ig,id211))   &
     &            + tauself + taufor

          fracs(ns16+ig,k) = fracrefa(ig,jpl) + fpl                   &
     &            * (fracrefa(ig,jplp) - fracrefa(ig,jpl))
        enddo
      enddo

!  --- ...  upper atmosphere loop: ch4 only, simple (p,T) interpolation.

      do k = laytrop+1, nlay
        ind0 = ((jp(k)-13)*5 + (jt (k)-1)) * nspb(16) + 1
        ind1 = ((jp(k)-12)*5 + (jt1(k)-1)) * nspb(16) + 1

        ind0p = ind0 + 1
        ind1p = ind1 + 1

        do ig = 1, ng16
          taug(ns16+ig,k) = colamt(k,5)                               &
     &            * (fac00(k)*absb(ig,ind0) + fac10(k)*absb(ig,ind0p) &
     &            +  fac01(k)*absb(ig,ind1) + fac11(k)*absb(ig,ind1p))

          fracs(ns16+ig,k) = fracrefb(ig)
        enddo
      enddo

! ..................................
      end subroutine taugb16
! ----------------------------------
! ..................................
end subroutine taumol
!! @}
!-----------------------------------
!
!........................................!
end module rrtmg_lw !
!========================================!
|
{"hexsha": "7b029f8b0dfc01228eb45e2af75d56ef04bd8f56", "size": 277583, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "physics/radlw_main.f", "max_stars_repo_name": "tsupinie/ccpp-physics", "max_stars_repo_head_hexsha": "a1b957c9a8cea499121a1356ac0a826f692a30d8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-13T12:06:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-13T12:06:17.000Z", "max_issues_repo_path": "physics/radlw_main.f", "max_issues_repo_name": "tsupinie/ccpp-physics", "max_issues_repo_head_hexsha": "a1b957c9a8cea499121a1356ac0a826f692a30d8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-08-20T20:54:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-26T17:47:15.000Z", "max_forks_repo_path": "physics/radlw_main.f", "max_forks_repo_name": "tsupinie/ccpp-physics", "max_forks_repo_head_hexsha": "a1b957c9a8cea499121a1356ac0a826f692a30d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-12T04:30:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T04:30:59.000Z", "avg_line_length": 40.9958647172, "max_line_length": 103, "alphanum_fraction": 0.4586051739, "num_tokens": 82773}
|
! Implicit FTSC FDM Solver using TDMA algorithm and ADI method for parabolic 2D heat transfer equation
!-------------------------------------------------------------
!------------------By Arthur Rostami -------------------------
!-------------------------------------------------------------
program CFD_FIRST_PROJECT_IMPLICIT
    ! Implicit FTCS solver for the 2-D transient heat equation on an L x H
    ! plate, using the Peaceman-Rachford ADI splitting (implicit in x for the
    ! first half-step, implicit in y for the second) with a TDMA (Thomas)
    ! tridiagonal solve per row/column.
    !
    ! Boundary conditions (as discretized below):
    !   wall AB (x=0): Robin,  T(1,j) = (T(2,j)+10*dx)/(1+0.5*dx)
    !   wall BD (y=0): insulated, T(i,1) = T(i,2)
    !   wall AC (y=H): isothermal, T = 100
    !   wall CD (x=L): isothermal, T = 25
    !
    ! Bug fixes relative to the original version:
    !   * L,H were declared INTEGER (so H=1.5 truncated to 1) and m,n were
    !     declared REAL (illegal as allocation sizes / do-loop bounds);
    !     the kinds are corrected here.
    !   * several boundary-row right-hand sides used stale loop indices
    !     (i left over at m-1, j left over at n-1) instead of the row index.
    !   * the Robin-BC elimination at i=2 dropped the factor w on the
    !     substituted terms.
    !   * temp2's insulated-wall BC copied from temp1.
    !   * the point-G history file wrote (i-1)*dt instead of (time-1)*dt.
    !   * TF/TG (capacity 100000) are no longer indexed out of bounds when
    !     convergence is slow, and T1..T4 are initialized so they are
    !     defined even when convergence is fast.
    implicit none
    !_____________________________Variables_____________________________
    integer:: i,j,k,xF,yF,xG,yG,time,m,n,kmax
    real:: e,alpha,dx,dt,w,t_st,t_fin,L,H
    real,allocatable:: T(:,:),x(:),y(:),er(:,:),TF(:,:),TG(:,:),T0(:,:),T1(:,:),T2(:,:),T3(:,:),T4(:,:)
    real,allocatable:: a(:,:),b(:,:),c(:,:),d(:,:),c_new(:,:),d_new(:,:),temp1(:,:),temp2(:,:)
    !_____________________________Define Parameters_____________________
    print*, 'This Method is always stable, you can choose any positive value for dx and dt!'
    print*, 'Please enter delta x:'
    read*, dx
    print*, 'Please enter delta t:'
    read*, dt
    L=2.0                  ! Length of the plate
    H=1.5                  ! Height of the plate
    m=nint(L/dx)+1         ! number of grid points in x direction
    n=nint(H/dx)+1         ! number of grid points in y direction
    alpha=23.1E-6          ! thermal diffusivity, m2/s
    w=(alpha*dt)/(dx**2)   ! diffusion number (per half-step system)
    allocate(T(m,n),x(m),y(n),er(m,n),TF(1,100000),TG(1,100000),T0(m,n),T1(m,n),T2(m,n),T3(m,n),T4(m,n),temp1(m,n),temp2(m,n))
    allocate(a(2:m-1,2:n-1),b(2:m-1,2:n-1),c(2:m-1,2:n-1),d(2:m-1,2:n-1),c_new(2:m-1,2:n-1),d_new(2:m-1,2:n-1))
    !_____________________________Defining F & G locations_____________________
    xF=nint(0.5/dx)+1      ! point F location (x=0.5, y=0.5)
    yF=nint(0.5/dx)+1
    xG=nint(1.0/dx)+1      ! point G location (x=1.0, y=0.75)
    yG=nint(0.75/dx)+1
    !_____________________________Mesh Generation_____________________________
    do i=1,m
        x(i)=(i-1)*dx
    end do
    do j=1,n
        y(j)=(j-1)*dx
    end do
    !_____________________________Initial Condition____________________________
    T=25.0
    temp1=25.0
    temp2=25.0
    ! Initialize snapshot arrays so they are defined even if the run
    ! converges before the corresponding iteration count is reached.
    T0=25.0
    T1=25.0
    T2=25.0
    T3=25.0
    T4=25.0
    !_____________________________________________________________________________
    !____________________FTCS Implicit Solver Using ADI method____________________
    !_____________________________________________________________________________
    k=1                    ! time step counter
    call cpu_time(t_st)
    do
        !_____________Boundary conditions (applied to all working arrays)__________
        do j=2,n-1
            ! Robin BC on wall AB; the original wrote temp1(i,j)/temp2(i,j)
            ! with a stale i -- fixed to row 1.
            T(1,j)=(T(2,j)+10*dx)/(0.5*dx+1)
            temp1(1,j)=(temp1(2,j)+10*dx)/(0.5*dx+1)
            temp2(1,j)=(temp2(2,j)+10*dx)/(0.5*dx+1)
        end do
        do i=1,m
            T(i,1)=T(i,2)          ! insulated wall BD
            temp1(i,1)=temp1(i,2)
            temp2(i,1)=temp2(i,2)  ! fixed: originally copied from temp1
        end do
        do i=1,m
            T(i,n)=100             ! isothermal wall AC
            temp1(i,n)=100
            temp2(i,n)=100
        end do
        do j=2,n-1
            T(m,j)=25              ! isothermal wall CD
            temp1(m,j)=25
            temp2(m,j)=25
        end do
        !___________Saving Temp at chosen times and locations F and G______________
        if(k==1)      T0(:,:)=T(:,:)   ! temperature at t=0*dt
        if(k==2000)   T1(:,:)=T(:,:)   ! temperature at t=2000*dt
        if(k==10000)  T2(:,:)=T(:,:)   ! temperature at t=10000*dt
        if(k==50000)  T3(:,:)=T(:,:)   ! temperature at t=50000*dt
        if(k==100000) T4(:,:)=T(:,:)   ! temperature at t=100000*dt
        if(k<=100000) then             ! guard: histories hold 100000 samples
            TF(1,k)=T(xF,yF)
            TG(1,k)=T(xG,yG)
        end if
        !_____ADI first half-step: implicit in x, explicit in y_____
        do j=2,n-1
            do i=3,m-2
                a(i,j)=w
                b(i,j)=-2-2*w
                c(i,j)=w
                d(i,j)=-w*T(i,j+1)-w*T(i,j-1)+(-2+2*w)*T(i,j)
            end do
            ! Row i=2: eliminate T(1,j) via the Robin BC.  Fixed: the
            ! substituted terms carry the factor w, and the RHS uses row
            ! index 2 (the original used a stale i).
            a(2,j)=0
            b(2,j)=-2-2*w+w/(1+0.5*dx)
            c(2,j)=w
            d(2,j)=-w*T(2,j+1)-w*T(2,j-1)+(-2+2*w)*T(2,j)-w*10*dx/(1+0.5*dx)
            ! Row i=m-1: known wall value T(m,j) moved to the RHS (stale i fixed).
            a(m-1,j)=w
            b(m-1,j)=-2-2*w
            c(m-1,j)=0
            d(m-1,j)=-w*T(m-1,j+1)-w*T(m-1,j-1)+(-2+2*w)*T(m-1,j)-w*T(m,j)
        end do
        !_____TDMA solve along x for each interior j_____
        do j=2,n-1
            c_new(2,j)=c(2,j)/b(2,j)
            d_new(2,j)=d(2,j)/b(2,j)
            do i=3,m-1
                c_new(i,j)=c(i,j)/(b(i,j)-a(i,j)*c_new(i-1,j))
                d_new(i,j)=(d(i,j)-a(i,j)*d_new(i-1,j))/(b(i,j)-a(i,j)*c_new(i-1,j))
            end do
            temp1(m-1,j)=d_new(m-1,j)
            do i=m-2,2,-1
                temp1(i,j)=d_new(i,j)-c_new(i,j)*temp1(i+1,j)  ! half-step (k+1/2) field
            end do
        end do
        !_____ADI second half-step: implicit in y, explicit in x_____
        do i=2,m-1
            do j=3,n-2
                a(i,j)=w
                b(i,j)=-2-2*w
                c(i,j)=w
                d(i,j)=-w*temp1(i+1,j)-w*temp1(i-1,j)+(-2+2*w)*temp1(i,j)
            end do
            ! Row j=2: insulated wall (T(i,1)=T(i,2)) absorbed into b -> -2-w.
            ! Fixed: RHS uses row index 2 (the original used a stale j).
            a(i,2)=0
            b(i,2)=-2-w
            c(i,2)=w
            d(i,2)=-w*temp1(i+1,2)-w*temp1(i-1,2)+(-2+2*w)*temp1(i,2)
            ! Row j=n-1: known wall value T(i,n) moved to the RHS (stale j fixed).
            a(i,n-1)=w
            b(i,n-1)=-2-2*w
            c(i,n-1)=0
            d(i,n-1)=-w*temp1(i+1,n-1)-w*temp1(i-1,n-1)+(-2+2*w)*temp1(i,n-1)-w*T(i,n)
        end do
        !_____TDMA solve along y for each interior i_____
        do i=2,m-1
            c_new(i,2)=c(i,2)/b(i,2)
            d_new(i,2)=d(i,2)/b(i,2)
            do j=3,n-1
                c_new(i,j)=c(i,j)/(b(i,j)-a(i,j)*c_new(i,j-1))
                d_new(i,j)=(d(i,j)-a(i,j)*d_new(i,j-1))/(b(i,j)-a(i,j)*c_new(i,j-1))
            end do
            temp2(i,n-1)=d_new(i,n-1)
            do j=n-2,2,-1
                temp2(i,j)=d_new(i,j)-c_new(i,j)*temp2(i,j+1)  ! full-step (k+1) field
            end do
        end do
        er(:,:)=ABS(temp2(:,:)-T(:,:))  ! change over one full time step
        e=maxval(er)                    ! maximum difference (steady-state test)
        T(:,:)=temp2(:,:)
        if(e<0.0001) exit
        k=k+1
    end do
    call cpu_time(t_fin)
    !_____________________________Reporting solution information_____________________________
    print*, 'w= ',w
    print*, 'Number of iterations to reach steady state: ',k
    print*, 'Maximum difference between the last two time steps:',e, 'Celsius'
    print*, 'Solution will be Steady at: ',k*dt,'seconds'
    print*, 'Calculation time is: ',t_fin-t_st, 'seconds'
    !_____________________________Saving the results_____________________________
    kmax=min(k,100000)  ! number of stored history samples
    open(10,file='Temp contour.plt')
    write(10,*)'VARIABLES = "X", "Y", "T"'
    do j=1,n
        do i=1,m
            write(10,*) x(i),y(j),T(i,j)
        end do
    end do
    close(10)
    open(20,file='Midline temp at SS.dat')
    write(20,*)"VARIABLES=X,TEMPERATURE"
    do i=1,m
        write(20,*) (i-1)*dx,T(i,yG)
    end do
    close(20)
    open(30,file='Midline initial temp.dat')
    write(30,*)"VARIABLES=X,TEMPERATURE"
    do i=1,m
        write(30,*) (i-1)*dx,T0(i,yG)
    end do
    close(30)
    open(40,file='Midline temp at t=2000dt.dat')
    write(40,*)"VARIABLES=X,TEMPERATURE"
    do i=1,m
        write(40,*) (i-1)*dx,T1(i,yG)
    end do
    close(40)
    open(50,file='Midline temp at t=10000dt.dat')
    write(50,*)"VARIABLES=X,TEMPERATURE"
    do i=1,m
        write(50,*) (i-1)*dx,T2(i,yG)
    end do
    close(50)
    open(60,file='Midline temp at t=50000dt.dat')
    write(60,*)"VARIABLES=X,TEMPERATURE"
    do i=1,m
        write(60,*) (i-1)*dx,T3(i,yG)
    end do
    close(60)
    open(70,file='Midline temp at t=100000dt.dat')
    write(70,*)"VARIABLES=X,TEMPERATURE"
    do i=1,m
        write(70,*) (i-1)*dx,T4(i,yG)
    end do
    close(70)
    open(80,file='Temp at F.dat')
    write(80,*)"VARIABLES=t,TEMPERATURE"
    do time=1,kmax
        write(80,*) (time-1)*dt,TF(1,time)
    end do
    close(80)
    open(90,file='Temp at G.dat')
    write(90,*)"VARIABLES=t,TEMPERATURE"
    do time=1,kmax
        write(90,*) (time-1)*dt,TG(1,time)  ! fixed: was (i-1)*dt
    end do
    close(90)
end program CFD_FIRST_PROJECT_IMPLICIT
|
{"hexsha": "4fc378ce830fe8609f3a3469a5dae49b36c96fa4", "size": 8703, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "Implicit FTSC FDM Parabolic.f95", "max_stars_repo_name": "r2rro/CFD", "max_stars_repo_head_hexsha": "3151751423f68036c32004eea1350ee69ce959ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Implicit FTSC FDM Parabolic.f95", "max_issues_repo_name": "r2rro/CFD", "max_issues_repo_head_hexsha": "3151751423f68036c32004eea1350ee69ce959ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Implicit FTSC FDM Parabolic.f95", "max_forks_repo_name": "r2rro/CFD", "max_forks_repo_head_hexsha": "3151751423f68036c32004eea1350ee69ce959ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8771186441, "max_line_length": 127, "alphanum_fraction": 0.488222452, "num_tokens": 2715}
|
!------------------------------------------------------------------------------
!P+
! NAME:
! Test_MWSE
!
! PURPOSE:
! Program to test the microwave surface emissivity routines for
! benchmarking and refactoring.
!
! CATEGORY:
! CRTM : User Code : NESDIS Emissivity
!
! LANGUAGE:
! Fortran-95
!
! MODULES:
! kinds: Module containing definitions for kinds
! of variable types.
!
! CONTAINS:
! None.
!
! INCLUDE FILES:
! None.
!
! EXTERNALS:
! None.
!
! COMMON BLOCKS:
! None.
!
! FILES ACCESSED:
! None.
!
! CREATION HISTORY:
! Written by: Paul van Delst, CIMSS/SSEC 06-Dec-2004
! paul.vandelst@ssec.wisc.edu
!
! Copyright (C) 2004 Paul van Delst
!
!P-
!------------------------------------------------------------------------------
PROGRAM Test_MWSE

  ! ------------
  ! Module usage
  ! ------------
  USE kinds
  USE Message_Handler
  USE EmisTestData_Define
  USE EmisTestData_Binary_IO
  USE MWSE_Snow

  ! ---------------------------
  ! Disable all implicit typing
  ! ---------------------------
  IMPLICIT NONE

  ! ----------
  ! Parameters
  ! ----------
  CHARACTER( * ), PARAMETER :: PROGRAM_NAME   = 'Test_MWSE'
  ! BUGFIX: PROGRAM_RCS_ID previously ended in a bare line continuation with
  ! no string value (a syntax error; the id string was lost). Value
  ! reconstructed from the RCS log at the bottom of this file -- confirm
  ! against the repository keyword expansion.
  CHARACTER( * ), PARAMETER :: PROGRAM_RCS_ID = &
    '$Id: Test_MWSE.f90,v 1.6 2006/05/02 14:58:35 dgroff Exp $'
  CHARACTER( * ), PARAMETER :: PROGRAM_HEADER = &
    '**********************************************************'

  ! -- Test data files
  INTEGER, PARAMETER :: N_FILES = 8
  CHARACTER( * ), PARAMETER, DIMENSION( N_FILES ) :: &
    FILENAME = (/ 'emin_amsua15.dat.DirectBinary', &
                  'emin_amsua16.dat.DirectBinary', &
                  'emin_amsub15.dat.DirectBinary', &
                  'emin_amsub16.dat.DirectBinary', &
                  'emin_amsub17.dat.DirectBinary', &
                  'emin_ssmi13.dat.DirectBinary ', &
                  'emin_ssmi14.dat.DirectBinary ', &
                  'emin_ssmi15.dat.DirectBinary ' /)
  ! -- Number of instrument channels and data records in each file
  INTEGER, PARAMETER, DIMENSION( N_FILES ) :: &
    N_CHANNELS = (/ 15, 15, 5, 5, 5, 7, 7, 7 /)
  INTEGER, PARAMETER, DIMENSION( N_FILES ) :: &
    N_RECORDS = (/ 1395, 1035, 170, 130, 165, 413, 406, 378 /)
  ! -- Sensor type flags, one per file
  LOGICAL, PARAMETER, DIMENSION( N_FILES ) :: &
    IS_AMSUA = (/ .TRUE., .TRUE., .FALSE., .FALSE., .FALSE., .FALSE., .FALSE., .FALSE. /)
  LOGICAL, PARAMETER, DIMENSION( N_FILES ) :: &
    IS_AMSUB = (/ .FALSE., .FALSE., .TRUE., .TRUE., .TRUE., .FALSE., .FALSE., .FALSE. /)
  LOGICAL, PARAMETER, DIMENSION( N_FILES ) :: &
    IS_SSMI  = (/ .FALSE., .FALSE., .FALSE., .FALSE., .FALSE., .TRUE., .TRUE., .TRUE. /)
  ! -- The channel indices for the observations
  INTEGER, PARAMETER :: N_AMSUA_CHANNELS = 4
  INTEGER, PARAMETER, DIMENSION( N_AMSUA_CHANNELS ) :: AMSU_A_TB_INDEX = (/ 1, 2, 3, 15 /)
  INTEGER, PARAMETER :: N_AMSUB_CHANNELS = 2
  INTEGER, PARAMETER, DIMENSION( N_AMSUB_CHANNELS ) :: AMSU_B_TB_INDEX = (/ 1, 2 /)
  ! -- Invalid brightness temperature sentinel
  REAL( r_kind ), PARAMETER :: TB_INVALID = -999.0_r_kind
  ! -- Frequency limits
  REAL( r_kind ), PARAMETER :: FREQUENCY_BEGIN = 4.0_r_kind
  REAL( r_kind ), PARAMETER :: FREQUENCY_END   = 150.0_r_kind
  INTEGER, PARAMETER :: N_FREQUENCIES = 100
  ! -- Data flag values
  INTEGER, PARAMETER :: SEA  = 0
  INTEGER, PARAMETER :: LAND = 1
  INTEGER, PARAMETER :: NO_SNOW_OR_ICE = 0
  INTEGER, PARAMETER :: SNOW_OR_ICE    = 1
  INTEGER, PARAMETER :: INFRARED  = 0
  INTEGER, PARAMETER :: MICROWAVE = 1
  ! -- Data threshold values
  REAL( r_kind ), PARAMETER :: MINIMUM_SNOW_DEPTH = 0.1_r_kind
  ! -- Literal constants
  REAL( r_kind ), PARAMETER :: ZERO = 0.0_r_kind

  ! ---------
  ! Variables
  ! ---------
  INTEGER :: pn_pos
  CHARACTER( 80 )  :: pn_fmt
  CHARACTER( 256 ) :: Message
  INTEGER :: Error_Status
  INTEGER :: l, n
  TYPE( EmisTestData_type ) :: EmisTestData
  REAL( r_kind ), DIMENSION( N_AMSUA_CHANNELS ) :: AMSU_A_Tb
  REAL( r_kind ), DIMENSION( N_AMSUB_CHANNELS ) :: AMSU_B_Tb
  REAL( r_kind ) :: Emissivity_Horizontal
  REAL( r_kind ) :: Emissivity_Vertical
  REAL( r_kind ) :: Emissivity
  INTEGER        :: n_de    ! number of emissivity differences accumulated
  REAL( r_kind ) :: de      ! single (reference - computed) difference
  REAL( r_kind ) :: Ave_de  ! running sum, then average, of differences
  REAL( r_kind ) :: RMS_de  ! running sum of squares, then RMS, of differences
  INTEGER :: n_Cases

  !#----------------------------------------------------------------------------#
  !#                       -- OUTPUT DESCRIPTIVE HEADER --                       #
  !#----------------------------------------------------------------------------#
  ! Centre the program name under the header banner
  pn_pos = ( LEN( PROGRAM_HEADER ) / 2 ) - &
           ( LEN( PROGRAM_NAME ) / 2 )
  pn_pos = MAX( pn_pos, 0 ) + 5
  WRITE( pn_fmt, '( "( ",i2,"x, a )" )' ) pn_pos
  WRITE( *, '(/5x,a )' ) PROGRAM_HEADER
  WRITE( *, FMT = TRIM( pn_fmt ) ) PROGRAM_NAME
  WRITE( *, '(/5x, " Program to test the microwave surface emissivity routines" )' )
  WRITE( *, '( 5x, " for benchmarking and refactoring." )' )
  WRITE( *, '(/5x, " $Revision: 1.6 $")' )
  WRITE( *, '( 5x, a )' ) PROGRAM_HEADER

  !#----------------------------------------------------------------------------#
  !#                 -- INITIALISE AVERAGE AND RMS VARIABLES --                  #
  !#----------------------------------------------------------------------------#
  n_de   = 0
  Ave_de = ZERO
  RMS_de = ZERO

  !#----------------------------------------------------------------------------#
  !#                       -- LOOP OVER TEST DATA FILES --                       #
  !#----------------------------------------------------------------------------#
  File_Loop: DO n = 1, N_FILES

    WRITE( *, '( /5x, "Reading file ", a )' ) FILENAME( n )

    ! ------------------------------
    ! Initialise the compute counter
    ! ------------------------------
    n_Cases = 0

    ! ----------------------
    ! Loop over file records
    ! ----------------------
    Record_Loop: DO l = 1, N_RECORDS( n )

      ! -----------------------
      ! Read a test data record
      ! -----------------------
      Error_Status = Read_EmisTestData_Binary( FILENAME( n ), &
                                               N_CHANNELS( n ), &
                                               l, &
                                               EmisTestData )
      IF ( Error_Status /= SUCCESS ) THEN
        WRITE( Message, '( "Error reading record # ", i5, " from ", a )' ) &
                        l, TRIM( FILENAME( n ) )
        CALL Display_Message( PROGRAM_NAME, &
                              TRIM( Message ), &
                              Error_Status )
        STOP
      END IF

      ! ------------------------------------------------------
      ! Re-initialize the observation arrays to the
      ! invalid sentinel so stale data is never reused
      ! ------------------------------------------------------
      AMSU_A_Tb = TB_INVALID
      AMSU_B_Tb = TB_INVALID

      ! ---------------------
      ! Extract the AMSU data
      ! ---------------------
      IF ( IS_AMSUA( n ) ) THEN
        AMSU_A_Tb = EmisTestData%ObsTb(AMSU_A_TB_INDEX)
      ELSE IF ( IS_AMSUB( n ) ) THEN
        AMSU_B_Tb = EmisTestData%ObsTb(AMSU_B_TB_INDEX)
      END IF

      ! -----------------
      ! Call the routines
      ! -----------------
      ! -- Only land points are processed
      Land_or_Sea: IF ( EmisTestData%LandSea_Flag == LAND ) THEN

        ! -- Test if land surface is snow covered or not
        Snow_or_BareLand: IF ( EmisTestData%IceSnow_Flag == SNOW_OR_ICE .AND. &
                               EmisTestData%Snow_Depth > MINIMUM_SNOW_DEPTH ) THEN

          ! -- We have a snow covered land surface.
          Snow: IF ( IS_AMSUA( n ) .OR. IS_AMSUB( n ) ) THEN

            ! -- Call the AMSU snow emissivity routine
            CALL snwem_amsu( EmisTestData%Satellite_Zenith_Angle, &
                             EmisTestData%Frequency, &
                             EmisTestData%Snow_Depth, &
                             EmisTestData%Skin_Temperature, &
                             AMSU_A_Tb, &
                             AMSU_B_Tb, &
                             Emissivity_Horizontal, &
                             Emissivity_Vertical )
            Emissivity = Emissivity_Vertical

            ! -- Accumulate (reference - computed) difference statistics
            de      = EmisTestData%Emissivity - Emissivity
            Ave_de  = Ave_de + de
            RMS_de  = RMS_de + de**2
            n_de    = n_de + 1
            n_Cases = n_Cases + 1

          ELSE IF ( IS_SSMI( n ) ) THEN Snow
            ! -- SSM/I snow routine not yet exercised (placeholder branch)
          END IF Snow
        END IF Snow_or_BareLand
      END IF Land_or_Sea
    END DO Record_Loop

    WRITE( *, '( 10x, "Number of cases: ", i4, " out of ", i4 )' ) &
              n_Cases, N_RECORDS( n )

  END DO File_Loop

  ! ----------------------------------
  ! Destroy the EmisTestData structure
  ! ----------------------------------
  Error_Status = Destroy_EmisTestData( EmisTestData )
  IF ( Error_Status /= SUCCESS ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Error destroying EmisTestData structure.', &
                          Error_Status )
  END IF

  !#----------------------------------------------------------------------------#
  !#            -- COMPUTE THE AVERAGE AND RMS EMISSIVITY DIFFERENCE --          #
  !#----------------------------------------------------------------------------#
  ! BUGFIX: guard against n_de == 0 (no AMSU snow cases found), which would
  ! previously have divided by zero.
  IF ( n_de > 0 ) THEN
    Ave_de = Ave_de / REAL( n_de, r_kind )
    RMS_de = SQRT( RMS_de / REAL( n_de, r_kind ) )
    WRITE( *, '( /5x, "AVE emissivity difference = ", es13.6, &
               &/5x, "RMS emissivity difference = ", es13.6 )' ) Ave_de, RMS_de
  ELSE
    WRITE( *, '( /5x, "No cases processed; emissivity statistics unavailable." )' )
  END IF

END PROGRAM Test_MWSE
!-------------------------------------------------------------------------------
! -- MODIFICATION HISTORY --
!-------------------------------------------------------------------------------
!
!
! $Date: 2006/05/02 14:58:35 $
!
! $Revision: 1.6 $
!
! $Name: $
!
! $State: Exp $
!
! $Log: Test_MWSE.f90,v $
! Revision 1.6 2006/05/02 14:58:35 dgroff
! - Replaced all references of Error_Handler with Message_Handler
!
! Revision 1.5 2005/08/05 16:49:42 paulv
! - Changes introduced to allow for eventual testing of other routines.
!
! Revision 1.4 2004/12/09 20:37:48 paulv
! - Added EmisTestData structure destruction call. g95 reported left over
! memory. Cool.
!
! Revision 1.3 2004/12/09 20:08:19 paulv
! - Updated to use the MWSE_Snow* modules.
!
! Revision 1.2 2004/12/08 17:38:29 paulv
! - Added output to report the number of land/snow cases out of the total
! number processed.
!
! Revision 1.1 2004/12/08 16:49:01 paulv
! Initial checkin.
!
!
!
|
{"hexsha": "5fa47ba169df7b4717e63528256efc18fbff3239", "size": 10371, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/User_Code/NESDIS_Emissivity/Test_MWSE/Test_MWSE.f90", "max_stars_repo_name": "hsbadr/crtm", "max_stars_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-19T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:42:18.000Z", "max_issues_repo_path": "src/User_Code/NESDIS_Emissivity/Test_MWSE/Test_MWSE.f90", "max_issues_repo_name": "hsbadr/crtm", "max_issues_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-05T21:04:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:23:10.000Z", "max_forks_repo_path": "src/User_Code/NESDIS_Emissivity/Test_MWSE/Test_MWSE.f90", "max_forks_repo_name": "hsbadr/crtm", "max_forks_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 27.9541778976, "max_line_length": 90, "alphanum_fraction": 0.4756532639, "num_tokens": 2714}
|
------------------------------------------------------------------------
-- A terminating parser data type and the accompanying interpreter
------------------------------------------------------------------------
module RecursiveDescent.Coinductive.Internal where
open import RecursiveDescent.Index
open import Data.Bool
open import Data.Product.Record
open import Data.Maybe
open import Data.BoundedVec.Inefficient
import Data.List as L
open import Data.Nat
open import Category.Applicative.Indexed
open import Category.Monad.Indexed
open import Category.Monad.State
open import Utilities
------------------------------------------------------------------------
-- Parser data type
-- A type for parsers which can be implemented using recursive
-- descent. The types used ensure that the implementation below is
-- structurally recursive.
-- Coinductive parser type.  The index pair (e , c) records whether the
-- parser may accept the empty string (e : Bool) and its "corner tree" c,
-- which the interpreter below recurses on structurally.
-- NOTE(review): index semantics presumably come from RecursiveDescent.Index
-- -- confirm against that module.
codata Parser (tok : Set) : ParserType₁ where
  -- Consume exactly one token and return it.
  symbol : Parser tok (false , leaf) tok
  -- Succeed without consuming input.
  ret    : forall {r} -> r -> Parser tok (true , leaf) r
  -- Always fail.
  fail   : forall {r} -> Parser tok (false , leaf) r
  -- Sequencing when the first parser may accept the empty string
  -- (e = true), so emptiness of the whole is that of the second parser.
  bind₀  : forall {c₁ e₂ c₂ r₁ r₂}
           -> Parser tok (true , c₁) r₁
           -> (r₁ -> Parser tok (e₂ , c₂) r₂)
           -> Parser tok (e₂ , node c₁ c₂) r₂
  -- Sequencing when the first parser must consume input (e = false);
  -- the second parser's index may depend on the first one's result.
  bind₁  : forall {c₁ r₁ r₂} {i₂ : r₁ -> Index}
           -> Parser tok (false , c₁) r₁
           -> ((x : r₁) -> Parser tok (i₂ x) r₂)
           -> Parser tok (false , step c₁) r₂
  -- Choice whose left alternative accepts the empty string.
  alt₀   : forall {c₁ e₂ c₂ r}
           -> Parser tok (true , c₁) r
           -> Parser tok (e₂ , c₂) r
           -> Parser tok (true , node c₁ c₂) r
  -- Choice whose left alternative must consume input; emptiness is
  -- determined by the (explicit) flag of the right alternative.
  alt₁   : forall {c₁} e₂ {c₂ r}
           -> Parser tok (false , c₁) r
           -> Parser tok (e₂ , c₂) r
           -> Parser tok (e₂ , node c₁ c₂) r
------------------------------------------------------------------------
-- Run function for the parsers
-- Parser monad.
-- Parser monad: a nondeterministic (List-based) indexed state monad whose
-- state is the remaining input, a vector of tokens bounded in length by
-- the natural-number index.
P : Set -> IFun ℕ
P tok = IStateT (BoundedVec tok) L.List

-- Monad-plus structure (failure ∅ and choice ∣) for P.
PIMonadPlus : (tok : Set) -> RawIMonadPlus (P tok)
PIMonadPlus tok = StateTIMonadPlus (BoundedVec tok) L.monadPlus

-- State operations (get / put / modify) for P.
PIMonadState : (tok : Set) -> RawIMonadState (BoundedVec tok) (P tok)
PIMonadState tok = StateTIMonadState (BoundedVec tok) L.monad

private
  open module LM {tok} = RawIMonadPlus (PIMonadPlus tok)
  open module SM {tok} = RawIMonadState (PIMonadState tok)
                           using (get; put; modify)
-- For every successful parse the run function returns the remaining
-- string. (Since there can be several successful parses a list of
-- strings is returned.)
-- This function is structurally recursive with respect to the
-- following lexicographic measure:
--
-- 1) The upper bound of the length of the input string.
-- 2) The parser's proper left corner tree.
mutual
  -- Interpret a parser on input of length at most n.  The result index
  -- encodes that a parser which cannot accept the empty string (e = false)
  -- consumes at least one token, so the bound drops from n to pred n.
  parse : forall n {tok e c r} ->
          Parser tok (e , c) r ->
          P tok n (if e then n else pred n) r
  -- No input left: a token-consuming parser fails.
  parse zero    symbol    = ∅
  parse (suc n) symbol    = eat =<< get
  parse n       (ret x)   = return x
  parse n       fail      = ∅
  -- First parser may be empty: the bound n is unchanged for the second.
  parse n       (bind₀ p₁ p₂) = parse n p₁ >>= parse n ∘′ p₂
  -- First parser must consume: the continuation runs with a smaller bound.
  parse zero    (bind₁ p₁ p₂) = ∅
  parse (suc n) (bind₁ p₁ p₂) = parse (suc n) p₁ >>= parse↑ n ∘′ p₂
  parse n       (alt₀ p₁ p₂)      = parse n  p₁ ∣ parse↑ n p₂
  parse n       (alt₁ true  p₁ p₂) = parse↑ n p₁ ∣ parse n  p₂
  parse n       (alt₁ false p₁ p₂) = parse n  p₁ ∣ parse n  p₂

  -- Like parse, but with an unchanged result bound n: when the parser is
  -- guaranteed to consume a token, the bound is afterwards weakened with ↑.
  parse↑ : forall n {e tok c r} -> Parser tok (e , c) r -> P tok n n r
  parse↑ n       {true}  p = parse n p
  parse↑ zero    {false} p = ∅
  parse↑ (suc n) {false} p = parse (suc n) p >>= \r ->
                             modify ↑ >>
                             return r

  -- Remove and return the first input token; fails on empty input.
  eat : forall {tok n} -> BoundedVec tok (suc n) -> P tok (suc n) n tok
  eat []      = ∅
  eat (c ∷ s) = put s >> return c
|
{"hexsha": "7254776f934f2269f74941fd45e84fb4aa45a481", "size": 3803, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "misc/RecursiveDescent/Coinductive/Internal.agda", "max_stars_repo_name": "yurrriq/parser-combinators", "max_stars_repo_head_hexsha": "b396d35cc2cb7e8aea50b982429ee385f001aa88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-12-13T05:23:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-22T05:35:31.000Z", "max_issues_repo_path": "misc/RecursiveDescent/Coinductive/Internal.agda", "max_issues_repo_name": "nad/parser-combinators", "max_issues_repo_head_hexsha": "76774f54f466cfe943debf2da731074fe0c33644", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-01-22T22:21:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-24T16:39:37.000Z", "max_forks_repo_path": "misc/RecursiveDescent/Coinductive/Internal.agda", "max_forks_repo_name": "nad/parser-combinators", "max_forks_repo_head_hexsha": "76774f54f466cfe943debf2da731074fe0c33644", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2843137255, "max_line_length": 72, "alphanum_fraction": 0.5403628714, "num_tokens": 1123}
|
#!/usr/bin/env python
import os
import json
import argparse
import requests
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def pred_to_fig(pred, alpha=0.9, color_min=0.3, color_max=0.7):
    """Overlay the predicted instance masks of one prediction on its image.

    Creates (and leaves current) a new matplotlib figure showing the image
    at ``pred['uri']`` with one randomly tinted, semi-transparent mask and
    a "<category> <probability>" label per detected instance.
    """
    image_name = os.path.split(pred['uri'])[-1]
    print('Applying masks on "{}"'.format(image_name))
    image = np.array(Image.open(pred['uri']).convert('RGBA'))
    fig, axis = plt.subplots(1)
    axis.set_title(image_name)
    axis.imshow(image)
    for detection in pred['classes']:
        # Bounding-box origin and mask dimensions
        x0 = int(detection['bbox']['xmin'])
        y0 = int(detection['bbox']['ymin'])
        mask_w = detection['mask']['width']
        mask_h = detection['mask']['height']
        # Build an RGBA mask and tint it with a random colour
        mask = np.array(detection['mask']['data']).astype(float) * 255
        mask = np.stack((mask.reshape(mask_h, mask_w),) * 4, -1)
        mask[..., -1] *= alpha
        mask[..., :-1] *= np.random.uniform(color_min, color_max, 3)
        # Paint the mask into a full-size transparent buffer, then draw it
        overlay = np.zeros(image.shape, dtype='uint8')
        overlay[y0:y0 + mask_h, x0:x0 + mask_w] = mask
        axis.imshow(overlay)
        axis.text(x0, y0, '{} {:.2f}'.format(detection['cat'], detection['prob']))
    plt.figure(fig.number)
def preds_to_pdf(data, path):
    """Render every prediction in *data* into a multi-page PDF at *path*.

    One page per predicted image, as produced by :func:`pred_to_fig`.
    """
    with PdfPages(path) as pdf_doc:
        for prediction in data['body']['predictions']:
            pred_to_fig(prediction)
            pdf_doc.savefig()
def get_preds(host, port, service, thresh, images):
    """POST *images* to a DeepDetect ``/predict`` endpoint, return parsed JSON.

    Masks are requested, keeping only the best detection per instance above
    the confidence threshold *thresh*.
    """
    url = 'http://{}:{}/predict'.format(host, port)
    print('Posting on "{}"'.format(url))
    payload = {
        'service': service,
        'parameters': {
            'output': {
                'mask': True,
                'best': 1,
                'confidence_threshold': thresh
            }
        },
        'data': images
    }
    response = requests.post(url, json=payload)
    return response.json()
def get_args():
    """Build the command-line interface and return the parsed arguments."""
    cli = argparse.ArgumentParser()
    # Server location and prediction settings
    cli.add_argument('--host', default='localhost', type=str)
    cli.add_argument('--port', default=8080, type=int)
    cli.add_argument('--threshold', default=0.8, type=float)
    # Output report and service name are mandatory
    cli.add_argument('--pdf', required=True, type=str)
    cli.add_argument('--service', required=True, type=str)
    # One or more input images
    cli.add_argument('image', type=str, nargs='+')
    return cli.parse_args()
def main():
    """Entry point: fetch predictions from the server and write the PDF."""
    args = get_args()
    predictions = get_preds(args.host, args.port, args.service,
                            args.threshold, args.image)
    preds_to_pdf(predictions, args.pdf)
    return 0


if __name__ == '__main__':
    exit(main())
|
{"hexsha": "771268962b8dd22cd7a6d91d1dc36e1f8664b2c9", "size": 2463, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/caffe2/detectron/plot_masks.py", "max_stars_repo_name": "dgtlmoon/deepdetect", "max_stars_repo_head_hexsha": "0b2f20be8211a95b1fea3a600f0d5ba17b8d339f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1672, "max_stars_repo_stars_event_min_datetime": "2015-05-26T19:20:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T10:09:09.000Z", "max_issues_repo_path": "examples/caffe2/detectron/plot_masks.py", "max_issues_repo_name": "dgtlmoon/deepdetect", "max_issues_repo_head_hexsha": "0b2f20be8211a95b1fea3a600f0d5ba17b8d339f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 401, "max_issues_repo_issues_event_min_datetime": "2018-05-16T21:59:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:52:15.000Z", "max_forks_repo_path": "examples/caffe2/detectron/plot_masks.py", "max_forks_repo_name": "dgtlmoon/deepdetect", "max_forks_repo_head_hexsha": "0b2f20be8211a95b1fea3a600f0d5ba17b8d339f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 412, "max_forks_repo_forks_event_min_datetime": "2015-06-02T08:12:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-07T03:32:03.000Z", "avg_line_length": 30.7875, "max_line_length": 85, "alphanum_fraction": 0.5952090946, "include": true, "reason": "import numpy", "num_tokens": 620}
|
from __future__ import print_function
import os
import argparse
import numpy
import h5py
import irlib
import scipy.integrate as integrate
from mpmath import *
class BasisSet(object):
    """Serialise an irbasis basis object into a group of an HDF5 file.

    Datasets are written under ``prefix_name`` with overwrite semantics:
    existing datasets at the same path are deleted first.
    """

    def __init__(self, h5file, prefix_name):
        # h5file: an open, writable h5py.File; prefix_name: HDF5 group prefix.
        self._h5file = h5file
        self._prefix_name = prefix_name

    def _write_data(self, path, data):
        # Overwrite semantics: drop any existing dataset before writing.
        if path in self._h5file:
            del self._h5file[path]
        self._h5file[path] = data

    def set_info(self, Lambda, dim, statistics):
        """Store Lambda, basis dimension and statistics ('B' -> 0, else 1)."""
        self._write_data(self._prefix_name + "/info/Lambda", Lambda)
        self._write_data(self._prefix_name + "/info/dim", dim)
        self._write_data(self._prefix_name + "/info/statistics", \
                         0 if statistics == "B" else 1)
        # Cached for save_ref_values().
        self._dim = dim
        self._Lambda = Lambda

    def set_sl(self, sl):
        """Store the singular values. Returns True on success."""
        # TODO: check the size of sl against self._dim
        group = self._prefix_name
        self._write_data(group + "/sl", sl)
        return True

    def set_func(self, func_name, data, np, ns, section_edges):
        """Store the piecewise-polynomial data of ulx or vly.

        data has shape (dim, ns, np): coefficients per basis function,
        section and local polynomial. Returns False for an unknown name.
        """
        if func_name != "ulx" and func_name != "vly":
            print("Error in Set_func: func_name must be ulx or vly.")
            return False
        # TODO: CheckSize
        group = self._prefix_name + "/" + func_name
        self._write_data(group + "/ns", data=ns)
        self._write_data(group + "/np", data=np)
        assert data.shape[1] == section_edges.size - 1
        assert data.shape[2] == np
        self._write_data(group + "/data", data=data)
        self._write_data(group + "/section_edges", data=section_edges)
        return True

    def _find_zeros(self, ulx):
        """Return the zeros of the callable ulx on (-1, 1).

        Sign changes are located on a tanh-sinh-spaced grid (denser near
        the endpoints) and refined by bisection to ~1e-10.
        """
        Nx = 10000
        eps = 1e-10
        tvec = numpy.linspace(-3, 3, Nx)  # 3 is a very safe option.
        xvec = numpy.tanh(0.5 * numpy.pi * numpy.sinh(tvec))
        zeros = []
        for i in range(Nx - 1):
            if ulx(xvec[i]) * ulx(xvec[i + 1]) < 0:
                # Bracketed a sign change: bisect. Note a > b here.
                a = xvec[i + 1]
                b = xvec[i]
                u_a = ulx(a)
                u_b = ulx(b)
                while a - b > eps:
                    half_point = 0.5 * (a + b)
                    if ulx(half_point) * u_a > 0:
                        a = half_point
                    else:
                        b = half_point
                zeros.append(0.5 * (a + b))
        return numpy.array(zeros)

    def _get_max_abs_value(self, l, basis, func_type):
        """Locate where |u_{l-1}| (or |v_{l-1}|) attains its maximum on [-1, 1].

        The candidates are the zeros of the derivative plus the endpoints.
        Returns (l, x_max, |f(x_max)|), or None for an unknown func_type.
        """
        Nl = l
        if func_type == "ulx":
            func_l = (lambda x: basis.ulx(Nl - 1, x))
            func_l_derivative = (lambda x: basis.ulx_derivative(Nl - 1, x, 1))
        elif func_type == "vly":
            func_l = (lambda x: basis.vly(Nl - 1, x))
            func_l_derivative = (lambda x: basis.vly_derivative(Nl - 1, x, 1))
        else:
            return None
        zeros_data = self._find_zeros(func_l_derivative)
        values_zeros = numpy.array([abs(func_l(_x)) for _x in zeros_data])
        max_index = numpy.argmax(values_zeros)
        max_point = zeros_data[max_index]
        # The extremum may also sit on an interval endpoint.
        if abs(func_l(1.0)) > values_zeros[max_index]:
            max_point = 1.0
        elif abs(func_l(-1.0)) > values_zeros[max_index]:
            max_point = -1.0
        return (int(l), max_point, abs(func_l(max_point)))

    def save_ref_values(self, basis):
        """Store reference values of ulx/vly (and derivatives) for testing."""
        Nl = self._dim
        Lambda = self._Lambda
        group = self._prefix_name
        if Nl % 2 == 1:
            Nl -= 1
        # Get ulx data: sample near both ends plus derivative values at x=1.
        points = self._get_max_abs_value(Nl, basis, "ulx")
        edges = numpy.array([basis.section_edge_ulx(s) for s in range(basis.num_sections_ulx() + 1)])
        Region = numpy.append(numpy.linspace(edges[0], edges[1], 10), \
                              numpy.linspace(edges[basis.num_sections_ulx() - 1], edges[basis.num_sections_ulx()], 10))
        ulx_data = numpy.array([(int(Nl), _x, 0, basis.ulx(Nl - 1, _x)) for _x in Region])
        for _order in range(1, 3):
            ulx_data = numpy.append(ulx_data, numpy.array([(int(Nl), 1.0, _order, basis.ulx_derivative(Nl - 1, 1.0, _order))]), axis=0)
        self._write_data(group + "/ulx/ref/max", data=points)
        self._write_data(group + "/ulx/ref/data", data=ulx_data)
        # Get vly data, same layout as for ulx.
        points = self._get_max_abs_value(Nl, basis, "vly")
        edges = numpy.array([basis.section_edge_vly(s) for s in range(basis.num_sections_vly() + 1)])
        Region = numpy.append(numpy.linspace(edges[0], edges[1], 10), \
                              numpy.linspace(edges[basis.num_sections_vly() - 1], edges[basis.num_sections_vly()], 10))
        vly_data = numpy.array([(int(Nl), _y, 0, basis.vly(Nl - 1, _y)) for _y in Region])
        for _order in range(1, 3):
            # BUGFIX: numpy.append returns a new array; the result was
            # previously discarded, so the vly derivative reference rows
            # were never saved (unlike the ulx branch above).
            vly_data = numpy.append(vly_data, numpy.array([(int(Nl), 1.0, _order, basis.vly_derivative(Nl - 1, 1.0, _order))]), axis=0)
        self._write_data(group + "/vly/ref/max", data=points)
        self._write_data(group + "/vly/ref/data", data=vly_data)
if __name__ == '__main__':
    # ---- Command-line interface -----------------------------------------
    parser = argparse.ArgumentParser(
        prog='save.py',
        description='Output results to hdf5 file.',
        epilog='end',
        add_help=True,
    )
    parser.add_argument('-o', '--output', action='store', dest='outputfile',
                        default='irbasis.h5', type=str, choices=None,
                        help=('Path to output hdf5 file.'), metavar=None)
    parser.add_argument('-i', '--input', action='store', dest='inputfile',
                        type=str, choices=None, required=True,
                        help=('Path to input file.'), metavar=None)
    parser.add_argument('-l', '--lambda', action='store', dest='lambda',
                        required=True, type=float, choices=None,
                        help=('Value of lambda.'), metavar=None)
    parser.add_argument('-p', '--prefix', action='store', dest='prefix',
                        type=str, choices=None, default='/',
                        help=('Data will be stored in this HF5 group.'),
                        metavar=None)
    args = parser.parse_args()

    # ---- Load the basis computed by irlib -------------------------------
    if not os.path.exists(args.inputfile):
        print("Input file does not exist.")
        exit(-1)
    basis = irlib.loadtxt(args.inputfile)

    h5 = h5py.File(args.outputfile, "a")
    writer = BasisSet(h5, args.prefix)
    n_basis = basis.dim()

    # ---- Basic info and singular values ---------------------------------
    writer.set_info(basis.Lambda(), n_basis, basis.get_statistics_str())
    writer.set_sl(numpy.array([basis.sl(i) for i in range(n_basis)]))

    # ---- Piecewise-polynomial representation of ulx ---------------------
    num_sec = basis.num_sections_ulx()
    num_poly = basis.num_local_poly_ulx()
    coeff = numpy.zeros((n_basis, num_sec, num_poly), dtype=float)
    for il in range(n_basis):
        for isec in range(num_sec):
            for ip in range(num_poly):
                coeff[il, isec, ip] = basis.coeff_ulx(il, isec, ip)
    edges = numpy.array([basis.section_edge_ulx(i) for i in range(num_sec + 1)])
    writer.set_func("ulx", coeff, num_poly, num_sec, edges)

    # ---- Piecewise-polynomial representation of vly ---------------------
    num_sec = basis.num_sections_vly()
    num_poly = basis.num_local_poly_vly()
    coeff = numpy.zeros((n_basis, num_sec, num_poly), dtype=float)
    for il in range(n_basis):
        for isec in range(num_sec):
            for ip in range(num_poly):
                coeff[il, isec, ip] = basis.coeff_vly(il, isec, ip)
    edges = numpy.array([basis.section_edge_vly(i) for i in range(num_sec + 1)])
    writer.set_func("vly", coeff, num_poly, num_sec, edges)

    # ---- Reference values for regression tests, then flush --------------
    writer.save_ref_values(basis)
    h5.flush()
    h5.close()
|
{"hexsha": "cea7bf2a4f059e20e6a42d7b9851bbc625ee69d4", "size": 7509, "ext": "py", "lang": "Python", "max_stars_repo_path": "database/make_h5.py", "max_stars_repo_name": "SpM-lab/irbasis", "max_stars_repo_head_hexsha": "5beb5cbe3c0ba0fb42c32e262f04d1f3359d6045", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-07-16T15:07:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T06:46:55.000Z", "max_issues_repo_path": "database/make_h5.py", "max_issues_repo_name": "SpM-lab/irbasis", "max_issues_repo_head_hexsha": "5beb5cbe3c0ba0fb42c32e262f04d1f3359d6045", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-09-19T07:12:01.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-14T11:54:03.000Z", "max_forks_repo_path": "database/make_h5.py", "max_forks_repo_name": "SpM-lab/irbasis", "max_forks_repo_head_hexsha": "5beb5cbe3c0ba0fb42c32e262f04d1f3359d6045", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-01-28T19:51:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-02T12:57:14.000Z", "avg_line_length": 37.545, "max_line_length": 137, "alphanum_fraction": 0.5552004262, "include": true, "reason": "import numpy,import scipy,from mpmath", "num_tokens": 1988}
|
import xml.dom.minidom as MD
import math
import csv
# import pandas
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import torchvision.transforms as T
from PIL import Image
from collections import namedtuple
# Hyper-parameters for DQN training.
Batch_Size = 128  # replay minibatch size per learning step
LR = 0.01  # Adam learning rate
GAMMA = 0.9  # reward discount factor
Frst_EPSILON = 0.5  # initial exploration probability
Final_EPISILON = 0.01  # exploration probability floor
EPSILON_DECAY = 20000  # time constant of the epsilon decay schedule
TARGET_REPLACE_ITER = 100  # learning steps between target-network syncs
ACTIONS_DIMENTION = 2  # size of the discrete action space (spelling kept for compatibility)

# One replay-memory entry: (s, a, r, s').
Transition = namedtuple('Transition',
                        ('state', 'action', 'reward' ,'next_state'))
class Net(torch.nn.Module):
    """Convolutional Q-network: RGB image -> one Q-value per action.

    Three conv/batch-norm/ReLU stages followed by a single linear layer.
    The linear layer expects the flattened feature map to have 256 elements,
    which fixes the supported input resolution.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 3 -> 16 -> 32 -> 32 channels,
        # each stage a 5x5 convolution with stride 2 plus batch norm.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)
        # Q-value head; weights drawn from N(0, 0.2).
        self.output = torch.nn.Linear(256, ACTIONS_DIMENTION)
        self.output.weight.data.normal_(0, 0.2)

    def forward(self, x):
        """Return action values of shape (batch, ACTIONS_DIMENTION)."""
        stages = ((self.conv1, self.bn1),
                  (self.conv2, self.bn2),
                  (self.conv3, self.bn3))
        for conv, norm in stages:
            x = F.relu(norm(conv(x)))
        flattened = x.view(x.size(0), -1)
        return self.output(flattened)
class DQN(object):
    """Deep Q-learning agent with a target network and a replay buffer.

    Loads pre-trained online and target networks from disk, selects actions
    epsilon-greedily with an annealing epsilon, and performs minibatch
    gradient steps on transitions sampled from a circular replay memory.
    """

    def __init__(self):
        # Networks are restored from fixed pickle files in the working dir.
        self.evalueNet = torch.load('Netmodel.pkl')
        self.targetNet = torch.load('Target.pkl')
        self.log = None
        self.learnCounter = 0    # learning steps (drives target-net sync)
        self.memoryCounter = 0   # total transitions ever stored
        self.memory_size = 2000
        self.memory = []         # circular replay buffer
        self.stepCounter = 0     # action selections (drives epsilon decay)
        self.optimizer = torch.optim.Adam(self.evalueNet.parameters(), lr=LR)
        self.lossFunction = nn.MSELoss()
        self.epsilon = Frst_EPSILON

    def choose_action(self, x):
        """Select an action for observation ``x`` (batched tensor).

        Returns a (1, 1) LongTensor with the chosen action index.
        """
        # `Variable(x, volatile=True)` is obsolete (removed in torch >= 0.4);
        # inference runs under torch.no_grad() below instead.
        x = x.type(torch.FloatTensor)
        # BUGFIX: the schedule previously fed self.epsilon back into its own
        # decay formula, so epsilon converged to a fixed point near
        # Frst_EPSILON and never annealed.  Decay by the number of action
        # selections, as in the standard DQN epsilon schedule.
        self.epsilon = Final_EPISILON + (Frst_EPSILON - Final_EPISILON) * \
            math.exp(-1. * self.stepCounter / EPSILON_DECAY)
        self.stepCounter += 1
        if np.random.uniform() > self.epsilon:
            # Exploit: greedy action w.r.t. the current Q estimates.
            with torch.no_grad():
                actionsValue = self.evalueNet.forward(x)
            action = actionsValue.data.max(1)[1].view(1, 1)
        else:
            # Explore: uniform random action.
            action = torch.LongTensor([[random.randrange(ACTIONS_DIMENTION)]])
        return action

    def record_transition(self, s, a, r, next_s):
        """Store one (s, a, r, s') transition, overwriting the oldest when full."""
        transition = Transition(s, a, r, next_s)
        slot = self.memoryCounter % self.memory_size
        if self.memoryCounter < self.memory_size:
            self.memory.append(transition)
        else:
            self.memory[slot] = transition
        self.memoryCounter += 1

    def learn(self):
        """Perform one TD(0) gradient step on a sampled minibatch."""
        # ROBUSTNESS: random.sample raises ValueError when the buffer holds
        # fewer than Batch_Size transitions; skip learning until it fills.
        if len(self.memory) < Batch_Size:
            return
        # Periodically copy the online weights into the target network.
        if self.learnCounter % TARGET_REPLACE_ITER == 0:
            self.targetNet.load_state_dict(self.evalueNet.state_dict())
        self.learnCounter += 1
        samples = random.sample(self.memory, Batch_Size)
        batch = Transition(*zip(*samples))
        sample_s = Variable(torch.cat(batch.state))
        sample_a = Variable(torch.cat(batch.action))
        sample_r = Variable(torch.cat(batch.reward))
        sample_next_s = Variable(torch.cat(batch.next_state))
        # Q(s, a) under the online network for the actions actually taken.
        q_value = self.evalueNet(sample_s).gather(1, sample_a)
        # max_a' Q_target(s', a'); detached so no gradient flows into the target.
        q_next = self.targetNet(sample_next_s).max(1)[0].detach()
        q_target = sample_r + GAMMA * q_next
        q_target = Variable(q_target.data)
        loss = self.lossFunction(q_value, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
|
{"hexsha": "98f1bba0291bba26a23fb1e69739e5fcf1aabceb", "size": 3609, "ext": "py", "lang": "Python", "max_stars_repo_path": "GeneralAgent/dqn_pix.py", "max_stars_repo_name": "shaw-wong/Malmo", "max_stars_repo_head_hexsha": "2683891206e8ab7f015d5d0feb6b5a967f02c94f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-21T01:32:21.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-21T01:32:21.000Z", "max_issues_repo_path": "GeneralAgent/dqn_pix.py", "max_issues_repo_name": "shaw-wong/Malmo", "max_issues_repo_head_hexsha": "2683891206e8ab7f015d5d0feb6b5a967f02c94f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-03-20T04:35:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-20T04:35:37.000Z", "max_forks_repo_path": "GeneralAgent/dqn_pix.py", "max_forks_repo_name": "shaw-wong/Malmo", "max_forks_repo_head_hexsha": "2683891206e8ab7f015d5d0feb6b5a967f02c94f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2777777778, "max_line_length": 118, "alphanum_fraction": 0.6251039069, "include": true, "reason": "import numpy", "num_tokens": 893}
|
__precompile__()

# Julia bindings for libFLAC, integrated with FileIO for loading/saving
# FLAC audio data.
module FLAC
using FileIO
# This `using` is literally only just so that `Ogg.__init__()` gets run. This
# ensures that `libogg` is loaded into the Julia namespace, which is necessary
# for `libFLAC` to load properly. This will not be necessary in the future,
# once https://github.com/JuliaPackaging/BinaryBuilder.jl/issues/194 is solved.
using Ogg
# Public API: metadata types, stream encoder/decoder handles and the
# high-level read/seek interface.
export StreamMetaData,
       InfoMetaData,
       VorbisCommentMetaData,
       PaddingMetaData,
       ApplicationMetaData,
       SeekTableMetaData,
       CueSheetMetaData,
       StreamDecoderPtr,
       StreamEncoderPtr,
       initfile!,
       FLACDecoder,
       seek,
       read,
       size,
       length
# Extended with FLAC-specific methods below.
import Base: read, seek, size, length
# Build-time configuration (shared library paths) written by Pkg.build("FLAC").
const depfile = joinpath(dirname(@__FILE__), "..", "deps", "deps.jl")
if isfile(depfile)
    include(depfile)
else
    error("FLAC not properly installed. Please run Pkg.build(\"FLAC\")")
end
include("metadata.jl")
include("format.jl")
include("decoder.jl")
include("encoder.jl")
function __init__()
    # C callback function pointers cannot be precompiled, so they are
    # (re)created each time the module is loaded.
    init_decoder_cfunctions()
    init_encoder_cfunctions()
end
end # module
|
{"hexsha": "262278eae13d07910b2bc785c90939b7d3a97971", "size": 1110, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FLAC.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/FLAC.jl-05003743-e4a8-526e-8961-a30f3f368c99", "max_stars_repo_head_hexsha": "10e1fbaf446cc3c5db9b839d6b26047d668fdf95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/FLAC.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/FLAC.jl-05003743-e4a8-526e-8961-a30f3f368c99", "max_issues_repo_head_hexsha": "10e1fbaf446cc3c5db9b839d6b26047d668fdf95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FLAC.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/FLAC.jl-05003743-e4a8-526e-8961-a30f3f368c99", "max_forks_repo_head_hexsha": "10e1fbaf446cc3c5db9b839d6b26047d668fdf95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6530612245, "max_line_length": 79, "alphanum_fraction": 0.6927927928, "num_tokens": 277}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 15:03:54 2020
@author: franciscodeborjaponz
"""
#Resets ALL (Careful This is a "magic" function then it doesn't run as script)
#reset -f
#load basiclibraries
import os
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype #For definition of custom categorical data types (ordinal if necesary)
import matplotlib.pyplot as plt
import seaborn as sns # For hi level, Pandas oriented, graphics
import scipy.stats as stats # For statistical inference
#importamos la libreria para correlación
from scipy.stats.stats import pearsonr
#importamos para poder calcular el modelo.
from statsmodels.formula.api import ols
#Reads data from CSV file and stores it in a dataframe called rentals_2011
# Pay atention to the specific format of your CSV data (; , or , .)
wbr = pd.read_csv ("WBR_11_12_denormalized_temp.csv", sep=';', decimal=',')
"""
1)Siempre describir las variables en busqueda de valores anomalos,
2) Siempre explorar relaciones bivariadas Scatterplot/Pearson's
3) Ajustar el modelo de regresión cuidadosamente.
a) Escalor y intercept
b) P.value
c) Ajuste de modelo
"""
model1 = ols('cnt ~ temp_celsius', data=wbr).fit()
"""
Numero de observaciones, R-squared
al tener un p.value de 0.000 damos por buena y es suficientemente significativa.
En la muestra están asociadas.
R2 es la cantidad de variabilidad de mis ventas, que puedo asociar a la variabilidad
de la temperatura.
"""
model1.summary2() #visualizar el modelo.
"""
Siguiende modelo basado con el windspeed
"""
model2 = ols('cnt ~ windspeed_kh', data=wbr).fit()
model2.summary2()
"""
Siguiente modelo basado en la temperatura y el windspeed_kh
Como podemos ver en el modelo, al incluir nuevas variables cambia la influencia de
las variables.
"""
model3 = ols('cnt ~ temp_celsius + windspeed_kh', data=wbr).fit()
model3.summary2()
wbr.hum.hist()
"""
Siguiende modelo basado con la variable humedad
"""
model4 = ols('cnt ~ hum', data=wbr).fit()
model4.summary2()
"""
Siguiente modelo basado en la temperatura, el windspeed_kh y hum
"""
model5 = ols('cnt ~ temp_celsius + windspeed_kh + hum', data=wbr).fit()
model5.summary2()
"""
stargazer ayuda a representar todos los modelos.
"""
#!pip install stargazer
from stargazer.stargazer import Stargazer
stargazer = Stargazer([model1, model2, model3, model4, model5])
stargazer.render_html()
|
{"hexsha": "679ebbe5006b9fd122d41551fc16134009c22839", "size": 2433, "ext": "py", "lang": "Python", "max_stars_repo_path": "sesiones/sesion10.py", "max_stars_repo_name": "fbponz/EstadisticaEnPython", "max_stars_repo_head_hexsha": "9a2a6db07bfa68c70e59b16223474fa7e5b670fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-07T19:41:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-07T19:41:45.000Z", "max_issues_repo_path": "sesiones/sesion10.py", "max_issues_repo_name": "fbponz/EstadisticaEnPython", "max_issues_repo_head_hexsha": "9a2a6db07bfa68c70e59b16223474fa7e5b670fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sesiones/sesion10.py", "max_forks_repo_name": "fbponz/EstadisticaEnPython", "max_forks_repo_head_hexsha": "9a2a6db07bfa68c70e59b16223474fa7e5b670fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.313253012, "max_line_length": 116, "alphanum_fraction": 0.7501027538, "include": true, "reason": "import numpy,import scipy,from scipy,from statsmodels", "num_tokens": 673}
|
import numpy as np
from gekko import GEKKO
from scipy.signal import tf2ss
import collections
import math
import time
from dataclasses import dataclass
# Seconds the autotuner waits for the process to settle / dwell between phases.
ATSETTLINGTIME = 100
# Errors within this fraction of the setpoint are ignored when counting
# zero crossings of the relay-feedback error signal.
ZEROCROSSINGTOL = 0.001
# Tuning-rule coefficients [Kc-factor, Ti-factor, Td-factor], indexed by the
# MethodList constants below (Ziegler-Nichols P / PI / PID, Tyreus-Luyben PI / PID).
# Kc = factor[0]*Ku, Ti = factor[1]*Tu, Td = factor[2]*Tu (a factor of 0 disables the term).
METHODFACTORS = [[0.5, 0, 0], [1/2.2, 1/1.2, 0], [1/1.7, 1/2, 1/8], [1/3.2, 2.2, 0], [1/2.2, 2.2, 6.3]]
@dataclass
class MethodList:
    # Enumerates tuning rules; each value is an index into METHODFACTORS.
    # NOTE(review): attributes are unannotated, so @dataclass treats them as
    # plain class constants, not dataclass fields — presumably intentional.
    ZN_P = 0      # Ziegler-Nichols, P only
    ZN_PI = 1     # Ziegler-Nichols, PI
    ZN_PID = 2    # Ziegler-Nichols, PID
    TL_PI = 3     # Tyreus-Luyben, PI
    TL_PID = 4    # Tyreus-Luyben, PID
class TuningStatus:
    # States of the two-pass relay autotuning state machine (see PIDAutotuneRT.nextVal):
    # coarse relay oscillation -> settle at the found operating point -> dwell ->
    # fine relay oscillation around that point -> ready.
    COARSE_RELAY = 0
    COARSE_READY = 1
    COARSE_SETTLING = 2
    DWELL = 3
    FINE_RELAY = 4
    FINE_READY = 5
class PID:
    """Discrete PID controller with output clamping and integral anti-windup.

    Also provides ``autotune_offline``, which searches for PID gains via GEKKO
    dynamic optimization against a plant given as a transfer function.
    """

    def __init__(self, Kp, Ki, Kd, out_min=-999999, out_max=999999):
        # Controller gains.
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        # Output saturation limits (kept both as a pair and individually).
        self.limits = [out_min, out_max]
        self.outmax = out_max
        self.outmin = out_min
        # Previous error / time for the derivative and integral terms.
        # t_prev starts slightly negative so the first apply() at t=0 does not
        # divide by zero in the derivative term — presumably intentional.
        self.e_prev = 0
        self.t_prev = -0.001
        # Running term values (I persists between calls).
        self.I = 0
        self.P = 0
        self.D = 0

    def apply(self, error, t):
        """Compute one PID step for `error` at time `t`; return the clamped output."""
        self.P = self.Kp * error
        # Backward-difference derivative on the error signal.
        self.D = self.Kd * (error - self.e_prev) / (t - self.t_prev)
        self.e_prev = error
        # integral() reads self.t_prev, so it must run before t_prev is updated.
        self.integral(error, t)
        self.t_prev = t
        output = self.P + self.I + self.D
        # Saturate the output to [outmin, outmax].
        return np.max([np.min([output, self.outmax]), self.outmin])

    def integral(self, error, t):
        """Accumulate the integral term with conditional anti-windup clamping."""
        if self.Ki != 0:
            # Headroom left for I after P and D have claimed their share.
            self.Imax = (self.outmax - self.P - self.D)
            self.Imin = (self.outmin - self.P - self.D)
            change = self.Ki * error * (t - self.t_prev)
            self.I = self.I + change
            if error > 0:
                if self.I > 0:
                    if self.Imax > 0:
                        # Clamp I to the available positive headroom.
                        if (self.I > self.Imax):
                            self.I = self.Imax
                    else:
                        # No positive headroom at all: undo this step's accumulation.
                        self.I -= change
            else:
                if self.I < 0:
                    if self.Imin < 0:
                        # Clamp I to the available negative headroom.
                        if (self.I < self.Imin):
                            self.I = self.Imin
                    else:
                        # No negative headroom: undo this step's accumulation.
                        self.I -= change
        else:
            # No integral action configured.
            self.I = 0

    @classmethod
    def autotune_offline(self, tf_num, tf_den, tf, steps, amp,
                         Kc_min=0.01, Kc_max=100,
                         Ti_min=0.01, Ti_max=100,
                         Td_min=0.01, Td_max=100,
                         outmin=0, outmax=1000,
                         overshootweight=0,
                         risetimeweight=0,
                         settlingtimeweight=0):
        """Find PID gains (Kc, Ti, Td) offline via GEKKO dynamic optimization.

        The plant is given as transfer-function numerator/denominator
        (tf_num, tf_den); a setpoint step of height `amp` is simulated over
        `tf` seconds in `steps` points, and the squared tracking error (plus
        optional overshoot / rise-time / settling-time penalties) is minimized.

        Returns (PIDvals, StepResponse) namedtuples on success, or 0 when the
        solver fails.

        NOTE(review): decorated @classmethod but the first parameter is named
        `self` and all state is stored on it (i.e. on the class) — works, but
        the naming is misleading; confirm whether instance semantics were meant.
        """
        self.m = GEKKO()
        self.tf = tf
        self.steps = steps
        self.m.time = np.linspace(0, self.tf, self.steps)
        # Setpoint profile: zero for the first samples, then a step of `amp`.
        # NOTE(review): index 10 is assigned by neither slice and stays 0 —
        # looks like an off-by-one; confirm whether step[10:] was intended.
        self.step = np.zeros(self.steps)
        self.step[0:10] = 0
        self.step[11:] = amp
        # Decision variables: the three PID gains, each bounded.
        self.Kc = self.m.FV(value=0.01, lb=Kc_min, ub=Kc_max)
        self.Kc.STATUS = 1
        # tauI=3.0
        self.Ti = self.m.FV(value=100, lb=Ti_min, ub=Ti_max)
        self.Ti.STATUS = 1
        # tauD=0.0
        self.Td = self.m.FV(value=100, lb=Td_min, ub=Td_max)
        self.Td.STATUS = 1
        self.OP_0 = 0.0
        self.OP = self.m.Var(value=0, lb=outmin, ub=outmax)   # controller output
        self.PV = self.m.Var(value=0)                         # process variable
        self.SP = self.m.Param(value=self.step)               # setpoint trajectory
        self.Intgl = self.m.Var(value=0)                      # integral of error
        self.err = self.m.Intermediate(self.SP - self.PV)
        # Soft performance measures built from the error via conditional (if2) expressions.
        self.overshoot = self.m.if2(self.err + 0.02 * self.SP, -self.err, 0)
        self.risetime = self.m.if2(self.err - 0.1 * self.SP, 0, self.err)
        self.settlingtime = self.m.if2(self.err - 0.2 * self.SP, -self.err, 0)
        self.settlingtime = self.settlingtime - self.overshoot
        # PID law in ideal (ISA) form: OP = OP0 + Kc*e + (Kc/Ti)*integral(e) - Kc*Td*dPV/dt.
        self.m.Equation(self.Intgl.dt() == self.err)
        self.m.Equation(self.OP == self.OP_0 + self.Kc * self.err + (self.Kc / self.Ti) * self.Intgl
                        - self.Kc * self.Td * self.PV.dt())
        self.m.Obj(self.err ** 2 + overshootweight * self.overshoot ** 2 + risetimeweight * self.risetime ** 2 + settlingtimeweight * self.settlingtime ** 2)
        # TODO make UI control for overshoot weight
        # Process model
        # convert transfer function to statespace
        A, B, C, D = tf2ss(tf_num, tf_den)
        # Find order of equation
        order = len(A)
        x = self.m.Array(self.m.Var, (order))
        # create state variables: x' = A x + B u, PV = C x + D u
        eqn = np.dot(A, x)
        eqn2 = np.dot(C, x)
        for i in range(order):
            self.m.Equation(x[i].dt() == eqn[i] + B[i][0] * self.OP)
        self.m.Equation(self.PV == eqn2[0] + D[0][0] * self.OP)
        self.m.options.IMODE = 6          # dynamic optimization (control) mode
        self.m.options.MAX_ITER = 1000
        # self.m.options.OTOL=0.5
        PIDvals = collections.namedtuple('PIDvals', ['Kc', 'Ti', 'Td'])
        StepResponse = collections.namedtuple('StepResponse', ['time', 'step', 'PV', 'OP'])
        print(self.overshoot.value)
        try:
            self.m.solve()
            return PIDvals(self.Kc.value[0], self.Ti.value[0], self.Td.value[0]), StepResponse(self.m.time, self.step, self.PV.value, self.OP.value)
        except Exception:
            # Solver did not converge; signal failure with 0 (callers must check).
            print("no solution found")
            return 0
class PIDAutotuneRT:
    """Real-time PID autotuner using the relay-feedback (Astrom-Hagglund) method.

    Drive the loop by calling nextVal() once per sample; it returns the relay
    output to apply and the current TuningStatus. The tuner runs two relay
    passes (coarse, then a finer one around the discovered operating point),
    measures the oscillation amplitude and ultimate period, and PIDparameters()
    converts them to gains via the selected METHODFACTORS tuning rule.
    Timing is taken from wall-clock time.time(), so sample spacing matters.
    """

    def __init__(self, Y0, a, a_fine, method=MethodList.TL_PI, cycles=5, midpoint=0):
        """
        Autotuner based on relay feedback method

        Parameters
        ----------
        Y0 : float
            Setpoint
        a : float
            Amplitude of control signal
        a_fine : float
            Relay amplitude for the second (fine) pass.
        method : int
            Tuning rule index into METHODFACTORS (see MethodList).
        cycles : int
            Number of relay oscillation cycles to record per pass.
        midpoint : float
            Center of the relay output swing.
        """
        # Recorded signals for the current pass.
        self.OP = np.array([])
        self.PV = np.array([])
        self.Err = np.array([])
        self.sampling = 0          # measured sample interval (s)
        self.currentTime = 0       # wall-clock time of the previous sample
        # NOTE(review): self.ready is never set True anywhere in this class;
        # outputReady() therefore always returns False — confirm intent.
        self.ready = False
        self.currentOutput = a
        self.cycles = cycles
        self.zerocrosses = 0
        self.zerocrossingindices = np.array([])
        self.setpoint = Y0
        self.OPamp = a                          # relay swing amplitude
        self.OPlow = midpoint - self.OPamp / 2  # low level of the relay output
        self.OPfine = a_fine
        self.method = method
        self.status = TuningStatus.COARSE_RELAY
        self.captureTime = 0
        self.postTime = 0
        #start scheduler thread

    def nextVal(self, currentPV, currentOP):
        """Advance the tuning state machine by one sample.

        Returns (output_to_apply, TuningStatus).
        """
        # Track wall-clock sample spacing (first call only records the time).
        if self.currentTime == 0:
            self.currentTime = time.time()
        else:
            self.sampling = time.time() - self.currentTime
            self.currentTime = time.time()
        if self.status == TuningStatus.COARSE_RELAY or self.status == TuningStatus.FINE_RELAY:
            # Relay phase: record PV/error, toggle output at each error zero crossing.
            self.PV = np.append(self.PV, currentPV)
            self.Err = np.append(self.Err, self.setpoint - currentPV)
            zerocrossings = self._zeroCrossings()
            print(zerocrossings)
            if zerocrossings > self.zerocrosses:
                self._toggleOutput()
                self.zerocrosses += 1
                # Two zero crossings per oscillation cycle.
                if self.zerocrosses > 2 * self.cycles:
                    self.status = TuningStatus.COARSE_READY if self.status == TuningStatus.COARSE_RELAY else TuningStatus.FINE_READY
            self.OP = np.append(self.OP, self.currentOutput)
        elif self.status == TuningStatus.COARSE_READY:
            # Coarse pass finished: clear the buffers and let the loop settle.
            self.status = TuningStatus.COARSE_SETTLING
            #reset PV and Err arrays
            self.PV = np.array([])
            self.Err = np.array([])
            self.OP = np.array([])
            self.zerocrossingindices = np.array([])
            self.zerocrosses = 0
            self.captureTime = time.time()
        elif self.status == TuningStatus.DWELL:
            # Hold the low fine-relay level for ATSETTLINGTIME before starting
            # the fine relay pass.
            if time.time() - self.postTime > ATSETTLINGTIME and self.postTime != 0:
                self.currentOutput = self.OPlow + self.OPamp
                self.status = TuningStatus.FINE_RELAY
        elif self.status == TuningStatus.COARSE_SETTLING:
            # Wait until PV stays within ±1% of the setpoint for ATSETTLINGTIME,
            # restarting the timer whenever it drifts out of band.
            Err = self.setpoint - currentPV
            delta = time.time() - self.captureTime
            if (Err < 0.01 * self.setpoint and Err > -0.01 * self.setpoint):
                pass
            else:
                self.captureTime = time.time()
            if delta > ATSETTLINGTIME:
                # Re-center the (smaller) fine relay around the settled output.
                self.OPlow = currentOP - self.OPfine / 2
                self.OPamp = self.OPfine
                self.currentOutput = self.OPlow
                self.postTime = time.time()
                self.status = TuningStatus.DWELL
            print(delta)
        return self.currentOutput, self.status

    def outputReady(self):
        # NOTE(review): see __init__ — self.ready is never updated.
        return self.ready

    def PIDparameters(self):
        """Return {'Kc','Ti','Td'} from the recorded oscillation, or inert defaults.

        Uses the describing-function result Ku = (4/pi)*(d/b) with relay
        amplitude d = OPamp and PV oscillation amplitude b, then scales by the
        selected METHODFACTORS rule.
        """
        if self.status == TuningStatus.COARSE_READY or self.status == TuningStatus.FINE_READY:
            b, Tu = self._calculateInputs()
            print(b)
            print(Tu)
            Ku = (4 / math.pi) * (self.OPamp / b)
            Td = 0
            Ti = 9999
            Kc = 0
            Kc = METHODFACTORS[self.method][0] * Ku
            Ti = METHODFACTORS[self.method][1] * Tu
            Td = METHODFACTORS[self.method][2] * Tu
            return {'Kc': Kc, 'Ti': Ti, 'Td': Td}
        else:
            # Not finished tuning: effectively-disabled controller values.
            return {'Kc': 0, 'Ti': 9999, 'Td': 0}

    @classmethod
    def convertPID(cls, Kp, Ti, Td, initialMethod: int, finalMethod: int):
        """Rescale gains tuned with one METHODFACTORS rule to another rule.

        A zero factor in the source rule (term unused) maps that term to 0.
        """
        Kpn = Kp * METHODFACTORS[finalMethod][0] / (METHODFACTORS[initialMethod][0])
        if (METHODFACTORS[initialMethod][1]) != 0:
            Tin = Ti * METHODFACTORS[finalMethod][1] / (METHODFACTORS[initialMethod][1])
        else:
            Tin = 0
        if (METHODFACTORS[initialMethod][2]) != 0:
            Tdn = Td * METHODFACTORS[finalMethod][2] / (METHODFACTORS[initialMethod][2])
        else:
            Tdn = 0
        return {'Kp': Kpn, 'Ti': Tin, 'Td': Tdn}

    def _toggleOutput(self):
        # Flip the relay between its low level and low+amplitude.
        if self.currentOutput == self.OPlow:
            self.currentOutput = self.OPamp + self.OPlow
        else:
            self.currentOutput = self.OPlow

    def _zeroCrossings(self):
        """Count sign changes of the recorded error, ignoring near-zero samples."""
        if np.any(self.Err):
            # Drop samples within ±ZEROCROSSINGTOL*setpoint before detecting sign flips.
            Err_nosmall = self.Err[(self.Err > ZEROCROSSINGTOL * self.setpoint) | (self.Err < -ZEROCROSSINGTOL * self.setpoint)]
            self.zerocrossingindices = np.where(np.diff(np.sign(Err_nosmall)))[0]
            return len(self.zerocrossingindices)
        else:
            return 0

    def _calculateInputs(self):
        """Return (b, Tu): PV oscillation amplitude and ultimate period (s)."""
        #remove all elements before first zero crossing index:
        for i in range(self.zerocrossingindices[0]):
            self.PV = np.delete(self.PV, 0)
        b = np.max(self.PV) - np.min(self.PV)
        # Mean spacing between consecutive zero crossings = half the period.
        diff = np.array([])
        for i in range(len(self.zerocrossingindices) - 1):
            diff = np.append(diff, self.zerocrossingindices[i + 1] - self.zerocrossingindices[i])
        # diff=PIDAutotuneRT.reject_outliers(diff)
        Tu = 2 * np.mean(diff) * self.sampling
        return b, Tu

    @classmethod
    def reject_outliers(cls, data, m=2.):
        """Drop samples more than m median-absolute-deviations from the median."""
        d = np.abs(data - np.median(data))
        mdev = np.median(d)
        s = d / mdev if mdev else 0.
        return data[s < m]
if __name__ == "__main__":
    # Demo: offline-autotune a first-order plant 1/(s+10) over 30 s / 100 steps
    # with a unit setpoint step. Requires GEKKO and its solver backend.
    # pid=PID(1,1,1,0,10)
    # t=np.linspace(0,10,100)
    # print(pid.apply(1,0.5))
    # Autotune=PIDAutotune()
    # for i in range(len(t)):
    #     next(pid.apply())
    pid, res = PID.autotune_offline([1], [1, 10], 30, 100, 1)
    print(pid)
    print(res)
|
{"hexsha": "97856906c10079c7baea9b0fbd295263cf075983", "size": 11233, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/InstPyr/Control/PID.py", "max_stars_repo_name": "soodsidd/instpyr", "max_stars_repo_head_hexsha": "138d0a8164dc388187fde58329b9ff770af77af4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/InstPyr/Control/PID.py", "max_issues_repo_name": "soodsidd/instpyr", "max_issues_repo_head_hexsha": "138d0a8164dc388187fde58329b9ff770af77af4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/InstPyr/Control/PID.py", "max_forks_repo_name": "soodsidd/instpyr", "max_forks_repo_head_hexsha": "138d0a8164dc388187fde58329b9ff770af77af4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0303867403, "max_line_length": 143, "alphanum_fraction": 0.5415294222, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3078}
|
{-
Product of structures S and T: X ↦ S X × T X
-}
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Structures.Relational.Product where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Function
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Structure
open import Cubical.Foundations.RelationalStructure
open import Cubical.Foundations.SIP
open import Cubical.Foundations.Univalence
open import Cubical.Data.Sigma
open import Cubical.HITs.PropositionalTruncation as Trunc
open import Cubical.HITs.SetQuotients
open import Cubical.Structures.Product
private
variable
ℓ ℓ₁ ℓ₁' ℓ₁'' ℓ₂ ℓ₂' ℓ₂'' : Level
-- Structured relations
-- Structured relation on a product of structures: R relates two pairs
-- exactly when it relates them componentwise under ρ₁ and ρ₂.
ProductRelStr :
  {S₁ : Type ℓ → Type ℓ₁} (ρ₁ : StrRel S₁ ℓ₁')
  {S₂ : Type ℓ → Type ℓ₂} (ρ₂ : StrRel S₂ ℓ₂')
  → StrRel (ProductStructure S₁ S₂) (ℓ-max ℓ₁' ℓ₂')
ProductRelStr ρ₁ ρ₂ R (s₁ , s₂) (t₁ , t₂) =
  ρ₁ R s₁ t₁ × ρ₂ R s₂ t₂
-- Suitability of the product relation follows componentwise: every field
-- (quotient contractibility, symmetry, transitivity, set- and prop-ness)
-- is assembled from the corresponding fields of θ₁ and θ₂.
productSuitableRel :
  {S₁ : Type ℓ → Type ℓ₁} {ρ₁ : StrRel S₁ ℓ₁'}
  {S₂ : Type ℓ → Type ℓ₂} {ρ₂ : StrRel S₂ ℓ₂'}
  → SuitableStrRel S₁ ρ₁ → SuitableStrRel S₂ ρ₂
  → SuitableStrRel (ProductStructure S₁ S₂) (ProductRelStr ρ₁ ρ₂)
productSuitableRel θ₁ θ₂ .quo (X , s₁ , s₂) R (r₁ , r₂) .fst .fst =
  θ₁ .quo (X , s₁) R r₁ .fst .fst , θ₂ .quo (X , s₂) R r₂ .fst .fst
productSuitableRel θ₁ θ₂ .quo (X , s₁ , s₂) R (r₁ , r₂) .fst .snd =
  θ₁ .quo (X , s₁) R r₁ .fst .snd , θ₂ .quo (X , s₂) R r₂ .fst .snd
productSuitableRel θ₁ θ₂ .quo (X , s₁ , s₂) R (r₁ , r₂) .snd ((q₁ , q₂) , (c₁ , c₂)) i .fst =
  θ₁ .quo (X , s₁) R r₁ .snd (q₁ , c₁) i .fst , θ₂ .quo (X , s₂) R r₂ .snd (q₂ , c₂) i .fst
productSuitableRel θ₁ θ₂ .quo (X , s₁ , s₂) R (r₁ , r₂) .snd ((q₁ , q₂) , (c₁ , c₂)) i .snd =
  θ₁ .quo (X , s₁) R r₁ .snd (q₁ , c₁) i .snd , θ₂ .quo (X , s₂) R r₂ .snd (q₂ , c₂) i .snd
productSuitableRel θ₁ θ₂ .symmetric R (r₁ , r₂) =
  θ₁ .symmetric R r₁ , θ₂ .symmetric R r₂
productSuitableRel θ₁ θ₂ .transitive R R' (r₁ , r₂) (r₁' , r₂') =
  θ₁ .transitive R R' r₁ r₁' , θ₂ .transitive R R' r₂ r₂'
productSuitableRel θ₁ θ₂ .set setX =
  isSet× (θ₁ .set setX) (θ₂ .set setX)
productSuitableRel θ₁ θ₂ .prop propR (s₁ , s₂) (t₁ , t₂) =
  isProp× (θ₁ .prop propR s₁ t₁) (θ₂ .prop propR s₂ t₂)
-- If ρ₁ matches ι₁ and ρ₂ matches ι₂, the product relation matches the
-- product equivalence structure, via a Σ-congruence of the two matches.
productRelMatchesEquiv :
  {S₁ : Type ℓ → Type ℓ₁} (ρ₁ : StrRel S₁ ℓ₁') {ι₁ : StrEquiv S₁ ℓ₁''}
  {S₂ : Type ℓ → Type ℓ₂} (ρ₂ : StrRel S₂ ℓ₂') {ι₂ : StrEquiv S₂ ℓ₂''}
  → StrRelMatchesEquiv ρ₁ ι₁ → StrRelMatchesEquiv ρ₂ ι₂
  → StrRelMatchesEquiv (ProductRelStr ρ₁ ρ₂) (ProductEquivStr ι₁ ι₂)
productRelMatchesEquiv ρ₁ ρ₂ μ₁ μ₂ A B e =
  Σ-cong-equiv (μ₁ _ _ e) (λ _ → μ₂ _ _ e)
-- Functions act on a product structure componentwise; the action laws
-- (identity, relation preservation) are inherited from α₁ and α₂.
productRelAction :
  {S₁ : Type ℓ → Type ℓ₁} {ρ₁ : StrRel S₁ ℓ₁'} (α₁ : StrRelAction ρ₁)
  {S₂ : Type ℓ → Type ℓ₂} {ρ₂ : StrRel S₂ ℓ₂'} (α₂ : StrRelAction ρ₂)
  → StrRelAction (ProductRelStr ρ₁ ρ₂)
productRelAction α₁ α₂ .actStr f (s₁ , s₂) = α₁ .actStr f s₁ , α₂ .actStr f s₂
productRelAction α₁ α₂ .actStrId (s₁ , s₂) = ΣPathP (α₁ .actStrId s₁ , α₂ .actStrId s₂)
productRelAction α₁ α₂ .actRel h _ _ (r₁ , r₂) = α₁ .actRel h _ _ r₁ , α₂ .actRel h _ _ r₂
-- Positivity of the product relation. The interesting field is .quo:
-- we show that quotienting a product structure by the product relation
-- agrees with the product of the two quotients, by exhibiting an explicit
-- isomorphism (fwd/bwd below) and transporting equivalence along it.
productPositiveRel :
  {S₁ : Type ℓ → Type ℓ₁} {ρ₁ : StrRel S₁ ℓ₁'} {θ₁ : SuitableStrRel S₁ ρ₁}
  {S₂ : Type ℓ → Type ℓ₂} {ρ₂ : StrRel S₂ ℓ₂'} {θ₂ : SuitableStrRel S₂ ρ₂}
  → PositiveStrRel θ₁
  → PositiveStrRel θ₂
  → PositiveStrRel (productSuitableRel θ₁ θ₂)
productPositiveRel σ₁ σ₂ .act = productRelAction (σ₁ .act) (σ₂ .act)
productPositiveRel σ₁ σ₂ .reflexive (s₁ , s₂) = σ₁ .reflexive s₁ , σ₂ .reflexive s₂
-- Detransitivity: split each component's composite witness (via propositional
-- truncation recursion) and re-pair the middle points.
productPositiveRel σ₁ σ₂ .detransitive R R' (rr'₁ , rr'₂) =
  Trunc.rec squash
    (λ {(s₁ , r₁ , r₁') →
      Trunc.rec squash
        (λ {(s₂ , r₂ , r₂') → ∣ (s₁ , s₂) , (r₁ , r₂) , (r₁' , r₂') ∣})
        (σ₂ .detransitive R R' rr'₂)})
    (σ₁ .detransitive R R' rr'₁)
productPositiveRel {S₁ = S₁} {ρ₁} {θ₁} {S₂} {ρ₂} {θ₂} σ₁ σ₂ .quo {X} R =
  subst isEquiv
    (funExt (elimProp (λ _ → productSuitableRel θ₁ θ₂ .set squash/ _ _) (λ _ → refl)))
    (compEquiv
      (isoToEquiv isom)
      (Σ-cong-equiv (_ , σ₁ .quo R) (λ _ → _ , σ₂ .quo R)) .snd)
  where
  -- Forward map: a quotient of pairs to a pair of quotients.
  fwd :
    ProductStructure S₁ S₂ X / ProductRelStr ρ₁ ρ₂ (R .fst .fst)
    → (S₁ X / ρ₁ (R .fst .fst)) × (S₂ X / ρ₂ (R .fst .fst))
  fwd [ s₁ , s₂ ] = [ s₁ ] , [ s₂ ]
  fwd (eq/ (s₁ , s₂) (t₁ , t₂) (r₁ , r₂) i) = eq/ s₁ t₁ r₁ i , eq/ s₂ t₂ r₂ i
  fwd (squash/ _ _ p q i j) =
    isSet× squash/ squash/ _ _ (cong fwd p) (cong fwd q) i j
  -- Backward map, curried: first eliminate the second quotient (bwd[]),
  -- then the first (bwd); reflexivity (posRelReflexive) fills the missing
  -- witness when only one component is an eq/ path.
  bwd[] : S₁ X → S₂ X / ρ₂ (R .fst .fst)
    → ProductStructure S₁ S₂ X / ProductRelStr ρ₁ ρ₂ (R .fst .fst)
  bwd[] s₁ [ s₂ ] = [ s₁ , s₂ ]
  bwd[] s₁ (eq/ s₂ t₂ r₂ i) =
    eq/ (s₁ , s₂) (s₁ , t₂) (posRelReflexive σ₁ R s₁ , r₂) i
  bwd[] s₁ (squash/ _ _ p q i j) =
    squash/ _ _ (λ j → bwd[] s₁ (p j)) (λ j → bwd[] s₁ (q j)) i j
  bwd : S₁ X / ρ₁ (R .fst .fst) → S₂ X / ρ₂ (R .fst .fst)
    → ProductStructure S₁ S₂ X / ProductRelStr ρ₁ ρ₂ (R .fst .fst)
  bwd [ s₁ ] u = bwd[] s₁ u
  bwd (eq/ s₁ t₁ r₁ i) u = path u i
    where
    path : ∀ u → bwd [ s₁ ] u ≡ bwd [ t₁ ] u
    path = elimProp (λ _ → squash/ _ _) (λ s₂ → eq/ (s₁ , s₂) (t₁ , s₂) (r₁ , posRelReflexive σ₂ R s₂))
  bwd (squash/ _ _ p q i j) =
    isSetΠ (λ _ → squash/) _ _ (cong bwd p) (cong bwd q) i j
  open Iso
  -- The two maps form an isomorphism; inverses are proved by quotient
  -- elimination into propositions.
  isom : Iso _ _
  isom .fun = fwd
  isom .inv = uncurry bwd
  isom .rightInv =
    uncurry
      (elimProp (λ _ → isPropΠ λ _ → isSet× squash/ squash/ _ _)
        (λ _ → elimProp (λ _ → isSet× squash/ squash/ _ _) (λ _ → refl)))
  isom .leftInv = elimProp (λ _ → squash/ _ _) (λ _ → refl)
-- The product relation also matches the transport-style equivalence induced
-- by the product of two equivalence actions, composing the componentwise
-- matches with the characterization of paths in a Σ-type.
productRelMatchesTransp :
  {S₁ : Type ℓ → Type ℓ₁} (ρ₁ : StrRel S₁ ℓ₁') (α₁ : EquivAction S₁)
  {S₂ : Type ℓ → Type ℓ₂} (ρ₂ : StrRel S₂ ℓ₂') (α₂ : EquivAction S₂)
  → StrRelMatchesEquiv ρ₁ (EquivAction→StrEquiv α₁)
  → StrRelMatchesEquiv ρ₂ (EquivAction→StrEquiv α₂)
  → StrRelMatchesEquiv (ProductRelStr ρ₁ ρ₂) (EquivAction→StrEquiv (productEquivAction α₁ α₂))
productRelMatchesTransp _ _ _ _ μ₁ μ₂ _ _ e =
  compEquiv (Σ-cong-equiv (μ₁ _ _ e) (λ _ → μ₂ _ _ e)) ΣPath≃PathΣ
|
{"hexsha": "df3b2ba8abfb7618fd1ae36a0aa713e382e997a1", "size": 5998, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Cubical/Structures/Relational/Product.agda", "max_stars_repo_name": "dan-iel-lee/cubical", "max_stars_repo_head_hexsha": "fd8059ec3eed03f8280b4233753d00ad123ffce8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Cubical/Structures/Relational/Product.agda", "max_issues_repo_name": "dan-iel-lee/cubical", "max_issues_repo_head_hexsha": "fd8059ec3eed03f8280b4233753d00ad123ffce8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-27T02:07:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T02:07:48.000Z", "max_forks_repo_path": "Cubical/Structures/Relational/Product.agda", "max_forks_repo_name": "dan-iel-lee/cubical", "max_forks_repo_head_hexsha": "fd8059ec3eed03f8280b4233753d00ad123ffce8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5390070922, "max_line_length": 103, "alphanum_fraction": 0.6248749583, "num_tokens": 2710}
|
'''
The main run file for training TM-Glow for both the backwards step
and cylinder array test cases which can be controlled through the
arguments.
=====
Distributed by: Notre Dame SCAI Lab (MIT Liscense)
- Associated publication:
url: http://aimsciences.org//article/id/3a9f3d14-3421-4947-a45f-a9cc74edd097
doi: https://dx.doi.org/10.3934/fods.2020019
github: https://github.com/zabaras/deep-turbulence
=====
'''
from args import Parser
from nn.tmGlow import TMGlow
from nn.trainFlowParallel import TrainFlow
from utils.dataLoader import DataLoaderAuto
from utils.utils import saveWorkspace, loadWorkspace
from utils.log import Log
from utils.parallel import DataParallelINNModel
from torch.optim.lr_scheduler import ExponentialLR, CosineAnnealingLR, CyclicLR
import torch
import numpy as np
import os, sys
if __name__ == '__main__':
    # Parse command-line arguments controlling model size, training schedule,
    # checkpointing, and parallelism.
    args = Parser().parse()

    # Resume from a saved workspace when a starting epoch is given; this also
    # restores the argument namespace and the model/optimizer state dicts.
    if(args.epoch_start > 0):
        print('Looking to load workspace {:d}.'.format(args.epoch_start))
        args, model_state_dict, optimizer_state_dict = loadWorkspace(args, args.ckpt_dir, file_id=args.epoch_start)

    log = Log(args, record=True)

    # Set up PyTorch devices; multi-GPU data parallelism is used only when
    # more than one GPU is available AND --parallel was requested.
    args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    log.info("Torch device:{}".format(args.device))
    if(torch.cuda.device_count() > 1 and args.parallel == True):
        # Clamp the requested GPU count to what is actually available.
        if(torch.cuda.device_count() < args.n_gpu):
            args.n_gpu = torch.cuda.device_count()
        if(args.n_gpu < 1):
            args.n_gpu = torch.cuda.device_count()
        log.info("Looks like we have {:d} GPUs to use. Going parallel.".format(args.n_gpu))
        args.device_ids = [i for i in range(0, args.n_gpu)]
        args.src_device = "cuda:{}".format(args.device_ids[0])
    else:
        log.info("Using a single GPU for training.")
        args.device_ids = [0]
        # NOTE(review): src_device is "cuda:0" even on a CPU-only machine;
        # the .to(args.src_device) below would then fail — confirm intended.
        args.src_device = "cuda:{}".format(args.device_ids[0])
        # BUG FIX: the original read `args.parallel == False`, a no-op
        # comparison; an assignment is clearly intended so downstream code
        # sees that parallel training is disabled.
        args.parallel = False
        args.n_gpu = 1

    # Construct the model
    scheduler = None
    model = TMGlow(in_features=args.nic,
                   out_features=args.noc,
                   enc_blocks=args.enc_blocks,
                   glow_blocks=args.glow_blocks,
                   cond_features=args.cond_features,
                   cglow_upscale=args.glow_upscale,
                   growth_rate=args.growth_rate,
                   init_features=args.init_features,
                   rec_features=args.rec_features,
                   bn_size=args.bn_size,
                   drop_rate=args.drop_rate,
                   bottleneck=False).to(args.src_device)

    # Wrap model module with parallel GPU support.
    # Can also handle a single GPU as well.
    model = DataParallelINNModel(model, args.device_ids)

    # Create optimizer and scheduler; the initial LR is pre-decayed by
    # 0.995**epoch_start so a resumed run continues the same LR schedule.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr * (0.995 ** args.epoch_start), weight_decay=1e-8, amsgrad=True)
    scheduler = ExponentialLR(optimizer, gamma=0.995)
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 13, 2)

    # Restore weights/optimizer state when resuming (state dicts were loaded above).
    if(args.epoch_start > 0):
        model.module.load_state_dict(model_state_dict)
        optimizer.load_state_dict(optimizer_state_dict)

    data_loader, training_loader, testing_loader = DataLoaderAuto.init_data_loaders(args, model, log)
    modelTrainer = TrainFlow(args, model, training_loader, testing_loader, log=log)

    # ========== Epoch loop ============
    log.log('Training network, lets rock.')
    for epoch in range(args.epoch_start + 1, args.epochs + 1):
        # Time-step size to take (grows with epoch; logged for reference only —
        # the trainer does not receive it here).
        tstep = (int(epoch / 2) + 10)
        # tstep = (int(epoch/10)+2)
        log.log('tsteps: {}'.format(tstep))

        loss = modelTrainer.trainParallel(model, optimizer, epoch=epoch)
        log.log('Epoch {:d}: Sample Training Loss: {}'.format(epoch, loss))

        # Decay the learning rate once per epoch and report it.
        if(not scheduler is None):
            scheduler.step()
            for param_group in optimizer.param_groups:
                log.log('Learning-rate: {:0.05f}'.format(param_group['lr']))

        # Periodic evaluation on the test set (no gradients needed).
        if(epoch % args.test_freq == 0):
            log.log('Testing Model')
            with torch.no_grad():
                loss = modelTrainer.test(model, samples=2, epoch=epoch)
            log.log('Epoch {:d}: Testing Loss: {}'.format(epoch, loss))

        # Periodic checkpointing of the base (unwrapped) model + optimizer.
        if(epoch % args.ckpt_freq == 0):
            file_dir = args.ckpt_dir
            # If director does not exist create it
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
            log.log('Epoch {}, Saving network!'.format(epoch))
            # Note, we save the base model created on the source device
            saveWorkspace(args, model.module, optimizer, file_id=epoch)
|
{"hexsha": "6d0a76afb1c6adc2764513f6a12703c3ba0e4839", "size": 4948, "ext": "py", "lang": "Python", "max_stars_repo_path": "tmglow/main.py", "max_stars_repo_name": "zabaras/deep-turbulence", "max_stars_repo_head_hexsha": "0daca5daada449d4ba16bce37b703e20b444b6bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-12-01T14:58:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T07:40:10.000Z", "max_issues_repo_path": "tmglow/main.py", "max_issues_repo_name": "zabaras/deep-turbulence", "max_issues_repo_head_hexsha": "0daca5daada449d4ba16bce37b703e20b444b6bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-05T14:29:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T15:57:40.000Z", "max_forks_repo_path": "tmglow/main.py", "max_forks_repo_name": "zabaras/deep-turbulence", "max_forks_repo_head_hexsha": "0daca5daada449d4ba16bce37b703e20b444b6bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-09-04T06:11:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-05T10:47:16.000Z", "avg_line_length": 40.5573770492, "max_line_length": 123, "alphanum_fraction": 0.6521827001, "include": true, "reason": "import numpy", "num_tokens": 1172}
|
[STATEMENT]
lemma node_step_no_change_on_send_or_receive:
assumes "((\<sigma>, NodeS i P R), a, (\<sigma>', NodeS i' P' R')) \<in> onode_sos
(oparp_sos i (oseqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V i) (seqp_sos \<Gamma>\<^sub>Q\<^sub>M\<^sub>S\<^sub>G))"
and "a \<noteq> \<tau>"
shows "\<sigma>' i = \<sigma> i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<sigma>' i = \<sigma> i
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
((\<sigma>, NodeS i P R), a, \<sigma>', NodeS i' P' R') \<in> onode_sos (oparp_sos i (oseqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V i) (seqp_sos \<Gamma>\<^sub>Q\<^sub>M\<^sub>S\<^sub>G))
a \<noteq> \<tau>
goal (1 subgoal):
1. \<sigma>' i = \<sigma> i
[PROOF STEP]
by (cases a) (auto elim!: par_step_no_change_on_send_or_receive)
|
{"llama_tokens": 369, "file": "AODV_variants_b_fwdrreps_B_Aodv_Loop_Freedom", "length": 2}
|
import numpy as np
class NeuralNetwork:
    """Minimal fully-connected feed-forward network with sigmoid activations.

    Weights are initialized from a scaled standard normal (divided by
    sqrt(fan-in)); biases start at zero.
    """

    def __init__(self, layer_sizes):
        """Build weight matrices and bias vectors for consecutive layer pairs.

        layer_sizes: sequence of layer widths, e.g. [784, 16, 10].
        """
        # Each weight matrix maps layer k (columns) to layer k+1 (rows).
        weight_shapes = [(a, b) for a, b in zip(layer_sizes[1:], layer_sizes[:-1])]
        # Scale by 1/sqrt(fan-in) to keep initial activations well-conditioned.
        self.weights = [np.random.standard_normal(s) / s[1]**.5 for s in weight_shapes]
        self.biases = [np.zeros((s, 1)) for s in layer_sizes[1:]]

    def predict(self, a):
        """Forward-propagate input column vector(s) `a` through all layers."""
        for w, b in zip(self.weights, self.biases):
            a = self.activation(np.matmul(w, a) + b)
        return a

    def print_accuracy(self, images, labels):
        """Print classification accuracy of the network over (images, labels).

        An image counts as correct when the argmax of its prediction matches
        the argmax of its one-hot label.
        """
        predictions = self.predict(images)
        # BUG FIX: the original wrote zip(predictions.labels) — an attribute
        # access typo that raises AttributeError on an ndarray. The intent is
        # to pair each prediction with its label.
        num_correct = sum([np.argmax(a) == np.argmax(b) for a, b in zip(predictions, labels)])
        print('{0}/{1} accuracy {2}%'.format(num_correct, len(images), (num_correct / len(images)) * 100))

    @staticmethod
    def activation(x):
        """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
        return 1 / (1 + np.exp(-x))
|
{"hexsha": "a121bc9107e71bffcdaf63acf122534f52e94d2b", "size": 764, "ext": "py", "lang": "Python", "max_stars_repo_path": "NeuralNetwork.py", "max_stars_repo_name": "AminAbdelmlak/Number-Guesser-NeuralNet", "max_stars_repo_head_hexsha": "fae1bf4a9871ae8501399d7424705206564543f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NeuralNetwork.py", "max_issues_repo_name": "AminAbdelmlak/Number-Guesser-NeuralNet", "max_issues_repo_head_hexsha": "fae1bf4a9871ae8501399d7424705206564543f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NeuralNetwork.py", "max_forks_repo_name": "AminAbdelmlak/Number-Guesser-NeuralNet", "max_forks_repo_head_hexsha": "fae1bf4a9871ae8501399d7424705206564543f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8333333333, "max_line_length": 96, "alphanum_fraction": 0.6623036649, "include": true, "reason": "import numpy", "num_tokens": 214}
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2020 The PIVX developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "base58.h"
#include "clientversion.h"
#include "httpserver.h"
#include "init.h"
#include "multimining/multimining_programs.h"
#include "multimining/multimining.h"
#include "invalid.h"
#include "net.h"
#include "netbase.h"
#include "rpc/server.h"
#include "timedata.h"
#include "util.h"
#ifdef ENABLE_WALLET
#include "wallet/wallet.h"
#include "wallet/walletdb.h"
#endif
#include <stdint.h>
#include <boost/assign/list_of.hpp>
#include <univalue.h>
extern std::vector<CSporkDef> sporkDefs;
/**
* @note Do not add or change anything in the information returned by this
* method. `getinfo` exists for backwards-compatibility only. It combines
* information from wildly different sources in the program, which is a mess,
* and is thus planned to be deprecated eventually.
*
* Based on the source of the information, new information should be added to:
* - `getblockchaininfo`,
* - `getnetworkinfo` or
* - `getwalletinfo`
*
* Or alternatively, create a specific query method for the information.
**/
// RPC: getassettarifs
// With one param "show": returns every multimining program with its status,
// balance thresholds, and daily percentages (network-active values when a
// CMultiMiningProgramMessage exists, otherwise the compiled-in defaults).
// With three params (<program name> <field> <value>): updates one field of a
// program and broadcasts the change via mmProgramManager.
// NOTE(review): the help text below still talks about "sporks" — it appears
// to have been copied from the spork RPC; confirm whether it should be
// reworded for multimining programs (string content left unchanged here).
UniValue getassettarifs(const JSONRPCRequest& request)
{
    if (request.params.size() == 1 && request.params[0].get_str() == "show") {
        UniValue ret(UniValue::VOBJ);
        // One sub-object per known program definition.
        for (const auto& programDef : mmProgramDefs) {
            UniValue program(UniValue::VOBJ);
            CMultiMiningProgramMessage activeProgram;
            if (mmProgramManager.GetProgram(programDef.programId, activeProgram)) {
                // A network message overrides the static defaults.
                program.pushKV("status", activeProgram.nActive > 0 ? "Active" : "Inactive");
                program.pushKV(MIN_WALLET_BALANCE_FIELD, activeProgram.minWalletBalance / COIN);
                program.pushKV(MIN_STRUCTURE_BALANCE_FIELD, activeProgram.minStructureBalance / COIN);
                // Percentages are stored in hundredths; render as "ddd.dd".
                program.pushKV(DAILY_STANDALONE_PERCENT_FIELD, strprintf("%03d.%02d", activeProgram.dailyPercentStandalone / 100, activeProgram.dailyPercentStandalone % 100));
                program.pushKV(DAILY_STRUCTURE_PERCENT_FIELD, strprintf("%03d.%02d", activeProgram.dailyPercentStructure / 100, activeProgram.dailyPercentStructure % 100));
            } else {
                // No active network message: report the compiled-in defaults.
                program.pushKV("status", programDef.active > 0 ? "Active" : "Inactive");
                program.pushKV(MIN_WALLET_BALANCE_FIELD, programDef.minWalletBalance / COIN);
                program.pushKV(MIN_STRUCTURE_BALANCE_FIELD, programDef.minStructureBalance / COIN);
                program.pushKV(DAILY_STANDALONE_PERCENT_FIELD, strprintf("%03d.%02d", programDef.dailyPercentStandalone / 100, programDef.dailyPercentStandalone % 100));
                program.pushKV(DAILY_STRUCTURE_PERCENT_FIELD, strprintf("%03d.%02d", programDef.dailyPercentStructure / 100, programDef.dailyPercentStructure % 100));
            }
            ret.pushKV(programDef.name, program);
        }
        return ret;
    } else if (request.params.size() == 3) {
        // advanced mode, update spork values
        multiminingProgramId nProgramId = mmProgramManager.GetProgramIDByName(request.params[0].get_str());
        if (nProgramId == MULTIMINING_PROGRAM_INVALID) {
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid program name");
        }
        std::string field = request.params[1].get_str();
        bool status = false;
        uint16_t percent = 0;
        int64_t balance = 0;
        // Parse the value according to which field is being set; only one of
        // status/percent/balance is meaningful per call.
        if (field == MIN_WALLET_BALANCE_FIELD || field == MIN_STRUCTURE_BALANCE_FIELD) {
            balance = request.params[2].get_int64() * COIN;
        } else if (field == DAILY_STANDALONE_PERCENT_FIELD || field == DAILY_STRUCTURE_PERCENT_FIELD) {
            // Stored in hundredths of a percent.
            // NOTE(review): get_real()*100 is truncated into a uint16_t —
            // values above 655.35% would wrap; confirm upstream validation.
            percent = request.params[2].get_real() * 100;
        } else if (field == "status") {
            status = request.params[2].get_int();
        } else {
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid field name");
        }
        //broadcast new spork
        if (mmProgramManager.UpdateProgram(nProgramId, field, status, percent, balance)) {
            return "success";
        } else {
            return "failure";
        }
    }
    // Any other parameter combination: emit usage help.
    throw std::runtime_error(
        "getassettarifs \"name\" ( value )\n"
        "\nReturn spork values or their active state.\n"
        "\nArguments:\n"
        "1. \"name\"        (string, required)  \"show\" to show values, \"active\" to show active state.\n"
        "                   When set up as a spork signer, the name of the spork can be used to update it's value.\n"
        "2. value         (numeric, required when updating a spork)  The new value for the spork.\n"
        "\nResult (show):\n"
        "{\n"
        "  \"spork_name\": nnn      (key/value) Key is the spork name, value is it's current value.\n"
        "  ,...\n"
        "}\n"
        "\nResult (active):\n"
        "{\n"
        "  \"spork_name\": true|false      (key/value) Key is the spork name, value is a boolean for it's active state.\n"
        "  ,...\n"
        "}\n"
        "\nResult (name):\n"
        " \"success|failure\"       (string) Whether or not the update succeeded.\n"
        "\nExamples:\n" +
        HelpExampleCli("getassettarifs", "show") + HelpExampleRpc("getassettarifs", "show"));
}
/**
 * RPC: getassetstructure [hash]
 *
 * Reports the multimining state of one wallet node: its own balances and
 * accrued assets, its direct children, and its active delegations.
 * With no argument, uses this node's own key (or the genesis key on
 * multi-wallet nodes).
 */
UniValue getassetstructure(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() > 1)
        throw std::runtime_error(
            "getassetstructure\n"
            "\nReturns an object containing various state info.\n"
            "\nExamples:\n" +
            HelpExampleCli("getassetstructure", "") + HelpExampleRpc("getassetstructure", ""));
    if (!fMultiMiningIsInit) throw JSONRPCError(RPC_MISC_ERROR, "Multimining currently not available");
    // Resolve which wallet hash to report on: explicit parameter, this
    // node's own key, or (for multi-wallet nodes) the genesis key.
    CKeyID hash;
    CKeyID parentHash;
    UniValue ret(UniValue::VOBJ);
    UniValue childs(UniValue::VOBJ);
    UniValue delegations(UniValue::VARR);
    if (request.params.size() > 0) {
        hash.SetHex(request.params[0].get_str());
        parentHash = multiMiningManager.GetNodeParentKey(hash);
    } else if (!fMultiWalletsNode) {
        hash = multiMiningManager.getKeyId();
        parentHash = multiMiningManager.GetNodeParentKey(hash);
    } else if (request.params.size() == 0) {
        // NOTE(review): this branch never sets parentHash, so the null check
        // below rejects the genesis fallback — confirm whether genesis should
        // be exempt from the parentHash requirement.
        hash = multiMiningManager.getGenesisKeyId();
    }
    if (hash.IsNull() || parentHash.IsNull()) throw JSONRPCError(RPC_MISC_ERROR, "Must be set hash");
    if (invalid_out::ContainsAssetKey(hash)) throw JSONRPCError(RPC_MISC_ERROR, "Hash not found");
    bool isGenesisHash = false;
    if (hash == multiMiningManager.getGenesisKeyId()) isGenesisHash = true;
    ret.pushKV("walletIdentHash", hash.GetHex());
    ret.pushKV("parentWalletIdentHash", parentHash.GetHex());
    // Summary for the requested node itself.
    double balance = 0;
    double structureBalance = 0;
    double currentAssets = 0;
    std::string programName = "0.00"; // NOTE(review): computed below but never reported — confirm intent
    double timePeriod = 0;
    double currentPercent = 0;
    uint64_t lastPaymentTime = 0;
    CAssetNode *node = multiMiningManager.GetNode(hash);
    uint8_t start = 0;
    if (node != nullptr && !node->getKeyId().IsNull())
    {
        balance = node->totalBalance;
        structureBalance = multiMiningTree.GetStructureNodeBalance(node, start);
        lastPaymentTime = node->GetUpdateTime();
        timePeriod = GetAdjustedTime() - lastPaymentTime;
        currentPercent = multiMiningManager.GetOwnPercentProgram(hash);
        // Accrued profit since the last payment; 864000000 presumably encodes
        // (seconds per day) * (percent scale) — TODO confirm.
        double asset = balance * currentPercent * timePeriod / 864000000;
        currentAssets = asset + node->profitBeforePercentDowngrade;
        programName = mmProgramManager.GetProgramNameByID(node->currentProgramId);
    }
    if (isGenesisHash) structureBalance = multiMiningTree.GetStructureNodeBalance(hash, start);
    ret.pushKV("walletBalance", FormatMoney(balance));
    ret.pushKV("structureBalance", FormatMoney(structureBalance));
    ret.pushKV("currentAssets", FormatMoney(currentAssets));
    ret.pushKV("percent", currentPercent / 100.0);
    ret.pushKV("lastPaymentTime", lastPaymentTime);
    // Direct children of this node, keyed by their wallet hash.
    std::vector<CAssetNode*> vNodes = multiMiningTree.GetNodeStructure(hash);
    if (vNodes.size() > 0) {
        for (CAssetNode* cnode : vNodes)
        {
            UniValue child(UniValue::VOBJ);
            child.pushKV("walletBalance", FormatMoney(cnode->totalBalance));
            child.pushKV("structureBalance", FormatMoney(multiMiningTree.GetStructureNodeBalance(cnode, 0)));
            lastPaymentTime = cnode->GetUpdateTime();
            // NOTE(review): uses GetTime() here but GetAdjustedTime() above — confirm.
            timePeriod = GetTime() - lastPaymentTime;
            currentPercent = multiMiningManager.GetOwnPercentProgram(cnode->getKeyId());
            double asset = cnode->totalBalance * currentPercent * timePeriod / 864000000;
            currentAssets = asset + cnode->profitBeforePercentDowngrade;
            child.pushKV("currentAssets", FormatMoney(currentAssets));
            child.pushKV("percent", currentPercent / 100.0);
            child.pushKV("lastPaymentTime", lastPaymentTime);
            childs.pushKV(cnode->getKeyId().GetHex(), child);
        }
    }
    ret.pushKV("childs", childs);
    // Active delegations of this node.
    // Fix: `node` may be nullptr (GetNode can fail; it was only checked inside
    // the block above) — dereferencing it unconditionally crashed for hashes
    // with no node entry.
    if (node != nullptr && node->delegations.size() > 0) {
        for (CAssetDelegation* d : node->delegations)
        {
            if (d->isActivated() == false || d->getAmount() == 0 || d->getPercent() == 0) continue;
            UniValue delegation(UniValue::VOBJ);
            CAddressOwnerIdentificator owner;
            if (!GetAddressIdentification(d->getOwnerKey(), CChainParams::PUBKEY_ADDRESS, owner)) continue;
            delegation.pushKV("staker", d->getStakerKey().GetHex());
            delegation.pushKV("delegator", d->getOwnerKey().GetHex());
            delegation.pushKV("owner", owner.hashBytes.GetHex());
            delegation.pushKV("amount", FormatMoney(d->getAmount()));
            delegation.pushKV("timestamp", d->GetUpdateTime());
            double currentPercent = d->getPercent();
            double currentBalance = d->getAmount();
            double timePeriod = GetAdjustedTime() - d->GetUpdateTime();
            double total = (currentBalance * currentPercent * timePeriod / 864000000) + d->getNHalfProfit();
            delegation.pushKV("profit", FormatMoney(total));
            // Fix: divide as double (matching the "percent" field above);
            // `d->getPercent() / 100` truncated when getPercent() returns an
            // integer type.
            delegation.pushKV("percent", currentPercent / 100.0);
            delegations.push_back(delegation);
        }
    }
    ret.pushKV("delegations", delegations);
    return ret;
}
// Dispatch table mapping RPC method names to their handlers; wired into the
// RPC server by RegisterMultiMiningRPCCommands() below. The final column
// marks whether the call is allowed in safe mode.
static const CRPCCommand commands[] =
{ //  category              name                      actor (function)         okSafeMode
  //  --------------------- ------------------------  ----------------------- ----------
    { "multimining",         "getassettarifs",          &getassettarifs,         true  },
    { "multimining",         "getassetstructure",       &getassetstructure,      true  },
    /* Not shown in help */
};
/** Append every multimining RPC command from the dispatch table above to the
 *  server's command registry. */
void RegisterMultiMiningRPCCommands(CRPCTable &tableRPC)
{
    for (const CRPCCommand& command : commands)
        tableRPC.appendCommand(command.name, &command);
}
|
{"hexsha": "789a5c868f0c82ad55734db4fc8e749f7fc46f79", "size": 10996, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/rpc/multimining.cpp", "max_stars_repo_name": "QRAX-LABS/QRAX", "max_stars_repo_head_hexsha": "951ed45d473b7ab8c74bf35ff794e97169736d0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-12-29T14:10:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T09:24:37.000Z", "max_issues_repo_path": "src/rpc/multimining.cpp", "max_issues_repo_name": "QRAX-LABS/QRAX", "max_issues_repo_head_hexsha": "951ed45d473b7ab8c74bf35ff794e97169736d0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rpc/multimining.cpp", "max_forks_repo_name": "QRAX-LABS/QRAX", "max_forks_repo_head_hexsha": "951ed45d473b7ab8c74bf35ff794e97169736d0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9694656489, "max_line_length": 176, "alphanum_fraction": 0.6564205166, "num_tokens": 2703}
|
import numpy as np
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input
import os
import keras
import sys
from datautils import get_data,get_model,data_proprecessing
def cos_distribution(cos_array):
    """Histogram cosine-distance values into ten equal-width buckets.

    Each value in ``cos_array`` lying in [0, 1] is counted in bucket
    k = [k/10, (k+1)/10); the last bucket additionally includes 1.0.
    Values outside [0, 1] (and NaNs) are ignored, matching the original
    chained-comparison behavior.

    Returns a list of ten integer counts.
    """
    from bisect import bisect_right  # local import keeps the module header untouched
    # Bucket upper edges; a value equal to an edge lands in the bucket above
    # it (bisect_right), exactly like the original `>=` boundary checks.
    edges = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    cos_distribute = [0] * 10
    for value in cos_array:
        if 0 <= value <= 1.0:
            cos_distribute[bisect_right(edges, value)] += 1
    return cos_distribute
# Command-line configuration (module-level side effects):
#   exp_id : experiment/dataset identifier used to locate model and data files
#   ptype  : perturbation-type tag, used only to name the output files
exp_id = sys.argv[1]
ptype = sys.argv[2]
# Number of test samples; get_data(exp_id) presumably returns (X, Y) — TODO confirm
samples = len(get_data(exp_id)[0])
if __name__ == '__main__':
    # Pipeline: load the experiment's test set and model, obtain (or reuse a
    # cached copy of) the original model's probability vectors, then for each
    # sample compare them against the saved perturbed predictions and append
    # per-sample distance features to '<basedir>/<exp_id>_<ptype>_feature.txt'.
    #2
    #origin_model = get_model(exp_id)
    #X_test,_ = get_data(exp_id)
    #ori_prob = origin_model.predict(X_test)
    basedir = os.path.dirname(__file__)
    basedir = os.path.join(basedir, 'input')
    basedir = os.path.join(basedir, exp_id)
    # Cache file for the unperturbed model's predicted probability vectors.
    predicting_file_path = os.path.join(basedir, 'predict_probability_vector_'+str(exp_id)+'.npy')
    X_test,Y_test = get_data(exp_id)
    X_test = data_proprecessing(exp_id)(X_test)
    origin_model = get_model(exp_id)
    if not os.path.exists(predicting_file_path):
        a = origin_model.predict(X_test)
        # a = np.argmax(a, axis=1)
        np.save(predicting_file_path,a)
        ori_prob = a
    else:
        ori_prob = np.load(predicting_file_path)
    # ori_prob = np.load('predict_prob_resnet20_cifar10.npy')
    # ori_prob = np.load('origin_model_temp_result.npy')
    # Predicted class index per sample.
    result = np.argmax(ori_prob, axis=1)
    # np.save('vgg19_random_predict.npy',ori_prob)
    file_name = exp_id+'_'+ptype+'_feature'
    file_name = os.path.join(basedir, file_name)
    # Directory of per-sample perturbed prediction arrays ('<i>.npy').
    prob_path = exp_id+'_'+ptype+'_prob'
    prob_path = os.path.join(basedir,prob_path)
    for i in range(0,samples):
        a = ori_prob[i]
        max_value = np.max(a)        # confidence of the originally predicted class
        max_value_pos = np.argmax(a) # originally predicted class index
        file_path = os.path.join(prob_path,str(i)+'.npy')
        #if not os.path.exists(file_path):
        #continue
        perturbated_prediction = np.load(file_path)
        result_recording_file = open(file_name + '.txt', 'a+')
        # Distance accumulators between the original probability vector and
        # each perturbed prediction:
        euler = 0       # L2 (Euclidean) distance
        mahat = 0       # L1 (Manhattan) distance
        qube = 0        # L-infinity (Chebyshev) distance
        cos = 0         # cosine distance, clipped to [0, 1]
        difference = 0  # |p_orig - p_perturbed| on the originally predicted class
        different_class = []  # classes the perturbed runs flipped to
        cos_list = []
        for pp in perturbated_prediction:
            pro = pp
            opro = a
            # if np.argmax(ii) != result[i]:
            difference += abs(max_value - pp[max_value_pos])
            euler += np.linalg.norm(pro - opro)
            mahat += np.linalg.norm(pro - opro, ord=1)
            qube += np.linalg.norm(pro - opro, ord=np.inf)
            # 1 - cosine similarity, clipped into [0, 1].
            co = (1 - (np.dot(pro, opro.T) / (np.linalg.norm(pro) * (np.linalg.norm(opro)))))
            if co < 0:
                co = 0
            elif co > 1:
                co = 1
            cos += co
            cos_list.append(co)
            if np.argmax(pp) != max_value_pos:
                different_class.append(np.argmax(pp))
        cos_dis = cos_distribution(cos_list)
        # euler /= 256
        # mahat /= 256
        # qube /= 256
        # cos /= 256
        # Count distinct flipped-to classes and the size of the largest one.
        dic = {}
        for key in different_class:
            dic[key] = dic.get(key, 0) + 1
        wrong_class_num = len(dic)
        if len(dic)>0:
            max_class_num = max(dic.values())
        else :
            max_class_num = 0
        print('id:',i)
        print('euler:', euler)
        print('mahat:', mahat)
        print('qube:', qube)
        print('cos:', cos)
        print('difference:',difference)
        print('wnum:',wrong_class_num)
        print('num_mc:', max_class_num)
        print('fenbu:',cos_dis)
        # Append the same feature record to the output text file.
        result_recording_file.write('image_id:' + str(i))
        result_recording_file.write('\n')
        result_recording_file.write('euler:' + str(euler))
        result_recording_file.write('\n')
        result_recording_file.write('mahat:' + str(mahat))
        result_recording_file.write('\n')
        result_recording_file.write('qube:' + str(qube))
        result_recording_file.write('\n')
        result_recording_file.write('cos:' + str(cos))
        result_recording_file.write('\n')
        result_recording_file.write('difference:' + str(difference))
        result_recording_file.write('\n')
        result_recording_file.write('wnum:' + str(wrong_class_num))
        result_recording_file.write('\n')
        result_recording_file.write('num_mc:' + str(max_class_num))
        result_recording_file.write('\n')
        result_recording_file.write('fenbu:' + str(cos_dis))
        result_recording_file.write('\n')
        result_recording_file.close()
|
{"hexsha": "298fc4e70c0bacbe39bbe980e8359763db35b528", "size": 5242, "ext": "py", "lang": "Python", "max_stars_repo_path": "prioritzation/feature_extraction.py", "max_stars_repo_name": "sail-repos/PRIMA", "max_stars_repo_head_hexsha": "21993e34484a8659e5988d8037f4430839dd3eb3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-13T12:36:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-17T12:23:11.000Z", "max_issues_repo_path": "prioritzation/feature_extraction.py", "max_issues_repo_name": "EDA-Testing/PRIMA", "max_issues_repo_head_hexsha": "21993e34484a8659e5988d8037f4430839dd3eb3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-21T10:40:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:04:06.000Z", "max_forks_repo_path": "prioritzation/feature_extraction.py", "max_forks_repo_name": "EDA-Testing/PRIMA", "max_forks_repo_head_hexsha": "21993e34484a8659e5988d8037f4430839dd3eb3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-22T12:17:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T13:35:53.000Z", "avg_line_length": 37.4428571429, "max_line_length": 99, "alphanum_fraction": 0.5623807707, "include": true, "reason": "import numpy", "num_tokens": 1403}
|
# -*- coding: utf-8 -*-
import os
import sys
import math
sys.dont_write_bytecode = True
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.proto import caffe_pb2
sys.path.append('../')
from PyLib.LayerParam.MultiBoxLossLayerParam import *
from PyLib.NetLib.ConvBNLayer import *
from PyLib.NetLib.InceptionLayer import *
from PyLib.NetLib.MultiScaleLayer import *
from PyLib.NetLib.VggNet import VGG16_BaseNet_ChangeChannel
from PyLib.NetLib.YoloNet import YoloNetPart
from BaseNet import *
from DetectorHeader import *
from DAP_Param import *
import numpy as np
#############################################################################
def getDecovArgs(num_output,lr=1,decay=1):
    """Build keyword arguments for a 2x2, stride-2 Deconvolution layer.

    num_output -- number of output channels
    lr, decay  -- learning-rate and weight-decay multipliers for the weights
    """
    conv_param = dict(
        num_output=num_output,
        kernel_size=2,
        pad=0,
        stride=2,
        weight_filler=dict(type='xavier'),
        bias_term=True,
        group=1,
        bias_filler=dict(type='constant', value=0.0),
    )
    return {
        'param': [dict(lr_mult=lr, decay_mult=decay)],
        'convolution_param': conv_param,
    }
# ------------------------------------------------------------------------------
# Final Network
# Whether training includes the person class; not referenced in this file's
# visible code — presumably consumed by sibling build scripts, TODO confirm.
flag_train_withperson = True
def InceptionOfficialLayer(net, from_layer, out_layer, channels_1=1,channels_3=[],channels_5=[],channels_ave=1,inter_bn = True,leaky=False,lr=1,decay=1):
    """Append a GoogLeNet-style Inception block to `net`.

    Four parallel branches read `from_layer` and are concatenated on the
    channel axis: 1x1 conv; 1x1-reduce -> 3x3 conv; 1x1-reduce -> 5x5 conv;
    3x3 average-pool -> 1x1 conv. The concatenated output layer is named
    "<out_layer>/incep". Returns the mutated `net`.
    """
    def _branch_conv(bottom, top, nout, ksize, pad):
        # Every conv in this block shares the same BN/ReLU/scale/lr settings.
        ConvBNUnitLayer(net, bottom, top, use_bn=inter_bn, use_relu=True,
                        num_output=nout, kernel_size=ksize, pad=pad, stride=1,
                        use_scale=True, leaky=leaky, lr_mult=lr, decay_mult=decay)

    branches = []
    # Branch 1: plain 1x1 convolution.
    b1 = "{}/incep/1x1".format(out_layer)
    _branch_conv(from_layer, b1, channels_1, 1, 0)
    branches.append(net[b1])
    # Branch 2: 1x1 channel reduction followed by a 3x3 convolution.
    r3 = "{}/incep/1_reduce".format(out_layer)
    _branch_conv(from_layer, r3, channels_3[0], 1, 0)
    b3 = "{}/incep/3x3".format(out_layer)
    _branch_conv(r3, b3, channels_3[1], 3, 1)
    branches.append(net[b3])
    # Branch 3: 1x1 channel reduction followed by a 5x5 convolution.
    r5 = "{}/incep/2_reduce".format(out_layer)
    _branch_conv(from_layer, r5, channels_5[0], 1, 0)
    b5 = "{}/incep/5x5".format(out_layer)
    _branch_conv(r5, b5, channels_5[1], 5, 2)
    branches.append(net[b5])
    # Branch 4: 3x3 average pooling followed by a 1x1 convolution.
    pool = "{}/incep/pool".format(out_layer)
    net[pool] = L.Pooling(net[from_layer], pool=P.Pooling.AVE, kernel_size=3, stride=1, pad=1)
    p1 = "{}/incep/pool_1x1".format(out_layer)
    _branch_conv(pool, p1, channels_ave, 1, 0)
    branches.append(net[p1])
    # Concatenate all branches along the channel axis.
    name = "{}/incep".format(out_layer)
    net[name] = L.Concat(*branches, name=name, axis=1)
    return net
def FaceBoxFPNNet(net, train=True, data_layer="data", gt_label="label", \
        net_width=512, net_height=288):
    """Assemble the FaceBox-style detector with an FPN-like top-down path.

    net        -- caffe NetSpec being built (mutated in place and returned)
    train      -- True: attach the SSD loss layer; False: attach detection
                  output and evaluation layers instead
    data_layer -- name of the input blob
    gt_label   -- name of the ground-truth blob used by loss/eval layers
    net_width/net_height -- input resolution forwarded to the SSD headers

    Configuration dictionaries (`ssd_Param_2`, `eval_Param`) come from the
    project-level DAP_Param module.
    """
    flag_handusefpn = False
    lr = 0
    decay = 0
    use_bn = False
    from_layer = data_layer
    # Stem: three stride-2 3x3 convolutions (overall stride 8).
    num_channels = [32,64,128]
    k_sizes = [3,3,3]
    strides = [2,2,2]
    for i in xrange(len(num_channels)):
        add_layer = "conv{}".format(i+1)
        ConvBNUnitLayer(net, from_layer, add_layer, use_bn=use_bn, use_relu=True, leaky=False,
                    num_output=num_channels[i], kernel_size=k_sizes[i], pad=(k_sizes[i]-1)/2, stride=strides[i], use_scale=True,
                    n_group=1, lr_mult=lr, decay_mult=decay)
        from_layer = add_layer
        # if not i == len(num_channels) - 1:
        #     add_layer = "pool{}".format(i+1)
        #     net[add_layer] = L.Pooling(net[from_layer], pool=P.Pooling.MAX, kernel_size=3, stride=2, pad=0)
        #     from_layer = add_layer
    # Stage conv4: four Inception blocks at the same spatial resolution.
    layer_cnt = len(num_channels)
    num_channels = [192,192,192,192]
    divide_scale = 4
    f4_depth = len(num_channels)
    for i in xrange(len(num_channels)):
        n_chan = num_channels[i]
        add_layer = "conv{}_{}".format(layer_cnt+1,i + 1)
        net = InceptionOfficialLayer(net, from_layer, add_layer, channels_1=n_chan/divide_scale, channels_3=[n_chan/8, n_chan/4],
                                 channels_5=[n_chan/8, n_chan/4], channels_ave=n_chan/divide_scale, inter_bn=use_bn, leaky=False,
                                 lr=lr,decay=decay)
        from_layer = "conv{}_{}/incep".format(layer_cnt+1,i + 1)
    # Optional conv5 stack (currently disabled: flag_handusefpn is False).
    # NOTE(review): f5_depth is bound only inside this branch but is read
    # unconditionally below ('conv5_{}'.format(f5_depth)); with the flag
    # False this looks like a NameError at build time — confirm intent.
    if flag_handusefpn:
        layer_cnt += 1
        num_channels = [256,128,256,128,256]
        kernels = [3,1,3,1,3]
        strides = [2,1,1,1,1]
        f5_depth = len(num_channels)
        for i in xrange(len(num_channels)):
            add_layer = "conv{}_{}".format(layer_cnt+1,i + 1)
            ConvBNUnitLayer(net, from_layer, add_layer, use_bn=use_bn, use_relu=True, leaky=False,
                        num_output=num_channels[i], kernel_size=kernels[i], pad=kernels[i]/2, stride=strides[i],
                        use_scale=True, n_group=1, lr_mult=lr, decay_mult=decay)
            from_layer = add_layer
    # conv6 stack: stride-2 entry conv then four 3x3 convs at 128 channels.
    layer_cnt += 1
    num_channels = [128,128,128,128,128]
    kernels = [3,3,3,3,3]
    strides = [2,1,1,1,1]
    f6_depth = len(num_channels)
    for i in xrange(len(num_channels)):
        add_layer = "conv{}_{}".format(layer_cnt+1,i + 1)
        ConvBNUnitLayer(net, from_layer, add_layer, use_bn=use_bn, use_relu=True, leaky=False,
                    num_output=num_channels[i], kernel_size=kernels[i], pad=kernels[i]/2, stride=strides[i],
                    use_scale=True, n_group=1, lr_mult=lr, decay_mult=decay)
        from_layer = add_layer
    # ##########################################################################
    # Use FPN
    # Top-down path: each deeper feature is upsampled (deconv) and fused with
    # the laterally 1x1-projected shallower feature via elementwise sum.
    # f3 -> c6_4
    f3 = 'conv6_{}'.format(f6_depth)
    # f2: f3 -> deconv + c5_3 -> 1x1
    out_layer_1 = f3 + '_deconv'
    net[out_layer_1]=L.Deconvolution(net[f3],**(getDecovArgs(256,lr,decay)))
    f2 = 'conv5_{}'.format(f5_depth)
    out_layer_2 = f2 + '_1x1'
    ConvBNUnitLayer(net, f2, out_layer_2, use_bn=False, use_relu=False, num_output=256, kernel_size=1, pad=0, stride=1, lr_mult=lr,decay_mult=decay)
    out_layer = 'feat5'
    net[out_layer] = L.Eltwise(net[out_layer_2], net[out_layer_1], eltwise_param=dict(operation=P.Eltwise.SUM))
    net['feat5_relu'] = L.ReLU(net['feat5'], in_place=True)
    # f1: f2 -> deconv + c4_4 -> 1x1
    out_layer_1 = out_layer + '_deconv'
    net[out_layer_1]=L.Deconvolution(net['feat5_relu'],**(getDecovArgs(192,lr,decay)))
    f1 = 'conv4_{}/incep'.format(f4_depth)
    out_layer_2 = f1 + '_1x1'
    ConvBNUnitLayer(net, f1, out_layer_2, use_bn=False, use_relu=False, num_output=192, kernel_size=1, pad=0, stride=1, lr_mult=lr,decay_mult=decay)
    out_layer = 'feat4'
    net[out_layer] = L.Eltwise(net[out_layer_2], net[out_layer_1], eltwise_param=dict(operation=P.Eltwise.SUM))
    net['feat4_relu'] = L.ReLU(net['feat4'], in_place=True)
    # One extra 2x upsample of feat4 (64 channels) for the finest level.
    from_layer = "feat4"
    add_layer = from_layer + "_deconv"
    net[add_layer] = L.Deconvolution(net[from_layer], **(getDecovArgs(64)))
    from_layer = add_layer
    add_layer = from_layer + "_relu"
    net[add_layer] = L.ReLU(net[from_layer], in_place=True)
    print net.keys()
    # make Loss & Detout for SSD2
    # Build the SSD prediction headers (loc/conf/prior layers) on the feature
    # maps named in ssd_Param_2['feature_layers'].
    mbox_2_layers = SsdDetectorHeaders(net, \
         net_width=net_width, net_height=net_height, data_layer=data_layer, \
         from_layers=ssd_Param_2.get('feature_layers', []), \
         num_classes=ssd_Param_2.get("num_classes", 2), \
         boxsizes=ssd_Param_2.get("anchor_boxsizes", []), \
         aspect_ratios=ssd_Param_2.get("anchor_aspect_ratios", []), \
         prior_variance=ssd_Param_2.get("anchor_prior_variance", [0.1, 0.1, 0.2, 0.2]), \
         flip=ssd_Param_2.get("anchor_flip", True), \
         clip=ssd_Param_2.get("anchor_clip", True), \
         normalizations=ssd_Param_2.get("interlayers_normalizations", []), \
         use_batchnorm=ssd_Param_2.get("interlayers_use_batchnorm", True), \
         inter_layer_channels=ssd_Param_2.get("interlayers_channels_kernels", []), \
         use_focus_loss=ssd_Param_2.get("bboxloss_using_focus_loss", False), \
         use_dense_boxes=ssd_Param_2.get('bboxloss_use_dense_boxes', False), \
         stage=2)
    # make Loss or Detout for SSD1
    if train:
        # Training graph: attach a (Dense)BBoxLoss layer on top of the headers.
        loss_param = get_loss_param(normalization=ssd_Param_2.get("bboxloss_normalization", P.Loss.VALID))
        mbox_2_layers.append(net[gt_label])
        use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes', False)
        if use_dense_boxes:
            bboxloss_param = {
                'gt_labels': ssd_Param_2.get('gt_labels', []),
                'target_labels': ssd_Param_2.get('target_labels', []),
                'num_classes': ssd_Param_2.get("num_classes", 2),
                'alias_id': ssd_Param_2.get("alias_id", 0),
                'loc_loss_type': ssd_Param_2.get("bboxloss_loc_loss_type", P.MultiBoxLoss.SMOOTH_L1),
                'conf_loss_type': ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.LOGISTIC),
                'loc_weight': ssd_Param_2.get("bboxloss_loc_weight", 1),
                'conf_weight': ssd_Param_2.get("bboxloss_conf_weight", 1),
                'overlap_threshold': ssd_Param_2.get("bboxloss_overlap_threshold", 0.5),
                'neg_overlap': ssd_Param_2.get("bboxloss_neg_overlap", 0.5),
                'size_threshold': ssd_Param_2.get("bboxloss_size_threshold", 0.0001),
                'do_neg_mining': ssd_Param_2.get("bboxloss_do_neg_mining", True),
                'neg_pos_ratio': ssd_Param_2.get("bboxloss_neg_pos_ratio", 3),
                'using_focus_loss': ssd_Param_2.get("bboxloss_using_focus_loss", False),
                'gama': ssd_Param_2.get("bboxloss_focus_gama", 2),
                'use_difficult_gt': ssd_Param_2.get("bboxloss_use_difficult_gt", False),
                'code_type': ssd_Param_2.get("bboxloss_code_type", P.PriorBox.CENTER_SIZE),
                'use_prior_for_matching': True,
                'encode_variance_in_target': False,
                'flag_noperson': ssd_Param_2.get('flag_noperson', False),
                'size_threshold_max': ssd_Param_2.get("bboxloss_size_threshold_max", 2),
                'flag_showdebug': ssd_Param_2.get("flag_showdebug", False),
                'flag_forcematchallgt': ssd_Param_2.get("flag_forcematchallgt", False),
                'flag_areamaxcheckinmatch': ssd_Param_2.get("flag_areamaxcheckinmatch", False),
            }
            net["mbox_2_loss"] = L.DenseBBoxLoss(*mbox_2_layers, dense_bbox_loss_param=bboxloss_param, \
                                                 loss_param=loss_param,
                                                 include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                                                 propagate_down=[True, True, False, False])
        else:
            bboxloss_param = {
                'gt_labels': ssd_Param_2.get('gt_labels', []),
                'target_labels': ssd_Param_2.get('target_labels', []),
                'num_classes': ssd_Param_2.get("num_classes", 2),
                'alias_id': ssd_Param_2.get("alias_id", 0),
                'loc_loss_type': ssd_Param_2.get("bboxloss_loc_loss_type", P.MultiBoxLoss.SMOOTH_L1),
                'conf_loss_type': ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX),
                'loc_weight': ssd_Param_2.get("bboxloss_loc_weight", 1),
                'conf_weight': ssd_Param_2.get("bboxloss_conf_weight", 1),
                'overlap_threshold': ssd_Param_2.get("bboxloss_overlap_threshold", 0.5),
                'neg_overlap': ssd_Param_2.get("bboxloss_neg_overlap", 0.5),
                'size_threshold': ssd_Param_2.get("bboxloss_size_threshold", 0.0001),
                'do_neg_mining': ssd_Param_2.get("bboxloss_do_neg_mining", True),
                'neg_pos_ratio': ssd_Param_2.get("bboxloss_neg_pos_ratio", 3),
                'using_focus_loss': ssd_Param_2.get("bboxloss_using_focus_loss", False),
                'gama': ssd_Param_2.get("bboxloss_focus_gama", 2),
                'use_difficult_gt': ssd_Param_2.get("bboxloss_use_difficult_gt", False),
                'code_type': ssd_Param_2.get("bboxloss_code_type", P.PriorBox.CENTER_SIZE),
                'match_type': P.MultiBoxLoss.PER_PREDICTION,
                'share_location': True,
                'use_prior_for_matching': True,
                'background_label_id': 0,
                'encode_variance_in_target': False,
                'map_object_to_agnostic': False,
            }
            net["mbox_2_loss"] = L.BBoxLoss(*mbox_2_layers, bbox_loss_param=bboxloss_param, \
                                            loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                                            propagate_down=[True, True, False, False])
    else:
        # Inference graph: normalize confidences (softmax or sigmoid to match
        # the loss used in training), then attach the detection-output layer.
        if ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
            reshape_name = "mbox_2_conf_reshape"
            net[reshape_name] = L.Reshape(mbox_2_layers[1], \
                                          shape=dict(dim=[0, -1, ssd_Param_2.get("num_classes", 2)]))
            softmax_name = "mbox_2_conf_softmax"
            net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
            flatten_name = "mbox_2_conf_flatten"
            net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
            mbox_2_layers[1] = net[flatten_name]
        elif ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
            sigmoid_name = "mbox_2_conf_sigmoid"
            net[sigmoid_name] = L.Sigmoid(mbox_2_layers[1])
            mbox_2_layers[1] = net[sigmoid_name]
        else:
            raise ValueError("Unknown conf loss type.")
        # Det-out param
        det_out_param = {
            'num_classes': ssd_Param_2.get("num_classes", 2),
            'target_labels': ssd_Param_2.get('detout_target_labels', []),
            'alias_id': ssd_Param_2.get("alias_id", 0),
            'conf_threshold': ssd_Param_2.get("detout_conf_threshold", 0.01),
            'nms_threshold': ssd_Param_2.get("detout_nms_threshold", 0.45),
            'size_threshold': ssd_Param_2.get("detout_size_threshold", 0.0001),
            'top_k': ssd_Param_2.get("detout_top_k", 30),
            'share_location': True,
            'code_type': P.PriorBox.CENTER_SIZE,
            'background_label_id': 0,
            'variance_encoded_in_target': False,
        }
        use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes', False)
        if use_dense_boxes:
            net.detection_out_2 = L.DenseDetOut(*mbox_2_layers, \
                                                detection_output_param=det_out_param, \
                                                include=dict(phase=caffe_pb2.Phase.Value('TEST')))
        else:
            net.detection_out_2 = L.DetOut(*mbox_2_layers, \
                                           detection_output_param=det_out_param, \
                                           include=dict(phase=caffe_pb2.Phase.Value('TEST')))
        # EVAL in TEST MODE
        if not train:
            det_eval_param = {
                'gt_labels': eval_Param.get('eval_gt_labels', []),
                'num_classes': eval_Param.get("eval_num_classes", 2),
                'evaluate_difficult_gt': eval_Param.get("eval_difficult_gt", False),
                'boxsize_threshold': eval_Param.get("eval_boxsize_threshold", [0, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25]),
                'iou_threshold': eval_Param.get("eval_iou_threshold", [0.9, 0.75, 0.5]),
                'background_label_id': 0,
            }
            net.det_accu = L.DetEval(net['detection_out_2'], net[gt_label], \
                                     detection_evaluate_param=det_eval_param, \
                                     include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    return net
|
{"hexsha": "5f774652d37b877772d6f10153a4fdfbda7c365c", "size": 17184, "ext": "py", "lang": "Python", "max_stars_repo_path": "remodet_repository_wdh_part/Projects/DAP_Minihand/FaceBoxFPNNet.py", "max_stars_repo_name": "UrwLee/Remo_experience", "max_stars_repo_head_hexsha": "a59d5b9d6d009524672e415c77d056bc9dd88c72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "remodet_repository_wdh_part/Projects/DAP_Minihand/FaceBoxFPNNet.py", "max_issues_repo_name": "UrwLee/Remo_experience", "max_issues_repo_head_hexsha": "a59d5b9d6d009524672e415c77d056bc9dd88c72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "remodet_repository_wdh_part/Projects/DAP_Minihand/FaceBoxFPNNet.py", "max_forks_repo_name": "UrwLee/Remo_experience", "max_forks_repo_head_hexsha": "a59d5b9d6d009524672e415c77d056bc9dd88c72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 57.6644295302, "max_line_length": 153, "alphanum_fraction": 0.6017807263, "include": true, "reason": "import numpy", "num_tokens": 4543}
|
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse

# Restore a saved TF1 logo-classifier graph and classify a single image.
# Usage: python test.py <image_path>
dir_path = os.path.dirname(os.path.realpath(__file__))
image_path = sys.argv[1]
filename = image_path
print(filename)
image_size = 300
num_channels = 3
images = []
image = cv2.imread(filename)
# BUG FIX: the third positional argument of cv2.resize is `dst`, not the
# interpolation mode — pass interpolation by keyword so INTER_LINEAR is
# actually applied instead of being misread as an output buffer.
image = cv2.resize(image, (image_size, image_size), interpolation=cv2.INTER_LINEAR)
cv2.imshow('output', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
images.append(image)
# Normalize pixel values to [0, 1] float32, shaped as a batch of one.
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = np.multiply(images, 1.0/255.0)
cv2.imshow('output', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
x_batch = images.reshape(1, image_size, image_size, num_channels)

# Restore the checkpointed graph and run the prediction tensor.
sess = tf.Session()
saver = tf.train.import_meta_graph('/home/vijayaganesh/Desktop/Kanjoos/Logo Classifier.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
y_pred = graph.get_tensor_by_name("y_pred:0")
x = graph.get_tensor_by_name("x:0")
y_true = graph.get_tensor_by_name("y_true:0")
# y_true is only fed to satisfy the graph's placeholder; zeros are fine here.
y_test_images = np.zeros((1, 3))
feed_dict_testing = {x: x_batch, y_true: y_test_images}
result = sess.run(y_pred, feed_dict=feed_dict_testing)
print(result)
|
{"hexsha": "afde95e461ac20b32d5125bc26dd55bd84018cd9", "size": 1166, "ext": "py", "lang": "Python", "max_stars_repo_path": "django-webapp/kanjoos/myapp/src/runner/test.py", "max_stars_repo_name": "gokkulasudanr92/Kanjoos-HackGT", "max_stars_repo_head_hexsha": "a3dfb98cf98113b214a34e6cd3eaf338066315ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "django-webapp/kanjoos/myapp/src/runner/test.py", "max_issues_repo_name": "gokkulasudanr92/Kanjoos-HackGT", "max_issues_repo_head_hexsha": "a3dfb98cf98113b214a34e6cd3eaf338066315ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:10:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:29:07.000Z", "max_forks_repo_path": "django-webapp/kanjoos/myapp/src/runner/test.py", "max_forks_repo_name": "gokkulasudanr92/HackGT2017", "max_forks_repo_head_hexsha": "a3dfb98cf98113b214a34e6cd3eaf338066315ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2916666667, "max_line_length": 93, "alphanum_fraction": 0.769296741, "include": true, "reason": "import numpy", "num_tokens": 311}
|
import numpy as np

# Build a 10x10 float array with ones on the border and zeros inside.
grid = np.zeros((10, 10))
grid[0, :] = grid[-1, :] = 1
grid[:, 0] = grid[:, -1] = 1
x = grid
print(x)
|
{"hexsha": "af4c58b2c05a779deaf70c40b77a2bc7cc0ed5de", "size": 67, "ext": "py", "lang": "Python", "max_stars_repo_path": "semester-6/Python Practice/numpyPractice/program28.py", "max_stars_repo_name": "saranshbht/bsc-codes", "max_stars_repo_head_hexsha": "7386c09cc986de9c84947f7dea7db3dc42219a35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-03-22T12:07:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T17:28:23.000Z", "max_issues_repo_path": "semester-6/Python Practice/numpyPractice/program28.py", "max_issues_repo_name": "saranshbht/bsc-codes", "max_issues_repo_head_hexsha": "7386c09cc986de9c84947f7dea7db3dc42219a35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "semester-6/Python Practice/numpyPractice/program28.py", "max_forks_repo_name": "saranshbht/bsc-codes", "max_forks_repo_head_hexsha": "7386c09cc986de9c84947f7dea7db3dc42219a35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.75, "max_line_length": 21, "alphanum_fraction": 0.5671641791, "include": true, "reason": "import numpy", "num_tokens": 33}
|
# Store network information
# TODO: echenolize model
# Immutable LP container for a metabolic network.  The struct itself cannot
# be re-bound, but the field *arrays* are mutated in place by reducebox!/fix!.
struct MetNet
    ## LP (original)
    S::Matrix{Float64}       # stoichiometric matrix (rows: mets, cols: rxns)
    b::Vector{Float64}       # constraint right-hand side (presumably S*v = b — confirm)
    lb::Vector{Float64}      # per-reaction lower flux bounds
    ub::Vector{Float64}      # per-reaction upper flux bounds
    c::Vector{Float64}       # objective coefficients
    rxns::Vector{String}     # reaction ids (one per column of S)
    mets::Vector{String}     # metabolite ids (one per row of S)
end
MetNet(;S, b, lb, ub, c, rxns, mets) = MetNet(S, b, lb, ub, c, rxns, mets)
## -------------------------------------------------------------------
# Utils
# Identifier unions: a reaction/metabolite may be addressed by name
# (String/Symbol) or by its column/row index (Int).
_IDER_TYPE = Union{String, Symbol, Int}
_IDER_TYPE_STR = Union{String, Symbol}
_IDER_TYPE_INT = Int
# Resolve a reaction identifier to its column index in `S`.
# String/Symbol: match by name; Int: checked against `eachindex(net.rxns)`
# (so an out-of-range index yields `nothing`); vectors broadcast element-wise.
rxnindex(net::MetNet, rxn::_IDER_TYPE_STR) = findfirst(isequal(string(rxn)), net.rxns)
rxnindex(net::MetNet, rxn::_IDER_TYPE_INT) = findfirst(isequal(rxn), eachindex(net.rxns))
rxnindex(net::MetNet, rxns::Vector) = rxnindex.([net], rxns)
rxnindex(net::MetNet, rxns) = rxnindex(net, collect(rxns))
# Same lookup for metabolites (rows of `S`).
metindex(net::MetNet, met::_IDER_TYPE_STR) = findfirst(isequal(string(met)), net.mets)
metindex(net::MetNet, met::_IDER_TYPE_INT) = findfirst(isequal(met), eachindex(net.mets))
metindex(net::MetNet, mets::Vector) = metindex.([net], mets)
metindex(net::MetNet, mets) = metindex(net, collect(mets))
rxns(net::MetNet, rxns) = net.rxns[rxnindex(net, rxns)]
## -------------------------------------------------------------------
# Tighten every reaction's [lb, ub] to its flux-variability range, as
# returned by `fva` (defined elsewhere).  Mutates the bound vectors in place.
function reducebox!(net::MetNet)
    lb, ub = fva(net)
    net.lb .= lb
    net.ub .= ub
    return net
end
_reducebox!(net, reduce) = reduce ? reducebox!(net) : net
# Pin the given reactions to a single flux value (lb = ub = val).
# With `reducebox = true` the whole box is first tightened via `reducebox!`.
function fix!(net::MetNet, rxns, val; reducebox = false)
    reducebox && reducebox!(net)
    ridxs = rxnindex(net, rxns)
    if !(ridxs isa Vector)
        ridxs = [ridxs]
    end
    net.lb[ridxs] .= val
    net.ub[ridxs] .= val
    net
end
# Run `f` with the `rxns` bounds temporarily pinned to `val ± tol`,
# restoring the original bounds afterwards — even if `f` throws.
function fixxing(f::Function, net::MetNet, rxns, val; tol = 0.0)
    idxs = rxnindex(net, rxns)
    bk_lb = net.lb[idxs]        # backup of the original bounds
    bk_ub = net.ub[idxs]
    net.lb[idxs] = (val .- tol)
    net.ub[idxs] = (val .+ tol)
    try; return f()
    finally;
        net.lb[idxs] = bk_lb    # always restore
        net.ub[idxs] = bk_ub
    end
end
rxnid(net::MetNet, rxn) = net.rxn[rxnindex(net, rxn)]
# Bound getters/setters addressed by reaction identifier.  The mutating
# (`!`) versions can optionally re-tighten the whole box afterwards via
# `_reducebox!` (which calls fva) when `reducebox = true`.
lb(net::MetNet, rxn) = net.lb[rxnindex(net, rxn)]
lb!(net::MetNet, rxn, val; reducebox = false) =
    (net.lb[rxnindex(net, rxn)] = val; _reducebox!(net, reducebox))
ub(net::MetNet, rxn) = net.ub[rxnindex(net, rxn)]
ub!(net::MetNet, rxn, val; reducebox = false) =
    (net.ub[rxnindex(net, rxn)] = val; _reducebox!(net, reducebox))
# Set/get both bounds at once; `bounds` returns an (lb, ub) tuple.
bounds!(net::MetNet, rxn, lb, ub; reducebox = false) =
    (idx = rxnindex(net, rxn); net.lb[idx] = lb; net.ub[idx] = ub; _reducebox!(net, reducebox))
bounds(net::MetNet, rxn) =
    (idx = rxnindex(net, rxn); (net.lb[idx], net.ub[idx]))
## -------------------------------------------------------------------
# Stringify element `i` of `v`, or "" when `i` runs past the end.
# Used to pad ragged columns when printing tables.
function _get_elstr(v, i)
    i > lastindex(v) && return ""
    return string(v[i])
end
# Print the keyword arguments as a padded text table: one column per
# keyword (header = key name, rows = the stringified values).
function pretty(;kwargs...)
    kwargs = collect(kwargs)
    ks = first.(kwargs)
    vs = last.(kwargs)
    # maximum length
    # column width = widest of the header and its rendered values
    padl = map(zip(ks, vs)) do (k, v)
        maxvl = maximum(length.(string.(v)))
        max(maxvl, length(string(k)))
    end
    tab = " "
    println(join(rpad.(first.(kwargs), padl), tab))
    # separator row: "-" repeated to each column's width
    println(join( ["-"].^padl , tab))
    maxl = maximum(length.(last.(kwargs)))
    for i in 1:maxl
        # columns may be ragged; _get_elstr fills missing cells with ""
        println(join(rpad.(_get_elstr.(vs, i), padl), tab))
    end
end
## -------------------------------------------------------------------
# Display helper: round to three significant digits.
function _round(n)
    return round(n; sigdigits = 3)
end
# Tabulated reaction view: index, id, flux value `v`, bounds and the
# rendered equation, all routed through `pretty`.
function prettyrxn(net::MetNet, v::Vector, rxns = eachindex(net.rxns))
    rxns = rxnindex(net, rxns)
    pretty(;
        n = collect(eachindex(net.rxns))[rxns],
        rxn = net.rxns[rxns],
        v = _round.(v)[rxns],
        lb = _round.(net.lb)[rxns],
        ub = _round.(net.ub)[rxns],
        eq = rxn_str.([net], net.rxns)[rxns]
    )
end
## -------------------------------------------------------------------
# LP
# Thin wrappers dispatching the LP routines (defined elsewhere) on a MetNet.
fba(net::MetNet; kwargs...) = fba(net.S, net.b, net.lb, net.ub, net.c; kwargs...)
fba(net::MetNet, obj, sense = MAX_SENSE; kwargs...) =
    fba(net.S, net.b, net.lb, net.ub, net.c, rxnindex(net, obj), sense; kwargs...)
fva(net::MetNet, rxns = eachindex(net.rxns); kwargs...) =
    fva(net.S, net.b, net.lb, net.ub, net.c, rxnindex(net, rxns); kwargs...)
# Flux-variability helpers: fva returns per-reaction (lb, ub);
# Δv is the feasible span, U/L the upper/lower fva bound.
Δv(net, rxns = eachindex(net.rxns)) =
    ((lb, ub) = fva(net, rxnindex(net, rxns)); ub - lb)
U(net, rxns = eachindex(net.rxns)) =
    ((lb, ub) = fva(net, rxnindex(net, rxns)); ub)
L(net, rxns = eachindex(net.rxns)) =
    ((lb, ub) = fva(net, rxnindex(net, rxns)); lb)
## -------------------------------------------------------------------
# NOTE(review): the hash covers only (S, b, lb, ub) — c, rxns and mets do
# not affect it; confirm this is intentional before using it for caching.
Base.hash(net::MetNet) = hash((:MetNet, net.S, net.b, net.lb, net.ub))
# Network dimensions: (n_mets, n_rxns), i.e. size of S.
Base.size(net::MetNet) = size(net.S)
## -------------------------------------------------------------------
# Human-readable equation for a reaction, e.g. "(-1.0)A --> (1.0)B".
# The arrow encodes the bounds: ">-<" blocked, "<-->" reversible,
# "-->" forward-only, "<--" backward-only.
function rxn_str(net::MetNet, rxn)
    ridx = rxnindex(net, rxn)
    col = net.S[:, ridx]
    substrates = String[]
    products = String[]
    for (mi, coef) in enumerate(col)
        mid = net.mets[mi]
        coef = round(coef; sigdigits = 3)
        if coef < 0.0
            push!(substrates, string("(", coef, ")", mid))
        elseif coef > 0.0
            push!(products, string("(", coef, ")", mid))
        end
    end
    lb, ub = net.lb[ridx], net.ub[ridx]
    if lb == ub == 0
        arrow = ">-<"
    elseif lb < 0.0 && ub > 0.0
        arrow = "<-->"
    elseif lb >= 0.0 && ub > 0.0
        arrow = "-->"
    elseif lb < 0.0 && ub <= 0.0
        arrow = "<--"
    else
        arrow = "error"
    end
    return string(join(substrates, " + "), " ", arrow, " ", join(products, " + "))
end
## -------------------------------------------------------------------
# Pretty-print the network: its dimensions, then one row per reaction
# with index, reaction id, rounded [lb, ub] and the rendered equation.
function Base.show(io::IO, net::MetNet)
    println(io, "MetNet: ", size(net))
    M, N = size(net)
    # one String vector per column; element 1 is the header
    ri_strs = String["ri"; string.(1:N)]
    rxnid_strs = String["rxnid"]
    bounds_strs = String["bounds"]
    rxni_strs = String["rxn str"]
    for ri in 1:N
        lb, ub = net.lb[ri], net.ub[ri]
        lb = round(lb; sigdigits = 3)
        ub = round(ub; sigdigits = 3)
        bounds_str = string("[", lb, ", " ,ub, "]")
        push!(bounds_strs, bounds_str)
        rid = net.rxns[ri]
        rxnid_str = string(rid)
        push!(rxnid_strs, rxnid_str)
        rxni_str = rxn_str(net, ri)
        push!(rxni_strs, rxni_str)
    end
    # pad each column to its widest entry so the rows line up
    ri_pad = maximum(length.(ri_strs))
    rxnid_pad = maximum(length.(rxnid_strs))
    rxni_pad = maximum(length.(rxni_strs))
    bounds_pad = maximum(length.(bounds_strs))
    tab = " "
    for ri in eachindex(rxnid_strs)
        println(io,
            join([
                rpad(ri_strs[ri], ri_pad),
                rpad(rxnid_strs[ri], rxnid_pad),
                rpad(bounds_strs[ri], bounds_pad),
                rpad(rxni_strs[ri], rxni_pad)
            ], tab)
        )
    end
    nothing
end
Base.show(net::MetNet) = show(stdout, net)
## -------------------------------------------------------------------
# Check whether pinning the `freeis` reactions to `v` (within `tol`,
# default 1% of their fva span) still admits a solution: runs fba under
# the temporary bounds and treats any non-zero flux as feasible.
function is_feasible(net::MetNet, freeis, v; tol::Vector = Δv(net, freeis) .* 0.01)
    fullv = fixxing(net, freeis, v; tol) do
        fba(net)
    end
    any(fullv .!= 0.0)
end
## -------------------------------------------------------------------
# Build a BoxGrid (constructor defined elsewhere) over the chosen free
# reactions: one dimension per reaction, spanning its [lb, ub] with δ.
function BoxGrid(net::MetNet, frees::Vector, δs::Vector)
    idxs = rxnindex.([net], frees)
    dimdat = []
    for (idx, free, δ) in zip(idxs, frees, δs)
        isnothing(idx) && error(free, " not found")   # unknown identifier
        id = net.rxns[idx]
        lb, ub = net.lb[idx], net.ub[idx]
        push!(dimdat, (Symbol(id), lb, ub, δ))
    end
    BoxGrid(dimdat)
end
# Convenience overloads: same δ for every free reaction / for all reactions.
BoxGrid(net::MetNet, frees::Vector, δ::Int) = BoxGrid(net, frees, fill(δ, length(frees)))
BoxGrid(net::MetNet, δ::Int) = BoxGrid(net, net.rxns, fill(δ, length(net.rxns)))
|
{"hexsha": "43fe19db2e5c3cfc64ae0c3a596fa93f87d89ee8", "size": 7543, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Dynamic/MetNets.jl", "max_stars_repo_name": "josePereiro/Chemostat_InSilico.jl", "max_stars_repo_head_hexsha": "794293c33ea3f346ffdd8275498eaa3ee6f81d8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Dynamic/MetNets.jl", "max_issues_repo_name": "josePereiro/Chemostat_InSilico.jl", "max_issues_repo_head_hexsha": "794293c33ea3f346ffdd8275498eaa3ee6f81d8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Dynamic/MetNets.jl", "max_forks_repo_name": "josePereiro/Chemostat_InSilico.jl", "max_forks_repo_head_hexsha": "794293c33ea3f346ffdd8275498eaa3ee6f81d8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3733905579, "max_line_length": 95, "alphanum_fraction": 0.5224711653, "num_tokens": 2387}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 11:01:12 2018
@author: Yilin Liu
Reference: Yang, S., Wang, J., Fan, W., Zhang, X., Wonka, P. & Ye, J.
An Efficient ADMM Algorithm for Multidimensional Anisotropic
Total Variation Regularization Problems.
Proceedings of the 19th ACM SIGKDD International Conference on
Knowledge Discovery and Data Mining - KDD’13 (2013).
doi: 10.1145/2487575.2487586
Algorithm for arg_min_X 0.5|Y - X|_2^2 + lamda*|X|_TV (|X|_TV = |DX|_1) (ADMM)
"""
import numpy as np
import diff
def denoising_2D_TV(Y, para):
    """Anisotropic 2-D total-variation denoising via ADMM.

    Solves arg_min_X 0.5*|Y - X|_2^2 + lamda*|X|_TV with |X|_TV = |DX|_1
    (see the module header reference, Yang et al., KDD'13).

    Parameters
    ----------
    Y : 2-D array — the noisy image.
    para : options object; fields read here: ``regularization`` (lamda),
        ``admmregularization`` (rho), ``most_iter_num`` (iteration cap)
        and ``convergence`` (stopping tolerance on |X - X0|_2).

    Returns
    -------
    2-D array of the same shape as Y (the 1-pixel zero border is stripped).
    """
    M, N = np.shape(Y)
    # Work on arrays padded with a 1-pixel zero border so the difference
    # operators diff.Dx/Dy (external module) need no boundary special-casing.
    X0 = np.zeros((M + 2, N + 2))
    X0[1: M + 1, 1: N + 1] = Y
    Y0 = np.zeros((M + 2, N + 2))
    Y0[1: M + 1, 1: N + 1] = Y
    X = np.zeros((M + 2, N + 2))
    # ADMM splitting variables (Zx, Zy) and scaled dual variables (Ux, Uy)
    Zx = np.zeros((M + 2, N + 2))
    Zy = np.zeros((M + 2, N + 2))
    Ux = np.zeros((M + 2, N + 2))
    Uy = np.zeros((M + 2, N + 2))
    K = 0
    lamda, rho = para.regularization, para.admmregularization
    num, err = para.most_iter_num, para.convergence
    while K < num and np.linalg.norm(X - X0, 2) > err:
        # update X
        X0 = X
        RHS = Y0 + lamda * rho*(diff.Dxt(Zx) + diff.Dyt(Zy)) - lamda * (diff.Dxt(Ux) + diff.Dyt(Uy))
        X = np.zeros((M + 2, N + 2))
        # Jacobi-style sweep of (1 + 4*lamda*rho) X = RHS + lamda*rho * (4-neighbour sum),
        # reading neighbours from the previous iterate X0
        for i in range(1, M + 1):
            for j in range(1, N + 1):
                X[i, j] = ((X0[i + 1, j] + X0[i - 1, j] + X0[i, j + 1] + X0[i, j - 1]) * lamda * rho
                           + RHS[i, j]) / (1 + 4 * lamda * rho)
        # update Z: soft-threshold the shifted gradients (proximal step)
        Tx = Ux/rho + diff.Dx(X)
        Ty = Uy/rho + diff.Dy(X)
        Zx = shrink(Tx, 1/rho)
        Zy = shrink(Ty, 1/rho)
        # update U: dual ascent on the constraint DX = Z
        Ux = Ux + (diff.Dx(X) - Zx)
        Uy = Uy + (diff.Dy(X) - Zy)
        K += 1
    return X[1: M + 1, 1: N + 1]
def shrink(Y, lamda):
    """Soft-thresholding operator: move each entry of Y toward 0 by lamda."""
    magnitude = np.fmax(np.fabs(Y) - lamda, 0)
    return np.sign(Y) * magnitude
|
{"hexsha": "c9be20b02b917dd1369f6c5917111e64aefb3ba9", "size": 1931, "ext": "py", "lang": "Python", "max_stars_repo_path": "tv2d.py", "max_stars_repo_name": "MrCredulous/2D-MCTV-Denoising", "max_stars_repo_head_hexsha": "e261364802e5740780ad4278bf2bd4aba960a2c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-07T09:13:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T09:13:06.000Z", "max_issues_repo_path": "tv2d.py", "max_issues_repo_name": "zj15001/2D-MCTV-Denoising", "max_issues_repo_head_hexsha": "6bc99e44133cb421262ddd97b217cf5513415eb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tv2d.py", "max_forks_repo_name": "zj15001/2D-MCTV-Denoising", "max_forks_repo_head_hexsha": "6bc99e44133cb421262ddd97b217cf5513415eb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-18T16:05:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-11T09:03:18.000Z", "avg_line_length": 31.1451612903, "max_line_length": 101, "alphanum_fraction": 0.4857586743, "include": true, "reason": "import numpy", "num_tokens": 744}
|
# -*- coding: utf-8 -*-
# @Time : 2021/4/4 7:48 下午
# @Author : Yushuo Wang
# @FileName: Random_Forest.py
# @Software: PyCharm
# @Blog :https://lesliewongcv.github.io/
import pandas as pd
import numpy as np
import random
import math
import collections
from joblib import Parallel, delayed
from scipy.io import loadmat
import os
from sklearn.model_selection import KFold
class Tree(object):
    """A node of a binary decision tree.

    Internal nodes carry a (split_feature, split_value) pair; leaves carry
    a leaf_value.  A node predicts via leaf_value as soon as it is set.
    """
    def __init__(self):
        self.split_feature = None  # feature used to split at this node
        self.split_value = None    # threshold: <= goes left, > goes right
        self.leaf_value = None     # predicted label (set on leaf nodes only)
        self.tree_left = None      # subtree for samples <= split_value
        self.tree_right = None     # subtree for samples > split_value
    def calc_predict_value(self, dataset):
        """Route one sample down the tree and return the leaf's label."""
        node = self
        while node.leaf_value is None:
            if dataset[node.split_feature] <= node.split_value:
                node = node.tree_left
            else:
                node = node.tree_right
        return node.leaf_value
    def describe_tree(self):
        """Render the tree as a JSON-like string for inspection/logging."""
        if not self.tree_left and not self.tree_right:
            return "{leaf_value:" + str(self.leaf_value) + "}"
        return ("{split_feature:" + str(self.split_feature) +
                ",split_value:" + str(self.split_value) +
                ",left_tree:" + self.tree_left.describe_tree() +
                ",right_tree:" + self.tree_right.describe_tree() + "}")
class RandomForestClassifier(object):
    """Random forest classifier built from hand-rolled CART trees.

    Trees are grown on bootstrap row samples with optional column
    sub-sampling, split on the size-weighted Gini index, and trained in
    parallel with joblib threads.

    NOTE(review): ``_parallel_build_trees`` appends to a module-level
    ``describe`` list and reads ``file_name``; both are defined only by the
    ``__main__`` script in this file, so ``fit`` is not usable standalone.
    """
    def __init__(self, n_estimators=10, max_depth=-1, min_samples_split=2, min_samples_leaf=1,
                 min_split_gain=0.0, colsample_bytree=None, subsample=0.8, random_state=None):
        """
        Params
        ----------
        n_estimators: No. of trees
        max_depth: Tree depth, -1 for unlimited depth
        min_samples_split: The minimum number of samples required for node splitting
        min_samples_leaf: The minimum sample number of leaf nodes
        min_split_gain: The Minimal change of the Gini
        colsample_bytree: Column sampling setting can be [sqrt, log2].
                          sqrt means randomly selecting sqrt(n_features) features,
                          log2 indicates that log(n_features) features are randomly selected,
                          and if set to other, column sampling is not performed
        subsample: line sampling ratio
        random_state: Random seed, the set of n_estimators generated each time
                      after setting will not change to ensure that the experiment can be repeated
        """
        self.n_estimators = n_estimators
        # -1 means "unlimited"; store +inf so depth comparisons need no special case
        self.max_depth = max_depth if max_depth != -1 else float('inf')
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_split_gain = min_split_gain
        self.colsample_bytree = colsample_bytree
        self.subsample = subsample
        self.random_state = random_state
        self.trees = None  # list of Tree roots after fit(); built in parallel
        self.feature_importances_ = dict()  # feature -> number of times used in a split
    def fit(self, dataset, targets):
        """Train the forest on `dataset` (DataFrame) and `targets` (Series)."""
        # assert targets.unique().__len__() == 2, "There must be two class for targets!"
        targets = targets.to_frame(name='label')  # label in the pd frame
        if self.random_state:
            random.seed(self.random_state)
        # one sub-seed per tree; reproducible for a fixed random_state
        random_state_stages = random.sample(range(self.n_estimators), self.n_estimators)
        # Two column sampling methods
        if self.colsample_bytree == "sqrt":
            self.colsample_bytree = int(len(dataset.columns) ** 0.5)
        elif self.colsample_bytree == "log2":
            self.colsample_bytree = int(math.log(len(dataset.columns)))
        else:
            self.colsample_bytree = len(dataset.columns)
        # Build multiple decision trees in parallel (threads share memory,
        # so the pandas frames are not copied per worker)
        self.trees = Parallel(n_jobs=-1, verbose=0, backend="threading")(
            delayed(self._parallel_build_trees)(dataset, targets, random_state)
            for random_state in random_state_stages)
    def _parallel_build_trees(self, dataset, targets, random_state):
        """Grow one tree on a bootstrap sample (rows with replacement, columns without)."""
        global describe
        subcol_index = random.sample(dataset.columns.tolist(), self.colsample_bytree)
        # sampling rows of dataset and targets with the same seed keeps them aligned
        dataset_stage = dataset.sample(n=int(self.subsample * len(dataset)), replace=True,
                                       random_state=random_state).reset_index(drop=True)
        dataset_stage = dataset_stage.loc[:, subcol_index]
        targets_stage = targets.sample(n=int(self.subsample * len(dataset)), replace=True,
                                       random_state=random_state).reset_index(drop=True)
        tree = self._build_single_tree(dataset_stage, targets_stage, depth=0)
        describe += [file_name + ' : ' + tree.describe_tree()]  # see class NOTE
        return tree
    def _build_single_tree(self, dataset, targets, depth):
        """Recursively grow a tree and return its root node.

        A node becomes a majority-vote leaf when it is pure, has too few
        samples to split, a child would be too small, the best split gains
        too little, or the depth limit is reached.
        """
        if len(targets['label'].unique()) <= 1 or dataset.__len__() <= self.min_samples_split:
            tree = Tree()
            tree.leaf_value = self.calc_leaf_value(targets['label'])
            return tree
        if depth < self.max_depth:
            best_split_feature, best_split_value, best_split_gain = self.choose_best_feature(dataset,
                                                                                             targets)
            left_dataset, right_dataset, left_targets, right_targets = \
                self.split_dataset(dataset, targets, best_split_feature, best_split_value)
            tree = Tree()
            # Stop if either child would be too small or the gain is too low.
            if left_dataset.__len__() <= self.min_samples_leaf or \
                    right_dataset.__len__() <= self.min_samples_leaf or \
                    best_split_gain <= self.min_split_gain:
                tree.leaf_value = self.calc_leaf_value(targets['label'])
                return tree
            else:
                # Every use of a feature counts as one unit of importance.
                self.feature_importances_[best_split_feature] = \
                    self.feature_importances_.get(best_split_feature, 0) + 1
                tree.split_feature = best_split_feature
                tree.split_value = best_split_value
                # Recurse into both children.
                tree.tree_left = self._build_single_tree(left_dataset, left_targets, depth + 1)
                tree.tree_right = self._build_single_tree(right_dataset, right_targets, depth + 1)
                return tree
        else:
            # Depth limit reached: terminate with a leaf.
            tree = Tree()
            tree.leaf_value = self.calc_leaf_value(targets['label'])
            return tree
    def choose_best_feature(self, dataset, targets):
        """Return (feature, threshold, gain) minimising the weighted Gini.

        FIX: removed the dead statement ``_ = 1 + 1`` that sat at the top of
        the feature loop and did nothing.
        """
        # The weighted Gini of any split is < 1, so this sentinel always loses.
        best_split_gain = 1
        best_split_feature = None
        best_split_value = None
        for feature in dataset.columns:
            if dataset[feature].unique().__len__() <= 100:
                unique_values = sorted(dataset[feature].unique().tolist())
            else:
                # too many distinct values: probe 100 percentiles instead
                unique_values = np.unique([np.percentile(dataset[feature], x)
                                           for x in np.linspace(0, 100, 100)])
            for split_value in unique_values:
                left_targets = targets[dataset[feature] <= split_value]
                right_targets = targets[dataset[feature] > split_value]
                split_gain = self.calc_gini(left_targets['label'], right_targets['label'])
                if split_gain < best_split_gain:
                    best_split_feature = feature
                    best_split_value = split_value
                    best_split_gain = split_gain
        return best_split_feature, best_split_value, best_split_gain
    @staticmethod
    def calc_leaf_value(targets):
        """Majority label of `targets` (count ties broken by the larger label)."""
        label_counts = collections.Counter(targets)
        major_label = max(zip(label_counts.values(), label_counts.keys()))
        return major_label[1]
    @staticmethod
    def calc_gini(left_targets, right_targets):
        """Size-weighted Gini impurity of a two-way split (lower is better)."""
        split_gain = 0
        for targets in [left_targets, right_targets]:
            gini = 1
            label_counts = collections.Counter(targets)
            for key in label_counts:
                prob = label_counts[key] * 1.0 / len(targets)
                gini -= prob ** 2
            split_gain += len(targets) * 1.0 / (len(left_targets) + len(right_targets)) * gini
        return split_gain
    @staticmethod
    def split_dataset(dataset, targets, split_feature, split_value):
        """Partition rows by ``split_feature <= split_value`` (left) vs ``>`` (right)."""
        left_dataset = dataset[dataset[split_feature] <= split_value]
        left_targets = targets[dataset[split_feature] <= split_value]
        right_dataset = dataset[dataset[split_feature] > split_value]
        right_targets = targets[dataset[split_feature] > split_value]
        return left_dataset, right_dataset, left_targets, right_targets
    def predict(self, dataset):
        """Majority vote of all trees for each row of `dataset`; returns an array."""
        res = []
        for _, row in dataset.iterrows():
            pred_list = []
            for tree in self.trees:
                pred_list.append(tree.calc_predict_value(row))
            # most common predicted label wins (ties -> larger label)
            pred_label_counts = collections.Counter(pred_list)
            pred_label = max(zip(pred_label_counts.values(), pred_label_counts.keys()))
            res.append(pred_label[1])
        return np.array(res)
if __name__ == '__main__':
    # Driver: 4-fold cross-validate the forest on every .mat dataset found
    # under PATH, for forest sizes 3..6, and dump accuracies to a text file.
    # NOTE(review): PATH is a machine-specific absolute path — parameterize
    # before running elsewhere.
    PATH = '/Users/leslie/Downloads/MatDataset/'
    folders = os.listdir(PATH)
    res = []       # one "<name>:<train>/<valid> tree No. = <n>" line per run
    describe = []  # tree dumps appended by RandomForestClassifier workers
    for folder_name in folders:
        file_name = folder_name  # read by _parallel_build_trees via global
        if folder_name == '.DS_Store':
            continue
        else:
            matfn = PATH + folder_name + '/' + folder_name + '_Train.mat'
            df_data = loadmat(matfn)['Data']
            df_label = loadmat(matfn)['Label']
            # features + label fused into one frame; label is the last column
            df_ = np.concatenate((df_data, df_label), axis=1)
            df_f = pd.DataFrame(df_)
            # df_f = df_f[df_f.loc[:, 8].isin([0, 2])].sample(frac=1, random_state=66).reset_index(drop=True)
            kf = KFold(n_splits=4, shuffle=False)
            for ntree in range(3,7):
                acc_valid = 0
                acc_train = 0
                for train_index, test_index in kf.split(df_label):  # 4-fold
                    clf = RandomForestClassifier(n_estimators=ntree,
                                                 max_depth=5,
                                                 min_samples_split=6,
                                                 min_samples_leaf=2,
                                                 min_split_gain=0.0,
                                                 colsample_bytree="sqrt",
                                                 subsample=0.8,
                                                 random_state=66)
                    feature_list = np.arange(df_.shape[1] - 1)
                    clf.fit(df_f.loc[train_index, feature_list], df_f.loc[train_index, df_f.shape[1] - 1])
                    # NOTE(review): import inside the loop — harmless but
                    # conventionally belongs at the top of the file.
                    from sklearn import metrics
                    acc_train += metrics.accuracy_score(df_f.loc[train_index,
                                                                 df_.shape[1] - 1],
                                                        clf.predict(df_f.loc[train_index, feature_list]))
                    acc_valid += metrics.accuracy_score(df_f.loc[test_index, df_.shape[1] - 1],
                                                        clf.predict(df_f.loc[test_index, feature_list]))
                # NOTE(review): accuracies are rounded to 4 decimals *before*
                # dividing the 4-fold sum by 4 — confirm the intended order.
                acc_train = round(10 ** 4 * acc_train) / 10 ** 4
                acc_valid = round(10 ** 4 * acc_valid) / 10 ** 4
                acc_train /= 4
                acc_valid /= 4
                res += [folder_name + ':' + str(acc_train) + '/' + str(acc_valid) +" tree No. = " + str(ntree)]
                print(folder_name + ":" + str(round(ntree/7 * 100)) + "%")
    res = np.array(res)
    np.savetxt("res_my_method.txt", res, fmt='%s', delimiter=',')
|
{"hexsha": "e607b7ae24e6b0aa944353ecfeda5cba39ea2c5f", "size": 13015, "ext": "py", "lang": "Python", "max_stars_repo_path": "Random_Forest.py", "max_stars_repo_name": "LeslieWongCV/RF", "max_stars_repo_head_hexsha": "40ffca61ed6b474d5d991d9db48dd7b7bff04b30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-12T08:26:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T08:26:18.000Z", "max_issues_repo_path": "Random_Forest.py", "max_issues_repo_name": "LeslieWongCV/RF", "max_issues_repo_head_hexsha": "40ffca61ed6b474d5d991d9db48dd7b7bff04b30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Random_Forest.py", "max_forks_repo_name": "LeslieWongCV/RF", "max_forks_repo_head_hexsha": "40ffca61ed6b474d5d991d9db48dd7b7bff04b30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.268707483, "max_line_length": 124, "alphanum_fraction": 0.5850172877, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2649}
|
import logging
import unittest
from pathlib import Path
import numpy as np
import alf.io
from ibllib.io import raw_data_loaders as raw
import ibllib.io.extractors
class TestExtractTrialData(unittest.TestCase):
    def setUp(self):
        # Session fixtures: one training and one biased session per rig
        # version family (lt5 = version < 5.0.0, ge5 = version >= 5.0.0).
        self.main_path = Path(__file__).parent
        self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}
        self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}
        self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}
        self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}
        # Cache the trial counts so extraction tests can check output sizes.
        self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))
        self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))
        self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))
        self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))
        # turn off logging for unit testing as we will purposedly go into warning/error cases
        # NOTE(review): the logger is fetched but its level is never changed
        # here, so logging is not actually silenced — confirm intent.
        self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
        self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
        self.logger = logging.getLogger('ibllib')
def test_get_feedbackType(self):
# TRAINING SESSIONS
ft = ibllib.io.extractors.training_trials.get_feedbackType(
self.training_lt5['path'], save=False, data=False)
self.assertEqual(ft.size, self.training_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = ibllib.io.extractors.training_trials.get_feedbackType(
self.training_ge5['path'], save=False, data=False)
self.assertEqual(ft.size, self.training_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# BIASED SESSIONS
ft = ibllib.io.extractors.biased_trials.get_feedbackType(
self.biased_lt5['path'], save=False, data=False)
self.assertEqual(ft.size, self.biased_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = ibllib.io.extractors.biased_trials.get_feedbackType(
self.biased_ge5['path'], save=False, data=False)
self.assertEqual(ft.size, self.biased_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
def test_get_contrastLR(self):
# TRAINING SESSIONS
cl, cr = ibllib.io.extractors.training_trials.get_contrastLR(
self.training_lt5['path'])
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = ibllib.io.extractors.training_trials.get_contrastLR(
self.training_ge5['path'])
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# BIASED SESSIONS
cl, cr = ibllib.io.extractors.biased_trials.get_contrastLR(
self.biased_lt5['path'])
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = ibllib.io.extractors.biased_trials.get_contrastLR(
self.biased_ge5['path'])
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
    def test_get_probabilityLeft(self):
        # TRAINING SESSIONS: only the return type is checked
        pl = ibllib.io.extractors.training_trials.get_probabilityLeft(
            self.training_lt5['path'])
        self.assertTrue(isinstance(pl, np.ndarray))
        # -- version >= 5.0.0
        pl = ibllib.io.extractors.training_trials.get_probabilityLeft(
            self.training_ge5['path'])
        self.assertTrue(isinstance(pl, np.ndarray))
        # BIASED SESSIONS
        pl = ibllib.io.extractors.biased_trials.get_probabilityLeft(
            self.biased_lt5['path'])
        self.assertTrue(isinstance(pl, np.ndarray))
        # Test if only probs that are in prob set
        md = raw.load_settings(self.biased_lt5['path'])
        if md:
            probs = md['BLOCK_PROBABILITY_SET']
            probs.append(0.5)  # 0.5 is appended as an always-allowed probability
            self.assertTrue(sum([x in probs for x in pl]) == len(pl))
        # -- version >= 5.0.0
        pl = ibllib.io.extractors.biased_trials.get_probabilityLeft(
            self.biased_ge5['path'])
        self.assertTrue(isinstance(pl, np.ndarray))
        # Test if only probs that are in prob set
        # NOTE(review): unlike the lt5 branch above, a missing settings file
        # is not guarded against here (`md` may be None) — confirm ge5
        # sessions always carry settings.
        md = raw.load_settings(self.biased_ge5['path'])
        probs = md['BLOCK_PROBABILITY_SET']
        probs.append(0.5)
        self.assertTrue(sum([x in probs for x in pl]) == len(pl))
def test_get_choice(self):
# TRAINING SESSIONS
choice = ibllib.io.extractors.training_trials.get_choice(
self.training_lt5['path'])
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = ibllib.io.extractors.training_trials.get_choice(
self.training_ge5['path'])
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# BIASED SESSIONS
choice = ibllib.io.extractors.biased_trials.get_choice(
self.biased_lt5['path'])
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = ibllib.io.extractors.biased_trials.get_choice(
self.biased_ge5['path'])
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
# TODO: Test its sawtooth
# TRAINING SESSIONS
rn = ibllib.io.extractors.training_trials.get_repNum(
self.training_lt5['path'])
self.assertTrue(isinstance(rn, np.ndarray))
# -- version >= 5.0.0
rn = ibllib.io.extractors.training_trials.get_repNum(
self.training_ge5['path'])
self.assertTrue(isinstance(rn, np.ndarray))
# BIASED SESSIONS have no repeted trials
def test_get_rewardVolume(self):
# TRAINING SESSIONS
rv = ibllib.io.extractors.training_trials.get_rewardVolume(
self.training_lt5['path'])
self.assertTrue(isinstance(rv, np.ndarray))
# -- version >= 5.0.0
rv = ibllib.io.extractors.training_trials.get_rewardVolume(
self.training_ge5['path'])
self.assertTrue(isinstance(rv, np.ndarray))
# BIASED SESSIONS
rv = ibllib.io.extractors.biased_trials.get_rewardVolume(
self.biased_lt5['path'])
self.assertTrue(isinstance(rv, np.ndarray))
# Test if all non zero rewards are of the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
# -- version >= 5.0.0
rv = ibllib.io.extractors.biased_trials.get_rewardVolume(
self.biased_ge5['path'])
self.assertTrue(isinstance(rv, np.ndarray))
# Test if all non zero rewards are of the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
# TRAINING SESSIONS
ft = ibllib.io.extractors.training_trials.get_feedback_times_ge5(
self.training_ge5['path'])
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = ibllib.io.extractors.biased_trials.get_feedback_times_ge5(
self.biased_ge5['path'])
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
# TRAINING SESSIONS
ft = ibllib.io.extractors.training_trials.get_feedback_times_lt5(
self.training_lt5['path'])
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = ibllib.io.extractors.biased_trials.get_feedback_times_lt5(
self.biased_lt5['path'])
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times(self):
# TRAINING SESSIONS
ft = ibllib.io.extractors.training_trials.get_feedback_times(
self.training_ge5['path'])
self.assertTrue(isinstance(ft, np.ndarray))
ft = ibllib.io.extractors.training_trials.get_feedback_times(
self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'})
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = ibllib.io.extractors.biased_trials.get_feedback_times(
self.biased_ge5['path'])
self.assertTrue(isinstance(ft, np.ndarray))
ft = ibllib.io.extractors.biased_trials.get_feedback_times(
self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'})
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
# TRAINING SESSIONS
sott = ibllib.io.extractors.training_trials.get_stimOnTrigger_times(
self.training_lt5['path'])
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = ibllib.io.extractors.training_trials.get_stimOnTrigger_times(
self.training_ge5['path'])
self.assertTrue(isinstance(sott, np.ndarray))
# BIASED SESSIONS
sott = ibllib.io.extractors.biased_trials.get_stimOnTrigger_times(
self.biased_lt5['path'])
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = ibllib.io.extractors.biased_trials.get_stimOnTrigger_times(
self.biased_ge5['path'])
self.assertTrue(isinstance(sott, np.ndarray))
def test_get_stimOn_times_lt5(self):
# TRAINING SESSIONS
st = ibllib.io.extractors.training_trials.get_stimOn_times_lt5(
self.training_lt5['path'])
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = ibllib.io.extractors.biased_trials.get_stimOn_times_lt5(
self.biased_lt5['path'])
self.assertTrue(isinstance(st, np.ndarray))
def test_get_stimOn_times_ge5(self):
# TRAINING SESSIONS
st = ibllib.io.extractors.training_trials.get_stimOn_times_ge5(
self.training_ge5['path'])
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = ibllib.io.extractors.biased_trials.get_stimOn_times_ge5(
self.biased_ge5['path'])
self.assertTrue(isinstance(st, np.ndarray))
def test_get_stimOn_times(self):
    """Version-dispatching stimOn extraction returns numpy arrays.

    Pre-5.0.0 sessions need the explicit IBLRIG_VERSION_TAG override.
    """
    jobs = [
        (ibllib.io.extractors.training_trials, self.training_lt5,
         {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
        (ibllib.io.extractors.training_trials, self.training_ge5, {}),
        (ibllib.io.extractors.biased_trials, self.biased_lt5,
         {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
        (ibllib.io.extractors.biased_trials, self.biased_ge5, {}),
    ]
    for extractor, session, kwargs in jobs:
        st = extractor.get_stimOn_times(session['path'], **kwargs)
        self.assertTrue(isinstance(st, np.ndarray))
def test_get_intervals(self):
    """Trial intervals extract as numpy arrays that are not all-NaN.

    NOTE(review): the original body loaded the *training* fixtures under
    the '# BIASED SESSIONS' heading, which looked like a copy/paste slip;
    the biased branch below now uses the biased fixtures.
    """
    # TRAINING SESSIONS
    di = ibllib.io.extractors.training_trials.get_intervals(
        self.training_lt5['path'])
    self.assertTrue(isinstance(di, np.ndarray))
    self.assertFalse(np.isnan(di).all())
    # -- version >= 5.0.0
    di = ibllib.io.extractors.training_trials.get_intervals(
        self.training_ge5['path'])
    self.assertTrue(isinstance(di, np.ndarray))
    self.assertFalse(np.isnan(di).all())
    # BIASED SESSIONS
    di = ibllib.io.extractors.biased_trials.get_intervals(
        self.biased_lt5['path'])
    self.assertTrue(isinstance(di, np.ndarray))
    self.assertFalse(np.isnan(di).all())
    # -- version >= 5.0.0
    di = ibllib.io.extractors.biased_trials.get_intervals(
        self.biased_ge5['path'])
    self.assertTrue(isinstance(di, np.ndarray))
    self.assertFalse(np.isnan(di).all())
def test_get_iti_duration(self):
    """ITI durations extract as numpy arrays.

    Only pre-5.0.0 sessions are covered: from 5.0.0 the ITI is a fixed
    0.5 s and nothing is extracted.
    """
    for extractor, session in [
            (ibllib.io.extractors.training_trials, self.training_lt5),
            (ibllib.io.extractors.biased_trials, self.biased_lt5)]:
        iti = extractor.get_iti_duration(session['path'])
        self.assertTrue(isinstance(iti, np.ndarray))
def test_get_response_times(self):
    """Response times extract as numpy arrays for every session flavour."""
    cases = [
        (ibllib.io.extractors.training_trials, self.training_lt5),  # training < 5.0.0
        (ibllib.io.extractors.training_trials, self.training_ge5),  # training >= 5.0.0
        (ibllib.io.extractors.biased_trials, self.biased_lt5),      # biased < 5.0.0
        (ibllib.io.extractors.biased_trials, self.biased_ge5),      # biased >= 5.0.0
    ]
    for extractor, session in cases:
        rt = extractor.get_response_times(session['path'])
        self.assertTrue(isinstance(rt, np.ndarray))
def test_get_goCueTrigger_times(self):
    """Go-cue trigger times come back as numpy arrays.

    For pre-5.0.0 sessions the times are assembled directly from the raw
    Bpod 'closed_loop' state timestamps instead of going through the
    extractor (presumably because the lt5 extractor path differs — TODO
    confirm against the extractor implementation).
    """
    # TRAINING SESSIONS
    data = raw.load_data(self.training_lt5['path'])
    # First timestamp of the 'closed_loop' state of each trial.
    gct = np.array([tr['behavior_data']['States timestamps']
                    ['closed_loop'][0][0] for tr in data])
    self.assertTrue(isinstance(gct, np.ndarray))
    # -- version >= 5.0.0
    gct = ibllib.io.extractors.training_trials.get_goCueTrigger_times(
        self.training_ge5['path'])
    self.assertTrue(isinstance(gct, np.ndarray))
    # BIASED SESSIONS
    data = raw.load_data(self.biased_lt5['path'])
    gct = np.array([tr['behavior_data']['States timestamps']
                    ['closed_loop'][0][0] for tr in data])
    self.assertTrue(isinstance(gct, np.ndarray))
    # -- version >= 5.0.0
    gct = ibllib.io.extractors.biased_trials.get_goCueTrigger_times(
        self.biased_ge5['path'])
    self.assertTrue(isinstance(gct, np.ndarray))
def test_get_goCueOnset_times(self):
    """Go-cue onset times are numpy arrays; lt5 training sessions yield NaNs.

    NOTE(review): the assertions of the form
    ``gcot.size != 0 or gcot.size == N`` are tautological whenever the
    array is non-empty — the right-hand clause never constrains the size.
    They were probably meant to pin the exact size (== N); left untouched
    pending confirmation of the intended contract.
    """
    # TRAINING SESSIONS
    gcot = ibllib.io.extractors.training_trials.get_goCueOnset_times(
        self.training_lt5['path'])
    self.assertTrue(isinstance(gcot, np.ndarray))
    # lt5 training fixture has no onset data: everything is NaN.
    self.assertTrue(np.all(np.isnan(gcot)))
    self.assertTrue(gcot.size != 0 or gcot.size == 4)
    # -- version >= 5.0.0
    gcot = ibllib.io.extractors.training_trials.get_goCueOnset_times(
        self.training_ge5['path'])
    self.assertTrue(isinstance(gcot, np.ndarray))
    self.assertFalse(np.any(np.isnan(gcot)))
    self.assertTrue(gcot.size != 0 or gcot.size == 12)
    # BIASED SESSIONS
    gcot = ibllib.io.extractors.biased_trials.get_goCueOnset_times(
        self.biased_lt5['path'])
    self.assertTrue(isinstance(gcot, np.ndarray))
    self.assertFalse(np.any(np.isnan(gcot)))
    self.assertTrue(gcot.size != 0 or gcot.size == 4)
    # -- version >= 5.0.0
    gcot = ibllib.io.extractors.biased_trials.get_goCueOnset_times(
        self.biased_ge5['path'])
    self.assertTrue(isinstance(gcot, np.ndarray))
    self.assertFalse(np.any(np.isnan(gcot)))
    self.assertTrue(gcot.size != 0 or gcot.size == 8)
def test_get_included_trials_lt5(self):
    """Included-trial masks load as numpy arrays with the pre-5.0.0 loader."""
    for extractor, session in [
            (ibllib.io.extractors.training_trials, self.training_lt5),
            (ibllib.io.extractors.biased_trials, self.biased_lt5)]:
        it = extractor.get_included_trials_lt5(session['path'])
        self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials_ge5(self):
    """Included-trial masks load as numpy arrays with the >= 5.0.0 loader."""
    for extractor, session in [
            (ibllib.io.extractors.training_trials, self.training_ge5),
            (ibllib.io.extractors.biased_trials, self.biased_ge5)]:
        it = extractor.get_included_trials_ge5(session['path'])
        self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials(self):
    """Version-dispatching included-trials extraction returns numpy arrays.

    Pre-5.0.0 sessions need the explicit IBLRIG_VERSION_TAG override.
    """
    jobs = [
        (ibllib.io.extractors.training_trials, self.training_lt5,
         {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
        (ibllib.io.extractors.training_trials, self.training_ge5, {}),
        (ibllib.io.extractors.biased_trials, self.biased_lt5,
         {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
        (ibllib.io.extractors.biased_trials, self.biased_ge5, {}),
    ]
    for extractor, session, kwargs in jobs:
        it = extractor.get_included_trials(session['path'], **kwargs)
        self.assertTrue(isinstance(it, np.ndarray))
def test_extract_all(self):
    """extract_all runs end-to-end (save=True) on every session flavour."""
    jobs = [
        (ibllib.io.extractors.training_trials, self.training_lt5,
         {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
        (ibllib.io.extractors.training_trials, self.training_ge5, {}),
        (ibllib.io.extractors.biased_trials, self.biased_lt5,
         {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
        (ibllib.io.extractors.biased_trials, self.biased_ge5, {}),
    ]
    for extractor, session, kwargs in jobs:
        extractor.extract_all(session['path'], save=True, **kwargs)
# ENCODER TESTS (should be moved to a RawDataLoaders test suite)
def test_encoder_positions_duds(self):
    """Known-corrupt ('dud') encoder position files still parse.

    For lt5 files the bonsai timestamp column must come back as dtype
    'object'; each fixture must yield its known row count.
    """
    cases = [
        # (session, loader, check bns_ts dtype, expected row count)
        (self.training_lt5, raw._load_encoder_positions_file_lt5, True, 14),
        (self.training_ge5, raw._load_encoder_positions_file_ge5, False, 936),
        (self.biased_lt5, raw._load_encoder_positions_file_lt5, True, 14),
        (self.biased_ge5, raw._load_encoder_positions_file_ge5, False, 1122),
    ]
    for session, loader, check_bns, n_rows in cases:
        folder = session['path'] / "raw_behavior_data"
        ssv = next(folder.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = loader(ssv)
        if check_bns:
            self.assertEqual(dy.bns_ts.dtype.name, 'object')
        self.assertTrue(dy.shape[0] == n_rows)
def test_encoder_events_duds(self):
    """Known-corrupt ('dud') encoder event files still parse.

    For lt5 files the bonsai timestamp column must come back as dtype
    'object'; each fixture must yield its known row count.
    """
    cases = [
        # (session, loader, check bns_ts dtype, expected row count)
        (self.training_lt5, raw._load_encoder_events_file_lt5, True, 7),
        (self.training_ge5, raw._load_encoder_events_file_ge5, False, 38),
        (self.biased_lt5, raw._load_encoder_events_file_lt5, True, 7),
        (self.biased_ge5, raw._load_encoder_events_file_ge5, False, 26),
    ]
    for session, loader, check_bns, n_rows in cases:
        folder = session['path'] / "raw_behavior_data"
        ssv = next(folder.glob("_iblrig_encoderEvents.raw*.ssv"), None)
        dy = loader(ssv)
        if check_bns:
            self.assertEqual(dy.bns_ts.dtype.name, 'object')
        self.assertTrue(dy.shape[0] == n_rows)
def test_encoder_positions_clock_reset(self):
    """Rotary-encoder clock rollover is detected and corrected (lt5 files).

    After loading, timestamps must be strictly increasing and the tail of
    the series must equal the fixture values once one 2**32 rollover is
    subtracted back out.
    """
    # TRAINING SESSIONS
    # TODO: clarify why dat? make general? when should this fail?
    # only for training?
    path = self.training_lt5['path'] / "raw_behavior_data"
    path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
    dy = raw._load_encoder_positions_file_lt5(path)
    # Expected raw (pre-correction) timestamps for this fixture's tail.
    dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])
    self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
    # Samples 6+ were shifted up by one 2**32 rollover during loading.
    self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
    """Timestamp corruption in position files is repaired on load.

    The fixtures exercise two corruption modes:
    1/2 an absurdly high first sample time that must be discarded;
    2/2 two swapped samples that must be swapped back.
    After loading, re_ts must be strictly increasing.
    """
    for loader, session in [
            (raw._load_encoder_positions_file_lt5, self.biased_lt5),
            (raw._load_encoder_positions_file_ge5, self.biased_ge5)]:
        folder = session['path'] / "raw_behavior_data"
        ssv = next(folder.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = loader(ssv)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
# the wheel folder contains other errors in bpod output that had to be addressed
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_interpolation(self):
    """time_interpolation returns a usable mapping function.

    First with equal-length time bases, then with mismatched lengths —
    in the latter case the interpolant is still expected to reproduce
    ``tb`` on ``ta`` (presumably the extra sample is ignored/truncated —
    TODO confirm against time_interpolation's contract).
    """
    # straight test that it returns an usable function
    ta = np.array([0., 1., 2., 3., 4., 5.])
    tb = np.array([0., 1.1, 2.0, 2.9, 4., 5.])
    finterp = ibllib.io.extractors.training_wheel.time_interpolation(ta, tb)
    self.assertTrue(np.all(finterp(ta) == tb))
    # next test if sizes are not similar
    tc = np.array([0., 1.1, 2.0, 2.9, 4., 5., 6.])
    finterp = ibllib.io.extractors.training_wheel.time_interpolation(ta, tc)
    self.assertTrue(np.all(finterp(ta) == tb))
def test_load_encoder_positions(self):
    """load_encoder_positions runs on all four sessions (lt5 needs settings)."""
    for session, kwargs in [
            (self.training_lt5, {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
            (self.training_ge5, {}),
            (self.biased_lt5, {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
            (self.biased_ge5, {})]:
        raw.load_encoder_positions(session['path'], **kwargs)
def test_load_encoder_events(self):
    """load_encoder_events runs on all four sessions (lt5 needs settings)."""
    for session, kwargs in [
            (self.training_lt5, {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
            (self.training_ge5, {}),
            (self.biased_lt5, {'settings': {'IBLRIG_VERSION_TAG': '4.9.9'}}),
            (self.biased_ge5, {})]:
        raw.load_encoder_events(session['path'], **kwargs)
def test_size_outputs(self):
    """Full-session extraction yields dimension-consistent ALF trials.

    Each session is extracted, the '_ibl_trials' ALF object reloaded, and
    its attribute dimensions checked; finally the four extraction log
    files must exist under the main path.
    """
    from ibllib.pipes import extract_session
    sessions = [
        (self.training_ge5, {}),
        (self.training_lt5, {}),
        (self.biased_ge5, {}),
        (self.biased_lt5, {'force': True}),
    ]
    for session, kwargs in sessions:
        extract_session.from_path(session['path'], **kwargs)
        trials = alf.io.load_object(session['path'] / 'alf', object='_ibl_trials')
        self.assertTrue(alf.io.check_dimensions(trials) == 0)
    # Make sure we get the log files
    log_files = list(self.main_path.rglob('_ibl_log.info*.log'))
    self.assertTrue(len(log_files) == 4)
def tearDown(self):
    """Remove extraction artifacts so each test starts from a clean tree.

    Deletes the '_ibl_log' files under the main path, then every file in
    each session's 'alf' folder, and finally the (now empty) 'alf'
    folders themselves.  The original used list comprehensions purely for
    their side effects; plain loops express that intent directly.
    """
    sessions = (self.training_lt5, self.biased_lt5,
                self.training_ge5, self.biased_ge5)
    for f in self.main_path.rglob('_ibl_log.*.log'):
        f.unlink()
    # Files first: a directory can only be removed once it is empty.
    for session in sessions:
        for item in session['path'].rglob('alf/*'):
            if item.is_file():
                item.unlink()
    for session in sessions:
        for folder in session['path'].rglob('alf/'):
            if folder.is_dir():
                folder.rmdir()
# Allow running this test module directly; exit=False keeps the
# interpreter alive so the trailing marker is printed afterwards.
if __name__ == "__main__":
    unittest.main(exit=False)
    print('.')
|
{"hexsha": "f5d055f30bd35185847499b42d44011470384609", "size": 28345, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ibllib/extractors/test_extractors.py", "max_stars_repo_name": "ekellbuch/ibllib", "max_stars_repo_head_hexsha": "6948f86c3f426cbebb39cc693f612d25079d7ef2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/ibllib/extractors/test_extractors.py", "max_issues_repo_name": "ekellbuch/ibllib", "max_issues_repo_head_hexsha": "6948f86c3f426cbebb39cc693f612d25079d7ef2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/ibllib/extractors/test_extractors.py", "max_forks_repo_name": "ekellbuch/ibllib", "max_forks_repo_head_hexsha": "6948f86c3f426cbebb39cc693f612d25079d7ef2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.084717608, "max_line_length": 95, "alphanum_fraction": 0.6337272888, "include": true, "reason": "import numpy", "num_tokens": 7015}
|
module Main
import Data.Vect
-- let .. in defines local variables
-- where .. allows for local function definitions
-- Nat is a natural number type, non-negative integers.
-- ++ is for appending Strings or Lists to each other.
-- words : String -> List String -- splits on a space
||| Average word length of a string: total characters across all words
||| divided by the number of words.
average : (str : String) -> Double
average str =
  let
    numWords = wordCount str                       -- number of words in str
    totalLength = sum (allLengths (words str)) in  -- total chars over all words
    cast totalLength / cast numWords               -- Nat -> Double casts, then divide
  where
    -- Count whitespace-separated words.
    wordCount : String -> Nat
    wordCount str = length (words str)
    -- Length of each word in the list.
    allLengths : List String -> List Nat
    allLengths strs = map length strs
||| Format the average word length of the input as a display line.
||| (Fixes the "lenth" typo in the user-facing string.)
showAverage : String -> String
showAverage str =
  "Average word length: " ++
  show (average str) ++ "\n"
||| Entry point: prompt forever, echoing the average word length of each input.
main : IO ()
main = repl "Enter a string: " showAverage
-- repl displays a prompt, reads a String from the console and displays the
-- result of running a function on that String, forever.
-- Where an expression can be one of several types, you can declare which
-- inline with the function 'the'
-- the Int 45 yields an Int with the value 45. This is not syntax sugar, 'the'
-- is an ordinary function.
-- the : (a: Type) -> a -> a
-- Given a type, a, and a value of that type, 'the' returns the value. The
-- implementation might look like the type = id -- identity
-- Can partially apply functions
-- Addition over any Num instance; used below to demonstrate partial
-- application.
add : Num a => a -> a -> a
add x y = x + y
-- Partially applying `add` to 3 yields a one-argument function.
add3 : Int -> Int
add3 = add 3
nine : Int
nine = add3 6
-- Polymorphic identity, like Java's <T> T identity(T t) { return t; }
identity : ty -> ty
identity x = x
-- If we want to write a function that can add a number to itself, this won't
-- work:
-- bad_double : ty -> ty
-- bad_double x = x + x
-- "ty is not a numeric type"
-- => means we're setting a restriction on a
-- The Num constraint (=>) makes + available on ty.
double : Num ty => ty -> ty
double x = x + x
-- Apply a function twice to a value.
twice : (a -> a) -> a -> a
twice f x = f (f x)
quadruple : Num a => a -> a
quadruple x = double (double x)
-- Lambda syntax is \arg => result; squaring 2 twice gives 16.
squared_twice : Integer
squared_twice = twice (\x => x * x) 2 -- 16
-- :t \x => x * x -- Integer -> Integer
-- :t \x : Int, y : Int => x + y -- Int -> Int
-- Tuples are nested pairs
-- Tuples are nested pairs: (1, 2, 3, 4) is (1, (2, (3, 4))).
tuples_are_nested_pairs : Bool
tuples_are_nested_pairs = (1, (2, (3, (4)))) == (1, 2, 3, 4)
-- ++ appends two lists.
one_to_seven : List Integer
one_to_seven = [1, 2, 3] ++ [4, 5, 6, 7]
-- :: prepends (conses) an element onto a list.
zero_to_seven : List Integer
zero_to_seven = 0 :: one_to_seven
one_to_four : List Integer
one_to_four = 1 :: 2 :: 3 :: 4 :: []
-- [a..b] is an inclusive range literal.
one_to_a_hundred : List Integer
one_to_a_hundred = [1..100]
-- module Xyz at the top of the file, generally matches Xyz.idr in the filename
-- import Foo looks for Foo.idr in the same directory or elsewhere that Idris
-- can find.
{- Multiline
comment -}
||| Documentation comment for foo
foo : String
-- can be viewed with :doc foo or Ctrl-Alt-D in Atom
-- Exercises
-- 1. Types of
-- ("A", "B", "C") -- (String, String, String)
-- ["A", "B", "C"] -- List String
-- (("A", "B"), "C") -- ((String, String), String)
-- Given a natural number, min, and a String, str, convert str to lowercase and
-- return true if it's the same forwards as backwards, as long as it's over the
-- length 'min'.
||| True when str, lowercased, reads the same in both directions and is
||| strictly longer than min characters.
palindrome : Nat -> String -> Bool
palindrome min str = if (length str > min) then
                       (let lstr = toLower str in reverse lstr == lstr)
                     else False
-- (word count, character count) of a string; (Nat, Nat) is the pair type.
counts : String -> (Nat, Nat)
counts str = (length (words str), length str)
-- Largest ten elements in descending order; Ord is akin to Java's Comparable.
top_ten : Ord a => List a -> List a
top_ten list = take 10 (reverse (sort list))
-- Number of strings in the list longer than len.
over_length : Nat -> List String -> Nat
over_length len list = length (filter (\str => length str > len) list)
|
{"hexsha": "4ea6424c7ed08f451c6346ddfe3b7e8ce0f8aa3e", "size": 3920, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "tdd/Chapter2.idr", "max_stars_repo_name": "rickyclarkson/idris-playground", "max_stars_repo_head_hexsha": "3bd9b5fd76df4b2a6c0cf40fa537624e7b692e21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tdd/Chapter2.idr", "max_issues_repo_name": "rickyclarkson/idris-playground", "max_issues_repo_head_hexsha": "3bd9b5fd76df4b2a6c0cf40fa537624e7b692e21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tdd/Chapter2.idr", "max_forks_repo_name": "rickyclarkson/idris-playground", "max_forks_repo_head_hexsha": "3bd9b5fd76df4b2a6c0cf40fa537624e7b692e21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.037037037, "max_line_length": 80, "alphanum_fraction": 0.6558673469, "num_tokens": 1154}
|
import unidip.dip as dip
import numpy as np
"""
File contains three methods to quantify the polarization within the population.
1. Hartigan's D test (which is increasing when the distribution is less similar to unimodal distribution)
2. Fraction of the population holding a view in accordance with the minority.
3. Mean opinion.
"""
def compute_hartigan_opinions(opinion):
    """
    Run Hartigan's dip test of unimodality on the opinions of one step.
    Arguments
    ---------
    opinion : [ float ]
        Series of opinions as produced by the data_collector,
        i.e. [[1, 0.5], [2, 0.2], ... ]; only the second element of each
        pair (the opinion value) is used.
    Returns
    -------
    (dip, pvalue, indices) : tuple of floats
        dip - Hartigans' dip statistic as defined in Hartigan, J. A.;
              Hartigan, P. M. The Dip Test of Unimodality. The Annals of
              Statistics 13 (1985), no. 1, 70--84.
              doi:10.1214/aos/1176346577.
        pvalue - similarity of the distribution to a unimodal one; the
              smaller the value, the more likely the distribution is not
              unimodal.
        indices - left and center indices of the dip.
    """
    values = [pair[1] for pair in opinion]
    return dip.diptst(values)
def compute_fractions_size(opinion):
    """
    Fraction of the population holding a view closer to 1 (the minority).
    Arguments
    ---------
    opinion : [ float ]
        Series of opinions as produced by the data_collector,
        i.e. [[1, 0.5], [2, 0.2], ... ].
    Returns
    -------
    (fraction, n_plus, n_minus) : tuple of float, int and int
        fraction - count of agents with opinion > 0.0 divided by the
            population size.
        n_plus - number of agents with opinion > 0.0.
        n_minus - number of agents with opinion <= 0.0.
    """
    values = [pair[1] for pair in opinion]
    n_minus = np.sum([v <= 0.0 for v in values])
    n_plus = np.sum([v > 0.0 for v in values])
    fraction = n_plus / len(values)
    return fraction, n_plus, n_minus
def compute_mean_opinion(opinion):
    """
    Mean opinion within the population, plus its standard deviation.
    Arguments
    ---------
    opinion : [ float ]
        Series of opinions as produced by the data_collector,
        i.e. [[1, 0.5], [2, 0.2], ... ].
    Returns
    -------
    (mean, stdev) : tuple of floats
        mean - average of the opinion values.
        stdev - (population) standard deviation of the opinion values.
    """
    values = [pair[1] for pair in opinion]
    return np.mean(values), np.std(values)
|
{"hexsha": "b65d8b77b00912e5adc864a762610fdd1fa1657a", "size": 2747, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/stats.py", "max_stars_repo_name": "mahetoodang/hiom", "max_stars_repo_head_hexsha": "72628173086fe8f5edb36c3a88d1119ded4d7854", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/stats.py", "max_issues_repo_name": "mahetoodang/hiom", "max_issues_repo_head_hexsha": "72628173086fe8f5edb36c3a88d1119ded4d7854", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/stats.py", "max_forks_repo_name": "mahetoodang/hiom", "max_forks_repo_head_hexsha": "72628173086fe8f5edb36c3a88d1119ded4d7854", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1527777778, "max_line_length": 110, "alphanum_fraction": 0.6581725519, "include": true, "reason": "import numpy", "num_tokens": 725}
|
[STATEMENT]
lemma flow_usolves_ode:
assumes iv_defined: "t0 \<in> T" "x0 \<in> X"
shows "(flow t0 x0 usolves_ode f from t0) (existence_ivl t0 x0) X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (flow t0 x0 usolves_ode f from t0) (existence_ivl t0 x0) X
[PROOF STEP]
proof (rule usolves_odeI)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
let ?l = "existence_ivl t0 x0 \<inter> {..t0}" and ?r = "existence_ivl t0 x0 \<inter> {t0..}"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
let ?split = "?l \<union> ?r"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
have insert_idem: "insert t0 ?l = ?l" "insert t0 ?r = ?r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. insert t0 (existence_ivl t0 x0 \<inter> {..t0}) = existence_ivl t0 x0 \<inter> {..t0} &&& insert t0 (existence_ivl t0 x0 \<inter> {t0..}) = existence_ivl t0 x0 \<inter> {t0..}
[PROOF STEP]
using iv_defined
[PROOF STATE]
proof (prove)
using this:
t0 \<in> T
x0 \<in> X
goal (1 subgoal):
1. insert t0 (existence_ivl t0 x0 \<inter> {..t0}) = existence_ivl t0 x0 \<inter> {..t0} &&& insert t0 (existence_ivl t0 x0 \<inter> {t0..}) = existence_ivl t0 x0 \<inter> {t0..}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
insert t0 (existence_ivl t0 x0 \<inter> {..t0}) = existence_ivl t0 x0 \<inter> {..t0}
insert t0 (existence_ivl t0 x0 \<inter> {t0..}) = existence_ivl t0 x0 \<inter> {t0..}
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
from existence_ivl_initial_time
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>t0 \<in> T; ?x0.0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 ?x0.0
[PROOF STEP]
have cl_inter: "closure ?l \<inter> closure ?r = {t0}"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>t0 \<in> T; ?x0.0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 ?x0.0
goal (1 subgoal):
1. closure (existence_ivl t0 x0 \<inter> {..t0}) \<inter> closure (existence_ivl t0 x0 \<inter> {t0..}) = {t0}
[PROOF STEP]
proof safe
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
3. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
from iv_defined
[PROOF STATE]
proof (chain)
picking this:
t0 \<in> T
x0 \<in> X
[PROOF STEP]
have "t0 \<in> ?l"
[PROOF STATE]
proof (prove)
using this:
t0 \<in> T
x0 \<in> X
goal (1 subgoal):
1. t0 \<in> existence_ivl t0 x0 \<inter> {..t0}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
t0 \<in> existence_ivl t0 x0 \<inter> {..t0}
goal (3 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
3. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
t0 \<in> existence_ivl t0 x0 \<inter> {..t0}
goal (3 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
3. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
note closure_subset
[PROOF STATE]
proof (state)
this:
?S \<subseteq> closure ?S
goal (3 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
3. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
[PROOF STEP]
show "t0 \<in> closure ?l"
[PROOF STATE]
proof (prove)
using this:
t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
goal (1 subgoal):
1. t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
t0 \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
goal (2 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
from iv_defined
[PROOF STATE]
proof (chain)
picking this:
t0 \<in> T
x0 \<in> X
[PROOF STEP]
have "t0 \<in> ?r"
[PROOF STATE]
proof (prove)
using this:
t0 \<in> T
x0 \<in> X
goal (1 subgoal):
1. t0 \<in> existence_ivl t0 x0 \<inter> {t0..}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
t0 \<in> existence_ivl t0 x0 \<inter> {t0..}
goal (2 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
t0 \<in> existence_ivl t0 x0 \<inter> {t0..}
goal (2 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
note closure_subset
[PROOF STATE]
proof (state)
this:
?S \<subseteq> closure ?S
goal (2 subgoals):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
2. \<And>x. (\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0) \<Longrightarrow> t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
show "t0 \<in> closure ?r"
[PROOF STATE]
proof (prove)
using this:
t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
goal (1 subgoal):
1. t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
t0 \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
assume xl: "x \<in> closure ?l"
[PROOF STATE]
proof (state)
this:
x \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
assume "x \<in> closure ?r"
[PROOF STATE]
proof (state)
this:
x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
have "closure ?r \<subseteq> closure {t0..}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closure (existence_ivl t0 x0 \<inter> {t0..}) \<subseteq> closure {t0..}
[PROOF STEP]
by (rule closure_mono) simp
[PROOF STATE]
proof (state)
this:
closure (existence_ivl t0 x0 \<inter> {t0..}) \<subseteq> closure {t0..}
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
x \<in> closure {t0..}
[PROOF STEP]
have "t0 \<le> x"
[PROOF STATE]
proof (prove)
using this:
x \<in> closure {t0..}
goal (1 subgoal):
1. t0 \<le> x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
t0 \<le> x
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
t0 \<le> x
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
t0 \<le> x
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
note xl
[PROOF STATE]
proof (state)
this:
x \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
x \<in> closure (existence_ivl t0 x0 \<inter> {..t0})
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
have cl: "closure ?l \<subseteq> closure {..t0}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closure (existence_ivl t0 x0 \<inter> {..t0}) \<subseteq> closure {..t0}
[PROOF STEP]
by (rule closure_mono) simp
[PROOF STATE]
proof (state)
this:
closure (existence_ivl t0 x0 \<inter> {..t0}) \<subseteq> closure {..t0}
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
x \<in> closure {..t0}
[PROOF STEP]
have "x \<le> t0"
[PROOF STATE]
proof (prove)
using this:
x \<in> closure {..t0}
goal (1 subgoal):
1. x \<le> t0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<le> t0
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
x \<le> t0
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<And>x0. \<lbrakk>t0 \<in> T; x0 \<in> X\<rbrakk> \<Longrightarrow> t0 \<in> existence_ivl t0 x0; x \<notin> {}; x \<in> closure (existence_ivl t0 x0 \<inter> {..t0}); x \<in> closure (existence_ivl t0 x0 \<inter> {t0..})\<rbrakk> \<Longrightarrow> x = t0
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
t0 \<le> x
x \<le> t0
[PROOF STEP]
show "x = t0"
[PROOF STATE]
proof (prove)
using this:
t0 \<le> x
x \<le> t0
goal (1 subgoal):
1. x = t0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x = t0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
closure (existence_ivl t0 x0 \<inter> {..t0}) \<inter> closure (existence_ivl t0 x0 \<inter> {t0..}) = {t0}
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
have "(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) ?split"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0 \<inter> {..t0} \<union> existence_ivl t0 x0 \<inter> {t0..})
[PROOF STEP]
by (rule has_vderiv_on_union)
(auto simp: cl_inter insert_idem flow_vderiv_on_right flow_vderiv_on_left)
[PROOF STATE]
proof (state)
this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0 \<inter> {..t0} \<union> existence_ivl t0 x0 \<inter> {t0..})
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0 \<inter> {..t0} \<union> existence_ivl t0 x0 \<inter> {t0..})
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
have "?split = existence_ivl t0 x0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. existence_ivl t0 x0 \<inter> {..t0} \<union> existence_ivl t0 x0 \<inter> {t0..} = existence_ivl t0 x0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
existence_ivl t0 x0 \<inter> {..t0} \<union> existence_ivl t0 x0 \<inter> {t0..} = existence_ivl t0 x0
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
[PROOF STEP]
have "(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)"
[PROOF STATE]
proof (prove)
using this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
goal (1 subgoal):
1. (flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
have "flow t0 x0 t \<in> X" if "t \<in> existence_ivl t0 x0" for t
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. flow t0 x0 t \<in> X
[PROOF STEP]
using solves_odeD(2)[OF csol(4)[OF that]] that
[PROOF STATE]
proof (prove)
using this:
?t \<in> {t0--t} \<Longrightarrow> csol t0 x0 t ?t \<in> X
t \<in> existence_ivl t0 x0
goal (1 subgoal):
1. flow t0 x0 t \<in> X
[PROOF STEP]
by (simp add: flow_def)
[PROOF STATE]
proof (state)
this:
?t \<in> existence_ivl t0 x0 \<Longrightarrow> flow t0 x0 ?t \<in> X
goal (4 subgoals):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
2. t0 \<in> existence_ivl t0 x0
3. is_interval (existence_ivl t0 x0)
4. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
?t \<in> existence_ivl t0 x0 \<Longrightarrow> flow t0 x0 ?t \<in> X
[PROOF STEP]
show "(flow t0 x0 solves_ode f) (existence_ivl t0 x0) X"
[PROOF STATE]
proof (prove)
using this:
(flow t0 x0 has_vderiv_on (\<lambda>t. f t (flow t0 x0 t))) (existence_ivl t0 x0)
?t \<in> existence_ivl t0 x0 \<Longrightarrow> flow t0 x0 ?t \<in> X
goal (1 subgoal):
1. (flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
[PROOF STEP]
by (rule solves_odeI)
[PROOF STATE]
proof (state)
this:
(flow t0 x0 solves_ode f) (existence_ivl t0 x0) X
goal (3 subgoals):
1. t0 \<in> existence_ivl t0 x0
2. is_interval (existence_ivl t0 x0)
3. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
show "t0 \<in> existence_ivl t0 x0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t0 \<in> existence_ivl t0 x0
[PROOF STEP]
using iv_defined
[PROOF STATE]
proof (prove)
using this:
t0 \<in> T
x0 \<in> X
goal (1 subgoal):
1. t0 \<in> existence_ivl t0 x0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
t0 \<in> existence_ivl t0 x0
goal (2 subgoals):
1. is_interval (existence_ivl t0 x0)
2. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
show "is_interval (existence_ivl t0 x0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_interval (existence_ivl t0 x0)
[PROOF STEP]
by (simp add: is_interval_existence_ivl)
[PROOF STATE]
proof (state)
this:
is_interval (existence_ivl t0 x0)
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
fix z t
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
assume z: "{t0 -- t} \<subseteq> existence_ivl t0 x0" "(z solves_ode f) {t0 -- t} X" "z t0 = flow t0 x0 t0"
[PROOF STATE]
proof (state)
this:
{t0--t} \<subseteq> existence_ivl t0 x0
(z solves_ode f) {t0--t} X
z t0 = flow t0 x0 t0
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
{t0--t} \<subseteq> existence_ivl t0 x0
(z solves_ode f) {t0--t} X
z t0 = flow t0 x0 t0
[PROOF STEP]
have "t \<in> existence_ivl t0 x0"
[PROOF STATE]
proof (prove)
using this:
{t0--t} \<subseteq> existence_ivl t0 x0
(z solves_ode f) {t0--t} X
z t0 = flow t0 x0 t0
goal (1 subgoal):
1. t \<in> existence_ivl t0 x0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t \<in> existence_ivl t0 x0
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
t \<in> existence_ivl t0 x0
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
from csol[OF this] z
[PROOF STATE]
proof (chain)
picking this:
t \<in> T
{t0--t} \<subseteq> T
csol t0 x0 t t0 = x0
(csol t0 x0 t solves_ode f) {t0--t} X
{t0--t} \<subseteq> existence_ivl t0 x0
(z solves_ode f) {t0--t} X
z t0 = flow t0 x0 t0
[PROOF STEP]
have "(z, t) \<in> csols t0 x0"
[PROOF STATE]
proof (prove)
using this:
t \<in> T
{t0--t} \<subseteq> T
csol t0 x0 t t0 = x0
(csol t0 x0 t solves_ode f) {t0--t} X
{t0--t} \<subseteq> existence_ivl t0 x0
(z solves_ode f) {t0--t} X
z t0 = flow t0 x0 t0
goal (1 subgoal):
1. (z, t) \<in> csols t0 x0
[PROOF STEP]
by (auto simp: csols_def)
[PROOF STATE]
proof (state)
this:
(z, t) \<in> csols t0 x0
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(z, t) \<in> csols t0 x0
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
have "(csol t0 x0 t, t) \<in> csols t0 x0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (csol t0 x0 t, t) \<in> csols t0 x0
[PROOF STEP]
by (rule csol_mem_csols) fact
[PROOF STATE]
proof (state)
this:
(csol t0 x0 t, t) \<in> csols t0 x0
goal (1 subgoal):
1. \<And>z t. \<lbrakk>{t0--t} \<subseteq> existence_ivl t0 x0; (z solves_ode f) {t0--t} X; z t0 = flow t0 x0 t0\<rbrakk> \<Longrightarrow> z t = flow t0 x0 t
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
t \<in> existence_ivl t0 x0
(z, t) \<in> csols t0 x0
(csol t0 x0 t, t) \<in> csols t0 x0
[PROOF STEP]
show "z t = flow t0 x0 t"
[PROOF STATE]
proof (prove)
using this:
t \<in> existence_ivl t0 x0
(z, t) \<in> csols t0 x0
(csol t0 x0 t, t) \<in> csols t0 x0
goal (1 subgoal):
1. z t = flow t0 x0 t
[PROOF STEP]
unfolding flow_def
[PROOF STATE]
proof (prove)
using this:
t \<in> existence_ivl t0 x0
(z, t) \<in> csols t0 x0
(csol t0 x0 t, t) \<in> csols t0 x0
goal (1 subgoal):
1. z t = (if t \<in> existence_ivl t0 x0 then csol t0 x0 t t else (0::'a))
[PROOF STEP]
by (auto intro: csols_unique[rule_format])
[PROOF STATE]
proof (state)
this:
z t = flow t0 x0 t
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 12308, "file": "Ordinary_Differential_Equations_IVP_Picard_Lindeloef_Qualitative", "length": 85}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
...
@author: fertesta, ucaiado
Created on 01/05/2018
'''
from enum import Enum
import datetime
import random
from collections import namedtuple
import numpy as np
ENV = None        # global environment handle; set by the framework at runtime -- TODO confirm who assigns it
BOVESPA = False   # presumably toggles Bovespa-specific behavior; verify against callers
CALLBACKS = {}    # registry of pending callbacks -- presumably keyed per symbol; see init_pending_cbacks()
class NoneObjectError(Exception):
    """
    Raised by BandsRegister and IndicatorRegister data accessors after the
    underlying indicator has been removed with remove_indicator().
    """
    pass
def init_pending_cbacks():
    '''Return a fresh pending-callback registry keyed by event kind.'''
    d_cbacks = {s_key: [] for s_key in ('trade', 'book', 'other', 'candle')}
    d_cbacks['checked'] = False
    return d_cbacks
def gen_uuid(size=16):
    '''
    Return a pseudo-random alphanumeric identifier.

    :param size: integer. Number of characters in the generated id.
    :return: string of `size` characters drawn from [a-zA-Z0-9].
    '''
    table = 'abcdefghijklmnopqrstuvxwyzABCDEFGHIJKLMNOPQRSTUVXWYZ0123456789'
    # Fix: the original called random.seed() on every invocation.  That is
    # unnecessary (the module seeds itself on import) and, on platforms
    # where os.urandom is unavailable, falls back to the clock -- two calls
    # within the same tick would then produce identical ids.
    return ''.join(random.choice(table) for _ in range(size))
def byPrice(book_side, i_depth):
    '''
    Return up to the i_depth best price levels of the order book side
    passed, excluding levels that cross the opposite side's best price.

    :param book_side: Book Side object.
    :param i_depth: integer. Maximum number of levels returned.
    '''
    side = book_side.this_side
    # Always ask for at least 7 levels so some survive the filter below.
    levels = side.get_n_top_prices(max(i_depth, 7), b_return_dataframe=False)
    f_best_other = side.other_side.best_queue[0]
    if not f_best_other:
        return [QueueInfo(0, None)]
    b_bid = side.s_side == 'BID'
    l_filtered = []
    for entry in levels:
        f_price = entry[0]
        # a BID level must sit below the best ask; an ASK level above the
        # best bid
        if (f_price < f_best_other) if b_bid else (f_price > f_best_other):
            l_filtered.append(QueueInfo(entry[1].i_qty, f_price))
    if l_filtered:
        return l_filtered[:i_depth]
    if not levels:
        return [QueueInfo(0, None)]
    last = levels[-1]
    return [QueueInfo(last[1].i_qty, last[0])]
class Side(Enum):
    '''Side of the order book.'''
    BID = 1
    ASK = 2

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, Side):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Defining __eq__ clears the inherited hash; restore it by value.
        return hash(self.value)
class Source(Enum):
    '''Origin of an event delivered to the strategy.'''
    IDLE = 0
    MARKET = 1
    ORDER = 2
    COMMAND = 3

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, Source):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class UpdateReason(Enum):
    '''What triggered a market-data update.'''
    BID_SIDE = 0
    ASK_SIDE = 1
    TRADES = 2
    EMPTY_BAR = 3

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, UpdateReason):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class TimeInForce(Enum):
    '''Order validity policy (FIX time-in-force codes).'''
    DAY = 0
    FAK = 3
    FOK = 4

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, TimeInForce):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class FIXStatus(Enum):
    '''FIX-protocol order status codes.'''
    IDLE = 0
    PENDING = 1
    NEW = 2
    PARTIALLY_FILLED = 3
    FILLED = 4
    CANCELLED = 5
    REPLACED = 6
    REJECTED = 8

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, FIXStatus):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class QuitReason(Enum):
    '''Why the strategy/process is shutting down.'''
    USER_QUIT = 0
    NEUTRINO_QUIT = 1
    SYSTEM_QUIT = 2
    ALGOMAN_QUIT = 3
    OUT_OF_SYNC_QUIT = 4

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, QuitReason):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
# class CandleInterval(Enum):
# ONE_MINUTE = 60
# FIVE_MINUTES = 60 * 5
# ONE_HOUR = 60 * 60
# ONE_DAY = 60 * 60 * 24
# def __eq__(self, o):
# if isinstance(o, CandleInterval):
# return o.value == self.value
# elif isinstance(o, int):
# return o == self.value
# return False
# def __str__(self):
# return self.name
class IndicatorSource(Enum):
    '''Candle field an indicator reads its input series from.'''
    OPEN = 'open'
    HIGH = 'pmax'
    LOW = 'pmin'
    CLOSE = 'close'
    VOLUME = 'volume'
    QUANTITY = 'quantity'
    QUANTITY_BUY = 'quantity_buy'
    QUANTITY_SELL = 'quantity_sell'
    QUANTITY_ACCUMULATED = 'quantity_accumulated'
    QUANTITY_SELL_ACCUMULATED = 'quantity_sell_accumulated'
    QUANTITY_BUY_ACCUMULATED = 'quantity_buy_accumulated'

    def __eq__(self, other):
        # Value-based comparison.  The int branch is kept for symmetry with
        # the other enums in this module; values here are strings, so it
        # always compares unequal.
        if isinstance(other, IndicatorSource):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class NotificationEvent(Enum):
    '''Kind of user notification to raise.'''
    POPUP = 0

    def __eq__(self, o):
        # Fix: the original tested isinstance(o, TimeInForce) -- a
        # copy-paste from another enum -- so two NotificationEvent members
        # never compared equal to each other.
        if isinstance(o, NotificationEvent):
            return o.value == self.value
        elif isinstance(o, int):
            return o == self.value
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        '''
        Allow the object to be used as a key in a hash table. It is used by
        dictionaries
        '''
        return self.value.__hash__()
class OrderType(Enum):
    '''Execution type of an order.'''
    NONE = 0
    LIMIT = 1
    MARKET = 2
    STOP = 3
    STOP_LIMIT = 4

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, OrderType):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class OrderStatus(Enum):
    '''Lifecycle state of an order; values combine bitwise via "|".'''
    WAIT = 0
    WAIT_REPLACE = 1
    WAIT_CANCEL = 2
    ACTIVE = 3
    REPLACED = 4
    PARTIAL_FILLED = 5
    FILLED = 6
    CANCELLED = 7
    REJECTED = 8

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, OrderStatus):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)

    def __or__(self, other):
        # Combine by integer value; any other operand type yields None,
        # matching the original (no explicit return on the fall-through).
        if isinstance(other, OrderStatus):
            return self.value | other.value
        if isinstance(other, int):
            return self.value | other

    def __ror__(self, other):
        # "|" is commutative here, so delegate to __or__.
        return self.__or__(other)
class OrderRetCode(Enum):
    '''Return code of an order-entry request.'''
    OK = 0
    ORDER_NOT_FOUND = -1
    RISK_INVALID_NET = -2
    INVALID_QUANTITY = -3
    INFLIGHT = -4

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, OrderRetCode):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class IndicatorAverage(Enum):
    '''Moving-average flavor used by indicators.'''
    SMA = 0
    EMA = 1
    WMA = 2

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, IndicatorAverage):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
class IndicatorName(Enum):
    '''Identifiers of the technical indicators supported by the framework.'''
    NONE = 0
    SMA = 1
    EMA = 2
    MOM = 3
    SAMOM = 4
    TRANGE = 5
    SATR = 6
    ATR = 7
    SAADX = 8
    ADX = 9
    PLUS_DI = 10
    MINUS_DI = 11
    BBANDS = 12
    SABBANDS = 13
    STDDEV = 14
    RSI = 15
    SAR = 16
    OBV = 17
    STOCH = 18
    STOCHF = 19
    MACD = 20

    def __eq__(self, other):
        # Value-based comparison; plain ints compare equal to members too.
        if isinstance(other, IndicatorName):
            return self.value == other.value
        if isinstance(other, int):
            return self.value == other
        return False

    def __str__(self):
        return self.name

    def __hash__(self):
        # Allow use as a dict key: defining __eq__ clears the default hash.
        return hash(self.value)
# Field layouts for the lightweight record types below.
s_book_info = 'price quantity detail orderID order_id virtual_md_id'
s_trade_info = 'price quantity buyer seller date time status tradeID, trade_id'
s_trade_info += ', datetime'
QueueInfo = namedtuple('QueueInfo', 'quantity price')
NextInfo = namedtuple('NextInfo', 'qty timeInForce price side userData')
BookData = namedtuple('BookData', s_book_info)
TradeInfo = namedtuple('TradeInfo', s_trade_info)
SecurityInfo = namedtuple('SecurityInfo', 'priceIncrement minOrderQty')
StatusEntry = namedtuple('StatusEntry', 'status open_trade_time')
# Fix: the typename was mistakenly 'StatusEntry' (copy-paste), which made
# repr() and pickling of TunnelEntry instances report the wrong type name.
TunnelEntry = namedtuple('TunnelEntry', 'low_price high_price')
class Transaction(object):
    '''
    Mutable record of a single order message's state: price, quantity,
    FIX status, identifiers and user data.
    '''
    def __init__(self):
        self.order = None
        self.side = Side.BID
        self.status = FIXStatus.IDLE
        self.timeInForce = TimeInForce.DAY
        self.price, self.qty, self.cumQty = 0.0, 0, 0
        self.clOrdID = gen_uuid()
        self.secondaryOrderID = ''
        self.userData = ''
        self.neutrinogymData = {}
        self.isPending = False
        self.isAlive = False
        self._last_price, self._last_qty = 0.0, 0
        self._last_quantity = 0

    @staticmethod
    def build1(order):
        '''Return a transaction pre-filled with placeholder values.'''
        t = Transaction()
        t.order = order
        # Overrides on top of the constructor defaults; every remaining
        # field already carries the same value the original assigned.
        t.price = None
        t.qty = None
        t.clOrdID = 'clOrdID:987654321'
        t.secondaryOrderID = 'secondaryOrderID:asdfasdf'
        t.userData = {'userdata': 'asdfqwer'}
        return t
class Summary(object):
    # Per-update counters summarizing market-data activity; all fields are
    # filled in by the producer of the update.
    bidCount = None
    askCount = None
    tradeCount = None
    statusChanged = None
    # new fields, not valid to production
    tradeCountIncrement = 0
    newBidOrders = 0
    canceledBidOrders = 0
    # NOTE(review): name looks misspelled ("replacedOBidrders"); kept as-is
    # because external code may already reference it.
    replacedOBidrders = 0
    newAskOrders = 0
    canceledAskOrders = 0
    # NOTE(review): same misspelling pattern as above.
    replacedOAskrders = 0
class Update(object):
    # Snapshot describing what changed in the last market-data update.
    # NOTE(review): `reason` and `times` are mutable class attributes and
    # are shared by every instance unless reassigned -- confirm this is
    # intentional before relying on per-instance contents.
    symbol = None
    reason = []
    times = []
    bid_count = None
    ask_count = None
    trade_count = None
    status_changed = None
class SchaduleInfos(object):
    '''
    Bookkeeping for one scheduled callback (simulation only).  Two trigger
    kinds are supported: 'at' fires once the clock passes a fixed time of
    day, anything else fires periodically every `every` seconds.
    '''
    # NOTE: valid to simulation only
    name = ''
    kind = ''
    _scheduled_obj = None
    every = 10**6
    at = 21*60**2
    _last_time = 0

    def __init__(self, name):
        self.name = name

    def should_trigger(self, f_time):
        '''Return True (recording f_time) when the schedule is due.'''
        if self.kind == 'at':
            b_due = f_time > self.at
        else:
            b_due = f_time > self._last_time + self.every
        if b_due:
            self._last_time = f_time
        return b_due

    def __eq__(self, other):
        # Schedules compare by name, against either a string or a peer.
        s_other = other if isinstance(other, str) else other.name
        return self.name == s_other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.name)

    def __str__(self):
        return self.name
class Client(object):
    '''
    Placeholder client handle; carries no state of its own yet.
    '''
    def __init__(self):
        pass
class Order(object):
    '''
    Order class is a representation of an area in memory that is used to
    send orders to market.  The same memory address can be reused across
    multiple orders, which is the preferred way of working.  Do not
    dispose of these objects: Neutrino updates them internally as network
    messages arrive.
    '''
    def __init__(self, side):
        self.side = side
        self.status = FIXStatus.IDLE
        self.client = None
        self.symbol = '<undef>'
        self.orderID = '<undef>'
        self.userData = '<undef>'
        self.leg_order = -1
        # `current` mirrors the live transaction; `next` stages a pending one.
        self.current = Transaction.build1(self)
        self.next = Transaction.build1(self)

    def isPending(self):
        '''Whether the order or its current transaction is PENDING.'''
        trans = self.current
        return (self.status == FIXStatus.PENDING
                or (trans and trans.status == FIXStatus.PENDING))

    def isAlive(self):
        '''
        :return: True if order status held by the memory is one of:
            PENDING, NEW, REPLACED, PARTIALLY_FILLED.  Otherwise, False.
        '''
        l_alive = (FIXStatus.PENDING, FIXStatus.NEW, FIXStatus.REPLACED,
                   FIXStatus.PARTIALLY_FILLED)
        trans = self.current
        return self.status in l_alive or (trans and trans.status in l_alive)

    def _isDead(self):
        '''
        :return: True if order status held by the memory is one of:
            FILLED, CANCELLED, REJECTED.  Otherwise, False.
        '''
        l_dead = (FIXStatus.FILLED, FIXStatus.CANCELLED, FIXStatus.REJECTED)
        trans = self.current
        return self.status in l_dead or (trans and trans.status in l_dead)

    def __str__(self):
        '''Human-readable one-line summary of the order.'''
        s_fmt = ('Order(Symbol={}, Side={}, Status={}({}), '
                 'Price={}, Qty={}, ID={})')
        return s_fmt.format(
            self.symbol, self.side, self.current.status, self.status,
            self.current.price, self.current.qty, self.userData['id'])

    def __repr__(self):
        return self.__str__()
class InstrumentRegister(object):
    '''
    Thin handle to one instrument's market data, resolved through the
    global ENV and guarded by a readiness flag.
    '''
    def __init__(self, s_instrument, b_ready=True):
        '''
        :param s_instrument: string. Instrument symbol.
        :param b_ready: boolean. Whether market data is available yet.
        '''
        self._s_instr = s_instrument
        self.b_ready = b_ready

    @property
    def book(self):
        '''Order book of the instrument, or None while not ready.'''
        if self.b_ready:
            return ENV.get_order_book(self._s_instr, False)
        return None

    @property
    def trades(self):
        '''Last trades of the instrument, or None while not ready.'''
        if self.b_ready:
            return ENV.get_last_trades_new(self.book)
        return None

    def ready(self):
        '''Whether market data for this instrument is available.'''
        return self.b_ready

    @property
    def name(self):
        '''Symbol name for this book.'''
        return self.book.name

    @property
    def price_increment(self):
        '''Minimum price step of the instrument.'''
        return self.book.security().priceIncrement

    @property
    def min_order_qty(self):
        '''Minimum order quantity of the instrument.'''
        return self.book.security().minOrderQty
class CandleSelector(object):
    '''
    Sequence-like view over a bar container's candle list, with an
    optional fixed index offset applied on item access.
    '''
    def __init__(self, bar_data, i_corrector=0):
        self.bar_data = bar_data
        self.i_corrector = i_corrector
        self.this_iter = iter(self.bar_data.l)  # kept for compatibility
        self.n = 0

    def __iter__(self):
        self.n = 0
        return self

    def next(self):
        # python-2 style alias
        return self.__next__()

    def __next__(self):
        if self.n >= len(self):
            self.n = 0
            raise StopIteration
        candle = self.bar_data.l[self.n]
        self.n += 1
        return candle

    def __len__(self):
        return len(self.bar_data.l)

    def __getitem__(self, key):
        return self.bar_data.l[int(key + self.i_corrector)]
class BandsRegister(object):
    '''
    Handle to a bands-style indicator (three output series) attached to a
    bar object.  Accessing data after remove_indicator() raises
    NoneObjectError.
    '''
    def __init__(self, bar_obj, s_alias, i_inner_idx=0):
        self.bar_obj = bar_obj
        self._s_alias = s_alias
        self.i_inner_idx = i_inner_idx
        self.s_name = bar_obj._bar_obj._alias.get(s_alias, None)
        self.properties = IndicatorProperties()
        self._removed = False
        # one selector per output series of the bands indicator
        self.values = [IndicatorSelector(self, i) for i in range(3)]

    def remove_indicator(self):
        '''Detach the indicator from its candle configuration.'''
        self._removed = True
        ENV.candles.reset(
            this_candle=self.bar_obj._bar_obj,
            this_conf=self._s_alias)

    @property
    def data(self):
        '''Raw indicator data; [] while the bar object is not ready.'''
        if self._removed:
            raise NoneObjectError
        if not self.bar_obj.b_ready:
            return []
        return self.bar_obj._bar_data[self.s_name]

    @property
    def last_id(self):
        '''Index of the most recent bar, or 0 when no data exists yet.'''
        if self._removed:
            raise NoneObjectError
        if not self.bar_obj.b_ready:
            return 0
        if len(self.values[0]) == 0:
            return 0
        return self.bar_obj._bar_data['LST'].count - 1
class IndicatorRegister(object):
    '''
    Handle to a single-series indicator attached to a bar object.
    Accessing data after remove_indicator() raises NoneObjectError.
    '''
    def __init__(self, bar_obj, s_alias, i_inner_idx=0):
        self.bar_obj = bar_obj
        self._s_alias = s_alias
        self.i_inner_idx = i_inner_idx
        self.s_name = bar_obj._bar_obj._alias.get(s_alias, None)
        self.properties = IndicatorProperties()
        self._removed = False
        self._conf = dict(zip(['what', 'symbol', 'conf'],
                              self.s_name.split(':')))
        self.values = [IndicatorSelector(self, 0)]

    def remove_indicator(self):
        '''Detach the indicator.  NOTE: it is not working properly.'''
        self._removed = True
        ENV.candles.reset(
            this_candle=self.bar_obj._bar_obj,
            this_conf=self._s_alias)

    @property
    def data(self):
        '''Raw indicator data; [] while the bar object is not ready.'''
        if self._removed:
            raise NoneObjectError
        if not self.bar_obj.b_ready:
            return []
        return self.bar_obj._bar_data[self.s_name]

    @property
    def last_id(self):
        '''Index of the most recent bar, or 0 when no data exists yet.'''
        if self._removed:
            raise NoneObjectError
        if not self.bar_obj.b_ready:
            return 0
        if len(self.values[0]) == 0:
            return 0
        return self.bar_obj._bar_data['LST'].count - 1
class IndicatorSelector(object):
    '''
    Sequence-like view over one output line of an indicator register.
    Out-of-range or not-yet-available reads degrade to NaN instead of
    raising.
    '''
    def __init__(self, bar_data, i_inner_idx=0):
        self.bar_data = bar_data        # owning register (exposes .data)
        self.i_inner_idx = i_inner_idx  # which output line to read
        self.n = 0                      # iteration cursor
    def __iter__(self):
        self.n = 0
        return self
    def next(self):
        # Python 2 compatibility alias for __next__
        return self.__next__()
    def __next__(self):
        d_all = self.bar_data.data
        if self.n >= len(d_all):
            self.n = 0
            raise StopIteration
        i_cursor = self.n
        self.n = i_cursor + 1
        return d_all[self.i_inner_idx][i_cursor]
    def __len__(self):
        d_all = self.bar_data.data
        if d_all is None:
            return 0
        return len(d_all)
    def __getitem__(self, key):
        d_all = self.bar_data.data
        if d_all is None:
            return np.nan
        try:
            return d_all[self.i_inner_idx][int(key)]
        except IndexError:
            # out-of-range reads degrade to NaN instead of raising
            return np.nan
class IndicatorProperties(object):
    '''
    Mutable bag of default configuration values for an indicator
    register; CandleRegister.add_* overwrites the relevant attributes
    right after creating the register.
    '''
    bar_count = 10
    source = IndicatorSource.CLOSE
    # BUG FIX: sa_bar_count was defined twice with the same value; the
    # duplicate class attribute has been removed (behavior unchanged).
    sa_bar_count = 10
    deviation_count = 1
    deviation_up = 1
    deviation_down = 1
    average = IndicatorAverage.SMA
class BarProperties(object):
    '''
    Configuration of a bar/candle subscription; every field is unset
    until CandleRegister fills it in.
    '''
    symbol = None
    interval = None
    bar_count = None
class Symbol(object):
    '''
    Re-iterable, index-accessible wrapper around a list of instrument
    names.
    '''
    def __init__(self, l_instrument):
        # one-shot iterator kept for backward compatibility of the attr
        self.instruments = iter(l_instrument)
        self._len = len(l_instrument)
        self.n = 0
        self.l_instruments = l_instrument
    def __iter__(self):
        self.n = 0
        return self
    def next(self):
        # Python 2 compatibility alias for __next__
        return self.__next__()
    def __next__(self):
        if self.n >= self._len:
            self.n = 0
            raise StopIteration
        s_current = self.l_instruments[self.n]
        self.n += 1
        return s_current
    def __getitem__(self, key):
        return self.l_instruments[key]
class CandleRegister(object):
    '''
    Facade over one bar/candle subscription. Lazily exposes the raw
    series (open/high/low/close/volume/...) through cached
    CandleSelector views and registers technical indicators with the
    environment through a single shared code path (_add_indicator).
    All public add_* signatures, alias strings and environment keyword
    arguments are unchanged from the original implementation.
    '''
    def __init__(self, candle_data, b_ready=True):
        '''
        candle_data: internal candle object holding the raw bar series.
        b_ready: bool. Whether the bar data is already warmed up.
        '''
        self._bar_obj = candle_data
        self._indicator_data = {}   # {ta_name: {alias: register}}
        self._bar_data = {}         # {series key: raw series}
        self._selectors = {}        # cached CandleSelector views
        self.b_ready = b_ready
        self.properties = BarProperties()
        self.properties.symbol = None
        self.properties.interval = None
        self.properties.bar_count = None
    def _add_indicator(self, s_ta_name, s_alias, cls_register, obj_name,
                       d_props, **d_params):
        '''
        Shared registration path used by every add_* method.
        s_ta_name: string. Indicator key (e.g. 'SMA').
        s_alias: string. Alias for this configuration.
        cls_register: IndicatorRegister or BandsRegister class.
        obj_name: IndicatorName member to tag the register with.
        d_props: dict. Attributes to set on the register's properties.
        d_params: extra keyword arguments forwarded to the environment.
        Returns the created register object.
        '''
        self.b_ready = False
        if s_ta_name not in self._indicator_data:
            self._indicator_data[s_ta_name] = {}
        # the environment call must precede register creation: the
        # register resolves its alias from _bar_obj._alias in __init__
        ENV.candles.add_indicator_to(
            this_candle=self._bar_obj,
            s_alias=s_alias,
            s_ta_name=s_ta_name,
            **d_params)
        obj_rtn = cls_register(self, s_alias)
        obj_rtn.name = obj_name
        for s_propty, value in d_props.items():
            setattr(obj_rtn.properties, s_propty, value)
        self._indicator_data[s_ta_name][s_alias] = obj_rtn
        return obj_rtn
    def add_sma(self, bar_count, source):
        '''
        Simple moving average.
        bar_count: integer.
        source: IndicatorSource object.
        '''
        return self._add_indicator(
            'SMA', 'SMA_%i_%s' % (bar_count, source), IndicatorRegister,
            IndicatorName.SMA,
            {'bar_count': bar_count, 'source': source},
            s_input=source.value, i_time_period=bar_count)
    def add_satr(self, sa_bar_count):
        '''
        SATR indicator.
        sa_bar_count: integer.
        '''
        return self._add_indicator(
            'SATR', 'SATR_%i' % (sa_bar_count), IndicatorRegister,
            IndicatorName.SATR,
            {'sa_bar_count': sa_bar_count},
            i_time_period=sa_bar_count)
    def add_adx(self, bar_count):
        '''
        Average directional index.
        bar_count: integer.
        '''
        return self._add_indicator(
            'ADX', 'ADX_%i' % (bar_count), IndicatorRegister,
            IndicatorName.ADX,
            {'bar_count': bar_count},
            i_time_period=bar_count)
    def add_mom(self, bar_count, source):
        '''
        Momentum.
        bar_count: integer.
        source: IndicatorSource object.
        NOTE: the alias ignores `source`, so two MOMs with the same
        bar_count but different sources share one slot (pre-existing
        behavior, deliberately kept).
        '''
        return self._add_indicator(
            'MOM', 'MOM_%i' % (bar_count), IndicatorRegister,
            IndicatorName.MOM,
            {'bar_count': bar_count, 'source': source},
            s_input=source.value, i_time_period=bar_count)
    def add_samom(self, bar_count, sa_bar_count, source):
        '''
        SAMOM indicator.
        bar_count, sa_bar_count: integers.
        source: IndicatorSource object.
        '''
        return self._add_indicator(
            'SAMOM', 'SAMOM_%i' % (bar_count), IndicatorRegister,
            IndicatorName.SAMOM,
            {'bar_count': bar_count, 'sa_bar_count': sa_bar_count,
             'source': source},
            s_input=source.value, sa_period=sa_bar_count,
            i_time_period=bar_count)
    def add_stddev(self, bar_count, deviation_count, source):
        '''
        Standard deviation.
        bar_count, deviation_count: integers.
        source: IndicatorSource object.
        '''
        return self._add_indicator(
            'STDDEV', 'STDDEV_%i' % (bar_count), IndicatorRegister,
            IndicatorName.STDDEV,
            {'bar_count': bar_count, 'deviation_count': deviation_count,
             'source': source},
            nbdev=deviation_count, s_input=source.value,
            i_time_period=bar_count)
    def add_saadx(self, bar_count, sa_bar_count):
        '''
        SAADX indicator.
        bar_count, sa_bar_count: integers.
        '''
        return self._add_indicator(
            'SAADX', 'SAADX_%i' % (bar_count), IndicatorRegister,
            IndicatorName.SAADX,
            {'bar_count': bar_count, 'sa_bar_count': sa_bar_count},
            sa_period=sa_bar_count, i_time_period=bar_count)
    def add_trange(self, bar_count=0):
        '''
        True range.
        bar_count: integer (unused by the indicator itself).
        '''
        return self._add_indicator(
            'TRANGE', 'TRANGE_%i' % (bar_count), IndicatorRegister,
            IndicatorName.TRANGE, {},
            i_time_period=bar_count)
    def add_minus_di(self, bar_count):
        '''
        Minus directional indicator.
        bar_count: integer.
        '''
        return self._add_indicator(
            'MINUS_DI', 'MINUS_DI_%i' % (bar_count), IndicatorRegister,
            IndicatorName.MINUS_DI,
            {'bar_count': bar_count},
            i_time_period=bar_count)
    def add_plus_di(self, bar_count):
        '''
        Plus directional indicator.
        bar_count: integer.
        '''
        return self._add_indicator(
            'PLUS_DI', 'PLUS_DI_%i' % (bar_count), IndicatorRegister,
            IndicatorName.PLUS_DI,
            {'bar_count': bar_count},
            i_time_period=bar_count)
    def add_atr(self, bar_count):
        '''
        Average true range.
        bar_count: integer.
        '''
        return self._add_indicator(
            'ATR', 'ATR_%i' % (bar_count), IndicatorRegister,
            IndicatorName.ATR,
            {'bar_count': bar_count},
            i_time_period=bar_count)
    def add_ema(self, bar_count, source):
        '''
        Exponential moving average.
        bar_count: integer.
        source: IndicatorSource object.
        '''
        return self._add_indicator(
            'EMA', 'EMA_%i' % (bar_count), IndicatorRegister,
            IndicatorName.EMA,
            {'bar_count': bar_count, 'source': source},
            s_input=source.value, i_time_period=bar_count)
    def add_bbands(self, bar_count, deviation_up, deviation_down, average):
        '''
        Bollinger bands (three output lines).
        bar_count, deviation_up, deviation_down: integers.
        average: IndicatorAverage object.
        '''
        return self._add_indicator(
            'BBANDS', 'BBANDS_%i' % (bar_count), BandsRegister,
            IndicatorName.BBANDS,
            {'bar_count': bar_count, 'deviation_up': deviation_up,
             'deviation_down': deviation_down, 'average': average},
            i_time_period=bar_count, nbdevup=deviation_up,
            nbdevdn=deviation_down, matype=average.value)
    def add_sabbands(
            self, bar_count, deviation_up, deviation_down, sa_bar_count,
            average):
        '''
        SABBANDS indicator (three output lines).
        bar_count, deviation_up, deviation_down, sa_bar_count: integers.
        average: IndicatorAverage object.
        '''
        return self._add_indicator(
            'SABBANDS', 'SABBANDS_%i' % (bar_count), BandsRegister,
            IndicatorName.SABBANDS,
            {'bar_count': bar_count, 'deviation_up': deviation_up,
             'deviation_down': deviation_down, 'average': average,
             'sa_bar_count': sa_bar_count},
            i_time_period=bar_count, nbdevup=deviation_up,
            nbdevdn=deviation_down, sa_period=sa_bar_count,
            matype=average.value)
    def add_rsi(self, bar_count, source):
        '''
        Relative strength index.
        bar_count: integer.
        source: IndicatorSource object.
        '''
        return self._add_indicator(
            'RSI', 'RSI_%i' % (bar_count), IndicatorRegister,
            IndicatorName.RSI,
            {'bar_count': bar_count, 'source': source},
            s_input=source.value, i_time_period=bar_count)
    def add_sar(self, acceleration, maximum):
        '''
        Parabolic SAR.
        acceleration: number.
        maximum: number.
        '''
        bar_count = 0.  # not used in this Indicator
        return self._add_indicator(
            'SAR', 'SAR_%i' % (bar_count), IndicatorRegister,
            IndicatorName.SAR,
            {'acceleration': acceleration, 'maximum': maximum,
             'bar_count': bar_count},
            acceleration=acceleration, maximum=maximum,
            i_time_period=bar_count)
    def add_obv(self, source):
        '''
        On-balance volume.
        source: IndicatorSource object.
        '''
        bar_count = 0.  # not used in this Indicator
        return self._add_indicator(
            'OBV', 'OBV_%i' % (bar_count), IndicatorRegister,
            IndicatorName.OBV,
            {'bar_count': bar_count, 'source': source},
            s_input=source.value, i_time_period=bar_count)
    def add_stoch(self, fast_k_ma_period, slow_k_ma_period,
                  slow_k_ma_type, slow_d_ma_period, slow_d_ma_type):
        '''
        Slow stochastic oscillator (multi-output register).
        *_period: integers; *_type: IndicatorAverage objects.
        '''
        bar_count = 0.  # not used in this Indicator
        return self._add_indicator(
            'STOCH', 'STOCH_%i' % (bar_count), BandsRegister,
            IndicatorName.STOCH,
            {'bar_count': bar_count,
             'fast_k_ma_period': fast_k_ma_period,
             'slow_k_ma_period': slow_k_ma_period,
             'slow_k_ma_type': slow_k_ma_type,
             'slow_d_ma_period': slow_d_ma_period,
             'slow_d_ma_type': slow_d_ma_type},
            i_time_period=bar_count,
            fast_k_ma_period=fast_k_ma_period,
            slow_k_ma_period=slow_k_ma_period,
            slow_k_ma_type=slow_k_ma_type.value,
            slow_d_ma_period=slow_d_ma_period,
            slow_d_ma_type=slow_d_ma_type.value)
    def add_stochf(self, fast_k_ma_period, fast_d_ma_period, fast_d_ma_type):
        '''
        Fast stochastic oscillator (multi-output register).
        *_period: integers; fast_d_ma_type: IndicatorAverage object.
        '''
        bar_count = 0.  # not used in this Indicator
        return self._add_indicator(
            'STOCHF', 'STOCHF_%i' % (bar_count), BandsRegister,
            IndicatorName.STOCHF,
            {'bar_count': bar_count,
             'fast_k_ma_period': fast_k_ma_period,
             'fast_d_ma_period': fast_d_ma_period,
             'fast_d_ma_type': fast_d_ma_type},
            i_time_period=bar_count,
            fast_k_ma_period=fast_k_ma_period,
            fast_d_ma_period=fast_d_ma_period,
            fast_d_ma_type=fast_d_ma_type.value)
    def add_macd(self, fast_ma_type, fast_ma_period, slow_ma_type,
                 slow_ma_period, signal_ma_type, signal_ma_period):
        '''
        MACD (multi-output register).
        *_period: integers; *_type: IndicatorAverage objects.
        '''
        bar_count = 0.  # not used in this Indicator
        return self._add_indicator(
            'MACD', 'MACD_%i' % (bar_count), BandsRegister,
            IndicatorName.MACD,
            {'bar_count': bar_count,
             'fast_ma_period': fast_ma_period,
             'fast_ma_type': fast_ma_type,
             'slow_ma_type': slow_ma_type,
             'slow_ma_period': slow_ma_period,
             'signal_ma_type': signal_ma_type,
             'signal_ma_period': signal_ma_period},
            i_time_period=bar_count,
            fast_ma_period=fast_ma_period,
            fast_ma_type=fast_ma_type.value,
            slow_ma_period=slow_ma_period,
            slow_ma_type=slow_ma_type.value,
            signal_ma_period=signal_ma_period,
            signal_ma_type=signal_ma_type.value)
    @property
    def last_id(self):
        '''
        Index of the most recent bar, or 0 while data is not ready.
        '''
        if not self.b_ready:
            return 0
        return (self._bar_data['LST'].count - 1)
    def _get_selector(self, s_key, s_field, i_shift=None):
        '''
        Return a cached CandleSelector over one raw series, or [] while
        the bar data is not ready.
        s_key: string. Cache key in self._selectors.
        s_field: string. Key of the raw series in self._bar_data.
        i_shift: optional index corrector forwarded to CandleSelector.
        '''
        if not self.b_ready:
            return []
        if s_key not in self._selectors:
            if i_shift is None:
                obj_sel = CandleSelector(self._bar_data[s_field])
            else:
                obj_sel = CandleSelector(self._bar_data[s_field], i_shift)
            self._selectors[s_key] = obj_sel
        return self._selectors[s_key]
    @property
    def open(self):
        '''Open series: 'LST' viewed with index corrector -1.'''
        return self._get_selector('open', 'LST', -1)
    @property
    def high(self):
        '''High series ('MAX').'''
        return self._get_selector('high', 'MAX')
    @property
    def low(self):
        '''Low series ('MIN').'''
        return self._get_selector('low', 'MIN')
    @property
    def close(self):
        '''Close series ('LST').'''
        return self._get_selector('close', 'LST')
    @property
    def num_trades(self):
        '''Trade-count series ('NTRADES').'''
        return self._get_selector('num_trades', 'NTRADES')
    @property
    def quantity(self):
        '''Quantity series ('QTD').'''
        return self._get_selector('quantity', 'QTD')
    @property
    def quantity_buy(self):
        '''Buy-side quantity series ('QTD_B').'''
        return self._get_selector('quantity_buy', 'QTD_B')
    @property
    def quantity_sell(self):
        '''Sell-side quantity series ('QTD_S').'''
        return self._get_selector('quantity_sell', 'QTD_S')
    @property
    def quantity_accumulated(self):
        '''Accumulated quantity series ('CUMQTD').'''
        return self._get_selector('quantity_accumulated', 'CUMQTD')
    @property
    def quantity_buy_accumulated(self):
        '''Accumulated buy-side quantity series ('CUMQTD_B').'''
        return self._get_selector('quantity_buy_accumulated', 'CUMQTD_B')
    @property
    def quantity_sell_accumulated(self):
        '''Accumulated sell-side quantity series ('CUMQTD_S').'''
        return self._get_selector('quantity_sell_accumulated', 'CUMQTD_S')
    @property
    def volume(self):
        '''Volume series ('VOLUME').'''
        return self._get_selector('volume', 'VOLUME')
    @property
    def timestamps(self):
        '''Bar timestamp series ('TS').'''
        return self._get_selector('timestamps', 'TS')
    def remove_indicator(self, indicator):
        '''
        :param indicator: Indicator|BandsRegister object
        '''
        # NOTE: it is not working properly. It used unsubscribe but the
        # data persists
        indicator.remove_indicator()
    def get_indicators(self):
        '''Return every indicator register created on this bar.'''
        l_rtn = []
        for d_by_alias in self._indicator_data.values():
            l_rtn += d_by_alias.values()
        return l_rtn
    def ready(self):
        '''Whether the bar data is warmed up and readable.'''
        return self.b_ready
class Book(object):
    '''
    A book class contains all the offers for buy and sell present in a
    particular symbol.
    '''
    def __init__(self):
        self.bookAsk = []   # sell offers
        self.bookBid = []   # buy offers
        self.s_name = '<Book.name undefined>'
    def bid(self):
        '''
        Exposes the sides of a book.
        :return: An instance of class Side with a list of bid offers.
        '''
        return self.bookBid
    def ask(self):
        '''
        :return: list of ask (sell) offers.
        '''
        return self.bookAsk
    def name(self):
        '''
        :return: Symbol name for this book.
        '''
        return self.s_name
    def state(self):
        # not implemented in this environment
        pass
    def sequence(self):
        pass
    def byprice(self):
        pass
    def __str__(self):
        s = '\nASK -----------\n'
        for ask in self.bookAsk:
            s += str(ask) + '\n'
        # BUG FIX: was `s = 'BID...'`, which overwrote the string and
        # silently discarded the whole ASK section
        s += 'BID -------------\n'
        for bid in self.bookBid:
            s += str(bid) + '\n'
        s += '-----------------\n'
        return s
class oms_client(object):
    '''
    Low-level order-management routines used by the Oms facade. Keeps a
    class-level mapping {session id: {order id: symbol}} so orders can
    be recovered later by id.
    '''
    _ready = False
    # shared on purpose across all uses (class-level registry)
    _id_mapping = {}
    @staticmethod
    def send_limit_order(
            symbol, side, price, quantity, time_in_force, i_id=11):
        '''
        Send a limit order.
        symbol: string.
        side: neutrino.Side.
        price: float.
        quantity: int.
        time_in_force: neutrino.TimeInForce.
        i_id: integer. Environment/session id.
        '''
        orders = ENV.orders[i_id]
        instr = orders.get_instrument_from(s_symbol=symbol)
        # NOTE: tif in simulation is always the same
        # s_tif = orders._tif2str[time_in_force]
        i_order_id = orders.new_order(
            instr=instr, s_side=str(side), f_price=price, i_qty=quantity)
        oms_client._id_mapping.setdefault(i_id, {})[i_order_id] = symbol
        return i_order_id
    @staticmethod
    def replace_limit_order(
            order, price, quantity, time_in_force, i_id=11):
        '''
        Replace price/quantity of an existing order; returns the new id.
        '''
        orders = ENV.orders[i_id]
        # NOTE: tif in simulation is always the same
        # s_tif = orders._tif2str[time_in_force]
        i_order_id = orders.modify_order(
            order=order.order, f_price=price, i_qty=quantity, s_tif='day')
        # setdefault guards against a replace arriving before any send
        # registered this session id
        oms_client._id_mapping.setdefault(i_id, {})[i_order_id] = order.symbol
        return i_order_id
    @staticmethod
    def get_order_by_id(order_id, i_id=11):
        '''
        Recover a LimitOrderEntry by its id, or None when unknown.
        '''
        orders = ENV.orders[i_id]
        if i_id not in oms_client._id_mapping:
            return None
        symbol = oms_client._id_mapping[i_id].get(order_id, None)
        if not symbol:
            return None
        instr = orders.get_instrument_from(s_symbol=symbol)
        order = instr.get_my_order_by_id(order_id)
        if not order_id or not order:
            return None
        return LimitOrderEntry(order, i_id=i_id)
    @staticmethod
    def get_total_quantity(symbol, side, status_combination=None, i_id=11):
        '''
        Total active quantity on one side of a symbol.
        '''
        orders = ENV.orders[i_id]
        instr = orders.get_instrument_from(s_symbol=symbol)
        return instr.get_active_qty(str(side))
    @staticmethod
    def get_live_orders(symbol, side=None, price=None, i_id=11):
        '''
        List live orders, optionally filtered by side and/or price. The
        (side, price) filter combination is encoded as a bit-string.
        '''
        orders = ENV.orders[i_id]
        s_tests = ''.join(
            [str((not isinstance(x, type(None)))*1) for x in [side, price]])
        instr = orders.get_instrument_from(s_symbol=symbol)
        if s_tests == '10':
            l_rtn = instr.get_my_orders_by_side(str(side))
        elif s_tests == '01':
            l_bids = instr.get_my_orders_by_price('BID', price)
            l_asks = instr.get_my_orders_by_price('ASK', price)
            l_rtn = list(l_bids) + list(l_asks)
        elif s_tests == '11':
            l_rtn = instr.get_my_orders_by_price(str(side), price)
        elif s_tests == '00':
            l_bids = instr.get_my_orders_by_side('BID')
            l_asks = instr.get_my_orders_by_side('ASK')
            l_rtn = list(l_bids) + list(l_asks)
        # BUG FIX: i_id is now propagated; previously the default (11)
        # was always used, so lookups under any other session id
        # produced None entries
        l_rtn = [oms_client.get_order_by_id(x, i_id=i_id) for x in l_rtn]
        return l_rtn
    @staticmethod
    def get_all_orders(symbol, side=None, price=None, i_id=11):
        '''
        All orders matching the filters.
        '''
        # NOTE: currently there is not option to get a list of older orders
        return oms_client.get_live_orders(
            symbol, side=side, price=price, i_id=i_id)
    @staticmethod
    def cancel(order_entry):
        '''
        Cancel one order entry; returns its unique id.
        '''
        return order_entry.cancel()
    @staticmethod
    def cancel_all(symbol=None, side=None, price=None, i_id=11):
        '''
        Cancel orders matching the filters. The (symbol, side, price)
        filter combination is encoded as a bit-string.
        '''
        orders = ENV.orders[i_id]
        s_tests = ''.join(
            [str((not isinstance(x, type(None)))*1)
             for x in [symbol, side, price]])
        instr = None
        if s_tests[0] == '1':
            instr = orders.get_instrument_from(s_symbol=symbol)
        if s_tests == '000':
            orders.cancel_all_orders()
        elif s_tests == '100':
            orders.cancel_instrument_orders(instr)
        elif s_tests == '110':
            orders.cancel_orders_by_side(instr, s_side=str(side))
        elif s_tests == '101':
            # price-only filter cancels on both sides
            orders.cancel_orders_by_price(instr, s_side='BID', f_price=price)
            orders.cancel_orders_by_price(instr, s_side='ASK', f_price=price)
class LimitOrderEntry(object):
    '''
    Wrapper around a simulated order object exposing a FIX-like
    read interface (status, prices, quantities) plus cancel/replace.
    '''
    # translate internal FIX statuses to the public OrderStatus values
    _status_mapping = {
        FIXStatus.IDLE: OrderStatus.WAIT,
        FIXStatus.PENDING: OrderStatus.WAIT,
        FIXStatus.NEW: OrderStatus.ACTIVE,
        FIXStatus.PARTIALLY_FILLED: OrderStatus.PARTIAL_FILLED,
        FIXStatus.FILLED: OrderStatus.FILLED,
        FIXStatus.CANCELLED: OrderStatus.CANCELLED,
        FIXStatus.REPLACED: OrderStatus.REPLACED,
        FIXStatus.REJECTED: OrderStatus.REJECTED}
    def __init__(self, order, i_id):
        self.order = order
        self.i_id = i_id
        self.unique_id = order.userData['new_id']
        self.side = order.side
        self.type = OrderType.LIMIT
        self.account = 9999
        self.symbol = order.symbol
        # placeholders; live values are resolved through the properties
        self._time_in_force = None
        self._secondary_order_id = None
        self._status = None
        self._price = None
        self._last_price = None
        self._trigger_price = None
        self._quantity = None
        self._last_quantity = 0
        self._filled_quantity = None
        self._leaves_quantity = None
        self._transact_time = None
        self._client_order_id = None
        self._original_client_order_id = None
    @property
    def secondary_order_id(self):
        this = self.order.current
        if this and this.secondaryOrderID:
            return int(this.secondaryOrderID)
        return None
    @property
    def virtual_md_id(self):
        this = self.order.current
        if not this or not this.secondaryOrderID:
            return None
        # ids prefixed with 'sec' are synthetic, not market-data ids
        if this.secondaryOrderID[:3] == 'sec':
            return None
        return int(this.secondaryOrderID)
    @property
    def time_in_force(self):
        this = self.order.current
        if this and this.timeInForce:
            return this.timeInForce
        return self.order.next.timeInForce
    @property
    def status(self):
        this = self.order.current
        if this and this.status:
            return self._status_mapping[this.status]
        return self._status_mapping[self.order.next.status]
    @property
    def price(self):
        this = self.order.current
        if this is not None and this.price is not None:
            return this.price
        return self.order.next.price
    @property
    def last_price(self):
        this = self.order.current
        if this and this.price is not None:
            return this._last_price
        return 0.
    @property
    def quantity(self):
        this = self.order.current
        if this and this.qty is not None:
            return this.qty
        return self.order.next.qty
    @property
    def filled_quantity(self):
        this = self.order.current
        if this and this.qty is not None:
            return this.cumQty
        return 0
    @property
    def leaves_quantity(self):
        this = self.order.current
        if this and this.qty is not None:
            return this.qty - this.cumQty
        return self.order.next.qty
    @property
    def transact_time(self):
        return 0
    @property
    def last_quantity(self):
        this = self.order.current
        if this and this.qty is not None:
            return this._last_quantity
        return 0
    def cancel(self):
        '''Cancel this order; returns its unique id.'''
        ENV.orders[self.i_id].cancel_order(self.order)
        return self.unique_id
    def replace(self, price, quantity, time_in_force):
        '''Modify price/quantity; returns the (possibly new) unique id.'''
        return self.replace_limit(price, quantity, time_in_force)
    def replace_limit(self, price, quantity, time_in_force):
        # DEPRECATED: kept for backward compatibility; use replace()
        orders = ENV.orders[self.i_id]
        # NOTE: tif in simulation is always the same
        # s_tif = orders._tif2str[time_in_force]
        i_order_id = orders.modify_order(
            order=self.order, f_price=price, i_qty=quantity, s_tif='day')
        if i_order_id is None:
            return -1
        self.unique_id = i_order_id
        return self.unique_id
    def is_alive(self):
        return self.order.isAlive()
    def is_pending(self):
        return self.order.isPending()
    def is_dead(self):
        return self.order._isDead()
    def __str__(self):
        return str(self.order)
    def __repr__(self):
        return self.__str__()
class SummaryLine(object):
    '''
    Market-summary facade over one instrument: name, best quotes, last
    trade and book status.
    '''
    def __init__(self, instrumet):
        # parameter name kept (misspelled) for call compatibility
        self._instr = instrumet
    @property
    def symbol(self):
        '''Instrument name.'''
        return self._instr.name
    @property
    def bid(self):
        '''Top-of-book bid offer.'''
        return self._instr.book.bid[0]
    @property
    def ask(self):
        '''Top-of-book ask offer.'''
        return self._instr.book.ask[0]
    @property
    def last_trade(self):
        '''Most recent trade of the instrument.'''
        return self._instr.trades[-1]
    @property
    def stats(self):
        raise NotImplementedError('Implement SummaryLine.stats')
    @property
    def status(self):
        '''Current book state wrapped in a StatusEntry.'''
        return StatusEntry(self._instr.book.state, '')
class PositionData(object):
    '''
    View over an instrument's position, in one of three modes:
    - default: initial position plus today's fills;
    - b_partial: only the open (net) part of the position;
    - b_init: only the position loaded at session start.
    '''
    def __init__(self, instrument, b_partial=False, b_init=False):
        self._instr = instrument
        self.b_partial = b_partial  # restrict to the open (net) part
        self.b_init = b_init  # restrict to the initial (pre-session) part
    @property
    def net(self):
        # signed net position; in init mode it is derived from the
        # initial bid/ask quantities instead of the live position
        if self.b_init:
            i_pos = self.bid_quantity
            i_pos -= self.ask_quantity
            return i_pos
        return self._instr.get_position()
    @property
    def bid_quantity(self):
        if self.b_partial:
            f_pos = self.net
            # NOTE(review): non-zero only when net < 0, while bid_volume
            # below is non-zero when net > 0 -- the sign conventions look
            # inconsistent; confirm which is intended before changing.
            if f_pos >= 0:
                return 0
            return abs(f_pos)
        # sum([p for q, p in self._instr._open_pos['BID']])
        i_pos = self._instr._init_pos['qBid']
        if not self.b_init:
            i_pos += self._instr._position['qBid']
        return i_pos
    @property
    def ask_quantity(self):
        if self.b_partial:
            f_pos = self.net
            if f_pos <= 0:
                return 0
            return abs(f_pos)
        i_pos = self._instr._init_pos['qAsk']
        if not self.b_init:
            i_pos += self._instr._position['qAsk']
        return i_pos
    @property
    def bid_volume(self):
        if self.b_partial:
            f_pos = self.net
            if f_pos <= 0:
                return 0
            return sum([p*q for q, p in self._instr._open_pos['BID']])
        f_vol = self._instr._init_pos['Bid']
        if not self.b_init:
            f_vol += self._instr._position['Bid']
        return f_vol
    @property
    def ask_volume(self):
        if self.b_partial:
            f_pos = self.net
            if f_pos >= 0:
                return 0
            return sum([p*q for q, p in self._instr._open_pos['ASK']])
        f_vol = self._instr._init_pos['Ask']
        if not self.b_init:
            f_vol += self._instr._position['Ask']
        return f_vol
    @property
    def net_price(self):
        # average price of the net position (|net volume| / |net qty|)
        f_pos = self.net
        f_netvol = self.ask_volume - self.bid_volume
        if not f_netvol:
            return 0.
        if not f_pos:
            # NOTE(review): returns the net *volume* when the net
            # quantity is zero -- looks like a unit mismatch; confirm.
            return f_netvol
        return abs(f_netvol/f_pos)
class PositionStatus(object):
    '''
    Bundle of the three PositionData views for one instrument.
    '''
    def __init__(self, intrument):
        # parameter name kept (misspelled) for call compatibility
        self.initial = PositionData(intrument, b_init=True)
        self.partial = PositionData(intrument, b_partial=True)
        self.total = PositionData(intrument)
class Position(object):
    '''
    Entry point to query an instrument's position by symbol.
    '''
    @staticmethod
    def get(symbol, i_id=11):
        '''
        Return a PositionStatus for the given symbol.
        '''
        instr = ENV.orders[i_id].get_instrument_from(s_symbol=symbol)
        return PositionStatus(instr)
    def __call__(self, obj_agent):
        # acts as a per-agent accessor; state lives in the environment
        return self
class Oms(object):
    '''
    Thin facade over oms_client: creation, replacement and cancellation
    of limit orders, plus queries over orders already sent.
    '''
    @staticmethod
    def send_limit_order(
            symbol, side, price, quantity, time_in_force, i_id=11):
        '''Create a new limit order; returns its id.'''
        return oms_client.send_limit_order(
            symbol=symbol, side=side, price=price, quantity=quantity,
            time_in_force=time_in_force, i_id=i_id)
    @staticmethod
    def replace(order_entry, price, quantity, time_in_force, i_id=11):
        '''Replace an order's price/quantity; returns the new id.'''
        return oms_client.replace_limit_order(
            order=order_entry, price=price, quantity=quantity,
            time_in_force=time_in_force, i_id=i_id)
    @staticmethod
    def send(symbol, side, price, quantity, time_in_force, i_id=11):
        '''Alias of send_limit_order.'''
        return oms_client.send_limit_order(
            symbol=symbol, side=side, price=price, quantity=quantity,
            time_in_force=time_in_force, i_id=i_id)
    @staticmethod
    def get_order_by_id(order_id, i_id=11):
        '''Recover an order entry by id, or None when unknown.'''
        return oms_client.get_order_by_id(order_id=order_id, i_id=i_id)
    @staticmethod
    def get_total_quantity(symbol, side, status=None, i_id=11):
        '''Total active quantity on one side of a symbol.'''
        # TODO: cover all cases
        # only wait orders combination
        if isinstance(status, int) and status < 4:
            return 0
        return oms_client.get_total_quantity(
            symbol=symbol, side=side, i_id=i_id)
    @staticmethod
    def get_live_orders(symbol, side=None, price=None, i_id=11):
        '''Live orders, optionally filtered by side/price.'''
        return oms_client.get_live_orders(
            symbol=symbol, side=side, price=price, i_id=i_id)
    @staticmethod
    def get_all_orders(symbol, side=None, price=None, i_id=11):
        '''All orders (currently identical to get_live_orders).'''
        return oms_client.get_all_orders(
            symbol=symbol, side=side, price=price, i_id=i_id)
    @staticmethod
    def cancel(order_entry):
        '''Cancel one order entry.'''
        return oms_client.cancel(order_entry)
    @staticmethod
    def cancel_all(symbol=None, side=None, price=None, i_id=11):
        '''Cancel orders filtered by symbol/side/price.'''
        return oms_client.cancel_all(
            symbol=symbol, side=side, price=price, i_id=i_id)
    def __call__(self, obj_agent):
        # acts as a per-agent accessor; state lives in oms_client
        return self
class ScheduledFunction(object):
    '''
    Record of a callback scheduled by Utils.every / Utils.at.
    '''
    def __init__(self, function, s_name, i_hour=0, i_minute=0, i_interval=0):
        self.function = function    # the scheduled callable
        self._name = s_name         # registry key used for removal
        self.interval = i_interval  # interval between runs (every)
        self.hour = i_hour          # wall-clock hour (at)
        self.minute = i_minute      # wall-clock minute (at)
class Utils(object):
    '''
    Scheduling and miscellaneous helpers exposed to strategies.
    '''
    _this_path = None
    @staticmethod
    def _callback_label(callback):
        '''Stable, human-readable name for a callback.'''
        # BUG FIX: the previous str(callback).split(' ')[2] only yielded
        # a sensible name for bound methods ('Cls.meth'); for plain
        # functions it produced the literal string 'at'. __qualname__
        # matches the old bound-method names and fixes plain functions.
        return getattr(callback, '__qualname__', str(callback))
    @staticmethod
    def every(callback, interval, i_id=11):
        '''Schedule `callback` to run every `interval`.'''
        s_name = '[{};{}]'.format(Utils._callback_label(callback), interval)
        return fx.every(
            name=s_name, interval=interval, callback=callback, i_id=i_id)
    @staticmethod
    def at(callback, hour, minute, i_id=11):
        '''Schedule `callback` to run once at hour:minute.'''
        s_name = '[{};{};{}]'.format(
            Utils._callback_label(callback), hour, minute)
        return fx.at(
            name=s_name, hour=hour, minute=minute, callback=callback,
            i_id=i_id)
    @staticmethod
    def remove_function(function, i_id=11):
        '''Unschedule a function previously returned by every/at.'''
        return fx.remove_schedule(name=function._name, i_id=i_id)
    @staticmethod
    def get_functions(i_id=11):
        '''List the currently scheduled functions.'''
        return fx.get_schedules(i_id=i_id)
    @staticmethod
    def now(b_str=False, b_ts=False, b_old=False):
        '''Current environment time (string, timestamp or legacy form).'''
        return fx.now(b_str=b_str, b_ts=b_ts, b_old=b_old)
    @staticmethod
    def notify(s_msg):
        '''Pop a notification up to the user.'''
        fx.notify(NotificationEvent.POPUP, s_msg)
    @staticmethod
    def quit(i_id=11):
        '''Terminate the environment.'''
        fx.quit(i_id=i_id)
    @staticmethod
    def by_price(side, depth):
        '''Aggregate one book side by price up to `depth` levels.'''
        return byPrice(book_side=side, i_depth=depth)
    @staticmethod
    def path():
        '''Configured base path, if any.'''
        return Utils._this_path
    def __call__(self, obj_agent):
        # acts as a per-agent accessor; state lives in the environment
        return self
class Market(object):
@staticmethod
def add(symbol, trade_callback='default', book_callback='default',
trade_buffer_size=64, i_id=11):
return fx.add(
symbol=symbol,
trade_callback=trade_callback,
book_callback=book_callback,
trade_buffer_size=trade_buffer_size,
i_id=i_id)
@staticmethod
def remove(symbol_propty, i_id=11):
return fx.remove(symbol_propty=symbol_propty, i_id=i_id)
@staticmethod
def get(symbol, i_id=11):
return fx.get(symbol=symbol, i_id=i_id)
@staticmethod
def add_bar(symbol, bar_count, interval, callback='default', i_id=11):
return fx.add_bar(
symbol=symbol, bar_count=bar_count, interval=interval,
callback=callback, i_id=i_id)
@staticmethod
def get_bar(symbol, bar_count, interval):
return fx.get_bar(
symbol=symbol, bar_count=bar_count, interval=interval)
@staticmethod
def remove_bar(candle_propty):
return fx.remove_bar(candle_propty=candle_propty)
@staticmethod
def add_summary(symbol, summary_callback='default', i_id=11):
obj_aux = fx.add(
symbol=symbol,
trade_callback=summary_callback,
book_callback=None,
trade_buffer_size=64,
i_id=i_id)
return SummaryLine(obj_aux)
@staticmethod
def remove_summary(summary_propty, i_id=11):
symbol_propty = summary_propty._instr
return fx.remove(symbol_propty=symbol_propty, i_id=i_id)
def __call__(self, obj_agent):
# self._current_agent_id = obj_agent._id2env
return self
class fx(object):
legs = {}
online = True
config_file = 'twap.conf'
now_val = 0
pending_callbacks = {}
symbols_callbacks = {}
time_callbacks = {}
initial_time = 0
trade_callback_used = {}
@staticmethod
def now(b_str=False, b_ts=False, b_old=False):
if b_ts:
s_ts = ENV.order_matching.s_time[:23]
if not s_ts:
return 0
fx_now = (datetime.datetime.strptime(s_ts, '%Y-%m-%d %H:%M:%S.%f'))
fx_now = (fx_now-datetime.datetime(1970, 1, 1)).total_seconds()
return fx_now
if b_str:
return ENV.order_matching.s_time
if not fx.initial_time:
s_ts = ENV.order_matching.s_time[:10]
s_ts += ' 02:00:00.000'
fx_now = (datetime.datetime.strptime(s_ts, '%Y-%m-%d %H:%M:%S.%f'))
fx_now = (fx_now-datetime.datetime(1970, 1, 1)).total_seconds()
fx.initial_time = fx_now
if b_old:
return ENV.order_matching.f_time
return fx.initial_time + ENV.order_matching.f_time
@staticmethod
def broadcast(s_msg):
if s_msg:
print(s_msg)
@staticmethod
def notify(eventtype, s_msg):
'''
...
:param eventtype: NotificationEvent.
:param s_msg: string.
'''
if s_msg:
print(s_msg)
@staticmethod
def getConfigFile():
return fx.config_file
@staticmethod
def configureOrder(symbol, leg_number, order, client=None):
'''
First step in the process of sending an order.
It sets some fields before further processing.
:param symbol:
:param leg_number
:param order:
:param client: string. Used just in neutrinogym
:return:
'''
order.symbol = symbol
order.leg_number = leg_number
if client:
order.client = client
# symbol, 0, 3000.0, 3500.0, 100, '%06.1f', '%03d'
# const std::string &name, size32_t leg, double pxmin, double pxmax,
# quant_t qmax, const char *px_mask, const char *qty_mask
@staticmethod
def setLeg(symbol, legid, pmin, pmax, qmax, px_mask, qty_mask):
'''
Called only once per process, the `setLeg` method is used to setup all
the symbols that will be used in the subsequent calls. Typically
`setLeg` is called at system startup retrieving its parameters from
some configuration file. Internally the API will allocate a transaction
manager for every leg that will be used to send actual order requests.
Currently there is a maximum of three legs, and an exception will be
thrown in if the leg index falls outside this range.
:param symbol: String identifying the symbol this leg will manage.
:param legid: Leg index for this symbol.
:param pmin: Minimum price for this symbol. Order requests out of the
range [pmin-pmax] will be rejected.
:param pmax: Maximum price for this symbol.
:param qmax: Max quantity for this symbol.
:param px_mask: A printf style mask used to format the prices for this
symbol when sending orders.
:param qty_mask: A printf style mask used to format quantities for this
symbol when sending orders.
:return: None
'''
fx.legs[symbol] = {}
fx.legs[symbol][legid] = {'pmin': pmin, 'pmax': pmax, 'qmax': qmax,
'px_mask': px_mask, 'qty_mask': qty_mask}
@staticmethod
def attachToLeg(order, legs_ix):
'''
Before actually sending any order, one has to 'attach' it to a
transaction. `attachToLeg` does just that. The API maintains an array
of 'legs', every symbol has its own leg. For single-symbol algorithms,
`leg_index` is always 0.
:param order: Reference to an Order memory object
:param legs_ix: leg index in the internal array. T
:return: None
'''
fx.legs[legs_ix] = order
def setLookup(self, symbol):
pass
@staticmethod
def isOnline(bookname):
return fx.online
@staticmethod
def quit(i_id=11):
ENV.done = True
this_agent = ENV.agents_actions[i_id].owner
if hasattr(this_agent, 'agent'):
this_agent = this_agent.agent
if hasattr(this_agent, 'finalize'):
this_agent.finalize(QuitReason.USER_QUIT)
@staticmethod
def cancel(order_mem):
'''
Delivers a cancel request
:param order_mem: reference to a Order memory object.
:return:
'''
ENV.agents_actions[order_mem.client].append_msg(('cancel', order_mem))
return True
@staticmethod
def send(order_mem):
'''
Delivers a send request
:param order_mem: reference to a Order memory object.
:return:
'''
# NOTE: the next.status, before send(), is IDLE. After send, is PENDING
order_mem.next.status = FIXStatus.PENDING
ENV.agents_actions[order_mem.client].append_msg(('new', order_mem))
return True
@staticmethod
def book(symbol):
'''
Return an Instance of Book class related to the symbol passed
:param symbol: String identifying the symbol this leg will manage
:return: Book objebct
'''
# TODO: change the return to be a neutrino book object
return ENV.get_order_book(symbol, False)
# @staticmethod
# def get_book(symbol):
# '''
# Return an Instance of Book class related to the symbol passed
# :param symbol: String identifying the symbol this leg will manage
# :return: Book objebct
# '''
# # TODO: change the return to be a neutrino book object
# return ENV.get_order_book(symbol, False)
@staticmethod
def getTrades(book_obj, b_from_candles=False):
'''
Return an Instance of TradeBuffer class related to the book passed
:param book_obj: neutrino Book.
:*param b_from_candles: boolean. Only exist in simulation
:return: TradeBuffer objebct
'''
return ENV.get_last_trades(book_obj, b_from_candles)
# @staticmethod
# def get_trades(book_obj, b_from_candles=False):
# '''
# Return an Instance of TradeBuffer class related to the book passed
# :param book_obj: neutrino Book.
# :*param b_from_candles: boolean. Only exist in simulation
# :return: TradeBuffer objebct
# '''
# return ENV.get_last_trades(book_obj, b_from_candles)
@staticmethod
def getSummary(book_obj, b_from_candles=False):
'''
Return an Instance of Summary class related to the book passed
:param book_obj: neutrino Book.
:*param b_from_candles: boolean. Only exist in simulation
:return: Summary objebct
'''
obj_rtn = Summary()
obj_rtn.bidCount = book_obj.get_counts('BID', 'Total')
obj_rtn.askCount = book_obj.get_counts('ASK', 'Total')
obj_rtn.statusChanged = 0 # is 1 only when the book status changes
# NOTE: fileds presented just in simulation (for now)
obj_rtn.newBidOrders = book_obj.get_counts('BID', 'New')
obj_rtn.canceledBidOrders = book_obj.get_counts('BID', 'Canceled')
obj_rtn.replacedBidOrders = book_obj.get_counts('BID', 'Replaced')
obj_rtn.newAskOrders = book_obj.get_counts('ASK', 'New')
obj_rtn.canceledAskOrders = book_obj.get_counts('ASK', 'Canceled')
obj_rtn.replacedAskOrders = book_obj.get_counts('ASK', 'Replaced')
i_aux = book_obj.get_counts('BID', 'Partially Filled')
i_aux += book_obj.get_counts('BID', 'Filled')
obj_rtn.tradeCountIncrement = i_aux
# NOTE: As I just sent the incremental, the tradeCount is always the
# size of the TradeBuffer. However, in the production, it should be
# used to calculate the index to iterate in the trading list
if b_from_candles:
obj_rtn.tradeCount = len(book_obj.last_trades_aux)
else:
obj_rtn.tradeCount = len(book_obj.last_trades)
return obj_rtn
@staticmethod
def add(symbol, trade_callback='default', book_callback='default',
trade_buffer_size=64, i_id=11):
'''
Create new callbacks to the symbol's book and trades updates
:param symbol: string.
:param trade_callback: function.
:param book_callback: function.
:param trade_buffer_size: int.
:param agent_id: integer. Only valid to simulations
'''
s_err = '[neutrino error] Symbol %s is not registered' % symbol
assert symbol in ENV.l_instrument, s_err
d_pcbacks = fx.pending_callbacks
d_scbacks = fx.symbols_callbacks
if i_id not in d_scbacks:
d_scbacks[i_id] = set()
fx.trade_callback_used[i_id] = None
if i_id not in d_pcbacks:
d_pcbacks[i_id] = init_pending_cbacks()
this_agent = ENV.agents_actions[i_id].owner
if hasattr(ENV.agents_actions[i_id].owner, 'agent'):
this_agent = ENV.agents_actions[i_id].owner.agent
d_pcbacks[i_id]['checked'] = False
if trade_callback and trade_callback != 'default':
d_scbacks[i_id].add(symbol)
d_pcbacks[i_id]['trade'].append([symbol, trade_callback])
fx.trade_callback_used[i_id] = trade_callback
# self.last_trades = TradeBuffer()
# self.last_trades_aux = TradeBuffer()
elif trade_callback == 'default':
d_pcbacks[i_id]['trade'].append([symbol, this_agent.on_data])
if book_callback and book_callback != 'default':
d_scbacks[i_id].add(symbol)
d_pcbacks[i_id]['book'].append([symbol, book_callback])
elif book_callback == 'default':
d_pcbacks[i_id]['book'].append([symbol, this_agent.on_data])
# NOTE: use fx.online here is not quite right, but it is OK for sim
return InstrumentRegister(symbol, b_ready=fx.isOnline(symbol))
@staticmethod
def remove(symbol_propty, i_id=11):
symbol = symbol_propty._s_instr
# remove from fx
d_pcbacks = fx.pending_callbacks
d_scbacks = fx.symbols_callbacks
if i_id in d_scbacks and i_id in d_pcbacks:
d_pcbacks[i_id]['trade'] = []
d_pcbacks[i_id]['book'] = []
if not len(d_pcbacks[i_id]['other']):
d_pcbacks.pop(i_id)
if symbol in d_scbacks[i_id]:
d_scbacks[i_id].remove(symbol)
# remove from environment
ENV.remove_callback(trigger=Source.MARKET, i_id=i_id,
s_instr=symbol)
s_err = '[neutrino info] Symbol %s removed' % symbol
print(s_err)
@staticmethod
def get(symbol, i_id=11):
return InstrumentRegister(symbol)
@staticmethod
def every(name, interval, callback, i_id=11):
'''
Schedule a new callback to run every interval specified
:param name: string.
:param interval: neutrino.Interval object.
:param callback: function.
:param agent_id: integer. Only valid to simulations
'''
# initialize callbacks dict, if needed
d_pcbacks = fx.pending_callbacks
d_tcbacks = fx.time_callbacks
if i_id not in d_tcbacks:
d_tcbacks[i_id] = set()
if i_id not in d_pcbacks:
d_pcbacks[i_id] = init_pending_cbacks()
# append the callback
schdl = SchaduleInfos(name)
schdl.kind = 'every'
schdl.every = interval
d_tcbacks[i_id].add(schdl)
d_pcbacks[i_id]['checked'] = False
d_pcbacks[i_id]['other'].append([schdl, callback])
obj_rtn = ScheduledFunction(
function=callback, s_name=name, i_interval=interval)
schdl._scheduled_obj = obj_rtn
return obj_rtn
@staticmethod
def at(name, hour, minute, callback, i_id=11):
'''
Schedule a new callback to run at the time specified
:param name: string.
:param hour: integer.
:param minute: integer.
:param callback: function.
:param agent_id: integer. Only valid to simulations
'''
# initialize callbacks dict, if needed
d_pcbacks = fx.pending_callbacks
d_tcbacks = fx.time_callbacks
if i_id not in d_tcbacks:
d_tcbacks[i_id] = set()
if i_id not in d_pcbacks:
d_pcbacks[i_id] = init_pending_cbacks()
# append the callback
schdl = SchaduleInfos(name)
schdl.kind = 'at'
schdl.at = hour * 60**2 + minute * 60
d_tcbacks[i_id].add(schdl)
d_pcbacks[i_id]['checked'] = False
d_pcbacks[i_id]['other'].append([schdl, callback])
obj_rtn = ScheduledFunction(
function=callback, s_name=name, i_hour=hour, i_minute=minute)
schdl._scheduled_obj = obj_rtn
return obj_rtn
@staticmethod
def remove_schedule(name, i_id=11):
'''
Remove
:param name: string.
:param agent_id: integer. Only valid to simulations
'''
# remove from fx
d_pcbacks = fx.pending_callbacks
d_tcbacks = fx.time_callbacks
b_removed = True
if i_id in d_tcbacks:
# b_removed = True
d_pcbacks[i_id]['other'] = [
[n, c] for n, c in d_pcbacks[i_id]['other'] if n != name]
d_tcbacks[i_id].remove(name)
# if ((not len(d_pcbacks[i_id]['trade'])) and
# (not len(d_pcbacks[i_id]['book']))):
# d_pcbacks.pop(i_id)
# remove from environment
ENV.remove_callback(trigger=Source.IDLE, i_id=i_id,
s_name=name)
s_err = '[neutrino info] Schedule %s removed' % name
print(s_err)
return b_removed
@staticmethod
def get_schedules(i_id=11):
'''
Get all scheduled functions
:param agent_id: integer. Only valid to simulations
'''
# remove from fx
d_pcbacks = fx.pending_callbacks
d_tcbacks = fx.time_callbacks[i_id]
l_funcs = [c._scheduled_obj for n, c in d_pcbacks[i_id]['other']]
l_funcs += [c._scheduled_obj for c in list(d_tcbacks)]
return l_funcs
@staticmethod
def add_bar(symbol, bar_count, interval, callback='default', i_id=11):
'''
Subscribe the data streaming of a new candle
:param s_symbol: string. symbol to be subscribed
:param interval: integer. The interval of the candle, in minutes
:param bar_count: integer.
:param callback: function.
:return: CandleRegister.
'''
interval = int(interval)
s_err = '[neutrino error] Symbol %s is not registered' % symbol
s_alias = '%s_%s_%s' % (symbol, interval, bar_count)
assert symbol in ENV.l_instrument, s_err
d_pcbacks = fx.pending_callbacks
d_scbacks = fx.symbols_callbacks
if i_id not in d_scbacks:
d_scbacks[i_id] = set()
if i_id not in d_pcbacks:
d_pcbacks[i_id] = init_pending_cbacks()
this_agent = ENV.agents_actions[i_id].owner
if hasattr(ENV.agents_actions[i_id].owner, 'agent'):
this_agent = ENV.agents_actions[i_id].owner.agent
d_pcbacks[i_id]['checked'] = False
if callback and callback != 'default':
d_scbacks[i_id].add(symbol)
d_pcbacks[i_id]['candle'].append([symbol, callback])
elif callback == 'default':
d_scbacks[i_id].add(symbol)
if isinstance(fx.trade_callback_used[i_id], type(None)):
func_callback = this_agent.on_data
else:
func_callback = fx.trade_callback_used[i_id]
d_pcbacks[i_id]['candle'].append([symbol, func_callback])
obj_candle = ENV.candles.subscribe(
s_symbol=symbol,
interval=interval,
i_nbars=bar_count,
s_alias=s_alias)
obj_candle.v3_obj = CandleRegister(obj_candle, False)
obj_candle.v3_obj.properties.symbol = symbol
obj_candle.v3_obj.properties.interval = interval
obj_candle.v3_obj.properties.bar_count = bar_count
return obj_candle.v3_obj
@staticmethod
def get_bar(symbol, bar_count, interval):
'''
Subscribe the data streaming of a new candle
:param s_symbol: string. symbol to be subscribed
:param interval: neutrino.CandleInterval object. The candle interval
:param bar_count: integer.
:param callback:
:return: CandleData.
'''
s_alias = '%s_%s_%s' % (symbol, interval, bar_count)
obj_candle = ENV.candles.get_candle(s_alias=s_alias)
return obj_candle.v3_obj
@staticmethod
def remove_bar(candle_propty):
'''
Remove the CandleProperty object passed
:param candle_propty: CandleProperty object.
:return: boolean.
'''
symbol = candle_propty._bar_obj.symbol_name
interval = candle_propty._bar_obj.i_interval
bar_count = candle_propty._bar_obj.i_nbars
s_alias = '%s_%s_%s' % (symbol, interval, bar_count)
obj_candle = ENV.candles.get_candle(s_alias=s_alias)
b_remove = False
if not isinstance(obj_candle, type(None)):
b_remove = True
ENV.candles.reset(this_candle=obj_candle)
return b_remove
@staticmethod
def subscribeIndicator(symbol, stype, conf, begin):
'''
Subscribe new indicator or candle
:param symbol: string.
:param stype: string.
:param conf: string.
:param begin: float.
:param agent_id: integer. Only valid to simulations
'''
ENV.addIndicator(symbol, stype, conf, begin)
@staticmethod
def unsubscribeIndicator(symbol, stype, conf, begin):
'''
Subscribe new indicator or candle
:param symbol: string.
:param stype: string.
:param conf: string.
:param begin: float.
'''
ENV.excludeIndicator(symbol, stype, conf, begin)
class Data(object):
'''
Data present in a line of a Book
- price :
- orderID :
- quantity :
- detail :
'''
def __init__(self):
self.orderID = 'abc'
self.quantity = 0
self.price = 0
self.detail = ''
def __str__(self):
s_msg = 'Data(orderid={} qty={} price={} detail={}'
return s_msg.format(self.orderID, self.quantity, self.price,
self.detail)
class DataList(object):
'''
A vector (or list) of Data. See DataTypes.hpp
'''
def __init__(self):
self.dataEntities = []
class Callback(object):
'''
A python application must have a callback interface that will be
instantiated by the API and called certain methods as events arrive
from the network.
'''
def bidSide(self, source, book=None):
pass
def askSide(self, source, book=None):
'''
A callback receiving updates on books or on idle events.
:param source: Instance of class Source: IDLE = 0, MARKET = 1,
ORDER = 2, COMMAND = 3
:param book: Instance of Book class,
:return:
'''
pass
def command(self):
pass
def symbolsLoaded(self):
pass
def orderFilled(self):
pass
def orderUpdated(self):
pass
def setParameter(self):
pass
'''
Initialize structures
'''
# Initiate structure to be used by an agent. It complies to the new neutrino
# cluster sintaxe
market = Market()
utils = Utils()
oms = Oms()
position = Position()
'''
End Initialization
'''
|
{"hexsha": "80c015eec65cf1c3d27657eb3413566b9439ac26", "size": 83599, "ext": "py", "lang": "Python", "max_stars_repo_path": "gymV02/neutrino.py", "max_stars_repo_name": "onesoftsa/neutrino-lab", "max_stars_repo_head_hexsha": "2d52bdc46895e5659f4ffbc6ffa2629392ed4f9a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-11-27T17:55:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T21:41:05.000Z", "max_issues_repo_path": "gymV02/neutrino.py", "max_issues_repo_name": "onesoftsa/neutrino-lab", "max_issues_repo_head_hexsha": "2d52bdc46895e5659f4ffbc6ffa2629392ed4f9a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gymV02/neutrino.py", "max_forks_repo_name": "onesoftsa/neutrino-lab", "max_forks_repo_head_hexsha": "2d52bdc46895e5659f4ffbc6ffa2629392ed4f9a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-08T22:34:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-08T22:34:41.000Z", "avg_line_length": 29.9745428469, "max_line_length": 80, "alphanum_fraction": 0.5982487829, "include": true, "reason": "import numpy", "num_tokens": 20234}
|
#include <boost/polygon/interval_data.hpp>
|
{"hexsha": "49883068c47797ea37f36e196004f10da02d3073", "size": 43, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_polygon_interval_data.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_polygon_interval_data.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_polygon_interval_data.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 21.5, "max_line_length": 42, "alphanum_fraction": 0.8139534884, "num_tokens": 10}
|
/**/
#ifndef PoseSensorSIProxy_HPP_
#define PoseSensorSIProxy_HPP_
#include <rw/common/Ptr.hpp>
#include <rw/math.hpp>
#include <rw/trajectory/Path.hpp>
#include <boost/thread.hpp>
#include <ros/ros.h>
#include <caros_sensor_msgs/PoseSensorState.h>
#include <queue>
namespace caros {
/**
* @brief this class implements a cpp proxy to control and read data from
* a PoseSensorServiceInterface.
*
*/
class PoseSensorSIProxy {
public:
typedef rw::common::Ptr<PoseSensorSIProxy> Ptr;
//! constructor - create with device name
PoseSensorSIProxy(rw::common::Ptr<ros::NodeHandle> nhandle);
PoseSensorSIProxy(const std::string& devname);
//! destructor
virtual ~PoseSensorSIProxy();
struct PoseData {
rw::math::Transform3D<> pose;
int id;
float quality;
ros::Time stamp;
std::string frame;
};
std::vector<PoseData> getPoses();
ros::Time getTimeStamp();
protected:
void handlePoseSensorState(const caros_sensor_msgs::PoseSensorState& state);
protected:
rw::common::Ptr<ros::NodeHandle> _nodeHnd;
// states
ros::Subscriber _poseSensorState;
private:
boost::mutex _mutex;
// state variables
std::vector<PoseData> _poses;
ros::Time _stamp;
};
}
#endif //end include guard
|
{"hexsha": "3c1549e993df1c1d6df78456e06172f6d3ef3bc0", "size": 1212, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/interfaces/caros_sensor/include/caros/PoseSensorSIProxy.hpp", "max_stars_repo_name": "tlund80/MARVIN", "max_stars_repo_head_hexsha": "9fddfd4c8e298850fc8ce49c02ff437f139309d0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/interfaces/caros_sensor/include/caros/PoseSensorSIProxy.hpp", "max_issues_repo_name": "tlund80/MARVIN", "max_issues_repo_head_hexsha": "9fddfd4c8e298850fc8ce49c02ff437f139309d0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/interfaces/caros_sensor/include/caros/PoseSensorSIProxy.hpp", "max_forks_repo_name": "tlund80/MARVIN", "max_forks_repo_head_hexsha": "9fddfd4c8e298850fc8ce49c02ff437f139309d0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-11-03T09:10:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T09:10:44.000Z", "avg_line_length": 17.8235294118, "max_line_length": 77, "alphanum_fraction": 0.7326732673, "num_tokens": 318}
|
# %% Import
import geopandas as gpd
import pandas as pd
import numpy as np
import os
"""
Takes the converted geojson file and returns columns of interest
- Subzone
- Planning area
- Region
- Geometry data (important for choropleths)
"""
# %% Functions
def getArea(file):
gdf = gpd.read_file(file)
cols = [
# "id",
# "name",
# "description",
# "SUBZONE_NO",
"SUBZONE_N",
# "SUBZONE_C",
# "CA_IND",
"PLN_AREA_N",
# "PLN_AREA_C",
"REGION_N",
# "REGION_C",
# "INC_CRC",
# "FMEL_UPD_D",
"geometry",
]
# epsg 6933 for an equal area estimation
gdf = gdf.to_crs(epsg=6933)
gdf = gdf[cols]
gdf["area_km2"] = gdf.area / 10 ** 6
return gdf
def getPopulation(file):
df = pd.read_csv(file)
df["SZ"] = df["SZ"].str.upper()
df = df[["SZ", "Pop"]].groupby("SZ", as_index=False).sum()
return df
def mergeAndWrite(df1, df2, outputfn):
# Merge both dfs together and some minor cleaning
mdf = pd.merge(
df1, df2, how="inner", left_on="SUBZONE_N", right_on="SZ", validate="1:1"
)
mdf.columns = mdf.columns.str.lower()
mdf.drop(columns=["sz"], inplace=True)
# Decision to cast density as int because float doesn't add value to the conversation
mdf["pop_density/km2"] = round(mdf["pop"] / mdf["area_km2"], 0).astype(int)
# exports this to a processed geojson file
if os.path.isfile(outputfn):
os.remove(outputfn)
print(f"Removed the original {outputfn} in the folder.")
mdf.to_file(outputfn, driver="GeoJSON")
print(f"A new {outputfn} has been created.")
def main():
gdf = getArea("./r_boundarydata.geojson")
popdf = getPopulation("./r_demographicsdata.csv")
outputfn = "./r2_cleanboundary.geojson"
mergeAndWrite(gdf, popdf, outputfn)
# %% Main
if __name__ == "__main__":
main()
os.system("pause")
|
{"hexsha": "06515d100e83ba4b58ff9437021aae9ae29a54c4", "size": 1939, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/r2_cleanboundary.py", "max_stars_repo_name": "ljunhui/Koufu_SG_Map", "max_stars_repo_head_hexsha": "8d440605cc90c49c6635f4d5202bd262e30b0efb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-01T13:57:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T13:57:15.000Z", "max_issues_repo_path": "data/r2_cleanboundary.py", "max_issues_repo_name": "ljunhui/Koufu_SG_Map", "max_issues_repo_head_hexsha": "8d440605cc90c49c6635f4d5202bd262e30b0efb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/r2_cleanboundary.py", "max_forks_repo_name": "ljunhui/Koufu_SG_Map", "max_forks_repo_head_hexsha": "8d440605cc90c49c6635f4d5202bd262e30b0efb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1818181818, "max_line_length": 89, "alphanum_fraction": 0.6090768437, "include": true, "reason": "import numpy", "num_tokens": 562}
|
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
    """Identity helper: return *item* unchanged (no wrapping applied)."""
    result = item
    return result
class TestFreqConversion(TestCase):
    """Test frequency conversion (asfreq) of Period objects between
    annual, quarterly, monthly, weekly, business, daily, hourly,
    minutely and secondly frequencies."""
    def __init__(self, *args, **kwds):
        TestCase.__init__(self, *args, **kwds)
    def test_conv_annual(self):
        # frequency conversion tests: from Annual Frequency
        ival_A = Period(freq='A', year=2007)
        ival_AJAN = Period(freq="A-JAN", year=2007)
        ival_AJUN = Period(freq="A-JUN", year=2007)
        ival_ANOV = Period(freq="A-NOV", year=2007)
        ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
        ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
        ival_A_to_M_start = Period(freq='M', year=2007, month=1)
        ival_A_to_M_end = Period(freq='M', year=2007, month=12)
        ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
        ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
        ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
        ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
        ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
        ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
        ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
                                   hour=0)
        ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
                                 hour=23)
        ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
                                 hour=23, minute=59)
        ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
                                 hour=23, minute=59, second=59)
        ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
        ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
        ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
        ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
        ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
        ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
        # The 'S'/'E' relation argument is case-insensitive ('S', 's', 'e', 'E').
        assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
        assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
        assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
        assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
        assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
        assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
        assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
        assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
        assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
        assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
        assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
        assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
        # 'min' and 'T' are alternate spellings of minutely frequency.
        assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
        assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
        assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
        assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
        assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
        assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
        assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
        assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
        assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
        assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
        assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
        assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
        # Converting to the same frequency is a no-op.
        assert_equal(ival_A.asfreq('A'), ival_A)
    def test_conv_quarterly(self):
        # frequency conversion tests: from Quarterly Frequency
        ival_Q = Period(freq='Q', year=2007, quarter=1)
        ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
        ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
        ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
        ival_Q_to_A = Period(freq='A', year=2007)
        ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
        ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
        ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
        ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
        ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
        ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
        ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
        ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
        ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
                                   hour=0)
        ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
                                 hour=23)
        ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
                                 hour=23, minute=59)
        ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
                                 hour=23, minute=59, second=59)
        ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
        ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
        ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
        ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
        assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
        assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
        assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
        assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
        assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
        assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
        assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
        assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
        assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
        assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
        assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
        assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
        assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
        assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
        assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
        assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
        assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
        assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
        assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
        assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
        assert_equal(ival_Q.asfreq('Q'), ival_Q)
    def test_conv_monthly(self):
        # frequency conversion tests: from Monthly Frequency
        ival_M = Period(freq='M', year=2007, month=1)
        ival_M_end_of_year = Period(freq='M', year=2007, month=12)
        ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
        ival_M_to_A = Period(freq='A', year=2007)
        ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
        ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
        ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
        ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
        ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
        ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
        ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
        ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
                                   hour=0)
        ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
                                 hour=23)
        ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
                                 hour=23, minute=59)
        ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
                                 hour=23, minute=59, second=59)
        assert_equal(ival_M.asfreq('A'), ival_M_to_A)
        assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
        assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
        assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
        assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
        assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
        assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
        assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
        assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
        assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
        assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
        assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
        assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
        assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
        assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
        assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
        assert_equal(ival_M.asfreq('M'), ival_M)
    def test_conv_weekly(self):
        # frequency conversion tests: from Weekly Frequency
        ival_W = Period(freq='WK', year=2007, month=1, day=1)
        ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
        ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
        ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
        ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
        ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
        ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
        ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
        ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
        ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
        ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
        ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
        ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
        ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
        ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
        ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
        ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
        ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
        ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
        ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
        ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
        ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
        ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
        ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
        ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
        ival_W_to_A = Period(freq='A', year=2007)
        ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
        ival_W_to_M = Period(freq='M', year=2007, month=1)
        # Which annual/quarterly/monthly bucket a week converts to depends
        # on whether the period's last day (weekday 6) falls inside it.
        if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
            ival_W_to_A_end_of_year = Period(freq='A', year=2007)
        else:
            ival_W_to_A_end_of_year = Period(freq='A', year=2008)
        if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
            ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
                                                quarter=1)
        else:
            ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
                                                quarter=2)
        if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
            ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
        else:
            ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
        ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
        ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
        ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
        ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
        ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
                                   hour=0)
        ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
                                 hour=23)
        ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
                                 hour=23, minute=59)
        ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
                                 hour=23, minute=59, second=59)
        assert_equal(ival_W.asfreq('A'), ival_W_to_A)
        assert_equal(ival_W_end_of_year.asfreq('A'),
                     ival_W_to_A_end_of_year)
        assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
        assert_equal(ival_W_end_of_quarter.asfreq('Q'),
                     ival_W_to_Q_end_of_quarter)
        assert_equal(ival_W.asfreq('M'), ival_W_to_M)
        assert_equal(ival_W_end_of_month.asfreq('M'),
                     ival_W_to_M_end_of_month)
        assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
        assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
        assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
        assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
        assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
        assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
        assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
        assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
        assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
        assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
        assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
        assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
        assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
        assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
        assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
        assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
        assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
        assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
        assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
        assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
        assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
        assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
        assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
        assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
        assert_equal(ival_W.asfreq('WK'), ival_W)
    def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
        ival_B = Period(freq='B', year=2007, month=1, day=1)
        ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
        ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
        ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
        ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
        ival_B_to_A = Period(freq='A', year=2007)
        ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
        ival_B_to_M = Period(freq='M', year=2007, month=1)
        ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
        ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
        ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
                                   hour=0)
        ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
                                 hour=23)
        ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
                                 hour=23, minute=59)
        ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
                                 hour=23, minute=59, second=59)
        assert_equal(ival_B.asfreq('A'), ival_B_to_A)
        assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
        assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
        assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
        assert_equal(ival_B.asfreq('M'), ival_B_to_M)
        assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
        assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
        assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
        assert_equal(ival_B.asfreq('D'), ival_B_to_D)
        assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
        assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
        assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
        assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
        assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
        assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
        assert_equal(ival_B.asfreq('B'), ival_B)
    def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
        ival_D = Period(freq='D', year=2007, month=1, day=1)
        ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
        ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
        ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
        ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
        ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
        ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
        ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
        ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
        ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
        ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
        ival_D_to_A = Period(freq='A', year=2007)
        ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
        ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
        ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
        ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
        ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
        ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
        ival_D_to_M = Period(freq='M', year=2007, month=1)
        ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
        ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
                                   hour=0)
        ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
                                 hour=23)
        ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
                                 hour=23, minute=59)
        ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
                                 hour=23, minute=59, second=59)
        assert_equal(ival_D.asfreq('A'), ival_D_to_A)
        assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
                     ival_Deoq_to_AJAN)
        assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
                     ival_Deoq_to_AJUN)
        assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
                     ival_Deoq_to_ADEC)
        assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
        assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
        assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
        assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
        assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
        assert_equal(ival_D.asfreq('M'), ival_D_to_M)
        assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
        assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
        assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
        # Weekend days snap to Friday ('S') or Monday ('E') in business freq.
        assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
        assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
        assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
        assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
        assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
        assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
        assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
        assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
        assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
        assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
        assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
        assert_equal(ival_D.asfreq('D'), ival_D)
    def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
        ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
        ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
                                    hour=23)
        ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
                                       hour=23)
        ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
                                     hour=23)
        ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
                                    hour=23)
        ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
                                   hour=23)
        ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
                                   hour=23)
        ival_H_to_A = Period(freq='A', year=2007)
        ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
        ival_H_to_M = Period(freq='M', year=2007, month=1)
        ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
        ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
        ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
        ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=0, minute=0)
        ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
                                 hour=0, minute=59)
        ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
                                 hour=0, minute=59, second=59)
        assert_equal(ival_H.asfreq('A'), ival_H_to_A)
        assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
        assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
        assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
        assert_equal(ival_H.asfreq('M'), ival_H_to_M)
        assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
        assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
        assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
        assert_equal(ival_H.asfreq('D'), ival_H_to_D)
        assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
        assert_equal(ival_H.asfreq('B'), ival_H_to_B)
        assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
        assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
        assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
        assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
        assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
        assert_equal(ival_H.asfreq('H'), ival_H)
    def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
        ival_T = Period(freq='Min', year=2007, month=1, day=1,
                        hour=0, minute=0)
        ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
                                    hour=23, minute=59)
        ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
                                       hour=23, minute=59)
        ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
                                     hour=23, minute=59)
        ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
                                    hour=23, minute=59)
        ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=23, minute=59)
        ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
                                   hour=23, minute=59)
        ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
                                    hour=0, minute=59)
        ival_T_to_A = Period(freq='A', year=2007)
        ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
        ival_T_to_M = Period(freq='M', year=2007, month=1)
        ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
        ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
        ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
        ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
        ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
                                   hour=0, minute=0, second=0)
        ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
                                 hour=0, minute=0, second=59)
        assert_equal(ival_T.asfreq('A'), ival_T_to_A)
        assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
        assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
        assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
        assert_equal(ival_T.asfreq('M'), ival_T_to_M)
        assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
        assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
        assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
        assert_equal(ival_T.asfreq('D'), ival_T_to_D)
        assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
        assert_equal(ival_T.asfreq('B'), ival_T_to_B)
        assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
        assert_equal(ival_T.asfreq('H'), ival_T_to_H)
        assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
        assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
        assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
        assert_equal(ival_T.asfreq('Min'), ival_T)
    def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
        ival_S = Period(freq='S', year=2007, month=1, day=1,
                        hour=0, minute=0, second=0)
        ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
                                    hour=23, minute=59, second=59)
        ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
                                       hour=23, minute=59, second=59)
        ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
                                     hour=23, minute=59, second=59)
        ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
                                    hour=23, minute=59, second=59)
        ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
                                   hour=23, minute=59, second=59)
        ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
                                   hour=23, minute=59, second=59)
        ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
                                    hour=0, minute=59, second=59)
        ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
                                      hour=0, minute=0, second=59)
        ival_S_to_A = Period(freq='A', year=2007)
        ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
        ival_S_to_M = Period(freq='M', year=2007, month=1)
        ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
        ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
        ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
        ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
                             hour=0)
        ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
                             hour=0, minute=0)
        assert_equal(ival_S.asfreq('A'), ival_S_to_A)
        assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
        assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
        assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
        assert_equal(ival_S.asfreq('M'), ival_S_to_M)
        assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
        assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
        assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
        assert_equal(ival_S.asfreq('D'), ival_S_to_D)
        assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
        assert_equal(ival_S.asfreq('B'), ival_S_to_B)
        assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
        assert_equal(ival_S.asfreq('H'), ival_S_to_H)
        assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
        assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
        assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
        assert_equal(ival_S.asfreq('S'), ival_S)
class TestPeriodIndex(TestCase):
    def __init__(self, *args, **kwds):
        # Plain pass-through to TestCase; kept for symmetry with the
        # other test classes in this module.
        TestCase.__init__(self, *args, **kwds)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
self.assert_(isinstance(series, TimeSeries))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp('D', 'end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
result = series.to_timestamp('D', 'start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
    def test_constructor(self):
        # Length of a PeriodIndex built from start/end must match the
        # number of periods of the given frequency in the range.
        ii = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
        assert_equal(len(ii), 9)
        ii = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
        assert_equal(len(ii), 4 * 9)
        ii = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
        assert_equal(len(ii), 12 * 9)
        ii = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
        # +2 accounts for the leap days in 2004 and 2008.
        assert_equal(len(ii), 365 * 9 + 2)
        ii = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
        assert_equal(len(ii), 261 * 9)
        ii = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
        assert_equal(len(ii), 365 * 24)
        ii = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
        assert_equal(len(ii), 24 * 60)
        ii = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
        assert_equal(len(ii), 24 * 60 * 60)
        # Construction from start + periods.
        start = Period('02-Apr-2005', 'B')
        i1 = PeriodIndex(start=start, periods=20)
        assert_equal(len(i1), 20)
        assert_equal(i1.freq, start.freq)
        assert_equal(i1[0], start)
        # Construction from end + periods.
        end_intv = Period('2006-12-31', 'W')
        i1 = PeriodIndex(end=end_intv, periods=10)
        assert_equal(len(i1), 10)
        assert_equal(i1.freq, end_intv.freq)
        assert_equal(i1[-1], end_intv)
        # Equivalent frequency spellings ('W', '1w', ('w', 1)) produce
        # identical indexes.
        end_intv = Period('2006-12-31', '1w')
        i2 = PeriodIndex(end=end_intv, periods=10)
        assert_equal(len(i1), len(i2))
        self.assert_((i1 == i2).all())
        assert_equal(i1.freq, i2.freq)
        end_intv = Period('2006-12-31', ('w', 1))
        i2 = PeriodIndex(end=end_intv, periods=10)
        assert_equal(len(i1), len(i2))
        self.assert_((i1 == i2).all())
        assert_equal(i1.freq, i2.freq)
        # Mixed start/end frequencies must raise ValueError.
        try:
            PeriodIndex(start=start, end=end_intv)
            raise AssertionError('Cannot allow mixed freq for start and end')
        except ValueError:
            pass
        end_intv = Period('2005-05-01', 'B')
        i1 = PeriodIndex(start=start, end=end_intv)
        # Supplying only one endpoint without periods must raise ValueError.
        try:
            PeriodIndex(start=start)
            raise AssertionError('Must specify periods if missing start or end')
        except ValueError:
            pass
def test_shift(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
ii1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
ii1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
def test_asfreq(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
ii2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
ii3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
ii4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
ii5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
ii6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
ii7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEquals(ii1.asfreq('Q', 'S'), ii2)
self.assertEquals(ii1.asfreq('Q', 's'), ii2)
self.assertEquals(ii1.asfreq('M', 'start'), ii3)
self.assertEquals(ii1.asfreq('D', 'StarT'), ii4)
self.assertEquals(ii1.asfreq('H', 'beGIN'), ii5)
self.assertEquals(ii1.asfreq('Min', 'S'), ii6)
self.assertEquals(ii1.asfreq('S', 'S'), ii7)
self.assertEquals(ii2.asfreq('A', 'S'), ii1)
self.assertEquals(ii2.asfreq('M', 'S'), ii3)
self.assertEquals(ii2.asfreq('D', 'S'), ii4)
self.assertEquals(ii2.asfreq('H', 'S'), ii5)
self.assertEquals(ii2.asfreq('Min', 'S'), ii6)
self.assertEquals(ii2.asfreq('S', 'S'), ii7)
self.assertEquals(ii3.asfreq('A', 'S'), ii1)
self.assertEquals(ii3.asfreq('Q', 'S'), ii2)
self.assertEquals(ii3.asfreq('D', 'S'), ii4)
self.assertEquals(ii3.asfreq('H', 'S'), ii5)
self.assertEquals(ii3.asfreq('Min', 'S'), ii6)
self.assertEquals(ii3.asfreq('S', 'S'), ii7)
self.assertEquals(ii4.asfreq('A', 'S'), ii1)
self.assertEquals(ii4.asfreq('Q', 'S'), ii2)
self.assertEquals(ii4.asfreq('M', 'S'), ii3)
self.assertEquals(ii4.asfreq('H', 'S'), ii5)
self.assertEquals(ii4.asfreq('Min', 'S'), ii6)
self.assertEquals(ii4.asfreq('S', 'S'), ii7)
self.assertEquals(ii5.asfreq('A', 'S'), ii1)
self.assertEquals(ii5.asfreq('Q', 'S'), ii2)
self.assertEquals(ii5.asfreq('M', 'S'), ii3)
self.assertEquals(ii5.asfreq('D', 'S'), ii4)
self.assertEquals(ii5.asfreq('Min', 'S'), ii6)
self.assertEquals(ii5.asfreq('S', 'S'), ii7)
self.assertEquals(ii6.asfreq('A', 'S'), ii1)
self.assertEquals(ii6.asfreq('Q', 'S'), ii2)
self.assertEquals(ii6.asfreq('M', 'S'), ii3)
self.assertEquals(ii6.asfreq('D', 'S'), ii4)
self.assertEquals(ii6.asfreq('H', 'S'), ii5)
self.assertEquals(ii6.asfreq('S', 'S'), ii7)
self.assertEquals(ii7.asfreq('A', 'S'), ii1)
self.assertEquals(ii7.asfreq('Q', 'S'), ii2)
self.assertEquals(ii7.asfreq('M', 'S'), ii3)
self.assertEquals(ii7.asfreq('D', 'S'), ii4)
self.assertEquals(ii7.asfreq('H', 'S'), ii5)
self.assertEquals(ii7.asfreq('Min', 'S'), ii6)
#self.assertEquals(ii7.asfreq('A', 'E'), i_end)
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
self.assertRaises(ValueError, Period, -2000, 'A')
self.assertRaises(ValueError, Period, 0, 'A')
self.assertRaises(ValueError, PeriodIndex, [-1, 0, 1], 'A')
self.assertRaises(ValueError, PeriodIndex, np.array([-1, 0, 1]), 'A')
    def test_dti_to_period(self):
        """DatetimeIndex.to_period keeps its own freq by default and accepts an override."""
        dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
        ii1 = dti.to_period()
        ii2 = dti.to_period(freq='D')
        # default conversion keeps monthly periods; freq='D' yields the month-end days
        self.assertEquals(ii1[0], Period('Jan 2005', freq='M'))
        self.assertEquals(ii2[0], Period('1/31/2005', freq='D'))
        self.assertEquals(ii1[-1], Period('Nov 2005', freq='M'))
        self.assertEquals(ii2[-1], Period('11/30/2005', freq='D'))
def test_iindex_slice_index(self):
ii = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(ii)), index=ii)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
    def test_iindex_qaccess(self):
        """Quarter-string access: '05Q4' must resolve to the '4Q05' entry."""
        ii = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
        s = Series(np.random.rand(len(ii)), index=ii).cumsum()
        # Todo: fix these accessors!
        self.assert_(s['05Q4'] == s[2])
    def test_interval_dt64_round_trip(self):
        """DatetimeIndex -> to_period -> to_timestamp must reproduce the input."""
        dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
                             '1/5/2002', '1/6/2002', '1/7/2002'], freq='B')
        ii = dti.to_period()
        self.assert_(ii.to_timestamp().equals(dti))
        # the round trip must also hold with an explicit non-default frequency
        dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
                             '1/5/2002', '1/6/2002', '1/7/2002'], freq='B')
        ii = dti.to_period(freq='3H')
        self.assert_(ii.to_timestamp().equals(dti))
    def test_iindex_multiples(self):
        """Multiples of a base frequency ('2M', '5S') behave consistently in
        construction, asfreq conversion, and start/end anchoring."""
        ii = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M')
        self.assertEquals(ii[0], Period('1/1/10', '2M'))
        self.assertEquals(ii[1], Period('3/1/10', '2M'))
        # two 2M steps fit in one 6M period / one year
        self.assertEquals(ii[0].asfreq('6M'), ii[2].asfreq('6M'))
        self.assertEquals(ii[0].asfreq('A'), ii[2].asfreq('A'))
        # 'S'/'E' anchor conversion to the start/end of the 2-month span
        self.assertEquals(ii[0].asfreq('M', how='S'),
                          Period('Jan 2010', '1M'))
        self.assertEquals(ii[0].asfreq('M', how='E'),
                          Period('Feb 2010', '1M'))
        self.assertEquals(ii[1].asfreq('M', how='S'),
                          Period('Mar 2010', '1M'))
        # a 5-second period snaps to its containing 5s bucket
        i = Period('1/1/2010 12:05:18', '5S')
        self.assertEquals(i, Period('1/1/2010 12:05:15', '5S'))
        i = Period('1/1/2010 12:05:18', '5S')
        self.assertEquals(i.asfreq('1S', how='E'),
                          Period('1/1/2010 12:05:19', '1S'))
class TestMethods(TestCase):
    """Tests for arithmetic on Period objects."""
    # NOTE: the previous docstring ("Base test class for MaskedArrays.") was a
    # copy-paste leftover; the trivial delegating __init__ was removed as well.

    def test_add(self):
        """Adding an integer advances a Period; other operands are rejected."""
        dt1 = Period(freq='D', year=2008, month=1, day=1)
        dt2 = Period(freq='D', year=2008, month=1, day=2)
        assert_equal(dt1 + 1, dt2)
        # adding a string or another Period is not defined
        self.assertRaises(ValueError, dt1.__add__, "str")
        self.assertRaises(ValueError, dt1.__add__, dt2)
###############################################################################
#------------------------------------------------------------------------------
if __name__ == '__main__':
    import nose
    # run this module's tests verbosely, stopping at the first failure and
    # dropping into pdb on errors/failures
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
|
{"hexsha": "49f7e1734ba5c01d6a7027e186ec834c39f42e70", "size": 51443, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/tseries/tests/test_period.py", "max_stars_repo_name": "takluyver/pandas", "max_stars_repo_head_hexsha": "6c820b4b1a3b945d52cffbd9a4d40a582c077b5d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pandas/tseries/tests/test_period.py", "max_issues_repo_name": "takluyver/pandas", "max_issues_repo_head_hexsha": "6c820b4b1a3b945d52cffbd9a4d40a582c077b5d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pandas/tseries/tests/test_period.py", "max_forks_repo_name": "takluyver/pandas", "max_forks_repo_head_hexsha": "6c820b4b1a3b945d52cffbd9a4d40a582c077b5d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.7677935943, "max_line_length": 80, "alphanum_fraction": 0.5910036351, "include": true, "reason": "import numpy,from numpy", "num_tokens": 15732}
|
from __future__ import absolute_import, print_function
from collections import defaultdict
import pytest
from sage.all import prod, factorial, QQ, vector, Permutation, Permutations
from moment_polytopes import *
def test_rect_tableaux_22():
    """The 2x2 rectangle has exactly two standard Young tableaux."""
    expected = [[(1, 2), (3, 4)], [(1, 3), (2, 4)]]
    tableaux = list(rect_tableaux(2, 2))
    assert len(tableaux) == 2
    for tableau in expected:
        assert tableau in tableaux
@pytest.mark.parametrize(
    "a, b, count",
    [
        (2, 2, 2),
        (2, 3, 5),
        (3, 2, 5),
        (2, 4, 14),
        (4, 2, 14),
        (3, 3, 42),
        (4, 3, 462),
        (3, 4, 462),
        (4, 4, 24024),
    ],
)
def test_rect_tableaux_count(a, b, count):
    # precomputed number of standard tableaux of rectangular shape a x b;
    # note the expected symmetry under swapping a and b
    assert len(rect_tableaux(a, b)) == count
@pytest.mark.parametrize(
    "a, b, count",
    [
        (2, 2, 2),
        (2, 3, 5),
        (3, 2, 5),
        (2, 4, 14),
        (4, 2, 14),
        (3, 3, 36),
        (3, 4, 295),
        (4, 3, 295),
        (4, 4, 6660),
    ],
)
def test_cubicle_tableaux_counts(a, b, count):
    # precomputed counts; cubicle tableaux are a subset of the rectangular
    # tableaux above (equal counts up to 2x4, strictly fewer from 3x3 on)
    assert len(cubicle_tableaux(a, b)) == count
@pytest.mark.parametrize("v, dominant", [((3, 2, 4), False), ((4, 3, 2), True),])
def test_is_dominant(v, dominant):
    # per the two examples, dominant means weakly decreasing entries
    assert is_dominant(v) == dominant
@pytest.mark.parametrize(
    "dims, count",
    [
        ((2, 2), 3),
        ((2, 3), 6),
        ((3, 2), 6),
        ((2, 4), 11),
        ((4, 2), 11),
        ((3, 3), 17),
        ((3, 4), 56),
        ((4, 3), 56),
        ((4, 4), 457),
        ((2, 2, 3), 39),
    ],
)
def test_extremal_edges(dims, count):
    """Extremal edges (with permutations included) have the expected count,
    and every returned edge passes the membership test."""
    # count
    edges = extremal_edges(dims, include_perms=True)
    assert len(edges) == count
    # check that they are indeed extremal edges
    for edge in edges:
        assert is_extremal_edge(dims, edge)
def test_extremal_edges_implementations():
    """The 'generic' and 'bipartite' algorithms must agree on the edge set."""
    for dims in [(2, 2), (2, 3), (3, 2), (3, 3)]:
        generic = {tuple(edge) for edge in extremal_edges(dims, algorithm="generic")}
        bipartite = {tuple(edge) for edge in extremal_edges(dims, algorithm="bipartite")}
        assert generic == bipartite
@pytest.mark.parametrize(
    "dims, count",
    [
        ((2, 2), 2),
        ((2, 3), 6),
        ((3, 2), 6),  # same as above
        ((2, 4), 11),
        ((4, 2), 11),  # same as above
        ((3, 3), 10),
        ((3, 4), 56),
        ((4, 3), 56),  # same as above
        ((4, 4), 233),
        ((2, 2, 3), 25),
        ((2, 2, 2), 4),
        ((2, 2, 2, 2), 12),
    ],
)
def test_extremal_edges_up_to_perms(dims, count):
    """Same as test_extremal_edges but counting representatives only
    (include_perms=False), which can only shrink the counts."""
    # count
    edges = extremal_edges(dims, include_perms=False)
    assert len(edges) == count
    # check that they are indeed extremal edges
    for edge in edges:
        assert is_extremal_edge(dims, edge)
def test_is_extremal_edge():
    """A known candidate that is *not* an extremal edge must be rejected."""
    # non-example of Klyachko
    dims = (2, 2, 3)
    H = (2, -2, 1, -1, QQ("5/3"), QQ("2/3"), QQ("-7/3"))
    assert not is_extremal_edge(dims, H)
def test_is_extremal_edge_ieq():
    """is_extremal_edge_ieq asserts dominance/primitivity of the inequality
    unless explicitly told not to."""
    dims = (2, 2, 3, 12)
    ieq = vector([0, 0, 0, 0, -2, 1, 1, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1, -1, -1]), 0
    # `ieq` is neither dominant nor primitive, so the calls below raise
    # AssertionError before returning a value — no outer `assert` is needed
    # (the previous `assert not ...` inside the raises-block was dead code).
    with pytest.raises(AssertionError):
        is_extremal_edge_ieq(dims, ieq)
    with pytest.raises(AssertionError):
        is_extremal_edge_ieq(dims, ieq, assert_dominant=False)
    # with both checks disabled the inequality is accepted
    assert is_extremal_edge_ieq(
        dims, ieq, assert_dominant=False, assert_primitive=False
    )
@pytest.mark.parametrize(
    "a, b", [(2, 2), (2, 3), (3, 2), (2, 4), (4, 2), (3, 3), (3, 4), (4, 3), (4, 4),]
)
def test_primitivity_lemma(a, b):
    """Test Lemma 6.3 in Vergne and Walter (2014)."""
    edges = extremal_edges((a, b), include_perms=False)
    # root systems of SU(a), SU(b) and their product
    root_system_A = ["A", a - 1]
    root_system_B = ["A", b - 1]
    root_system_AB = [root_system_A, root_system_B]
    for H in edges:
        # edge (H_A, H_B) should be primitive...
        assert is_dual_root_primitive(root_system_AB, H)
        # ...and by our lemma this should imply primitivity of H_A and H_B (if non-zero)
        H_A, H_B = H[:a], H[a:]
        assert H_A.is_zero() or is_dual_root_primitive(root_system_A, H_A)
        assert H_B.is_zero() or is_dual_root_primitive(root_system_B, H_B)
def P(*pi):
    """Shorthand: build a Sage ``Permutation`` from its one-line notation."""
    return Permutation([*pi])
@pytest.mark.parametrize(
    "n, length, perms",
    [
        (3, 0, {P(1, 2, 3)}),
        (3, 1, {P(2, 1, 3), P(1, 3, 2)}),
        (3, 2, {P(2, 3, 1), P(3, 1, 2)}),
        (3, 3, {P(3, 2, 1)}),
        (4, 0, {P(1, 2, 3, 4)}),
        (4, 1, {P(2, 1, 3, 4), P(1, 3, 2, 4), P(1, 2, 4, 3)}),
        (
            4,
            2,
            {P(2, 3, 1, 4), P(2, 1, 4, 3), P(1, 3, 4, 2), P(3, 1, 2, 4), P(1, 4, 2, 3)},
        ),
        (
            4,
            3,
            {
                P(4, 1, 2, 3),
                P(3, 1, 4, 2),
                P(1, 4, 3, 2),
                P(2, 3, 4, 1),
                P(3, 2, 1, 4),
                P(2, 4, 1, 3),
            },
        ),
        (
            4,
            4,
            {P(3, 2, 4, 1), P(4, 1, 3, 2), P(4, 2, 1, 3), P(3, 4, 1, 2), P(2, 4, 3, 1)},
        ),
        (4, 5, {P(3, 4, 2, 1), P(4, 2, 3, 1), P(4, 3, 1, 2)}),
        (4, 6, {P(4, 3, 2, 1)}),
    ],
)
def test_perms_of_length(n, length, perms):
    # perms_of_length must enumerate exactly the permutations in S_n with the
    # given number of inversions (Coxeter length); tables cover all of S_3, S_4
    assert set(perms_of_length(n, length=length)) == perms
@pytest.mark.parametrize(
    "dims, total, tuples",
    [
        ((2, 2, 2), -1, set()),
        ((2, 2, 2), 0, {(0, 0, 0)}),
        ((2, 2, 2), 1, {(1, 0, 0), (0, 1, 0), (0, 0, 1)}),
        ((2, 2, 2), 2, {(1, 1, 0), (1, 0, 1), (0, 1, 1)}),
        ((2, 2, 2), 3, {(1, 1, 1)}),
        ((2, 2, 2), 4, set()),
        ((2, 2, 2, 2), -1, set()),
        ((2, 2, 2, 2), 0, {(0, 0, 0, 0)}),
        ((2, 2, 2, 2), 1, {(1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)}),
        (
            (2, 2, 2, 2),
            2,
            {
                (1, 1, 0, 0),
                (1, 0, 1, 0),
                (1, 0, 0, 1),
                (0, 1, 1, 0),
                (0, 1, 0, 1),
                (0, 0, 1, 1),
            },
        ),
        ((2, 2, 2, 2), 3, {(1, 1, 1, 0), (1, 1, 0, 1), (1, 0, 1, 1), (0, 1, 1, 1)}),
        ((2, 2, 2, 2), 4, {(1, 1, 1, 1)}),
        ((2, 2, 2, 2), 5, set()),
    ],
)
def test_length_tuples(dims, total, tuples):
    # per-factor length tuples summing to `total`; out-of-range totals
    # (negative, or larger than the maximum) yield the empty set
    assert set(length_tuples(dims, total=total)) == tuples
@pytest.mark.parametrize(
    "v, n, expected_shuffles, expected_antishuffles",
    [
        # regular element
        ([3, 2, 1], 0, {P(1, 2, 3)}, {P(1, 2, 3)}),
        ([3, 2, 1], 1, {P(2, 1, 3), P(1, 3, 2)}, {P(2, 1, 3), P(1, 3, 2)}),
        ([3, 2, 1], 2, {P(2, 3, 1), P(3, 1, 2)}, {P(2, 3, 1), P(3, 1, 2)}),
        ([3, 2, 1], 3, {P(3, 2, 1)}, {P(3, 2, 1)}),
        # one degeneracy
        ([2, 2, 1], 0, {P(1, 2, 3)}, set()),
        ([2, 2, 1], 1, {P(1, 3, 2)}, {P(2, 1, 3)}),
        ([2, 2, 1], 2, {P(2, 3, 1)}, {P(3, 1, 2)}),
        ([2, 2, 1], 3, set(), {P(3, 2, 1)}),
        ([2, 1, 1], 0, {P(1, 2, 3)}, set()),
        ([2, 1, 1], 1, {P(2, 1, 3)}, {P(1, 3, 2)}),
        ([2, 1, 1], 2, {P(3, 1, 2)}, {P(2, 3, 1)}),
        ([2, 1, 1], 3, set(), {P(3, 2, 1)}),
        # completely degenerate
        ([1, 1, 1], 0, {P(1, 2, 3)}, set()),
        ([1, 1, 1], 1, set(), set()),
        ([1, 1, 1], 2, set(), set()),
        ([1, 1, 1], 3, set(), {P(3, 2, 1)}),
    ],
)
def test_shuffles_S3(v, n, expected_shuffles, expected_antishuffles):
    """Shuffles/antishuffles of vector `v` at every length, for S_3."""
    assert set(shuffles(v, length=n)) == expected_shuffles
    # 3 is the maximal inversion count in S_3, so antilength complements length
    assert set(antishuffles(v, antilength=3 - n)) == expected_antishuffles
def _card_coset(v):
blocks = defaultdict(int)
for x in v:
blocks[x] += 1
return factorial(len(v)) / prod([factorial(l) for l in blocks.values()])
@pytest.mark.parametrize(
    "v",
    [
        (4, 3, 2, 1),
        (4, 4, 2, 1),
        (4, 4, 2, 2),
        (4, 3, 3, 1),
        (4, 3, 2, 2),
        (4, 4, 4, 2),
        (4, 3, 3, 3),
        (4, 4, 4, 4),
    ],
)
def test_shuffles_S4(v):
    """Shuffles and antishuffles of `v` are consistent across all lengths and
    their total count equals the coset cardinality."""
    d = len(v)
    len_max = d * (d - 1) // 2  # maximal number of inversions in S_d
    # test shuffles
    num_shuffles = 0
    for l in range(len_max + 1):
        pis = shuffles(v, length=l)
        num_shuffles += len(pis)
        # check that each permutation is a shuffle of the correct length
        assert all(is_shuffle(pi, v) for pi in pis)
        assert all(pi.number_of_inversions() == l for pi in pis)
    # test antishuffles (comment fixed: this loop is about antishuffles)
    num_antishuffles = 0
    for l in range(len_max + 1):
        pis = antishuffles(v, antilength=l)
        num_antishuffles += len(pis)
        # check that each permutation is an antishuffle of the correct antilength
        assert all(is_antishuffle(pi, v) for pi in pis)
        assert all(pi.number_of_noninversions(2) == l for pi in pis)
    # check that the number of (anti)shuffles matches the coset cardinality
    # (removed the unused local `card_permutations`)
    assert num_shuffles == num_antishuffles == _card_coset(v)
def test_perm_action():
    """perm_action is compatible with composition: pi.(tau.H) == (pi*tau).H.

    (The unused inner helper `perm0` was removed — dead code.)
    """
    H = ["X", "Y", "Z", "W"]
    for pi in Permutations(4):
        for tau in Permutations(4):
            # left_action_product composes so that pi acts after tau
            pi_tau = pi.left_action_product(tau)
            assert perm_action(pi, perm_action(tau, H)) == perm_action(pi_tau, H)
def test_stabilizer_group():
    """StabilizerGroup blocks, normal forms and orbits for all degeneracy patterns."""
    # all dimensions distinct
    stab = StabilizerGroup([2, 3, 4])
    assert stab.blocks == [[0], [1], [2]]
    assert stab.normal_form("BCA") == tuple("BCA")
    assert stab.orbit({"BCA"}) == {tuple("BCA")}
    assert stab.orbit({"XYX"}) == {tuple("XYX")}
    assert stab.orbit({"ZZZ"}) == {tuple("ZZZ")}
    # first and last dimension equal
    stab = StabilizerGroup([2, 3, 2])
    assert stab.blocks == [[0, 2], [1]]
    assert stab.normal_form("BCA") == tuple("ACB")
    assert stab.orbit({"BCA"}) == {tuple("ACB"), tuple("BCA")}
    assert stab.orbit({"XYX"}) == {tuple("XYX")}
    assert stab.orbit({"ZZZ"}) == {tuple("ZZZ")}
    # all dimension equal
    stab = StabilizerGroup([3, 3, 3])
    assert stab.blocks == [[0, 1, 2]]
    assert stab.normal_form("BCA") == tuple("ABC")
    # orbit under the full symmetric group S_3
    assert stab.orbit({"BCA"}) == {
        tuple("ABC"),
        tuple("ACB"),
        tuple("BAC"),
        tuple("BCA"),
        tuple("CAB"),
        tuple("CBA"),
    }
    assert stab.orbit({"XYX"}) == {tuple("YXX"), tuple("XYX"), tuple("XXY")}
    assert stab.orbit({"ZZZ"}) == {tuple("ZZZ")}
|
{"hexsha": "73f3ae3f820a941c047da00a6b62557309b41f15", "size": 10211, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_combinat.py", "max_stars_repo_name": "amsqi/moment_polytopes", "max_stars_repo_head_hexsha": "641f3c0ebeb0daaea6e9664acb01f95c3686382e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-02-14T21:37:33.000Z", "max_stars_repo_stars_event_max_datetime": "2017-02-15T10:24:37.000Z", "max_issues_repo_path": "tests/test_combinat.py", "max_issues_repo_name": "catch22/moment_polytopes", "max_issues_repo_head_hexsha": "641f3c0ebeb0daaea6e9664acb01f95c3686382e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_combinat.py", "max_forks_repo_name": "catch22/moment_polytopes", "max_forks_repo_head_hexsha": "641f3c0ebeb0daaea6e9664acb01f95c3686382e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-23T15:35:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-23T15:35:22.000Z", "avg_line_length": 28.8446327684, "max_line_length": 88, "alphanum_fraction": 0.4727254921, "include": true, "reason": "from sage", "num_tokens": 4009}
|
import inspect
import io
import logging
import os
import time
import warnings
from collections import namedtuple
from functools import wraps
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import numpy as np
import pandas as pd
import pyarrow as pa
from simplekv import KeyValueStore
from plateau.core import naming
from plateau.core.common_metadata import (
SchemaWrapper,
make_meta,
normalize_column_order,
read_schema_metadata,
validate_compatible,
)
from plateau.core.docs import default_docs
from plateau.core.index import ExplicitSecondaryIndex, IndexBase
from plateau.core.index import merge_indices as merge_indices_algo
from plateau.core.naming import get_partition_file_prefix
from plateau.core.partition import Partition
from plateau.core.typing import StoreInput
from plateau.core.urlencode import decode_key, quote_indices
from plateau.core.utils import ensure_store, ensure_string_type, verify_metadata_version
from plateau.core.uuid import gen_uuid
from plateau.io_components.utils import align_categories
from plateau.serialization import (
DataFrameSerializer,
PredicatesType,
default_serializer,
filter_df_from_predicates,
)
LOGGER = logging.getLogger(__name__)

SINGLE_TABLE = "table"  # default name of the single table a partition holds

# A single predicate literal `column op value`, e.g. ("x", "==", 1)
_Literal = namedtuple("_Literal", ["column", "op", "value"])
# A conjunction split into the literals resolvable from the partition keys
# (`key_part`) and those evaluated on the file contents (`content_part`)
_SplitPredicate = namedtuple("_SplitPredicate", ["key_part", "content_part"])

# Expected column -> dtype layout of the metadata statistics frame
# (presumably parquet row-group statistics — used further down in this module)
_METADATA_SCHEMA = {
    "partition_label": np.dtype("O"),
    "row_group_id": np.dtype(int),
    "row_group_compressed_size": np.dtype(int),
    "row_group_uncompressed_size": np.dtype(int),
    "number_rows_total": np.dtype(int),
    "number_row_groups": np.dtype(int),
    "serialized_size": np.dtype(int),
    "number_rows_per_row_group": np.dtype(int),
}

# Anything accepted where a MetaPartition is constructed from user input
MetaPartitionInput = Optional[Union[pd.DataFrame, Sequence, "MetaPartition"]]
def _predicates_to_named(predicates):
    """Wrap every raw ``(column, op, value)`` triple in a ``_Literal`` namedtuple.

    ``None`` (no predicates) is passed through unchanged.
    """
    if predicates is None:
        return None
    named = []
    for conjunction in predicates:
        named.append([_Literal(column, op, value) for column, op, value in conjunction])
    return named
def _combine_predicates(predicates, logical_conjunction):
if not logical_conjunction:
return predicates
if predicates is None:
return [logical_conjunction]
combined_predicates = []
for conjunction in predicates:
new_conjunction = conjunction[:]
for literal in logical_conjunction:
new_conjunction.append(literal)
combined_predicates.append(new_conjunction)
return combined_predicates
def _initialize_store_for_metapartition(method, method_args, method_kwargs):
    """Replace any `store`/`storage` argument of *method* by an instantiated store.

    The argument is looked up first in ``method_kwargs`` and otherwise
    positionally in ``method_args`` (via the method's signature) and is run
    through ``ensure_store`` so factories become actual store objects.
    Returns the possibly rewritten ``(method_args, method_kwargs)``.
    """
    for store_variable in ["store", "storage"]:
        if store_variable in method_kwargs:
            method_kwargs[store_variable] = ensure_store(method_kwargs[store_variable])
        else:
            method = cast(object, method)
            args = inspect.getfullargspec(method).args
            if store_variable in args:
                ix = args.index(store_variable)
                # reduce index since the argspec and method_args start counting differently due to self
                ix -= 1
                instantiated_store = ensure_store(method_args[ix])
                # rebuild the positional args with the store swapped in place
                new_args = []
                for ix_method, arg in enumerate(method_args):
                    if ix_method != ix:
                        new_args.append(arg)
                    else:
                        new_args.append(instantiated_store)
                method_args = tuple(new_args)
    return method_args, method_kwargs
def _apply_to_list(method):
    """
    Decorate a MetaPartition method to act upon the internal list of metapartitions

    The methods must return a MetaPartition object!
    """

    @wraps(method)
    def _impl(self, *method_args, **method_kwargs):
        if not isinstance(self, MetaPartition):
            raise TypeError("Type unknown %s", type(self))
        # start from an empty sentinel and re-nest the per-partition results
        result = self.as_sentinel()
        if len(self) == 0:
            raise RuntimeError("Invalid MetaPartition. No sub-partitions to act upon.")
        # Look whether there is a `store` in the arguments and instatiate it
        # this way we avoid multiple HTTP pools
        method_args, method_kwargs = _initialize_store_for_metapartition(
            method, method_args, method_kwargs
        )
        method_return = None  # declare for mypy
        if (len(self) == 1) and (self.label is None):
            # sentinel: apply the method to the sentinel itself
            result = method(self, *method_args, **method_kwargs)
        else:
            for mp in self:
                method_return = method(mp, *method_args, **method_kwargs)
                if not isinstance(method_return, MetaPartition):
                    raise ValueError(
                        "Method {} did not return a MetaPartition "
                        "but {}".format(method.__name__, type(method_return))
                    )
                if method_return.is_sentinel:
                    result = method_return
                else:
                    # collect every sub-partition of the per-partition result
                    for mp in method_return:
                        result = result.add_metapartition(mp, schema_validation=False)
        if not isinstance(result, MetaPartition):
            raise ValueError(
                "Result for method {} is not a `MetaPartition` but {}".format(
                    method.__name__, type(method_return)
                )
            )
        return result

    return _impl
class MetaPartitionIterator(Iterator):
    """Iterator yielding each nested sub-partition as its own ``MetaPartition``."""

    def __init__(self, metapartition):
        self.metapartition = metapartition
        self.position = 0  # index into metapartition.metapartitions

    def __iter__(self):
        return self

    def __next__(self):
        current = self.metapartition
        # a sentinel (single entry with label None) yields nothing
        if len(current) == 1:
            if current.label is None:
                raise StopIteration()

        if self.position >= len(current.metapartitions):
            raise StopIteration()
        else:
            mp_dict = current.metapartitions[self.position]
            # These are global attributes, i.e. the nested metapartitions do not carry these and need
            # to be added here
            mp_dict["metadata_version"] = current.metadata_version
            mp_dict["schema"] = current.schema
            mp_dict["partition_keys"] = current.partition_keys
            mp_dict["logical_conjunction"] = current.logical_conjunction
            mp_dict["table_name"] = current.table_name
            self.position += 1
            return MetaPartition.from_dict(mp_dict)

    next = __next__  # Python 2
class MetaPartition(Iterable):
"""
Wrapper for plateau partition which includes additional information
about the parent dataset
"""
    def __init__(
        self,
        label: Optional[str],
        file: Optional[str] = None,
        table_name: str = SINGLE_TABLE,
        data: Optional[pd.DataFrame] = None,
        indices: Optional[Dict[Any, Any]] = None,
        metadata_version: Optional[int] = None,
        schema: Optional[SchemaWrapper] = None,
        partition_keys: Optional[Sequence[str]] = None,
        logical_conjunction: Optional[List[Tuple[Any, str, Any]]] = None,
    ):
        """
        Initialize the :mod:`plateau.io` base class MetaPartition.

        The `MetaPartition` is used as a wrapper around the plateau
        `Partition` and primarily deals with dataframe manipulations,
        in- and output to store.

        The :class:`plateau.io_components.metapartition` is immutable, i.e. all member
        functions will return a new MetaPartition object where the new
        attribute is changed

        Parameters
        ----------
        label
            partition label
        file
            Store key referencing the partition's data file.
        table_name
            Name of the table held by this partition.
        data
            The materialized in-memory DataFrame of this partition.
        indices
            plateau index dictionary; plain ``dict`` values are wrapped into
            ``ExplicitSecondaryIndex`` objects.
        metadata_version
            Defaults to ``naming.DEFAULT_METADATA_VERSION`` when not given.
        schema
            The table schema; inferred from ``data`` when omitted.
        partition_keys
            The dataset partition keys
        logical_conjunction
            A logical conjunction to assign to the MetaPartition. By assigning
            this, the MetaPartition will only be able to load data respecting
            this conjunction.
        """
        if metadata_version is None:
            self.metadata_version = naming.DEFAULT_METADATA_VERSION
        else:
            self.metadata_version = metadata_version
        verify_metadata_version(self.metadata_version)
        self.schema = schema
        self.table_name = table_name
        if data is not None and schema is None:
            # infer the schema from the data when none was supplied
            self.schema = make_meta(
                data, origin=f"{table_name}/{label}", partition_keys=partition_keys
            )

        indices = indices or {}
        for column, index_dct in indices.items():
            if isinstance(index_dct, dict):
                # promote plain dicts to full index objects
                indices[column] = ExplicitSecondaryIndex(
                    column=column, index_dct=index_dct
                )
        self.logical_conjunction = logical_conjunction
        # single-entry list; nesting via add_metapartition extends this list
        self.metapartitions = [
            {
                "label": label,
                "data": data,
                "file": file or None,
                "indices": indices,
                "logical_conjunction": logical_conjunction,
            }
        ]
        self.partition_keys = partition_keys or []
def __repr__(self):
if len(self.metapartitions) > 1:
label = "NESTED ({})".format(len(self.metapartitions))
else:
label = self.label
return "<{_class} v{version} | {label} >".format(
version=self.metadata_version, _class=self.__class__.__name__, label=label
)
    def __len__(self):
        # number of (nested) sub-partitions; a sentinel still reports 1
        return len(self.metapartitions)
    def __iter__(self):
        """Iterate over the nested sub-partitions as individual MetaPartitions."""
        return MetaPartitionIterator(self)
def __getitem__(self, label):
for mp in self:
if mp.label == label:
return mp
raise KeyError("Metapartition doesn't contain partition `{}`".format(label))
    @property
    def data(self):
        """Materialized DataFrame of this partition (single-partition access only)."""
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `data` attribute is not allowed while nested"
            )
        assert isinstance(self.metapartitions[0], dict), self.metapartitions
        return self.metapartitions[0]["data"]
@property
def file(self) -> str:
if len(self.metapartitions) > 1:
raise AttributeError(
"Accessing `files` attribute is not allowed while nested"
)
return cast(str, self.metapartitions[0]["file"])
    @property
    def is_sentinel(self) -> bool:
        # a sentinel is the empty placeholder (single entry, label None)
        # produced by `as_sentinel()` — see `_apply_to_list`
        return len(self.metapartitions) == 1 and self.label is None
    @property
    def label(self) -> str:
        """Partition label (single-partition access only)."""
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `label` attribute is not allowed while nested"
            )
        assert isinstance(self.metapartitions[0], dict), self.metapartitions[0]
        return cast(str, self.metapartitions[0]["label"])
    @property
    def indices(self):
        """Secondary index dictionary of this partition (single-partition access only)."""
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `indices` attribute is not allowed while nested"
            )
        return self.metapartitions[0]["indices"]
    @property
    def partition(self) -> Partition:
        """Plain plateau ``Partition`` equivalent of this metapartition."""
        return Partition(label=self.label, files={self.table_name: self.file})
    def __eq__(self, other):
        """Equality on metadata version, schema and all sub-partitions.

        Nested metapartitions are compared order-insensitively by sorting on
        their (unique) labels.
        """
        if not isinstance(other, MetaPartition):
            return False

        if self.metadata_version != other.metadata_version:
            return False

        if self.schema is not None and not self.schema.equals(other.schema):
            return False

        if len(self.metapartitions) != len(other.metapartitions):
            return False

        # In the case both MetaPartitions are nested, we need to ensure a match
        # for all sub-partitions.
        # Since the label is unique, this can be used as a distinguishing key to sort and compare
        # the nested metapartitions.
        if len(self.metapartitions) > 1:
            for mp_self, mp_other in zip(
                sorted(self.metapartitions, key=lambda x: x["label"]),
                sorted(other.metapartitions, key=lambda x: x["label"]),
            ):
                if mp_self == mp_other:
                    continue
                # If a single metapartition does not match, the whole object is considered different
                return False
            return True

        # This is unnested only
        if self.label != other.label:
            return False

        if self.file != other.file:
            return False

        if self.data is not None and not self.data.equals(other.data):
            return False

        return True
    @staticmethod
    def from_partition(
        partition: Partition,
        data: Optional[pd.DataFrame] = None,
        indices: Optional[Dict] = None,
        metadata_version: Optional[int] = None,
        schema: Optional[SchemaWrapper] = None,
        partition_keys: Optional[List[str]] = None,
        logical_conjunction: Optional[List[Tuple[Any, str, Any]]] = None,
        table_name: str = SINGLE_TABLE,
    ):
        """
        Transform a plateau :class:`~plateau.core.partition.Partition` into a
        :class:`~plateau.io_components.metapartition.MetaPartition`.

        Parameters
        ----------
        partition
            The plateau partition to be wrapped
        data
            A materialised :class:`~pandas.DataFrame` for the partition
        indices : dict
            The index dictionary of the dataset
        schema
            Type metadata for the table, optional
        metadata_version
        partition_keys
            A list of the primary partition keys
        logical_conjunction
            Predicate conjunction restricting what this partition may load
        table_name
            Key under which the partition's file is registered

        Returns
        -------
        :class:`~plateau.io_components.metapartition.MetaPartition`
        """
        return MetaPartition(
            label=partition.label,
            file=partition.files[table_name],
            data=data,
            indices=indices,
            metadata_version=metadata_version,
            schema=schema,
            partition_keys=partition_keys,
            logical_conjunction=logical_conjunction,
            table_name=table_name,
        )
def add_metapartition(
self,
metapartition: "MetaPartition",
schema_validation: bool = True,
):
"""
Adds a metapartition to the internal list structure to enable batch processing.
Parameters
----------
metapartition
The MetaPartition to be added.
schema_validation
If True (default), ensure that the `table_meta` of both `MetaPartition` objects are the same
"""
if self.is_sentinel:
return metapartition
existing_label = [mp_["label"] for mp_ in self.metapartitions]
if any(
[mp_["label"] in existing_label for mp_ in metapartition.metapartitions]
):
raise RuntimeError(
"Duplicate labels for nested metapartitions are not allowed!"
)
schema = metapartition.schema
if schema_validation and schema:
# This ensures that only schema-compatible metapartitions can be nested
# The returned schema by validate_compatible is the reference schema with the most
# information, i.e. the fewest null columns
schema = validate_compatible([self.schema, metapartition.schema])
new_object = MetaPartition(
label="NestedMetaPartition",
metadata_version=metapartition.metadata_version,
schema=schema,
partition_keys=metapartition.partition_keys or None,
logical_conjunction=metapartition.logical_conjunction or None,
table_name=metapartition.table_name,
)
# Add metapartition information to the new object
new_metapartitions = self.metapartitions.copy()
new_metapartitions.extend(metapartition.metapartitions.copy())
new_object.metapartitions = new_metapartitions
return new_object
    @staticmethod
    def from_dict(dct):
        """
        Create a :class:`~plateau.io_components.metapartition.MetaPartition` from a dictionary.

        Parameters
        ----------
        dct : dict
            Dictionary containing constructor arguments as keys; missing keys
            fall back to the constructor defaults.

        Returns
        -------
        MetaPartition
        """
        return MetaPartition(
            label=dct["label"],
            file=dct.get("file", None),
            data=dct.get("data", None),
            table_name=dct.get("table_name", SINGLE_TABLE),
            indices=dct.get("indices", {}),
            metadata_version=dct.get("metadata_version", None),
            schema=dct.get("schema", None),
            partition_keys=dct.get("partition_keys", None),
            logical_conjunction=dct.get("logical_conjunction", None),
        )
    def to_dict(self):
        """Return the constructor arguments of this (unnested) metapartition
        as a dict — the inverse of :meth:`from_dict`."""
        return {
            "label": self.label,
            "file": self.file,
            "data": self.data,
            "indices": self.indices,
            "metadata_version": self.metadata_version,
            "schema": self.schema,
            "partition_keys": self.partition_keys,
            "logical_conjunction": self.logical_conjunction,
            "table_name": self.table_name,
        }
    @_apply_to_list
    def remove_dataframes(self):
        """
        Remove all dataframes from the metapartition in memory.
        """
        # NOTE(review): relies on MetaPartition.copy (defined elsewhere in this
        # module) to rebuild the partition with `data` dropped
        return self.copy(data=None)
def _split_predicates_in_index_and_content(self, predicates):
"""
Split a list of predicates in the parts that can be resolved by the
partition columns and the ones that are persisted in the data file.
"""
# Predicates are split in this function into the parts that apply to
# the partition key columns `key_part` and the parts that apply to the
# contents of the file `content_part`.
split_predicates = []
has_index_condition = False
for conjunction in predicates:
key_part = []
content_part = []
for literal in conjunction:
if literal.column in self.partition_keys:
has_index_condition = True
key_part.append(literal)
else:
content_part.append(literal)
split_predicates.append(_SplitPredicate(key_part, content_part))
return split_predicates, has_index_condition
    def _apply_partition_key_predicates(self, indices, split_predicates):
        """
        Apply the predicates to the partition_key columns and return the remaining
        predicates that should be pushed to the DataFrame serialiser.

        Returns ``None`` when some conjunction matches this partition purely on
        its key columns (meaning all data must be loaded).
        """
        # Construct a single line DF with the partition columns
        schema = self.schema
        index_df_dct = {}
        for column, value in indices:
            pa_dtype = schema[schema.get_field_index(column)].type
            value = IndexBase.normalize_value(pa_dtype, value)
            if pa.types.is_date(pa_dtype):
                # dates need an explicit parse + .dt.date to stay date-typed
                index_df_dct[column] = pd.Series(
                    pd.to_datetime([value], infer_datetime_format=True)
                ).dt.date
            else:
                dtype = pa_dtype.to_pandas_dtype()
                index_df_dct[column] = pd.Series([value], dtype=dtype)
        index_df = pd.DataFrame(index_df_dct)

        filtered_predicates = []
        # We assume that indices on the partition level have been filtered out already in `dispatch_metapartitions`.
        # `filtered_predicates` should only contain predicates that can be evaluated on parquet level
        for conjunction in split_predicates:
            predicates = [conjunction.key_part]
            if (
                len(conjunction.key_part) == 0
                or len(
                    filter_df_from_predicates(
                        index_df, predicates, strict_date_types=True
                    )
                )
                > 0
            ):
                if len(conjunction.content_part) > 0:
                    filtered_predicates.append(conjunction.content_part)
                else:
                    # A condititon applies to the whole DataFrame, so we need to
                    # load all data.
                    return None
        return filtered_predicates
@default_docs
@_apply_to_list
def load_dataframes(
    self,
    store: KeyValueStore,
    columns: Optional[Sequence[str]] = None,
    predicate_pushdown_to_io: bool = True,
    categoricals: Optional[Sequence[str]] = None,
    dates_as_object: bool = True,
    predicates: PredicatesType = None,
) -> "MetaPartition":
    """
    Load the dataframe of the partition from store into memory.

    Predicate parts referring to partition-key columns are evaluated against
    the storage key itself; the remaining parts are pushed down to the
    serialiser. Partition-key columns are afterwards reconstructed from the
    key and re-attached to the loaded DataFrame.

    Parameters
    ----------
    store
        The KeyValueStore holding the partition file.
    columns
        If given, only these columns are loaded and the result is reindexed
        to exactly this column order; partition-key columns are removed from
        the IO request and reconstructed instead.
    predicate_pushdown_to_io
        Forwarded to the serialiser so it can skip data early.
    categoricals
        Columns to be loaded as pandas categoricals.
    dates_as_object
        Load date columns as ``datetime.date`` objects. ``False`` is
        deprecated.
    predicates
        Filter conditions in disjunctive normal form.

    Returns
    -------
    MetaPartition
        A copy of ``self`` with ``data`` set to the loaded DataFrame.

    Raises
    ------
    ValueError
        If requested ``columns`` are missing from the stored dataframe.
    """
    if categoricals is None:
        categoricals = []
    if not dates_as_object:
        # Fixed message: name the actual parameter (`dates_as_object`) and
        # the typo "paramere".
        warnings.warn(
            "The argument `dates_as_object` is set to False. This argument will be deprecated and the future behaviour will be as if the parameter was set to `True`. Please migrate your code accordingly ahead of time.",
            DeprecationWarning,
        )

    LOGGER.debug("Loading internal dataframes of %s", self.label)
    if not self.file:
        # This used to raise, but the specs do not require this, so simply do a no op
        LOGGER.debug("Partition %s is empty and has no data.", self.label)
        return self
    predicates = _combine_predicates(predicates, self.logical_conjunction)
    predicates = _predicates_to_named(predicates)

    dataset_uuid, _, indices, _ = decode_key(self.file)

    # In case the columns only refer to the partition indices, we need to load at least a single column to
    # determine the length of the required dataframe.
    table_columns_to_io = columns

    filtered_predicates = predicates

    self = self.load_schema(dataset_uuid=dataset_uuid, store=store)

    # Filter predicates that would apply to this partition and remove the partition columns
    if predicates:
        # Check if there are predicates that match to the partition columns.
        # For these we need to check if the partition columns already falsify
        # the condition.
        #
        # We separate these predicates into their index and their Parquet part.
        (
            split_predicates,
            has_index_condition,
        ) = self._split_predicates_in_index_and_content(predicates)

        filtered_predicates = []
        if has_index_condition:
            # May return None, signalling "load everything".
            filtered_predicates = self._apply_partition_key_predicates(
                indices, split_predicates
            )
        else:
            filtered_predicates = [pred.content_part for pred in split_predicates]

    # Remove partition_keys from table_columns_to_io
    if self.partition_keys and table_columns_to_io is not None:
        keys_to_remove = set(self.partition_keys) & set(table_columns_to_io)
        # This is done to not change the ordering of the list
        table_columns_to_io = [
            c for c in table_columns_to_io if c not in keys_to_remove
        ]

    start = time.time()
    df = DataFrameSerializer.restore_dataframe(
        key=self.file,
        store=store,
        columns=table_columns_to_io,
        categories=categoricals,
        predicate_pushdown_to_io=predicate_pushdown_to_io,
        predicates=filtered_predicates,
        date_as_object=dates_as_object,
    )
    LOGGER.debug(
        "Loaded dataframe %s in %s seconds.", self.file, time.time() - start
    )
    # Metadata version >=4 parse the index columns and add them back to the dataframe

    df = self._reconstruct_index_columns(
        df=df,
        key_indices=indices,
        columns=columns,
        categories=categoricals,
        date_as_object=dates_as_object,
    )

    df.columns = df.columns.map(ensure_string_type)
    if columns is not None:
        # TODO: When the write-path ensures that all partitions have the same column set, this check can be
        #       moved before `DataFrameSerializer.restore_dataframe`. At the position of the current check we
        #       may want to double check the columns of the loaded DF and raise an exception indicating an
        #       inconsistent dataset state instead.
        missing_cols = set(columns).difference(df.columns)
        if missing_cols:
            raise ValueError(
                "Columns cannot be found in stored dataframe: {}".format(
                    ", ".join(sorted(missing_cols))
                )
            )

        if list(df.columns) != columns:
            df = df.reindex(columns=columns, copy=False)
    return self.copy(data=df)
@_apply_to_list
def load_schema(self, store: StoreInput, dataset_uuid: str) -> "MetaPartition":
    """
    Fetch the table schema from the store and cache it on the partition;
    a no-op when the schema is already present.
    """
    if self.schema is not None:
        return self
    self.schema = read_schema_metadata(
        dataset_uuid=dataset_uuid,
        store=ensure_store(store),
        table=self.table_name,
    )
    return self
def _reconstruct_index_columns(
    self, df, key_indices, columns, categories, date_as_object
):
    """
    Re-attach the partition-key columns (encoded in the storage key) to the
    loaded DataFrame as constant-valued columns inserted at the front.

    Parameters
    ----------
    df
        The DataFrame restored from the parquet file.
    key_indices
        Sequence of ``(primary_key, value)`` pairs decoded from the key.
    columns
        Originally requested columns, or ``None`` for all; keys outside
        this selection are skipped.
    categories
        Columns that should be reconstructed as pandas categoricals.
    date_as_object
        If True, date-typed key values are inserted as ``datetime.date``.
    """
    if len(key_indices) == 0:
        return df
    original_columns = list(df.columns)
    # Every row shares the same key value, so a zero code repeated len(df)
    # times is enough to build a constant categorical column.
    zeros = np.zeros(len(df), dtype=int)
    schema = self.schema
    # One of the few places `inplace=True` makes a significant difference
    df.reset_index(drop=True, inplace=True)
    index_names = [primary_key for primary_key, _ in key_indices]
    # The index might already be part of the dataframe which is recovered from the parquet file.
    # In this case, still use the reconstructed index col to have consistent index columns behavior.
    # The column is then part of `original_columns` and must be removed to avoid duplication
    # in the column axis
    cleaned_original_columns = [
        orig for orig in original_columns if orig not in index_names
    ]
    if cleaned_original_columns != original_columns:
        # indexer call is slow, so only do that if really necessary
        df = df.reindex(columns=cleaned_original_columns, copy=False)
    pos = 0
    for primary_key, value in key_indices:
        # If there are predicates, don't reconstruct the index if it wasn't requested
        if columns is not None and primary_key not in columns:
            continue
        pa_dtype = schema.field(primary_key).type
        dtype = pa_dtype.to_pandas_dtype()
        convert_to_date = False
        if date_as_object and pa_dtype in [pa.date32(), pa.date64()]:
            convert_to_date = True

        # Coerce the string-encoded key value into the pandas dtype of the
        # target column.
        if isinstance(dtype, type):
            value = dtype(value)
        elif isinstance(dtype, np.dtype):
            if dtype == np.dtype("datetime64[ns]"):
                value = pd.Timestamp(value)
            else:
                value = dtype.type(value)
        else:
            raise RuntimeError(
                "Unexepected object encountered: ({}, {})".format(
                    dtype, type(dtype)
                )
            )
        if categories and primary_key in categories:
            if convert_to_date:
                cats = pd.Series(value).dt.date
            else:
                cats = [value]
            value = pd.Categorical.from_codes(zeros, categories=cats)
        else:
            if convert_to_date:
                value = pd.Timestamp(value).to_pydatetime().date()
        df.insert(pos, primary_key, value)
        pos += 1
    return df
@_apply_to_list
def validate_schema_compatible(
    self, store: StoreInput, dataset_uuid: str
) -> "MetaPartition":
    """
    Validates that the currently held DataFrame matches the schema of the
    existing dataset.

    Parameters
    ----------
    store
        If it is a function, the result of calling it must be a KeyValueStore.
    dataset_uuid
        The dataset UUID the partition will be assigned to
    """
    # Read the reference schema straight from the store; the schema held on
    # `self` stems from the input DataFrame, so a store round trip through
    # `load_all_table_meta` would be a no-op here.
    reference_meta = read_schema_metadata(
        dataset_uuid=dataset_uuid,
        store=ensure_store(store),
        table=self.table_name,
    )
    try:
        validate_compatible([self.schema, reference_meta])
    except ValueError as e:
        raise ValueError(
            f"Schemas for dataset '{dataset_uuid}' are not compatible!\n\n{e}"
        )

    return self
@_apply_to_list
def store_dataframes(
    self,
    store: StoreInput,
    dataset_uuid: str,
    df_serializer: Optional[DataFrameSerializer] = None,
) -> "MetaPartition":
    """
    Stores the dataframe of the MetaPartition and registers the saved
    file under the `file` attribute. The dataframe itself is deleted from memory.

    Parameters
    ----------
    store
        If it is a function, the result of calling it must be a KeyValueStore.
    dataset_uuid
        The dataset UUID the partition will be assigned to
    df_serializer
        Serialiser to be used to store the dataframe

    Returns
    -------
    MetaPartition
    """
    df_serializer = (
        df_serializer if df_serializer is not None else default_serializer()
    )
    key = get_partition_file_prefix(
        partition_label=self.label,
        dataset_uuid=dataset_uuid,
        metadata_version=self.metadata_version,
        table=self.table_name,
    )
    if self.data is not None:
        df = self.data
        try:
            file = df_serializer.store(store, key, df)
        except Exception as exc:
            # Best-effort diagnostics: the inner try has no except clause —
            # its only purpose is the `finally: raise`, which re-raises the
            # original storage error even if logging itself fails.
            try:
                if isinstance(df, pd.DataFrame):
                    buf = io.StringIO()
                    df.info(buf=buf)
                    LOGGER.error(
                        "Writing dataframe failed.\n" "%s\n" "%s\n" "%s",
                        exc,
                        buf.getvalue(),
                        df.head(),
                    )
                else:
                    LOGGER.error("Storage of dask dataframe failed.")
                    pass
            finally:
                raise

        new_metapartition = self.copy(file=file, data=None)

        return new_metapartition
    else:
        return self
@_apply_to_list
def apply(
    self,
    func: Callable,
    type_safe: bool = False,
) -> "MetaPartition":
    """
    Applies a given function to the dataframe of the MetaPartition.

    Parameters
    ----------
    func
        A callable accepting and returning a :class:`pandas.DataFrame`
    type_safe
        If the transformation is type-safe, optimizations can be applied:
        the existing schema is reused instead of being re-inferred from
        the transformed data.
    """
    new_data = func(self.data)

    if type_safe:
        new_schema = self.schema
    else:
        # `func` may have changed columns or dtypes, so re-derive the schema.
        new_schema = make_meta(
            new_data,
            origin=self.label,
            partition_keys=self.partition_keys,
        )
    return self.copy(data=new_data, schema=new_schema)
def as_sentinel(self):
    """Return a label-less sentinel partition carrying over only the
    metadata version and partition keys of this one."""
    sentinel = MetaPartition(
        None,
        metadata_version=self.metadata_version,
        partition_keys=self.partition_keys,
    )
    return sentinel
def copy(self, **kwargs):
    """
    Creates a shallow copy where the kwargs overwrite existing attributes.

    When `partition_keys` is overwritten, schemas are re-normalised to the
    new key order. If nested metapartitions are present (or passed via
    kwargs), the copy is rebuilt by re-adding every nested partition.
    """

    def _renormalize_meta(meta):
        # Schema column order depends on the partition keys, so a key
        # change requires re-normalisation of the schema.
        if "partition_keys" in kwargs and meta is not None:
            pk = kwargs["partition_keys"]
            return normalize_column_order(meta, pk)
        else:
            return meta

    metapartitions = kwargs.get("metapartitions", None) or []
    metapartitions.extend(self.metapartitions)
    if len(metapartitions) > 1:
        # NOTE: despite the name, `pop()` takes the *last* entry of the
        # combined list; it becomes the parent the rest is re-added to.
        first_mp = metapartitions.pop()
        mp_parent = MetaPartition(
            label=first_mp.get("label"),
            file=first_mp.get("file"),
            data=first_mp.get("data"),
            indices=first_mp.get("indices"),
            metadata_version=self.metadata_version,
            schema=_renormalize_meta(kwargs.get("schema", self.schema)),
            partition_keys=kwargs.get("partition_keys", self.partition_keys),
            logical_conjunction=kwargs.get(
                "logical_conjunction", self.logical_conjunction
            ),
            table_name=kwargs.get("table_name", self.table_name),
        )
        for mp in metapartitions:
            mp_parent = mp_parent.add_metapartition(
                MetaPartition(
                    label=mp.get("label"),
                    file=mp.get("file"),
                    data=mp.get("data"),
                    indices=mp.get("indices"),
                    metadata_version=self.metadata_version,
                    schema=_renormalize_meta(kwargs.get("schema", self.schema)),
                    partition_keys=kwargs.get(
                        "partition_keys", self.partition_keys
                    ),
                    logical_conjunction=kwargs.get(
                        "logical_conjunction", self.logical_conjunction
                    ),
                    table_name=kwargs.get("table_name", self.table_name),
                ),
                schema_validation=False,
            )
        return mp_parent
    else:
        # Single (or no) nested partition: plain attribute-by-attribute copy.
        mp = MetaPartition(
            label=kwargs.get("label", self.label),
            file=kwargs.get("file", self.file),
            data=kwargs.get("data", self.data),
            indices=kwargs.get("indices", self.indices),
            metadata_version=kwargs.get("metadata_version", self.metadata_version),
            schema=_renormalize_meta(kwargs.get("schema", self.schema)),
            partition_keys=kwargs.get("partition_keys", self.partition_keys),
            logical_conjunction=kwargs.get(
                "logical_conjunction", self.logical_conjunction
            ),
            table_name=kwargs.get("table_name", self.table_name),
        )
        return mp
@_apply_to_list
def build_indices(self, columns: Iterable[str]) -> "MetaPartition":
    """
    This builds the indices for this metapartition for the given columns. The indices for the passed columns
    are rebuilt, so existing index entries in the metapartition are overwritten.

    :param columns: A list of columns from which the indices over all dataframes in the metapartition
        are overwritten
    :return: self
    """
    if self.label is None:
        # Sentinel partitions carry no data, hence nothing to index.
        return self
    new_indices = {}
    for col in columns:
        possible_values: Set[str] = set()
        df = self.data
        if not self.is_sentinel and col not in df:
            raise RuntimeError(
                "Column `{corrupt_col}` could not be found in the partition `{partition_label}` Please check for any typos and validate your dataset.".format(
                    corrupt_col=col, partition_label=self.label
                )
            )
        # Index entries are the distinct non-null values of the column.
        possible_values = possible_values | set(df[col].dropna().unique())

        if self.schema is not None:
            dtype = self.schema.field(col).type
        else:
            dtype = None

        new_index = ExplicitSecondaryIndex(
            column=col,
            index_dct={value: [self.label] for value in possible_values},
            dtype=dtype,
        )
        if (col in self.indices) and self.indices[col].loaded:
            # Merge into the already loaded index rather than replacing it.
            new_indices[col] = self.indices[col].update(new_index)
        else:
            new_indices[col] = new_index

    return self.copy(indices=new_indices)
@_apply_to_list
def partition_on(self, partition_on: Union[str, Sequence[str]]):
    """
    Partition all dataframes assigned to this MetaPartition according to the given columns.

    If the MetaPartition object contains index information, the information is split in such a way that they
    reference the new partitions.

    In case a requested partition column is not existent in **all** tables, a KeyError is raised.

    All output partitions are re-assigned labels encoding the partitioned columns (urlencoded)

    Examples::

        >>> import pandas as pd
        >>> from plateau.io_components.metapartition import MetaPartition
        >>> mp = MetaPartition(
        ...     label='partition_label',
        ...     data=pd.DataFrame({
        ...         'P': [1, 2, 1, 2],
        ...         'L': [1, 1, 2, 2]
        ...     })
        ... )
        >>> repartitioned_mp = mp.partition_on(['P', 'L'])
        >>> assert [mp["label"] for mp in repartitioned_mp.metapartitions] == [
        ...     "P=1/L=1/partition_label",
        ...     "P=1/L=2/partition_label",
        ...     "P=2/L=1/partition_label",
        ...     "P=2/L=2/partition_label"
        ... ]

    Parameters
    ----------
    partition_on
    """
    if partition_on == self.partition_keys:
        return self

    for partition_column in partition_on:
        if partition_column in self.indices:
            raise ValueError(
                "Trying to `partition_on` on a column with an explicit index!"
            )
    if self.is_sentinel:
        return self.copy(partition_keys=partition_on)
    else:
        # Start from an empty sentinel and re-add one metapartition per
        # unique partition-key combination.
        new_mp = self.as_sentinel().copy(
            partition_keys=partition_on,
            schema=normalize_column_order(self.schema, partition_on),
        )
        if isinstance(partition_on, str):
            partition_on = [partition_on]
        # Keep only the keys not already covered by the current partitioning.
        partition_on = self._ensure_compatible_partitioning(partition_on)

        new_data = self._partition_data(partition_on)

        for label, data in new_data.items():
            tmp_mp = MetaPartition(
                label=label,
                file=self.file,
                data=data,
                metadata_version=self.metadata_version,
                indices={},
                schema=normalize_column_order(self.schema, partition_on).with_origin(
                    f"{label}"
                ),
                partition_keys=partition_on,
                table_name=self.table_name,
            )
            new_mp = new_mp.add_metapartition(tmp_mp, schema_validation=False)
        if self.indices:
            # Rebuild explicit indices so they point at the new labels.
            new_mp = new_mp.build_indices(columns=self.indices.keys())
        return new_mp
def _ensure_compatible_partitioning(self, partition_on):
if (
not self.partition_keys
or self.partition_keys
and (len(partition_on) >= len(self.partition_keys))
and (self.partition_keys == partition_on[: len(self.partition_keys)])
):
return partition_on[len(self.partition_keys) :]
else:
raise ValueError(
"Incompatible partitioning encountered. `partition_on` needs to include the already "
"existing partition keys and must preserve their order.\n"
"Current partition keys: `{}`\n"
"Partition on called with: `{}`".format(
self.partition_keys, partition_on
)
)
def _partition_data(self, partition_on):
    """
    Split ``self.data`` into one DataFrame per unique combination of the
    ``partition_on`` column values.

    Returns a dict mapping the new (urlencoded, slash-joined) partition
    label to the group's DataFrame with the partition columns dropped.
    Raises ``ValueError`` if partition columns are missing, no data columns
    would remain, or rows get lost during grouping (e.g. nulls).
    """
    existing_indices, base_label = decode_key("uuid/table/{}".format(self.label))[
        2:
    ]
    dct = dict()
    df = self.data
    # Check that data sizes do not change. This might happen if the
    # groupby below drops data, e.g. nulls
    size_after = 0
    size_before = len(df)

    # Implementation from pyarrow
    # See https://github.com/apache/arrow/blob/b33dfd9c6bd800308bb1619b237dbf24dea159be/python/pyarrow/parquet.py#L1030 # noqa: E501

    # column sanity checks
    data_cols = set(df.columns).difference(partition_on)
    missing_po_cols = set(partition_on).difference(df.columns)
    if missing_po_cols:
        raise ValueError(
            "Partition column(s) missing: {}".format(
                ", ".join(sorted(missing_po_cols))
            )
        )
    if len(data_cols) == 0:
        raise ValueError("No data left to save outside partition columns")

    # To be aligned with open source tooling we drop the index columns and recreate
    # them upon reading as it is done by fastparquet and pyarrow
    partition_keys = [df[col] for col in partition_on]

    # # The handling of empty dfs is not part of the arrow implementation
    # if df.empty:
    #     return {}

    data_df = df.drop(partition_on, axis="columns")
    for value, group in data_df.groupby(by=partition_keys, sort=False):
        partitioning_info = []

        if pd.api.types.is_scalar(value):
            # Single partition column: wrap the scalar group key in a list.
            value = [value]

        if existing_indices:
            partitioning_info.extend(quote_indices(existing_indices))
        partitioning_info.extend(quote_indices(zip(partition_on, value)))
        partitioning_info.append(base_label)
        new_label = "/".join(partitioning_info)

        if new_label not in dct:
            dct[new_label] = {}
        dct[new_label] = group
        size_after += len(group)

    if size_before != size_after:
        raise ValueError(
            f"Original dataframe size ({size_before} rows) does not "
            f"match new dataframe size ({size_after} rows). "
            f"Hint: you may see this if you are trying to use `partition_on` on a column with null values."
        )

    return dct
@staticmethod
def merge_indices(metapartitions):
    """Collect the indices of all (nested) metapartitions and merge them
    into a single index mapping."""
    collected = [
        sub_mp.indices
        for mp in metapartitions
        for sub_mp in mp
        if sub_mp.indices
    ]
    return merge_indices_algo(collected)
@staticmethod
def _merge_labels(metapartitions, label_merger=None):
# Use the shortest of available labels since this has to be the partition
# label prefix
new_label = None
# FIXME: This is probably not compatible with >= v3
if label_merger is None:
for mp in metapartitions:
label = mp.label
if new_label is None or len(label) < len(new_label):
new_label = label
continue
else:
new_label = label_merger([mp.label for mp in metapartitions])
return new_label
@staticmethod
def concat_metapartitions(metapartitions, label_merger=None):
    """
    Concatenate the data of several metapartitions into one.

    Schemas are validated for compatibility and categorical columns are
    aligned (via `align_categories`) before the concat. The resulting
    metadata version is the maximum of the inputs.

    NOTE(review): assumes `metapartitions` is non-empty and every input
    holds data — confirm callers guarantee this.
    """
    LOGGER.debug("Concatenating metapartitions")

    new_metadata_version = -1
    data = []
    schema = []
    for mp in metapartitions:
        new_metadata_version = max(new_metadata_version, mp.metadata_version)
        data.append(mp.data)
        schema.append(mp.schema)
        # Don't care about the partition_keys. If we try to merge
        # MetaPartitions without alignment the schemas won't match.
        partition_keys = mp.partition_keys

    categoricals = [
        col
        for col, dtype in data[0].items()
        if pd.api.types.is_categorical_dtype(dtype)
    ]
    if categoricals:
        data = align_categories(data, categoricals)
    new_df = pd.concat(data)

    new_schema = validate_compatible(schema)

    new_label = MetaPartition._merge_labels(metapartitions, label_merger)

    new_mp = MetaPartition(
        label=new_label,
        data=new_df,
        metadata_version=new_metadata_version,
        schema=new_schema,
        partition_keys=partition_keys,
    )

    return new_mp
@_apply_to_list
def delete_from_store(
    self, dataset_uuid: Any, store: StoreInput
) -> "MetaPartition":
    """Delete this partition's file from the store and return a copy of
    the metapartition without file reference and data."""
    # Remove the payload first, then drop the dangling references.
    ensure_store(store).delete(self.file)
    return self.copy(file=None, data=None)
def get_parquet_metadata(self, store: StoreInput) -> pd.DataFrame:
    """
    Retrieve the parquet metadata for the MetaPartition.
    Especially relevant for calculating dataset statistics.

    Parameters
    ----------
    store
        A factory function providing a KeyValueStore

    Returns
    -------
    pd.DataFrame
        One row per row group, with file-level columns (partition label,
        serialized size, total rows, row-group count) and per-row-group
        columns (rows, compressed and uncompressed byte size).
    """
    store = ensure_store(store)

    data = {}
    with store.open(self.file) as fd:  # type: ignore
        pq_metadata = pa.parquet.ParquetFile(fd).metadata

        data = {
            "partition_label": self.label,
            "serialized_size": pq_metadata.serialized_size,
            "number_rows_total": pq_metadata.num_rows,
            "number_row_groups": pq_metadata.num_row_groups,
            "row_group_id": [],
            "number_rows_per_row_group": [],
            "row_group_compressed_size": [],
            "row_group_uncompressed_size": [],
        }
        for rg_ix in range(pq_metadata.num_row_groups):
            rg = pq_metadata.row_group(rg_ix)
            data["row_group_id"].append(rg_ix)
            data["number_rows_per_row_group"].append(rg.num_rows)
            data["row_group_compressed_size"].append(rg.total_byte_size)
            # Uncompressed size is summed over all columns of the row group.
            data["row_group_uncompressed_size"].append(
                sum(
                    rg.column(col_ix).total_uncompressed_size
                    for col_ix in range(rg.num_columns)
                )
            )

    df = pd.DataFrame(data=data, columns=_METADATA_SCHEMA.keys())
    df = df.astype(_METADATA_SCHEMA)
    return df
def _unique_label(label_list):
label = os.path.commonprefix(label_list)
if len(label) == 0:
label = "_".join(label_list)
while len(label) > 0 and not label[-1].isalnum():
label = label[:-1]
return label
def partition_labels_from_mps(mps: List[MetaPartition]) -> List[str]:
    """
    Get a list of partition labels, flattening any nested meta partitions in the input and ignoring sentinels.
    """
    labels = []
    for mp in mps:
        # A metapartition may wrap several nested ones; only then iterate it.
        candidates = list(mp) if len(mp) > 1 else [mp]
        labels.extend(c.label for c in candidates if not c.is_sentinel)
    return labels
def parse_input_to_metapartition(
    obj: MetaPartitionInput,
    table_name: str = SINGLE_TABLE,
    metadata_version: Optional[int] = None,
) -> MetaPartition:
    """
    Parses given user input and return a MetaPartition.

    The expected input is a :class:`pandas.DataFrame` or a list of
    :class:`pandas.DataFrame`.

    Every element of the list will be treated as a dedicated user input and will
    result in a physical file, if not specified otherwise.

    Parameters
    ----------
    obj
        ``None``, a DataFrame, a list of DataFrames/inputs, or an existing
        MetaPartition (which is returned unchanged).
    table_name
        The table name assigned to the partitions
    metadata_version
        The plateau dataset specification version

    Raises
    ------
    ValueError
        If ``obj`` is of an unsupported type.
    """
    if obj is None:
        obj = []
    if isinstance(obj, list):
        if len(obj) == 0:
            # An empty list maps to a label-less sentinel MetaPartition.
            return MetaPartition(label=None, metadata_version=metadata_version)
        first_element = obj[0]
        mp = parse_input_to_metapartition(
            obj=first_element,
            metadata_version=metadata_version,
            table_name=table_name,
        )
        # Fold the remaining elements into the first metapartition.
        for mp_in in obj[1:]:
            mp = mp.add_metapartition(
                parse_input_to_metapartition(
                    obj=mp_in,
                    metadata_version=metadata_version,
                    table_name=table_name,
                )
            )
    elif isinstance(obj, pd.DataFrame):
        # Each DataFrame becomes a partition with a freshly generated label.
        mp = MetaPartition(
            label=gen_uuid(),
            data=obj,
            metadata_version=metadata_version,
            table_name=table_name,
        )
    elif isinstance(obj, MetaPartition):
        return obj
    else:
        raise ValueError(
            f"Unexpected type during parsing encountered: ({type(obj)}, {obj})"
        )

    return mp
|
{"hexsha": "b9adad9dc70fc33c5ef85850766085898cf452a8", "size": 50745, "ext": "py", "lang": "Python", "max_stars_repo_path": "plateau/io_components/metapartition.py", "max_stars_repo_name": "data-engineering-collective/plateau", "max_stars_repo_head_hexsha": "ab87282a2f66c4f847654f28f8a2b0df33cb4d62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-11T01:27:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T22:44:02.000Z", "max_issues_repo_path": "plateau/io_components/metapartition.py", "max_issues_repo_name": "data-engineering-collective/plateau", "max_issues_repo_head_hexsha": "ab87282a2f66c4f847654f28f8a2b0df33cb4d62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2022-02-08T16:14:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T00:36:16.000Z", "max_forks_repo_path": "plateau/io_components/metapartition.py", "max_forks_repo_name": "data-engineering-collective/plateau", "max_forks_repo_head_hexsha": "ab87282a2f66c4f847654f28f8a2b0df33cb4d62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-08T16:59:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T17:54:39.000Z", "avg_line_length": 35.9893617021, "max_line_length": 221, "alphanum_fraction": 0.585969061, "include": true, "reason": "import numpy", "num_tokens": 10098}
|
# -*- coding: utf-8 -*-
""" This example shows how to compute the atmospheric attenuation exceeded
for 0.1 % of the time for multiple ground stations.

It is assumed that the satellite is located in geostationary orbit, at the
77 W slot, and the link operates at 22.5 GHz with receiver-dishes of 1.2 m
diameter.

The per-city results are shown as a grouped bar chart splitting the total
attenuation into its rain, gaseous, cloud and scintillation contributions.
"""
import itur
import numpy as np
import matplotlib.pyplot as plt

# Obtain the coordinates of the different cities
cities = {'Boston': (42.36, -71.06),
          'New York': (40.71, -74.01),
          'Los Angeles': (34.05, -118.24),
          'Denver': (39.74, -104.99),
          'Las Vegas': (36.20, -115.14),
          'Seattle': (47.61, -122.33),
          'Washington DC': (38.91, -77.04)}

lat = [coords[0] for coords in cities.values()]
lon = [coords[1] for coords in cities.values()]

# Satellite coordinates (GEO, 77 W slot — see module docstring)
lat_sat = 0
lon_sat = -77
h_sat = 35786 * itur.u.km

# Compute the elevation angle between satellite and ground stations
el = itur.utils.elevation_angle(h_sat, lat_sat, lon_sat, lat, lon)

# Set the link parameters
f = 22.5 * itur.u.GHz    # Link frequency
D = 1.2 * itur.u.m       # Antenna diameters
p = 0.1                  # Unavailability (Values exceeded 0.1% of time)

# Compute the atmospheric attenuation (vectorized over all cities at once)
Ag, Ac, Ar, As, Att = itur.atmospheric_attenuation_slant_path(
    lat, lon, f, el, p, D, return_contributions=True)

# Plot the results: one wide bar for the total, four narrow bars for the
# individual contributions, grouped per city.
city_idx = np.arange(len(cities))
width = 0.15

fig, ax = plt.subplots(1, 1)
ax.bar(city_idx, Att.value, 0.6, label='Total atmospheric Attenuation')
ax.bar(city_idx - 1.5 * width, Ar.value, width, label='Rain attenuation')
ax.bar(city_idx - 0.5 * width, Ag.value, width, label='Gaseous attenuation')
ax.bar(city_idx + 0.5 * width, Ac.value, width, label='Clouds attenuation')
ax.bar(city_idx + 1.5 * width, As.value, width,
       label='Scintillation attenuation')

# Set the labels
# NOTE(review): calling `set_xticklabels` without `set_xticks` relies on the
# default tick positions lining up with the city indices — confirm.
ticks = ax.set_xticklabels([''] + list(cities.keys()))
for t in ticks:
    t.set_rotation(45)
ax.set_ylabel('Atmospheric attenuation exceeded for 0.1% [dB]')

# Format image
ax.yaxis.grid(which='both', linestyle=':')
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.3), ncol=2)
plt.tight_layout(rect=(0, 0, 1, 0.85))
|
{"hexsha": "38d7d6d7ec702bdc0e4b3c3ef19a2ae17a697408", "size": 2349, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/multiple_location.py", "max_stars_repo_name": "the-aerospace-corporation/ITU-Rpy", "max_stars_repo_head_hexsha": "4456da2db9f28453d5a08339c84fe5bf25b999d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2018-06-01T17:12:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-03T11:33:00.000Z", "max_issues_repo_path": "examples/multiple_location.py", "max_issues_repo_name": "hygson/ITU-Rpy", "max_issues_repo_head_hexsha": "35123f8894137f9bcc375b4513367d068cc33ca1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2021-04-08T17:25:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T00:58:30.000Z", "max_forks_repo_path": "examples/multiple_location.py", "max_forks_repo_name": "hygson/ITU-Rpy", "max_forks_repo_head_hexsha": "35123f8894137f9bcc375b4513367d068cc33ca1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 253, "max_forks_repo_forks_event_min_datetime": "2021-04-25T23:21:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T11:09:01.000Z", "avg_line_length": 35.0597014925, "max_line_length": 77, "alphanum_fraction": 0.6845466156, "include": true, "reason": "import numpy", "num_tokens": 692}
|
Require Import oeuf.Common oeuf.Monads.
Require Import oeuf.Metadata.
Require String.
Require oeuf.LocalsOnly oeuf.FlatSwitch.
Require Import oeuf.ListLemmas.
Require Import oeuf.HigherValue.
Require Import oeuf.StepLib.
Require Import Psatz.
Module A := LocalsOnly.
Module B := FlatSwitch.
Add Printing Constructor A.frame.
Add Printing Constructor B.frame.
(* Structural translation from LocalsOnly (A) instructions to FlatSwitch (B)
   instructions.  Every constructor maps to its B-side counterpart; only
   `Switch` needs recursion into its nested case lists, handled by the two
   local fixpoints `go_list` / `go_list_list`. *)
Definition compile : A.insn -> B.insn :=
    let fix go e :=
        let fix go_list (es : list A.insn) : list B.insn :=
            match es with
            | [] => []
            | e :: es => go e :: go_list es
            end in
        let fix go_list_list (es : list (list A.insn)) : list (list B.insn) :=
            match es with
            | [] => []
            | e :: es => go_list e :: go_list_list es
            end in
        match e with
        | A.Arg dst => B.Arg dst
        | A.Self dst => B.Self dst
        | A.Deref dst e off => B.Deref dst e off
        | A.Call dst f a => B.Call dst f a
        | A.MkConstr dst tag args => B.MkConstr dst tag args
        | A.Switch dst cases => B.Switch dst (go_list_list cases)
        | A.MkClose dst fname free => B.MkClose dst fname free
        | A.OpaqueOp dst op args => B.OpaqueOp dst op args
        | A.Copy dst src => B.Copy dst src
        end in go.

(* `compile` mapped over an instruction list. *)
Definition compile_list :=
    let go := compile in
    let fix go_list es :=
        match es with
        | [] => []
        | e :: es => go e :: go_list es
        end in go_list.

(* `compile_list` mapped over a list of instruction lists (switch cases). *)
Definition compile_list_list :=
    let go_list := compile_list in
    let fix go_list_list es :=
        match es with
        | [] => []
        | e :: es => go_list e :: go_list_list es
        end in go_list_list.

(* Refold the unfolded local fixpoints back into the named definitions,
   typically after `simpl` exposed them. *)
Ltac refold_compile :=
    fold compile_list in *;
    fold compile_list_list in *.

(* Compile a function: translate the body, keep the second component. *)
Definition compile_func (f : list A.insn * nat) : list B.insn * nat :=
    let '(body, ret) := f in
    (compile_list body, ret).

(* Compile a compilation unit: translate every function, keep metadata. *)
Definition compile_cu (cu : list (list A.insn * nat) * list metadata) :
        list (list B.insn * nat) * list metadata :=
    let '(funcs, metas) := cu in
    (map compile_func funcs, metas).
(* Per-instruction simulation relation: the B instruction is the structural
   image of the A instruction (mirrors `compile`); `Switch` descends
   pointwise (Forall2) into the case lists. *)
Inductive I_insn : A.insn -> B.insn -> Prop :=
| IArg : forall dst,
        I_insn (A.Arg dst) (B.Arg dst)
| ISelf : forall dst,
        I_insn (A.Self dst) (B.Self dst)
| IDeref : forall dst e off,
        I_insn (A.Deref dst e off) (B.Deref dst e off)
| ICall : forall dst f a,
        I_insn (A.Call dst f a) (B.Call dst f a)
| IMkConstr : forall dst tag args,
        I_insn (A.MkConstr dst tag args) (B.MkConstr dst tag args)
| ISwitch : forall dst acases bcases,
        Forall2 (Forall2 I_insn) acases bcases ->
        I_insn (A.Switch dst acases) (B.Switch dst bcases)
| IMkClose : forall dst fname free,
        I_insn (A.MkClose dst fname free) (B.MkClose dst fname free)
| IOpaqueOp : forall dst op args,
        I_insn (A.OpaqueOp dst op args) (B.OpaqueOp dst op args)
| ICopy : forall dst src,
        I_insn (A.Copy dst src) (B.Copy dst src)
.

(* Function bodies are related instruction-wise; return slots coincide. *)
Inductive I_func : (list A.insn * nat) -> (list B.insn * nat) -> Prop :=
| IFunc : forall ret acode bcode,
        Forall2 I_insn acode bcode ->
        I_func (acode, ret) (bcode, ret).

(* Frames are related when argument, self and locals coincide. *)
Inductive I_frame : A.frame -> B.frame -> Prop :=
| IFrame : forall arg self locals,
        I_frame (A.Frame arg self locals) (B.Frame arg self locals).
Hint Constructors I_frame.

(* Code still pending in the nested `Kswitch` layers of a B continuation,
   outermost layer first. *)
Fixpoint flat_code k :=
    match k with
    | B.Kswitch code k' => code ++ flat_code k'
    | _ => []
    end.

(* The first non-`Kswitch` continuation underneath `k`. *)
Fixpoint flat_cont k :=
    match k with
    | B.Kswitch _ k' => flat_cont k'
    | _ => k
    end.

(* Continuations are related modulo B's extra `Kswitch` layers: the A code
   must match the B code extended with all pending switch code, and the A
   continuation matches the B continuation with switch layers stripped. *)
Inductive I_cont : A.cont -> B.cont -> Prop :=
| IkRet : forall ret dst acode af ak bcode bf bk,
        Forall2 I_insn acode (bcode ++ flat_code bk) ->
        I_frame af bf ->
        I_cont ak (flat_cont bk) ->
        I_cont (A.Kret acode ret dst af ak)
               (B.Kret bcode ret dst bf bk)
| IkStop : forall ret,
        I_cont (A.Kstop ret)
               (B.Kstop ret).

(* Discriminates the continuation form introduced by this pass. *)
Definition B_is_new_cont k :=
    match k with
    | B.Kswitch _ _ => True
    | _ => False
    end.

(* State relation.  In `IRun`, the B state must expose at least one pending
   instruction `bi`; the A code matches it followed by the rest of the B
   code plus all flattened switch code. *)
Inductive I : A.state -> B.state -> Prop :=
| IRun : forall acode af ak bi bcode bf bk,
        Forall2 I_insn acode (bi :: bcode ++ flat_code bk) ->
        I_frame af bf ->
        I_cont ak (flat_cont bk) ->
        I (A.Run acode af ak)
          (B.Run (bi :: bcode) bf bk)
| IRunNil : forall af ak bf bk,
        I_frame af bf ->
        I_cont ak bk ->
        I (A.Run [] af ak)
          (B.Run [] bf bk)
| IStop : forall v,
        I (A.Stop v) (B.Stop v).

(* Weaker variant of `I` that does not single out the head instruction of
   the B code. *)
Inductive almost_I : A.state -> B.state -> Prop :=
| AIRun : forall acode af ak bcode bf bk,
        Forall2 I_insn acode (bcode ++ flat_code bk) ->
        I_frame af bf ->
        I_cont ak (flat_cont bk) ->
        almost_I (A.Run acode af ak)
                 (B.Run bcode bf bk)
| AIStop : forall v,
        almost_I (A.Stop v) (B.Stop v).
(* `compile` only produces instructions related to their source, proved by
   mutual induction over instructions, lists, and lists of lists. *)
Lemma compile_I_insn : forall a b,
        compile a = b ->
        I_insn a b.
induction a using A.insn_rect_mut with
        (Pl := fun a => forall b,
            compile_list a = b ->
            Forall2 I_insn a b)
        (Pll := fun a => forall b,
            compile_list_list a = b ->
            Forall2 (Forall2 I_insn) a b);
intros0 Hcomp; simpl in Hcomp; try rewrite <- Hcomp; refold_compile;
try solve [econstructor; eauto].
Qed.

(* Pointwise version of `compile_I_insn` for instruction lists. *)
Lemma compile_list_I_insn : forall a b,
        compile_list a = b ->
        Forall2 I_insn a b.
induction a;
intros0 Hcomp; simpl in Hcomp; try rewrite <- Hcomp; refold_compile;
try solve [econstructor; eauto using compile_I_insn].
Qed.

(* Compiled functions are related to their source functions. *)
Lemma compile_I_func : forall a b,
        compile_func a = b ->
        I_func a b.
intros0 Hcomp. destruct a.
unfold compile_func in Hcomp. rewrite <- Hcomp.
econstructor. eauto using compile_list_I_insn.
Qed.

(* Whole-program version: compiling a compilation unit relates all
   functions pairwise. *)
Theorem compile_cu_I_env : forall a ameta b bmeta,
        compile_cu (a, ameta) = (b, bmeta) ->
        Forall2 I_func a b.
intros0 Hcomp. unfold compile_cu in *. inject_pair.
remember (map compile_func a) as b.
symmetry in Heqb. apply map_Forall2 in Heqb.
list_magic_on (a, (b, tt)). eauto using compile_I_func.
Qed.
(* [B_start HS]: from a goal mentioning a B-side step predicate
   ([B.sstep]/[B.sstar]/[B.splus]), introduce an empty accumulated
   trace [HS : B.sstar E S0 S0] anchored at the current state. *)
Ltac B_start HS :=
    match goal with
    | [ |- context [ ?pred ?E ?s _ ] ] =>
            lazymatch pred with
            | B.sstep => idtac
            | B.sstar => idtac
            | B.splus => idtac
            | _ => fail "unrecognized predicate:" pred
            end;
            let S_ := fresh "S" in
            let S0 := fresh "S" in
            set (S0 := s);
            change s with S0;
            assert (HS : B.sstar E S0 S0) by (eapply B.SStarNil)
    end.

(* [B_step HS]: extend the accumulated trace in [HS] by exactly one
   B step; the result is always a [B.splus]. *)
Ltac B_step HS :=
    let S_ := fresh "S" in
    let S2 := fresh "S" in
    let HS' := fresh HS "'" in
    let go E s0 s1 Brel solver :=
        rename HS into HS';
        evar (S2 : B.state);
        assert (HS : Brel E s0 S2);
        [ solver; unfold S2
        | clear HS' ] in
    match type of HS with
    | B.sstar ?E ?s0 ?s1 => go E s0 s1 B.splus
            ltac:(eapply sstar_then_splus with (1 := HS');
                    eapply B.SPlusOne)
    | B.splus ?E ?s0 ?s1 => go E s0 s1 B.splus
            ltac:(eapply splus_snoc with (1 := HS'))
    end.

(* [B_star HS]: extend the trace by a star of steps, keeping the
   trace's current strength (sstar stays sstar, splus stays splus). *)
Ltac B_star HS :=
    let S_ := fresh "S" in
    let S2 := fresh "S" in
    let HS' := fresh HS "'" in
    let go E s0 s1 Brel solver :=
        rename HS into HS';
        evar (S2 : B.state);
        assert (HS : Brel E s0 S2);
        [ solver; unfold S2
        | clear HS' ] in
    match type of HS with
    | B.sstar ?E ?s0 ?s1 => go E s0 s1 B.sstar
            ltac:(eapply sstar_then_sstar with (1 := HS'))
    | B.splus ?E ?s0 ?s1 => go E s0 s1 B.splus
            ltac:(eapply splus_then_sstar with (1 := HS'))
    end.

(* [B_plus HS]: extend the trace by a plus of steps; the result is
   always a [B.splus]. *)
Ltac B_plus HS :=
    let S_ := fresh "S" in
    let S2 := fresh "S" in
    let HS' := fresh HS "'" in
    let go E s0 s1 Brel solver :=
        rename HS into HS';
        evar (S2 : B.state);
        assert (HS : Brel E s0 S2);
        [ solver; unfold S2
        | clear HS' ] in
    match type of HS with
    | B.sstar ?E ?s0 ?s1 => go E s0 s1 B.splus
            ltac:(eapply sstar_then_splus with (1 := HS'))
    | B.splus ?E ?s0 ?s1 => go E s0 s1 B.splus
            ltac:(eapply splus_then_splus with (1 := HS'))
    end.
(* Shorthands: apply a constructor / a named lemma, then simplify. *)
Ltac i_ctor := intros; econstructor; simpl; eauto.
Ltac i_lem H := intros; eapply H; simpl; eauto.

(* Unfold the frame accessors of both languages everywhere. *)
Ltac stk_simpl := compute [
    A.set A.arg A.self A.locals
    B.set B.arg B.self B.locals
    ] in *.

(* Setting the same local in related frames preserves [I_frame]. *)
Lemma set_I_frame : forall af bf dst v,
    I_frame af bf ->
    I_frame (A.set af dst v) (B.set bf dst v).
intros0 II. invc II.
stk_simpl. constructor.
Qed.
Hint Resolve set_I_frame.
(* From [almost_I] the B side can reach a state related by the strong
   [I] using only B steps (a star): empty switch continuations are
   discharged with [B.SContSwitch], by induction on the flattened
   continuation [bk]. *)
Lemma I_catchup : forall BE a b,
    almost_I a b ->
    exists b',
        B.sstar BE b b' /\
        I a b'.
destruct a as [acode af ak | av]; cycle 1.
    { (* easy case: `a` and `b` are Stop *)
        intros. on >almost_I, invc.
        eexists. split. eapply B.SStarNil. i_ctor. }
destruct b as [bcode bf bk | bv]; cycle 1.
    { (* impossible case: `a` is Run but `b` is Stop *)
        intros. exfalso. on >almost_I, invc. }
(* Now we know `a` and `b` are Run, and `acode` is non-empty. Do induction. *)
make_first bk. induction bk; intros; on >almost_I, invc; simpl in *.
- destruct bcode as [| bi bcode ]; cycle 1.
    { eexists. split. eapply B.SStarNil. i_ctor. }
    fwd eapply IHbk; eauto using AIRun. break_exists. break_and.
    eexists. split. eapply B.SStarCons.
    + eapply B.SContSwitch.
    + eassumption.
    + assumption.
- eexists. split. eapply B.SStarNil.
    rewrite app_nil_r in *. destruct acode; on >Forall2, invc.
    + i_ctor.
    + i_ctor. rewrite app_nil_r. eauto.
- eexists. split. eapply B.SStarNil.
    rewrite app_nil_r in *. destruct acode; on >Forall2, invc.
    + i_ctor.
    + i_ctor. rewrite app_nil_r. eauto.
Qed.
(* One A step is matched by exactly one B step, but only up to the
   weaker [almost_I]; [I_catchup] restores [I] afterwards.  Case
   analysis on the A step, one bullet per instruction. *)
Lemma I_sim_almost : forall AE BE a a' b,
    Forall2 I_func AE BE ->
    I a b ->
    A.sstep AE a a' ->
    exists b',
        B.sstep BE b b' /\
        almost_I a' b'.
destruct a as [ae af ak | ae];
intros0 Henv II Astep; [ | solve [invc Astep] ].
inv Astep; inv II;
try on (Forall2 _ (_ :: _) (_ :: _)), invc;
try on >I_insn, invc;
try on >I_frame, invc;
simpl in *.
- (* Arg *)
    eexists. split. eapply B.SArg; stk_simpl. simpl.
    i_ctor.
- (* Self *)
    eexists. split. eapply B.SSelf; stk_simpl. simpl.
    i_ctor.
- (* DerefinateConstr *)
    eexists. split. eapply B.SDerefinateConstr; simpl; eauto.
    i_ctor.
- (* DerefinateClose *)
    eexists. split. eapply B.SDerefinateClose; simpl; eauto.
    i_ctor.
- (* MkConstr *)
    eexists. split. eapply B.SConstrDone; simpl; eauto.
    i_ctor.
- (* MkClose *)
    eexists. split. eapply B.SCloseDone; simpl; eauto.
    i_ctor.
- (* OpaqueOp *)
    eexists. split. eapply B.SOpaqueOpDone; simpl; eauto.
    i_ctor.
- (* MakeCall *)
    fwd eapply Forall2_nth_error_ex with (xs := AE) as HH; eauto.
        destruct HH as ([bbody bret] & ? & ?).
    on >I_func, invc.
    eexists. split. eapply B.SMakeCall; simpl; eauto.
    i_ctor.
    { rewrite app_nil_r. auto. }
    i_ctor.
- (* Switchinate *)
    fwd eapply Forall2_nth_error_ex with (xs := cases) as HH; eauto. destruct HH as (bcase & ? & ?).
    eexists. split. eapply B.SSwitchinate; eauto using eq_refl.
    i_ctor. eauto using Forall2_app.
- (* Copy *)
    eexists. split. eapply B.SCopy; simpl; eauto.
    i_ctor.
- exfalso. on >Forall2, invc.
- (* ContRet *)
    on >I_cont, inv.
    eexists. split. eapply B.SContRet; eauto using eq_refl.
    i_ctor.
- exfalso. on >Forall2, invc.
- (* ContStop *)
    on >I_cont, inv.
    eexists. split. eapply B.SContStop; eauto using eq_refl.
    i_ctor.
Qed.
(* Main simulation diagram: composing [I_sim_almost] (one matching B
   step) with [I_catchup] (a star of catch-up B steps) shows every A
   step is matched by a non-empty B trace re-establishing [I]. *)
Theorem I_sim : forall AE BE a a' b,
    Forall2 I_func AE BE ->
    I a b ->
    A.sstep AE a a' ->
    exists b',
        B.splus BE b b' /\
        I a' b'.
intros0 Henv II Astep.
fwd eapply I_sim_almost as HH; eauto.
    destruct HH as (b' & Hb' & ?).
fwd eapply I_catchup as HH; eauto.
    destruct HH as (b'' & Hb'' & ?).
B_start HS.
B_step HS. { exact Hb'. }
B_star HS. { exact Hb''. }
exists b''. eauto.
Qed.

(* [compile_cu] leaves the metadata component unchanged. *)
Lemma compile_cu_metas : forall A Ameta B Bmeta,
    compile_cu (A, Ameta) = (B, Bmeta) ->
    Ameta = Bmeta.
simpl. inversion 1. auto.
Qed.
Require oeuf.Semantics.

(* Package the simulation as a forward simulation between the two
   whole-program semantics. *)
Section Preservation.

Variable prog : A.prog_type.
Variable tprog : B.prog_type.

Hypothesis TRANSF : compile_cu prog = tprog.

Theorem fsim :
    Semantics.forward_simulation (A.semantics prog) (B.semantics tprog).
Proof.
    destruct prog as [A Ameta], tprog as [B Bmeta].
    fwd eapply compile_cu_I_env; eauto.
    fwd eapply compile_cu_metas; eauto.
    eapply Semantics.forward_simulation_plus with
        (match_states := I)
        (match_values := @eq value).
    - (* initial (call) states are related *)
      simpl. intros. on >B.is_callstate, invc. simpl in *.
        destruct ltac:(i_lem Forall2_nth_error_ex') as ([abody aret] & ? & ?).
        on >I_func, invc.
        eexists. split; repeat i_ctor.
        destruct abody.
        + on >Forall2, invc. repeat i_ctor.
        + on >Forall2, invc. repeat i_ctor.
            rewrite app_nil_r. assumption.
    - (* final states are related *)
      intros0 II Afinal. invc Afinal. invc II.
        eexists; split; i_ctor.
    - simpl. eauto.
    - simpl. intros. tauto.
    - (* step diagram, via [I_sim] *)
      intros0 Astep. intros0 II.
        eapply splus_semantics_sim, I_sim; try eassumption.
    Qed.

End Preservation.
|
{"author": "uwplse", "repo": "oeuf", "sha": "f3e4d236465ba872d1f1b8229548fa0edf8f7a3f", "save_path": "github-repos/coq/uwplse-oeuf", "path": "github-repos/coq/uwplse-oeuf/oeuf-f3e4d236465ba872d1f1b8229548fa0edf8f7a3f/src/FlatSwitchComp.v"}
|
[STATEMENT]
lemma preserves_quasi_inverse:
assumes "C.equivalence_map f"
shows "D.isomorphic (F (C.some_quasi_inverse f)) (D.some_quasi_inverse (F f))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D.isomorphic (F (C.some_quasi_inverse f)) (D.some_quasi_inverse (F f))
[PROOF STEP]
using assms preserves_quasi_inverses C.quasi_inverses_some_quasi_inverse
D.quasi_inverse_unique D.quasi_inverses_some_quasi_inverse
preserves_equivalence_maps
[PROOF STATE]
proof (prove)
using this:
C.equivalence_map f
C.quasi_inverses ?f ?g \<Longrightarrow> D.quasi_inverses (F ?f) (F ?g)
C.equivalence_map ?f \<Longrightarrow> C.quasi_inverses ?f (C.some_quasi_inverse ?f)
C.equivalence_map ?f \<Longrightarrow> C.quasi_inverses (C.some_quasi_inverse ?f) ?f
\<lbrakk>D.quasi_inverses ?f ?g; D.quasi_inverses ?f ?g'\<rbrakk> \<Longrightarrow> D.isomorphic ?g ?g'
D.equivalence_map ?f \<Longrightarrow> D.quasi_inverses ?f (D.some_quasi_inverse ?f)
D.equivalence_map ?f \<Longrightarrow> D.quasi_inverses (D.some_quasi_inverse ?f) ?f
C.equivalence_map ?f \<Longrightarrow> D.equivalence_map (F ?f)
goal (1 subgoal):
1. D.isomorphic (F (C.some_quasi_inverse f)) (D.some_quasi_inverse (F f))
[PROOF STEP]
by blast
|
{"llama_tokens": 529, "file": "Bicategory_Pseudofunctor", "length": 2}
|
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
import tensorflow as tf
import utils
class MatplotlibTimeSeriesVisualization(utils.MatplotlibTimeSeriesVisualization):
    """NYC-accidents specialization of the generic time-series
    visualization from ``utils``; supplies the query that builds the
    plotted segments."""

    @staticmethod
    def time_query(dataset, date_attr, group_attr, attributes):
        """Build one smoothed series per (group category, attribute).

        Returns ``(segments, attributes, date_categories)`` where each
        segment is a dict with the x (date ordinal) / y (smoothed value)
        arrays and their bounding box.
        NOTE(review): dates are mapped onto a shared axis ordered by the
        sorted distinct values of ``date_attr``.
        """
        date_categories = sorted(list(dataset[date_attr].value_counts().index.values))
        segments = []
        for attr in attributes:
            fact_table = dataset[[date_attr, group_attr, attr]].dropna()
            for cat in fact_table[group_attr].value_counts().index.values:
                series = fact_table[fact_table[group_attr] == cat]
                # Position of each row's date on the shared axis.
                x = np.array([date_categories.index(date) for date in series[date_attr].values])
                y = series[attr].values.flatten()
                y = np.convolve(y, [0.05, 0.1, 0.2, 0.3, 0.2, 0.1, 0.05], mode="same")  # 7-days average
                boundary = [np.min(x), np.max(x), np.min(y), np.max(y)]
                segments.append({"category": cat, "attribute": attr, "x": x, "y": y, "boundary": boundary})
        return segments, attributes, date_categories

    @classmethod
    def unit_test(cls):
        """Smoke test on the raw CSV (paths relative to the repo root)."""
        dataset = Preprocessing(pd.read_csv("src/NYC Accidents 2020.csv")).dataset
        time_series_attributes = [
            "NUMBER OF PERSONS INJURED", "NUMBER OF PERSONS KILLED",
            "NUMBER OF PEDESTRIANS INJURED", "NUMBER OF PEDESTRIANS KILLED",
            "NUMBER OF CYCLIST INJURED", "NUMBER OF CYCLIST KILLED",
            "NUMBER OF MOTORIST INJURED", "NUMBER OF MOTORIST KILLED"
        ]
        cls(
            cache_file="cache/nyc-accidents-time-series.pkl",
            dataset=dataset,
            date_attr="CRASH DATE",
            group_attr="BOROUGH",
            attributes=time_series_attributes
        )
class MatplotlibTimeFreeVisualization(utils.MatplotlibTimeFreeVisualization):
    """NYC-accidents specialization of the generic time-free
    visualization from ``utils``."""

    @classmethod
    def unit_test(cls):
        """Smoke test: one-hot encode BOROUGH and visualize per date."""
        cleaned = Preprocessing(pd.read_csv("src/NYC Accidents 2020.csv")).dataset
        borough_dummies = pd.get_dummies(cleaned["BOROUGH"])
        frame = cleaned[["CRASH DATE"]].join(borough_dummies)
        cls(
            cache_file="cache/nyc-accidents-time-free.pkl",
            dataset=frame,
            group_attr="CRASH DATE",
            attributes=list(borough_dummies.columns),
        )
class Preprocessing(utils.Preprocessing):
    """Clean and integrate the raw NYC accidents CSV.

    Pipeline: build the fact table (``data_integration``), drop rows
    missing time/location (``drop_missing_data``), then fill the rest
    with the sentinel string ``"NULL"`` (``fill_missing_data``).
    """

    def __init__(self, df: pd.DataFrame):
        super().__init__(df)
        # Same pipeline as __call__.  The intermediate `dataset.info()`
        # debug dumps (marked "TODO: remove this line") have been removed.
        self.dataset = self.data_integration(df)
        self.dataset = self.drop_missing_data(self.dataset)
        self.dataset = self.fill_missing_data(self.dataset)

    def __call__(self, samples, *args, **kwargs):
        """Apply the same cleaning pipeline to new samples."""
        samples = self.data_integration(samples)
        samples = self.drop_missing_data(samples)
        return self.fill_missing_data(samples)

    def data_integration(self, samples):
        """Build the fact table: keep the core attributes and collapse the
        five 'CONTRIBUTING FACTOR VEHICLE i' / 'VEHICLE TYPE CODE i'
        columns into set-valued columns; rows where either set is empty
        are dropped."""
        attributes = [
            "CRASH DATE", "CRASH TIME",
            "BOROUGH",
            "LATITUDE", "LONGITUDE",
            "ON STREET NAME", "CROSS STREET NAME", "OFF STREET NAME",
            "NUMBER OF PERSONS INJURED", "NUMBER OF PERSONS KILLED", "NUMBER OF PEDESTRIANS INJURED", "NUMBER OF PEDESTRIANS KILLED", "NUMBER OF CYCLIST INJURED", "NUMBER OF CYCLIST KILLED", "NUMBER OF MOTORIST INJURED", "NUMBER OF MOTORIST KILLED",
        ]
        # .copy() so the inserts below mutate an independent frame, not a
        # view of `samples` (avoids SettingWithCopy behavior).
        fact_table = samples[attributes].copy()
        attributes = [
            "CONTRIBUTING FACTOR VEHICLE 1", "CONTRIBUTING FACTOR VEHICLE 2", "CONTRIBUTING FACTOR VEHICLE 3",
            "CONTRIBUTING FACTOR VEHICLE 4", "CONTRIBUTING FACTOR VEHICLE 5",
        ]
        series = samples[attributes].fillna("NULL")
        values = [set(items) - {"NULL"} for items in series.values]
        fact_table.insert(len(fact_table.columns), "CONTRIBUTING FACTOR VEHICLE", values)
        invalid_positions = [i for i, items in enumerate(values) if not items]
        attributes = [
            "VEHICLE TYPE CODE 1", "VEHICLE TYPE CODE 2", "VEHICLE TYPE CODE 3", "VEHICLE TYPE CODE 4",
            "VEHICLE TYPE CODE 5"
        ]
        series = samples[attributes].fillna("NULL")
        values = [set(items) - {"NULL"} for items in series.values]
        fact_table.insert(len(fact_table.columns), "VEHICLE TYPE CODE", values)
        invalid_positions += [i for i, items in enumerate(values) if not items]
        # `DataFrame.drop(index=...)` expects *labels*.  The positions
        # collected above only coincide with labels for a default
        # RangeIndex, so translate them explicitly (and de-duplicate:
        # a row can be invalid in both set-valued columns).
        bad_labels = fact_table.index[sorted(set(invalid_positions))]
        return fact_table.drop(index=bad_labels)

    def drop_missing_data(self, samples):
        """All examples should have fully value in attributes `CRASH TIME`,
        `LATITUDE` and `LONGITUDE`; rows missing any of them are dropped.
        """
        special_data = samples[["CRASH TIME", "LATITUDE", "LONGITUDE"]]
        invalid_rows = special_data[special_data.isnull().any(axis=1)]
        return samples.drop(invalid_rows.index)

    def fill_missing_data(self, samples):
        """Replace remaining NaNs with the sentinel string "NULL"."""
        return samples.fillna("NULL")

    @classmethod
    def unit_test(cls):
        """Smoke test on the raw CSV."""
        cls(pd.read_csv("src/NYC Accidents 2020.csv"))
class Mining:
    """Mining tasks (regression, association rules) over the integrated
    NYC-accidents fact table produced by ``Preprocessing``."""

    def __init__(self, df: pd.DataFrame):
        self.dataset = df

    def clustering(self, method):
        """Placeholder for clustering experiments.

        :param method: K-means, DBSCAN, etc
        :return:
        """
        pass

    def regression(self):
        """Train a dense network predicting casualty counts from
        day-of-week, hour-of-day and location."""
        # convert date to one-hot day-of-week features
        dataset = pd.to_datetime(self.dataset["CRASH DATE"]).dt.day_name()
        dataset = pd.get_dummies(dataset)
        # binning to hours: split on ":" instead of slicing the first two
        # characters — a non-zero-padded time such as "9:30" would make
        # int(time[:2]) == int("9:") raise ValueError
        hour_series = {"CRASH TIME": [int(time.split(":")[0])
                                      for time in self.dataset["CRASH TIME"].values]}
        hour_series = pd.DataFrame(hour_series, index=dataset.index)
        dataset = dataset.join(hour_series)
        # add the location message
        dataset = dataset.join(self.dataset[["LATITUDE", "LONGITUDE"]])
        features = np.asarray(dataset.values, dtype=np.float32)
        label_attributes = [
            "NUMBER OF PERSONS INJURED", "NUMBER OF PERSONS KILLED",
            "NUMBER OF PEDESTRIANS INJURED", "NUMBER OF PEDESTRIANS KILLED",
            "NUMBER OF CYCLIST INJURED", "NUMBER OF CYCLIST KILLED",
            "NUMBER OF MOTORIST INJURED", "NUMBER OF MOTORIST KILLED"
        ]
        labels = np.asarray(self.dataset[label_attributes].values, dtype=np.float32)
        # shuffled 60000 / remainder train/test split
        random_indices = np.arange(features.shape[0])
        np.random.shuffle(random_indices)
        x_train, y_train = features[random_indices[:60000], :], labels[random_indices[:60000], :]
        x_test, y_test = features[random_indices[60000:], :], labels[random_indices[60000:], :]
        model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(256, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(len(label_attributes))
        ])
        # NOTE(review): 'accuracy' is an odd metric for a MeanSquaredError
        # regression head — kept as-is, but probably worth revisiting.
        model.compile(
            optimizer=tf.keras.optimizers.Adam(0.005),
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=['accuracy'])
        model.fit(
            x_train, y_train, batch_size=32, epochs=10,
            validation_data=(x_test, y_test),
            callbacks=[
                tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: 0.75 ** (epoch // 10) * lr, verbose=1),
                tf.keras.callbacks.TensorBoard(log_dir="logs/" + datetime.now().strftime("%Y%m%d-%H%M%S"),
                                               histogram_freq=1)
            ],
        )
        # tensorboard --logdir logs

    def association_mining(self):
        """Mine frequent itemsets / rules from the two set-valued columns."""
        examples = [list(item_set) for item_set in self.dataset["CONTRIBUTING FACTOR VEHICLE"]]
        utils.AssociationMining().fit(
            data=examples,
            min_support=0.01,
            min_confidence=0.6
        )
        examples = [list(item_set) for item_set in self.dataset["VEHICLE TYPE CODE"]]
        utils.AssociationMining().fit(
            data=examples,
            min_support=0.01,
            min_confidence=0.4
        )

    @classmethod
    def unit_test(cls):
        """Run the full preprocessing + mining pipeline once."""
        print("=" * 16 + " data preprocessing " + "=" * 16)
        dataset = Preprocessing(pd.read_csv("src/NYC Accidents 2020.csv")).dataset
        print("=" * 16 + " regression " + "=" * 16)
        cls(dataset).regression()
        print("=" * 16 + " association mining " + "=" * 16)
        cls(dataset).association_mining()
# Script entry point: run all smoke tests in sequence.
if __name__ == "__main__":
    MatplotlibTimeSeriesVisualization.unit_test()
    MatplotlibTimeFreeVisualization.unit_test()
    Preprocessing.unit_test()
    Mining.unit_test()
|
{"hexsha": "b7ae9ec92e3f20c0dc6e2dd66861d2d5124b3c2a", "size": 8751, "ext": "py", "lang": "Python", "max_stars_repo_path": "nyc_accidents.py", "max_stars_repo_name": "ZhengLiCS/project", "max_stars_repo_head_hexsha": "ffaa8630bbf77bd29ab8d2439ebbc9544535eece", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nyc_accidents.py", "max_issues_repo_name": "ZhengLiCS/project", "max_issues_repo_head_hexsha": "ffaa8630bbf77bd29ab8d2439ebbc9544535eece", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nyc_accidents.py", "max_forks_repo_name": "ZhengLiCS/project", "max_forks_repo_head_hexsha": "ffaa8630bbf77bd29ab8d2439ebbc9544535eece", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3271889401, "max_line_length": 249, "alphanum_fraction": 0.6213004228, "include": true, "reason": "import numpy", "num_tokens": 2034}
|
[STATEMENT]
lemma C_subset : "C M2 M1 \<Omega> V m i \<subseteq> TS M2 M1 \<Omega> V m i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. C M2 M1 \<Omega> V m i \<subseteq> TS M2 M1 \<Omega> V m i
[PROOF STEP]
by (simp add: TS_union)
|
{"llama_tokens": 107, "file": "Adaptive_State_Counting_ASC_ASC_Suite", "length": 1}
|
[STATEMENT]
lemma natural_of_integer_of_natural [simp]:
"natural_of_integer (integer_of_natural n) = n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. natural_of_integer (integer_of_natural n) = n
[PROOF STEP]
by transfer simp
|
{"llama_tokens": 88, "file": null, "length": 1}
|
## --- Grid ---
# TODO: generalize for arbitrary rectangular grids
"""Two-dimensional grid."""
struct Grid
x::Vector{Int}
y::Vector{Int}
n::Tuple{Int,Int}
end
# Number of points in the grid.
Base.length(grid::Grid) = prod(grid.n)

# Enumerate all point coordinates: `x` repeats each column index n[2]
# times while `y` cycles through the row indices for every column.
function Grid(n::Tuple{Int,Int})
    xs = Int[]
    ys = Int[]
    for i in 1:n[1], j in 1:n[2]
        push!(xs, i)
        push!(ys, j)
    end
    Grid(xs, ys, n)
end

# Convenience constructor for a square grid.
Grid(n::Int) = Grid((n, n))
## --- Line Types ---
"""Line types of a grid: each entry is a primitive direction `(x, y)`
together with the largest multiple `k` of that direction that fits
inside the grid (see the `LineTypes(grid)` constructor)."""
struct LineTypes
    x::Vector{Int}
    y::Vector{Int}
    k::Vector{Int}
end

# Number of line types.
Base.length(t::LineTypes) = length(t.k)
"""Generate all `x` and `y` differences and their multiple `k`
that represent each line type in a grid."""
function LineTypes(grid::Grid)
n = grid.n
dx_max = n[1] -1
dy_max = n[2] -1
xs = Int[0, 1]
ys = Int[1, 0]
ks = Int[dx_max, dy_max]
for dx = 1:dx_max
for dy = 1:dy_max
if gcd(dx, dy) == 1
k = min(div(dx_max, dx), div(dy_max, dy))
# Increasing y
push!(xs, dx)
push!(ys, dy)
push!(ks, k)
# Decreasing y
push!(xs, dx)
push!(ys, -dy)
push!(ks, k)
end
end
end
return LineTypes(xs, ys, ks)
end
# Canonical primitive direction of the line through (x1, y1) and (x2, y2).
# The sign is normalized (x > 0, or x == 0 and y > 0) so both point
# orderings yield the same type.  Assumes the two points are distinct;
# gcd(0, 0) == 0 would divide by zero.
function line_type(x1::Int, x2::Int, y1::Int, y2::Int)
    dx, dy = x2 - x1, y2 - y1
    g = gcd(dx, dy)
    s = (dx < 0 || (iszero(dx) && dy < 0)) ? -1 : 1
    return (div(s * dx, g), div(s * dy, g))
end
## --- Symmetries ---
# Index permutations realizing the symmetries of the (square) grid;
# applying `pattern[p]` for `p in s.p` transforms a lock pattern.
struct Symmetries
    p::Vector{Vector{Int}}
end
"""Generate all symmetrical permutations of lock pattern in a grid."""
function Symmetries(grid::Grid)
(n, _) = grid.n
# Horizontal reflection
reflect = [j + i * n for i = 0:n-1 for j = n:-1:1]
# Rotate 90 degrees
r90 = [j + i * n for j = 1:n for i = n-1:-1:0]
r180 = r90[r90]
r270 = r180[r90]
r180_reflect = r180[reflect]
r270_reflect = r270[reflect]
Symmetries([r90, r180, r270, reflect, r180_reflect, r270_reflect])
end
## --- Bounds ---
"""Upper and lower bound for taxicab distance."""
struct Bounds
lb::Int
ub::Int
end
# Bounds on the total taxicab length of a pattern using m - 1 segments
# of pairwise-distinct line types: sum of the shortest / longest
# per-type segment lengths.
function Bounds(grid::Grid)
    segments = length(grid) - 1
    types = LineTypes(grid)
    # Taxicab length of one unit step of each line type.
    unit_len = abs.(types.x) .+ abs.(types.y)
    lb = sum(sort(unit_len)[1:segments])
    ub = sum(sort(unit_len .* types.k; rev = true)[1:segments])
    return Bounds(lb, ub)
end
# --- Generate model data ---
"""Generate the input data for the MiniZinc model."""
function data(grid::Grid)
t = LineTypes(grid)
s = Symmetries(grid)
r90, r180, r270, reflect, r180_reflect, r270_reflect = s.p
b = Bounds(grid)
return [
"m=$(length(grid));", "gx=$(grid.x);", "gy=$(grid.y);",
"N=$(length(t));", "tx=$(t.x);", "ty=$(t.y);", "tk=$(t.k);",
"r90=$r90;", "r180=$r180;", "r270=$r270;", "reflect=$reflect;",
"r180_reflect=$r180_reflect;", "r270_reflect=$r270_reflect;",
"d_lb=$(b.lb);", "d_ub=$(b.ub);"
]
end
# Write the MiniZinc data file for an n × n grid into `directory`.
function create_instance(n::Int, directory::AbstractString)
    grid = Grid(n)
    contents = join(data(grid), "\n")
    target = joinpath(directory, "$(join(grid.n, "x")).dzn")
    write(target, contents)
end
## --- Extract solutions from output files ---
# Parse every run of digits in `x` into a vector of integers of type `I`.
function parse_int_array(x::AbstractString; I = Int)
    digit_runs = eachmatch(r"([0-9]+)", x)
    return I[parse(I, m.match) for m in digit_runs]
end
# Collect every (pattern, distance) pair printed in `file`, grouped by
# distance.
function extract_solutions(file::AbstractString)
    # Construct Regex pattern for extracting results from a text file
    int_array_pattern = raw"(?<pattern>\[[0-9]+(, [0-9]+)*\])"
    int_pattern = raw"(?<distance>[0-9]+)"
    result_pattern = Regex("pattern=$int_array_pattern" * "\n" * "distance=$int_pattern")
    txt = read(file, String)
    results = Dict{Int,Vector{Vector{Int}}}()
    for m in eachmatch(result_pattern, txt)
        lock_pattern = parse_int_array(m["pattern"])
        distance = parse(Int, m["distance"])
        # get! creates the bucket on first sight of this distance.
        push!(get!(results, distance, Vector{Int}[]), lock_pattern)
    end
    return results
end
|
{"hexsha": "1c4304fff4b18eb12cee05f6d0975a754480153d", "size": 4144, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functions.jl", "max_stars_repo_name": "jaantollander/LockPatternComplexity.jl", "max_stars_repo_head_hexsha": "25ec93f855c47d4bb44a9f8e3839d48a0fcd5020", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-01T12:47:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T10:15:07.000Z", "max_issues_repo_path": "src/functions.jl", "max_issues_repo_name": "jaantollander/LockPatternComplexity.jl", "max_issues_repo_head_hexsha": "25ec93f855c47d4bb44a9f8e3839d48a0fcd5020", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/functions.jl", "max_forks_repo_name": "jaantollander/LockPatternComplexity.jl", "max_forks_repo_head_hexsha": "25ec93f855c47d4bb44a9f8e3839d48a0fcd5020", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4233128834, "max_line_length": 89, "alphanum_fraction": 0.5637065637, "num_tokens": 1299}
|
import pandas as pd
import numpy as np
# from copy import deepcopy
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.preprocessing import LabelEncoder
from sklearn.externals.joblib import Parallel, delayed
from gravity_learn.utils import (force_array,
check_is_fitted,
fit_model)
__all__ = ['ModelDispatch']
class ModelDispatch(_BaseComposition):
    """
    Dispatch different classes of data to dedicated downstream models.

    The class label of each row is predicted by ``dispatcher``
    (unsupervised learning) or derived from ``y`` via cut-offs
    (supervised learning).  Each downstream model then consumes its own
    class of data and implements fit and predict.

    Use cases:
        1. We want to have separate models for inliers and outliers
           (unsupervise)
        2. We want to have separate models for different groups of data with
           different values of y (supervise)

    Parameters
    ----------
    dispatcher : A classifier, could be unsupervised or supervised

    model_list : list of (string, base_model) tuples. The first
        half of each tuple is the group name of the model.

    supervise_cutoff : float, a cut-off, or a list of cut-offs to separate
        two or more groups of data. If None, then assuming dispatch is an
        unsupervise learning algo
        NOTE: assuming cut-offs are in ascending order

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.
    """
    def __init__(self, dispatcher, model_list, supervise_cutoff=None,
                 n_jobs=1, verbose=0):
        self.dispatcher = dispatcher
        self.model_list = list(model_list)
        # Normalize a scalar cut-off to a one-element list so that
        # _fit_supervise can always iterate over it.  (The previous
        # version normalized a *local* variable after the attribute was
        # already assigned, so passing a scalar cut-off crashed later.)
        if supervise_cutoff is not None and not isinstance(
                supervise_cutoff, (list, tuple, np.ndarray, pd.Index)):
            supervise_cutoff = [supervise_cutoff]
        self.supervise_cutoff = supervise_cutoff
        self.n_jobs = n_jobs
        self.verbose = verbose

    def get_params(self, deep=True):
        """Nested-parameter access per sklearn's composition API."""
        return self._get_params('model_list', deep=deep)

    def set_params(self, **kwargs):
        """Nested-parameter update per sklearn's composition API."""
        self._set_params('model_list', **kwargs)
        return self

    @property
    def get_model_list_(self):
        # The (name, model) tuples as passed in.
        return self.model_list

    @property
    def get_model_dict_(self):
        # Mapping group id -> fitted model; only available after fit.
        check_is_fitted(self, 'model_dict')
        return self.model_dict

    def _fit_unsupervise(self, X, y=None, *args, **kwargs):
        # Let the dispatcher discover the groups on its own.
        self.dispatcher = self.dispatcher.fit(X, *args, **kwargs)
        self.group = self.dispatcher.predict(X)

    def _fit_supervise(self, X, y, *args, **kwargs):
        # Derive group ids from y via the ascending cut-offs, then train
        # the dispatcher to reproduce those ids from X alone.
        self.group = np.zeros(len(y))
        for i, cutoff in enumerate(self.supervise_cutoff):
            self.group[np.where(y > cutoff)[0]] = i + 1
        self.dispatcher = self.dispatcher.fit(X, self.group, *args, **kwargs)

    def fit(self, X, y=None, *args, **kwargs):
        """Fit the dispatcher, then fit one downstream model per group
        (in parallel) on that group's rows."""
        # NOTE: let's say we respect dataframe
        if not isinstance(X, (pd.DataFrame, pd.Series)):
            X = pd.DataFrame(force_array(X))
        if not isinstance(y, (pd.DataFrame, pd.Series)):
            y = pd.DataFrame(force_array(y))
        # First, fit dispatcher and get group
        if self.supervise_cutoff is None:
            self._fit_unsupervise(X, *args, **kwargs)
        else:   # supervise
            self._fit_supervise(X, y, *args, **kwargs)
        # Second, fit label encoder so group ids become 0..k-1
        self.le = LabelEncoder().fit(self.group)
        self.group = self.le.transform(self.group)
        self.unique_groups = np.unique(self.group)
        # Third, map each group id to its (not yet fitted) model
        self.model_dict = \
            {
                group: self.model_list[i][-1]
                for i, group in enumerate(self.unique_groups)
            }
        # Next, get the row indices belonging to each group
        index_dict = \
            {
                group: np.where(self.group == group)[0]
                for group in self.unique_groups
            }
        # Parallelize fitting of the downstream models
        parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
        func = delayed(fit_model)
        fitted_model_list = parallel(
            func(self.model_dict[group], X.iloc[index], y.iloc[index])
            for (group, index) in index_dict.items()
        )
        # update models with their fitted versions (same group order)
        fitted_model_list = iter(fitted_model_list)
        self.model_dict = {
            group: next(fitted_model_list)
            for group in self.unique_groups
        }
        return self

    def predict(self, X):
        """Dispatch rows to their group's model and stitch the per-group
        predictions back into the original row order."""
        check_is_fitted(self, 'model_dict')
        # NOTE: let's say we respect dataframe
        if not isinstance(X, (pd.DataFrame, pd.Series)):
            X = pd.DataFrame(force_array(X))
        # predict on dispatcher and get group
        group_new = self.dispatcher.predict(X)
        group_new = self.le.transform(group_new)
        index_dict = \
            {
                group: np.where(group_new == group)[0]
                for group in self.unique_groups
            }
        # predict by group
        pred_dfs = []
        for (group, index) in index_dict.items():
            if len(index):
                df_pred = pd.DataFrame(
                    self.model_dict[group].predict(X.iloc[index]),
                    index=index
                )
                pred_dfs.append(df_pred)
        # concat all predictions and restore the original row order
        df_pred = pd.concat(pred_dfs)
        return force_array(df_pred.sort_index())

    def predict_proba(self, X):
        """Same as predict, but stitches together predict_proba outputs."""
        check_is_fitted(self, 'model_dict')
        # NOTE: let's say we respect dataframe
        if not isinstance(X, (pd.DataFrame, pd.Series)):
            X = pd.DataFrame(force_array(X))
        # predict on dispatcher and get group
        group_new = self.dispatcher.predict(X)
        group_new = self.le.transform(group_new)
        index_dict = \
            {
                group: np.where(group_new == group)[0]
                for group in self.unique_groups
            }
        # predict probabilities by group
        proba_dfs = []
        for (group, index) in index_dict.items():
            if len(index):
                df_proba = pd.DataFrame(
                    # fixed typo: was `prodict_proba`, which raised
                    # AttributeError at runtime
                    self.model_dict[group].predict_proba(X.iloc[index]),
                    index=index
                )
                proba_dfs.append(df_proba)
        # concat all probabilities and restore the original row order
        df_proba = pd.concat(proba_dfs)
        return force_array(df_proba.sort_index())
|
{"hexsha": "81b821747799af8cccd43765a9a386075efa2f23", "size": 6669, "ext": "py", "lang": "Python", "max_stars_repo_path": "klearn/ensemble/dispatch.py", "max_stars_repo_name": "KevinLiao159/klearn", "max_stars_repo_head_hexsha": "ffc0cb6b69cd21f2aac8934af55ac6e32c4db689", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-03T08:20:57.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-03T08:20:57.000Z", "max_issues_repo_path": "klearn/ensemble/dispatch.py", "max_issues_repo_name": "KevinLiao159/klearn", "max_issues_repo_head_hexsha": "ffc0cb6b69cd21f2aac8934af55ac6e32c4db689", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "klearn/ensemble/dispatch.py", "max_forks_repo_name": "KevinLiao159/klearn", "max_forks_repo_head_hexsha": "ffc0cb6b69cd21f2aac8934af55ac6e32c4db689", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2445652174, "max_line_length": 77, "alphanum_fraction": 0.5994901784, "include": true, "reason": "import numpy", "num_tokens": 1433}
|
'''
There are in total five algorithms: MLP, SVM, Bag, AdaBoost and GB
'''
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Module-level RNG seed shared by all classifiers below.
# (The original `global seed` statement was a no-op at module scope
# and has been dropped.)
seed = 0
class Ensemble_Classifier():
    """Weighted voting ensemble of five scikit-learn classifiers
    (MLP, Bagging, AdaBoost, GradientBoosting, SVM).

    Each base model is trained on the same split; its held-out accuracy
    becomes its normalized voting weight ``alpha``.  The weight order is
    [MLP, Bag, AdaBoost, GB, SVM] in both ``fit`` and ``predict``.
    """

    def __init__(self):
        self.MLP = MLPClassifier(hidden_layer_sizes=(50,), max_iter=50, alpha=1e-4,
                                 solver='sgd', verbose=0, tol=1e-4, random_state=seed,
                                 learning_rate_init=.1)
        self.SVM = SVC(kernel='linear')
        self.Bag = BaggingClassifier(base_estimator=SVC(), n_estimators=10, random_state=seed)
        self.AdaBoost = AdaBoostClassifier(n_estimators=100, random_state=seed)
        self.GB = GradientBoostingClassifier(n_estimators=100, random_state=seed)
        # Kept for backward compatibility: uniform weights before `fit`
        # replaces them with the accuracy-based `alpha`.
        self.ratio = 0.2 * np.ones((5,))

    # define the fit function for ensemble classifier
    def fit(self, X, y):
        """Fit all base models and derive the voting weights ``alpha``."""
        # chose the same test_size with GAN model; seeded for
        # reproducibility (the original split was unseeded even though
        # a module-level `seed` exists)
        xtrain, xtest, ytrain, ytest = train_test_split(
            X, y, test_size=0.2, random_state=seed)
        for clf in (self.MLP, self.Bag, self.AdaBoost, self.GB, self.SVM):
            clf.fit(xtrain, ytrain)
        # compute the accuracy of classifiers on testing data
        scores = np.array([
            self.MLP.score(xtest, ytest),
            self.Bag.score(xtest, ytest),
            self.AdaBoost.score(xtest, ytest),
            self.GB.score(xtest, ytest),
            self.SVM.score(xtest, ytest),
        ])
        # normalize so the weights sum to 1
        self.alpha = scores / np.sum(scores)

    # define the predict function for ensemble classifier
    def predict(self, X):
        """Return hard 0/1 predictions from the weighted majority vote."""
        # Weight order matches `fit`: [MLP, Bag, AdaBoost, GB, SVM].
        # (The original paired alpha[1] with SVM and alpha[4] with Bag,
        # so those two models received each other's weights.)
        votes = (self.alpha[0] * self.MLP.predict(X)
                 + self.alpha[1] * self.Bag.predict(X)
                 + self.alpha[2] * self.AdaBoost.predict(X)
                 + self.alpha[3] * self.GB.predict(X)
                 + self.alpha[4] * self.SVM.predict(X))
        # threshold the weighted vote at 0.5
        return np.ones(votes.shape) * (votes > 0.5)

    # define the score function for ensemble classifier
    def score(self, X, y):
        """Accuracy of the ensemble on (X, y)."""
        return accuracy_score(y, self.predict(X), sample_weight=None)
|
{"hexsha": "06422d7ffac3545579bd79ef0ae80fa47479ab43", "size": 2885, "ext": "py", "lang": "Python", "max_stars_repo_path": "GAN_Models/Ensemble_Classifiers.py", "max_stars_repo_name": "Wapiti08/Analysis_Ransome_with_GAN", "max_stars_repo_head_hexsha": "f908ec77b4df1029b10fd4f8a9e94daf1b4bbf7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GAN_Models/Ensemble_Classifiers.py", "max_issues_repo_name": "Wapiti08/Analysis_Ransome_with_GAN", "max_issues_repo_head_hexsha": "f908ec77b4df1029b10fd4f8a9e94daf1b4bbf7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GAN_Models/Ensemble_Classifiers.py", "max_forks_repo_name": "Wapiti08/Analysis_Ransome_with_GAN", "max_forks_repo_head_hexsha": "f908ec77b4df1029b10fd4f8a9e94daf1b4bbf7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.078125, "max_line_length": 124, "alphanum_fraction": 0.6537261698, "include": true, "reason": "import numpy", "num_tokens": 681}
|
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import LinearRegression
def linear_factor_mod(y, x, p = None, regularize = None, return_alpha = False):
    """Fit a (weighted, optionally regularized) linear factor model y ~ x.

    Parameters
    ----------
    y : ndarray of shape (t_, n_) — dependent series
    x : ndarray of shape (t_, k) — factor series
    p : ndarray of shape (t_,), optional — observation weights
        (probabilities); defaults to uniform weights
    regularize : {'L1', 'L2', 'net', None} — regularization type
    return_alpha : bool — also return the intercepts when True

    Returns
    -------
    (beta, u) or (alpha, beta, u): loadings, residuals and, optionally,
    intercepts.
    """
    t_, n_ = y.shape
    # set uniform weights if no kernel
    if p is None:
        p = np.ones(t_) / t_
    # weighted de-meaning; sqrt(p) scaling turns the weighted LS problem
    # into an ordinary one
    m_y = p @ y
    m_x = p @ x
    y_p = ((y - m_y).T * np.sqrt(p)).T
    x_p = ((x - m_x).T * np.sqrt(p)).T
    # choose the model — this must be one if/elif chain: the original
    # code used a separate `if regularize == 'net': ... else: ...`,
    # whose else-branch silently overwrote the Lasso/Ridge choice with
    # plain LinearRegression
    if regularize == 'L1':
        mod = Lasso(alpha = 0.01/(2.*t_), fit_intercept = True)
    elif regularize == 'L2':
        mod = Ridge(alpha = 0.01/(2.*t_), fit_intercept = True)
    elif regularize == 'net':
        mod = ElasticNet(alpha = 0.01/(2.*t_), fit_intercept = True)
    else:
        mod = LinearRegression()
    mod.fit(x_p, y_p)
    # retrieve coefficients and residuals
    beta = mod.coef_
    alpha = mod.intercept_
    u = np.subtract(y - alpha, x @ np.atleast_2d(beta.T))
    if not return_alpha:
        return beta, u
    return alpha, beta, u
|
{"hexsha": "66512e8e3f2bbb01ac6ba1bab1afc9dc1928ef37", "size": 1082, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/factor_models.py", "max_stars_repo_name": "matus-jan-lavko/ReinforcementLearning-vs-EW", "max_stars_repo_head_hexsha": "48f8f9285e08bf05f79173c6a0c57cb05a3a8dfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-21T23:09:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-21T23:09:10.000Z", "max_issues_repo_path": "src/factor_models.py", "max_issues_repo_name": "matus-jan-lavko/ReinforcementLearning-vs-EW", "max_issues_repo_head_hexsha": "48f8f9285e08bf05f79173c6a0c57cb05a3a8dfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/factor_models.py", "max_forks_repo_name": "matus-jan-lavko/ReinforcementLearning-vs-EW", "max_forks_repo_head_hexsha": "48f8f9285e08bf05f79173c6a0c57cb05a3a8dfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1627906977, "max_line_length": 79, "alphanum_fraction": 0.6164510166, "include": true, "reason": "import numpy", "num_tokens": 320}
|
import sys
import os
import cv2
import numpy as np
import copy
import matplotlib.pyplot as plt
from cto.utility.logging_extension import logger
from VTKInterface.Interfaces.Render_Interface import RenderInterface
from cto.rendering.rendering_utility import build_render_compatible_focal_length
from cto.rendering.rendering_utility import build_affine_transformation_matrix
def build_vtk_render_compatible_intrinsics(intrinsics):
    """Return (width, height, fx, fy, cx, cy) acceptable to the VTK renderer.

    The renderer of Open3D has the following (strong) restrictions:
      * f_x and f_y must be equal
      * c_x must be equal to width / 2 - 0.5
      * c_y must be equal to height / 2 - 0.5
    """
    cx_renderer, cy_renderer = intrinsics.get_principal_point()
    f_renderer = build_render_compatible_focal_length(intrinsics)
    return (
        intrinsics.width,
        intrinsics.height,
        f_renderer,
        f_renderer,
        cx_renderer,
        cy_renderer,
    )
def build_vtk_render_compatible_camera_parameters(camera_parameters):
    """Deep-copy camera_parameters with renderer-compatible intrinsics installed."""
    compatible_parameters = copy.deepcopy(camera_parameters)
    width, height, fx, fy, cx, cy = build_vtk_render_compatible_intrinsics(
        camera_parameters.intrinsic)
    compatible_parameters.intrinsic.set_intrinsics(width, height, fx, fy, cx, cy)
    return compatible_parameters
def invert_transformation_mat(trans_mat):
    """Invert a 4x4 rigid-body transformation matrix.

    Exploits that the inverse of the rotation part equals its transpose,
    which is more robust than a generic np.linalg.inv(trans_mat).
    """
    rot_inv = trans_mat[0:3, 0:3].T
    inverted = np.zeros_like(trans_mat)
    inverted[0:3, 0:3] = rot_inv
    inverted[0:3, 3] = -np.dot(rot_inv, trans_mat[0:3, 3])
    inverted[3, 3] = 1
    return inverted
def compute_depth_maps_from_geometry(mesh_ifp,
                                     camera_trajectory,
                                     ordered_image_names,
                                     depth_map_callback,
                                     config=None):
    """Render a depth map and color image of the mesh for every camera pose.

    For each (image_name, camera_parameters) pair, the mesh at ``mesh_ifp``
    is rendered off screen from that camera, the depth/RGBA buffers are
    warped to undo the renderer's intrinsic restrictions, and the results
    are passed to ``depth_map_callback``.

    :param mesh_ifp: input file path of the mesh (must exist).
    :param camera_trajectory: object exposing ``parameters`` — a sequence of
        camera parameter objects with ``extrinsic``/``intrinsic`` members.
    :param ordered_image_names: image names aligned with the trajectory order.
    :param depth_map_callback: callable(image_name, depth_map, color_image)
        invoked once per rendered view.
    :param config: unused in this function; kept for caller compatibility.
    """
    logger.info('create_depth_maps_from_mesh: ... ')
    num_params = len(camera_trajectory.parameters)
    logger.vinfo('num_params', num_params)
    camera_parameter_list = camera_trajectory.parameters
    # Debug knob: set num_images to an int to truncate the trajectory.
    # With the hard-coded None the branch below is a no-op.
    num_images = None
    if num_images is not None:
        camera_parameter_list = camera_parameter_list[:num_images]
    assert os.path.isfile(mesh_ifp)
    # required for certain methods called below
    off_screen_rendering = True
    for image_name, camera_parameters in zip(ordered_image_names, camera_parameter_list):
        extrinsics = camera_parameters.extrinsic
        # world-to-cam matrix -> cam-to-world matrix via the rigid-transform inverse
        cam_to_world_mat_computer_vision = invert_transformation_mat(extrinsics)
        # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html
        intrinsics = camera_parameters.intrinsic
        # intrinsics adjusted to the renderer's restrictions (see the helper above)
        render_compatible_camera_parameters = build_vtk_render_compatible_camera_parameters(
            camera_parameters
        )
        width = intrinsics.width
        height = intrinsics.height
        render_interface = RenderInterface(
            off_screen_rendering=off_screen_rendering,
            width=width,
            height=height,
            background_color=(0, 127, 127))
        # Can we avoid this redundant loading
        render_interface.load_vtk_mesh_or_point_cloud(
            mesh_ifp, texture_ifp=None)
        render_interface.set_active_cam_from_computer_vision_cam_to_world_mat(
            cam_to_world_mat_computer_vision,
            render_compatible_camera_parameters.intrinsic.intrinsic_matrix,
            width,
            height,
            max_clipping_range=sys.float_info.max)
        render_interface.render()
        #render_interface.show_z_buffer()
        if not off_screen_rendering:
            render_interface.render_and_start()
        # We apply an affine transformation to the depth_map images
        # to compensate differences in the intrinsic parameters
        affine_mat = build_affine_transformation_matrix(
            camera_parameters, render_compatible_camera_parameters)
        depth_map = render_interface.get_computer_vision_depth_buffer_as_numpy_arr()
        color_image = render_interface.get_rgba_buffer_as_numpy_arr()
        # NOTE(review): in cv2.warpAffine the 4th positional parameter is
        # `dst`, not `flags` — WARP_INVERSE_MAP / BORDER_CONSTANT passed
        # positionally here may not land on flags/borderMode as presumably
        # intended; verify against the OpenCV signature.
        color_image = cv2.warpAffine(
            color_image,
            affine_mat,
            (color_image.shape[1], color_image.shape[0]),
            cv2.WARP_INVERSE_MAP,
            cv2.BORDER_CONSTANT, 0)
        depth_map = cv2.warpAffine(
            depth_map,
            affine_mat,
            (depth_map.shape[1], depth_map.shape[0]),
            cv2.WARP_INVERSE_MAP,
            cv2.BORDER_CONSTANT, 0)
        depth_map_callback(image_name, depth_map, color_image)
    # if not off_screen_rendering:
    #     render_interface.render_and_start()
    logger.info('create_depth_maps_from_mesh: Done ')
######################################################################
|
{"hexsha": "ac18314611780e08881746b5a3ed0a453b5ecf65", "size": 5074, "ext": "py", "lang": "Python", "max_stars_repo_path": "cto/rendering/vtk_rendering_utility.py", "max_stars_repo_name": "SBCV/ColmapTexturingWithOpen3D", "max_stars_repo_head_hexsha": "d45f10331c563ca874b618d5fee38d311de9437e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-24T10:40:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T01:41:11.000Z", "max_issues_repo_path": "cto/rendering/vtk_rendering_utility.py", "max_issues_repo_name": "SBCV/ColmapTextureMapping", "max_issues_repo_head_hexsha": "d45f10331c563ca874b618d5fee38d311de9437e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cto/rendering/vtk_rendering_utility.py", "max_forks_repo_name": "SBCV/ColmapTextureMapping", "max_forks_repo_head_hexsha": "d45f10331c563ca874b618d5fee38d311de9437e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-07T08:32:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-07T08:32:16.000Z", "avg_line_length": 35.4825174825, "max_line_length": 109, "alphanum_fraction": 0.699251084, "include": true, "reason": "import numpy", "num_tokens": 1075}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Beamer Presentation
% LaTeX Template
% Version 1.0 (10/11/12)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND THEMES
%----------------------------------------------------------------------------------------
\documentclass{beamer}
\mode<presentation> {
% The Beamer class comes with a number of default slide themes
% which change the colors and layouts of slides. Below this is a list
% of all the themes, uncomment each in turn to see what they look like.
%\usetheme{default}
%\usetheme{AnnArbor}
%\usetheme{Antibes}
%\usetheme{Bergen}
%\usetheme{Berkeley}
%\usetheme{Berlin}
%\usetheme{Boadilla}
%\usetheme{CambridgeUS}
%\usetheme{Copenhagen}
%\usetheme{Darmstadt}
%\usetheme{Dresden}
%\usetheme{Frankfurt}
%\usetheme{Goettingen}
%\usetheme{Hannover}
%\usetheme{Ilmenau}
%\usetheme{JuanLesPins}
%\usetheme{Luebeck}
%\usetheme{Madrid}
%\usetheme{Malmoe}
%\usetheme{Marburg}
%\usetheme{Montpellier}
%\usetheme{PaloAlto}
%\usetheme{Pittsburgh}
%\usetheme{Rochester}
%\usetheme{Singapore}
%\usetheme{Szeged}
\usetheme{Warsaw}
% As well as themes, the Beamer class has a number of color themes
% for any slide theme. Uncomment each of these in turn to see how it
% changes the colors of your current slide theme.
%\usecolortheme{albatross}
%\usecolortheme{beaver}
%\usecolortheme{beetle}
%\usecolortheme{crane}
%\usecolortheme{dolphin}
%\usecolortheme{dove}
%\usecolortheme{fly}
%\usecolortheme{lily}
%\usecolortheme{orchid}
%\usecolortheme{rose}
%\usecolortheme{seagull}
\usecolortheme{seahorse}
%\usecolortheme{whale}
%\usecolortheme{wolverine}
%\setbeamertemplate{footline} % To remove the footer line in all slides uncomment this line
%\setbeamertemplate{footline}[page number] % To replace the footer line in all slides with a simple slide count uncomment this line
%\setbeamertemplate{navigation symbols}{} % To remove the navigation symbols from the bottom of all slides uncomment this line
}
\usepackage{graphicx} % Allows including images
\usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables
\usepackage{amsmath}
\AtBeginSection[]
{
\begin{frame}
\frametitle{Outline}
\tableofcontents[currentsection]
\end{frame}
}
%----------------------------------------------------------------------------------------
% TITLE PAGE
%----------------------------------------------------------------------------------------
\title[Sum of Divergent Series]{Sum of Divergent Series} % The short title appears at the bottom of every slide, the full title is only on the title page
\author{Haoen CUI} % Your name
\institute[Uptake] % Your institution as it will appear on the bottom of every slide, may be shorthand to save space
{
Uptake Math Club Lightning Talk
% Your institution for the title page
% Your email address
}
%\date{\today} % Date, can be changed to a custom date
\date{February 15 \& 22, 2019}
\begin{document}
\begin{frame}
\titlepage % Print the title page as the first slide
\end{frame}
%----------------------------------------------------------------------------------------
% PRESENTATION SLIDES
%----------------------------------------------------------------------------------------
%------------------------------------------------
\section{Motivating Examples}
%------------------------------------------------
\begin{frame}
\frametitle{Motivating Examples}
\begin{figure}
\includegraphics[width=\linewidth]{success_kid.jpg}
\end{figure}
\end{frame}
%------------------------------------------------
\subsection{Recap from Last Time}
\begin{frame}
\frametitle{Convergence of Power Series}
Last time, \textcolor{cyan}{@willdiesel} talked about the following Maclaurin series (i.e. Taylor series expanded at zero)
$$ \frac{1}{1-x} = 1 + x + x^2 + x^3 + \cdots = \sum_{n=0}^{\infty} x^n $$
\begin{itemize}
\item \textcolor{cyan}{@bernie} asked about \alert{convergence} (which is $ |x| < 1 $)
\item But, what does it mean when we write $ \sum_{n=0}^{\infty} x^n = \textit{value} $ ?
\begin{itemize}
\item Is ``$=$'' a mathematical equivalence symbol or an assignment operator?
\end{itemize}
\end{itemize}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Review of Summation of Real Numbers}
\begin{block}{Properties of Sum of Real Numbers}
\begin{itemize}
\item \textbf{Commutativity}: For $ \forall x, y \in \mathbb{R} $, $x + y = y + x $
\item \textbf{Associativity}: For $ \forall x, y, z \in \mathbb{R} $, $ (x + y) + z = x + (y + z) $
\end{itemize}
\end{block}
\begin{block}{Recursive Definition of $ \sum $ Symbol}
For $ \forall \{a_i\}_{i \in \mathbb{N}} \subset \mathbb{R} $ and $ \forall n \in \mathbb{N} $, define the summation symbol,
$$ \sum_{i=1}^{n} a_i = a_n + \sum_{i=1}^{n-1} a_i
\quad (\text{for } n > 1) \quad \text{ and } \quad
\sum_{i=1}^{1} a_1 = a_1 $$
(Note: $ \infty $ is not a real number nor a natural number.)
\end{block}
\end{frame}
%------------------------------------------------
\subsection{Numberphile Video}
\begin{frame}
\frametitle{Sum of All Natural Numbers}
\href{https://www.youtube.com/watch?v=w-I6XTVZXww}{Numberphile Video: ASTOUNDING! $ 1 + 2 + 3 + \cdots = -\frac{1}{12} $} \\
\ \\
Consider the following sums
\begin{align*}
S &= 1 + 2 + 3 + 4 + 5 + 6 + \cdots \\
S_1 &= 1 - 1 + 1 - 1 + 1 - 1 + \cdots \\
S_2 &= 1 - 2 + 3 - 4 + 5 - 6 + \cdots
\end{align*}
We want to find $S$ and we establish $S_1$ and $S_2$ as intermediate steps.
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Sum of All Natural Numbers (Cont.)}
First, we find the values of ``helper'' sums $S_1$ and $S_2$.
\begin{block}{Claim 1 and 2: $ S_1 = \frac{1}{2} $ and $ 2 S_2 = S_1 \implies S_2 = \frac{1}{4} $}
Because the partial sums are $1, 0, 1, 0, \cdots$, so $\frac{1}{2}$ is sort of the average, hence $ S_1 = \frac{1}{2} $. Furthermore,
\begin{alignat*}{3}
2 S_2 &= S_2 && + && S_2 \\
&= 1 && - && 2 + 3 - 4 + 5 - 6 + \cdots \\
& && && 1 - 2 + 3 - 4 + 5 - \cdots \\
&= 1 && - && 1 + 1 - 1 + 1 - 1 + \cdots \\
&= S_1 && &&
\end{alignat*}
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Sum of All Natural Numbers (Cont.)}
Next, we can fill the bridge to $S$.
\begin{block}{Claim 3: $ S - S_2 = 4 S \implies S = -\frac{1}{12} $}
\begin{align*}
S - S_2 = \quad &1 + 2 + 3 + 4 + 5 + 6 + \cdots \\
-( &1 - 2 + 3 - 4 + 5 - 6 + \cdots) \\
= \quad &0 + 4 + 0 + 8 + 0 + 12 + \cdots \\
= \quad &4(1 + 2 + 3 + \cdots) \\
= \quad &4S
\end{align*}
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Sum of All Natural Numbers (Cont.)}
But, the result doesn't follow along with the intuition
$$ \{a_i\} \subset \mathbb{R}_{+} \implies \forall N \in \mathbb{N}, \sum_{i=1}^{N} a_i > 0 $$
What may go wrong?
\begin{itemize}
\item What is the definition of sum?
$$ \Sigma_{i=1}^{\infty} a_i = a_1 + a_2 + a_3 + \cdots \text{ is just a short hand notation} $$
\item Can we shift terms in a series under summation?
\item Can we ignore all the zeros, despite the infinite amount?
\item Is summation a linear operator?
\end{itemize}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Sum of All Natural Numbers (Cont.)}
We will now form a contradiction. Consider
\begin{align*}
S_3 &= 1 + 1 + 1 + 1 + 1 + \cdots \\
S_4 &= 1 + 3 + 5 + 7 + 9 + \cdots
\end{align*}
\begin{block}{Claim 4, 5, and 6: $ S_3 = \frac{1}{2} $, $ S_4 = -\frac{2}{3} $, and $S = \frac{2}{3} $}
\begin{align*}
S_3 + S_1 = 2 S_3 &\implies S_3 = \frac{1}{2} \\
S_4 = 2S - S_3 &\implies S_4 = -\frac{2}{3} \\
S = S_4 + 2S &\implies S = \frac{2}{3}
\end{align*}
\end{block}
\end{frame}
%------------------------------------------------
\subsection{Summary}
\begin{frame}
\frametitle{G.H.Hardy, Divergent Series (1949)}
\begin{quote}
It is natural to suppose that the ... formulae will prove to be correct, and our transformations justifiable, if they are interpreted appropriately... This remark is trivial now: it does not occur to a modern mathematician that a collection of mathematical symbols should have a ``\emph{meaning}'' until one has been assigned to it \emph{by definition}. It was not a triviality even to the greatest mathematicians of the 18th century... mathematicians before Cauchy asked not ``\alert{How shall we define} $ 1 - 1 + 1 - \cdots $?'' but ``\alert{What is} $ 1 - 1 + 1 - \cdots $?'', and that this habit of mind led them into unnecessary perplexities and controversies which were often really verbal.
\end{quote}
\end{frame}
%------------------------------------------------
\section{Summability}
%------------------------------------------------
\begin{frame}
\frametitle{Summability}
\begin{figure}
\includegraphics[width=0.9\linewidth]{dim_sum.png}
\end{figure}
\end{frame}
%------------------------------------------------
\subsection{Cauchy Summation}
\begin{frame}
\frametitle{Cauchy's Definition for Sum of an Infinite Series}
\begin{block}{Limit of a Sequence}
We define $ \{s_i\}_i \rightarrow L \in \mathbb{R} $ if \\
$$ \forall \epsilon > 0, \exists N \in \mathbb{N}, \text{s.t. } \forall i > N, |s_i - L| < \epsilon $$
\end{block}
\begin{block}{Convergent Series, i.e. Cauchy Summable}
We define the sum of $ \{a_i\}_i $ to be $S$ if \\
$$ \text{the sequence of partial sum } \bigg\{s_n = \sum_{i=1}^{n} a_i \bigg\}_n \rightarrow S $$
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Cauchy's Definition for Sum of an Infinite Series (Cont.)}
\begin{block}{Remarks}
The $\sum$ operator should be thought of as a partial function mapping a real sequence to a real number, if defined. (``partial'' means the function can be undefined for some elements in its domain.)
\end{block}
\begin{block}{Notation}
We will write ``sequence $ \{a_i\} $ is Cauchy summable to S'' as
\begin{itemize}
\item $ \sum (\{a_i\}) = S $, or equivalently
\item $ a_1 + a_2 + a_3 + \cdots \longrightarrow S $
\end{itemize}
In this case, we also say the sequence is \alert{convergent}.
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Examples}
\begin{itemize}
\item Geometric Series: $ 1 + x + x^2 + x^3 + \cdots \longrightarrow \frac{1}{1-x} $ for $ \forall x \in (-1,1) $
\begin{itemize}
\item partial sum of first $n$ terms:
$$ 1 + x + x^2 + x^3 + \cdots + x^{n-1} = \frac{1 - x^n}{1-x} $$
\item as $ n \rightarrow \infty $, the partial sum converges if $ |x| < 1 $
\end{itemize}
\item Grandi's Series: $ 1 - 1 + 1 - 1 + \cdots \longrightarrow \text{undefined} $
\item Harmonic Series: $ 1 + \frac{1}{2} + \frac{1}{3} + \frac{1}{4} + \cdots \longrightarrow \text{undefined} $
\item $ 1 + 2 + 3 + 4 + \cdots \longrightarrow \text{undefined} $
\end{itemize}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Conditional Convergence}
\begin{block}{Absolutely Convergent}
We say $ \{a_i\}_i $ is \emph{absolutely convergent} if $ \{ |a_i| \}_i $ is convergent
\end{block}
\begin{block}{Conditionally Convergent}
We say $ \{a_i\}_i $ is \emph{conditionally convergent} if
\begin{itemize}
\item $ \{ |a_i| \}_i $ is divergent, but
\item $ \{a_i\}_i $ is convergent
\end{itemize}
\end{block}
\begin{block}{Remark}
If $ \{a_i\}_i $ is conditionally convergent, by arranging the order of terms, one can arrive at any sum.
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Order Matters}
$ 1 - \frac{1}{2} + \frac{1}{3} - \frac{1}{4} + \frac{1}{5} - \frac{1}{6} + \frac{1}{7} - \frac{1}{8} + \frac{1}{9} - \frac{1}{10} + \frac{1}{11} - \cdots \longrightarrow \ln (2) $, but
\begin{itemize}
\item $ 1 - \frac{1}{2} - \frac{1}{4} - \frac{1}{6} - \frac{1}{8} + \frac{1}{3} - \frac{1}{10} - \frac{1}{12} - \frac{1}{14} - \frac{1}{16} + \frac{1}{5} - \cdots \longrightarrow 0 $
\item $ 1 + \frac{1}{3} - \frac{1}{2} + \frac{1}{5} + \frac{1}{7} - \frac{1}{4} + \frac{1}{9} + \frac{1}{11} - \frac{1}{6} + \frac{1}{13} + \frac{1}{15} - \cdots \longrightarrow \frac{3}{2} \ln (2) $
\item $ 1 + \frac{1}{3} - \frac{1}{2} - \frac{1}{4} + \frac{1}{5} + \frac{1}{7} - \frac{1}{6} - \frac{1}{8} + \frac{1}{9} + \frac{1}{11} - \frac{1}{10} - \cdots \longrightarrow \ln (2) $
\item $ 1 + \frac{1}{3} + \frac{1}{5} - \frac{1}{2} + \frac{1}{7} + \frac{1}{9} + \frac{1}{11} - \frac{1}{4} + \frac{1}{13} + \frac{1}{15} + \frac{1}{17} - \cdots \longrightarrow \frac{1}{2} \ln (12) $
\end{itemize}
\ \\
Proof is left as an exercise to the readers. Hint:
$$ \frac{1}{1 + n} = \int_0^1 x^n dx \qquad \underbrace{ \sum_{n=0}^{\infty} \int_0^1 = \int_0^1 \sum_{n=0}^{\infty} }_{\text{under some technical conditions}} \qquad \underbrace{ \sum_{n=0}^{\infty} x^n = \frac{1}{1-x} }_{\text{for } |x| < 1 } $$
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Solution to the 2nd Example}
$ 1 + \frac{1}{3} - \frac{1}{2} + \frac{1}{5} + \frac{1}{7} - \frac{1}{4} + \frac{1}{9} + \frac{1}{11} - \frac{1}{6} + \frac{1}{13} + \frac{1}{15} - \frac{1}{8} + \cdots \longrightarrow \quad ? $ \\
\ \\
First, we recognize that the above sum can be written as
$$ \sum_{k=0}^{\infty} \big( \frac{1}{4k+1} + \frac{1}{4k+3} - \frac{2}{4k+4} \big) $$
Then, we apply hint $ \frac{1}{1 + n} = \int_0^1 x^n dx $, e.g., $ 4k + 3 = 1 + (4k + 2)$
$$ \sum_{k=0}^{\infty} \big( \int_0^1 x^{4k} dx + \int_0^1 x^{4k+2} dx - \int_0^1 2x^{4k+3} dx \big) $$
Notice the linearity of integration
$$ \sum_{k=0}^{\infty} \int_0^1 (x^{4k} + x^{4k+2} - 2x^{4k+3}) dx $$
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Solution to the 2nd Example (Cont.)}
We can exchange the order of summation and integration (with some caution)
$$ \int_0^1 \big( \sum_{k=0}^{\infty} x^{4k} + x^{4k+2} - 2x^{4k+3} \big) dx $$
Since the integration restricts $ x \in (0,1) $, then the inner summation converges for each component, which is (close to) a geometric series
$$ \int_0^1 \bigg( \big( \sum_{k=0}^{\infty} x^{4k} \big) + \big( \sum_{k=0}^{\infty} x^{4k+2} \big) - \big( \sum_{k=0}^{\infty} 2x^{4k+3} \big) \bigg) dx $$
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Solution to the 2nd Example (Cont.)}
Notice that
$$
\sum_{k=0}^{\infty} x^{4k+2} = x^2 \cdot \sum_{k=0}^{\infty} {(x^4)}^k \qquad
\sum_{k=0}^{\infty} 2x^{4k+3} = 2x^3 \cdot \sum_{k=0}^{\infty} {(x^4)}^k
$$
Thus, we can apply the formula for geometric series with $ |x| < 1 $
$$ \int_0^1 \big( \frac{1}{1-x^{4}} + \frac{x^2}{1-x^{4}} - \frac{2x^3}{1-x^{4}} \big) dx = \frac{3}{2} \ln(2) $$
The above integral can be evaluated through partial fraction decomposition or by simply invoking Mathematica.
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{General Patterns}
Note that terms with odd denominators should be negative, and the opposite applies to even denominators. Also notice that the multipliers for $k$ must be the same within parentheses.
\begin{align*}
& \sum_{k=0}^{\infty} \big( \frac{4}{8k+4} - \frac{1}{8k+2} - \frac{1}{8k+4} - \frac{1}{8k+6} - \frac{1}{8k+8} \big) \\
& \sum_{k=0}^{\infty} \big( \frac{1}{4k+1} + \frac{1}{4k+3} - \frac{2}{4k+4} \big) \\
& \sum_{k=0}^{\infty} \big( \frac{1}{4k+1} + \frac{1}{4k+3} - \frac{1}{4k+2} - \frac{1}{4k+4} \big) \\
& \sum_{k=0}^{\infty} \big( \frac{1}{6k+1} + \frac{1}{6k+3} + \frac{1}{6k+5} - \frac{3}{6k+6} \big)
\end{align*}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Caveat: Fubini's Theorem is Not Trivial}
\begin{columns}[c] % The "c" option specifies centered vertical alignment while the "t" option is used for top vertical alignment
\begin{column}{0.4\textwidth} % Left column and width
Consider
$$ a_{ij} = \begin{cases}
+1 &, \text{ if } i = j + 1 \\
-1 &, \text{ if } i = j - 1 \\
0 &, \text{ otherwise}
\end{cases} $$
Then we can get
\begin{align*}
\sum_{i} \sum_{j} a_{ij} &= -1 \\
\sum_{j} \sum_{i} a_{ij} &= +1
\end{align*}
\end{column}
\begin{column}{0.6\textwidth} % Right column and width
Easier to digest in matrix form
$$ (a_{ij}) = \begin{bmatrix}
0 & -1 & 0 & 0 & 0 & \cdots \\
1 & 0 & -1 & 0 & 0 & \cdots \\
0 & 1 & 0 & -1 & 0 & \cdots \\
0 & 0 & 1 & 0 & -1 & \cdots \\
0 & 0 & 0 & 1 & 0 & \cdots \\
\cdots & \cdots & \cdots & \cdots & \cdots & \cdots
\end{bmatrix}$$
where for each row $i$, sum $ \sum_{j} a_{ij} $ is \\
\qquad $ -1, 0, 0, 0, 0, 0, \cdots $ \\
while for each column $j$, sum $ \sum_{i} a_{ij} $ is \\
\qquad $ +1, 0, 0, 0, 0, 0, \cdots $
\end{column}
\end{columns}
\end{frame}
%------------------------------------------------
\subsection{Cesaro Summation}
\begin{frame}
\frametitle{From Partial Sum to Partial Mean}
\begin{block}{Cesaro Summable}
We define $ a_1 + a_2 + a_3 + \cdots \stackrel{\mathfrak{C}}{\longrightarrow} S $, if \\
\qquad the sequence of \alert{Cesaro mean}
$$ \bigg\{ \sigma_n = \frac{\sum_{i=1}^n s_i}{n} \bigg\}_n \rightarrow S $$
\end{block}
Now, we have a precise definition for
$$ 1 - 1 + 1 - 1 + 1 - 1 + \cdots \stackrel{\mathfrak{C}}{\longrightarrow} \frac{1}{2} $$
This summation was implicitly used by Frobenius in 1880, prior to Cesaro (1890). Cesaro's key contribution was not the discovery of this method, but his idea that \emph{one should give an explicit definition of the sum of a divergent series}.
\end{frame}
%------------------------------------------------
\subsection{Abel Summation}
\begin{frame}
\frametitle{From Partial Series to the Entire Series at Once}
\begin{block}{Abel Summable}
We define $ a_0 + a_1 + a_2 + a_3 + \cdots \stackrel{\mathfrak{A}}{\longrightarrow} S $, if \\
\qquad for $ \forall r \in [0,1) $, \alert{Abel mean}
$$ A(r) = \sum_{k=0}^{\infty} a_k r^k $$
\qquad exists in Cauchy sense, and it has finite limit at $ r = 1 $
$$ \lim_{r \rightarrow 1} A(r) = S $$
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Example}
$$ 1 - 2 + 3 - 4 + 5 - 6 + 7 - 8 + \cdots \stackrel{\mathfrak{A}}{\longrightarrow} \frac{1}{4} $$
\begin{itemize}
\item General term: $ (-1)^k (k+1) $ for $ k = 0, 1, 2, 3, \cdots $
\item Abel mean: for $ \forall r \in [0,1) $,
$$ A(r) = \sum_{k=0}^{\infty} (-1)^k (k+1)r^k = \frac{1}{(1+r)^2} $$
\item Its limit exists due to continuity,
$$ \lim_{r \rightarrow 1} A(r) = \lim_{r \rightarrow 1} \frac{1}{(1+r)^2} = \frac{1}{4} $$
\end{itemize}
\end{frame}
%------------------------------------------------
\subsection{Summation Methods}
\begin{frame}
\frametitle{Summability: Cauchy $\implies$ Cesaro $\implies$ Abel}
Recap on definitions: we say $ \{a_i\}_i $ is (...) summable to $S$ if
\begin{columns}[c] % The "c" option specifies centered vertical alignment while the "t" option is used for top vertical alignment
\begin{column}{0.5\textwidth} % Left column and width
\begin{itemize}
\item Cauchy:
$$ \lim_{n \rightarrow \infty} \underbrace{ \sum_{i=0}^{n} a_i }_{s_n} = S $$
\end{itemize}
\end{column}
\begin{column}{0.5\textwidth} % Right column and width
\begin{itemize}
\item Cesaro:
$$ \lim_{n \rightarrow \infty} \frac{1}{n+1} \sum_{k=0}^{n} \underbrace{ \big( \sum_{i=0}^{k} a_i \big) }_{s_k} = S $$
\end{itemize}
\end{column}
\end{columns}
\begin{columns}[c] % The "c" option specifies centered vertical alignment while the "t" option is used for top vertical alignment
\begin{column}{0.5\textwidth} % Left column and width
\begin{itemize}
\item Abel:
$$ \lim_{r \rightarrow 1} \underbrace{ \sum_{k=0}^{\infty} a_k r^k }_{A(r)} = S $$
\end{itemize}
\end{column}
\begin{column}{0.5\textwidth} % Right column and width
See visual demonstration.
\end{column}
\end{columns}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Desired Properties of Summation Methods}
Generally speaking, it is incorrect to manipulate infinite series as if they were finite sums. But there are some properties we still hope to maintain.
\begin{itemize}
\item \textbf{Regularity}: agree with Cauchy sum, if exists
\item \textbf{Linearity}: $ \Sigma( \{ k a_i + b_i \}_i ) = k \Sigma( \{ a_i \}_i ) + \Sigma( \{ b_i \}_i )$
\item \textbf{Stability} (a.k.a. Translativity): $ \Sigma( \{ a_i \}_{i=0}^{\infty} ) = a_0 + \Sigma( \{ a_i \}_{i=1}^{\infty} ) $
\begin{itemize}
\item \textbf{Finite Re-indexability}: $ \Sigma( \{a_i\}_i ) = \Sigma( \{a_{\pi(i)}\}_i )$ where $\pi$ is any permutation on a finite subset of indices
\item Stability $\implies$ Finite re-indexability
\end{itemize}
\end{itemize}
Note that not all conditions are equally important in summability theory and it is quite restrictive to require all properties.
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Examples: Linear and Stable Summation Method}
\begin{block}{Geometric Series: $ \Sigma(c, r) = \frac{c}{1-r}, \text{ for } \forall c \in \mathbb{R} \text{ and } r \neq 1 $}
Let $\Sigma(c, r)$ denote the sum of geometric series $ \{cr^k\}_{k=0}^{\infty} $ under any linear and stable summation method. Then
\begin{align*}
\Sigma(c, r)
= \sum_{k=0}^{\infty} c \cdot r^k
&= c + \sum_{k=0}^{\infty} c \cdot r^{k+1} && \text{(stability)} \\
&= c + r \cdot \sum_{k=0}^{\infty} c \cdot r^k && \text{(linearity)} \\
&= c + r \Sigma(c, r)
\end{align*}
\end{block}
Therefore, $ 1 + 2 + 4 + 8 + 16 + \cdots = \frac{1}{1 - 2} = -1$
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Examples: Linear and Stable Summation Method (Cont.)}
\begin{block}{$ 1 + 2 + 3 + \cdots $ is NOT Summable Under Any Linear Stable Method}
Suppose there exists linear and stable $\Sigma$ s.t. $ \Sigma( \{n\}_{n=1}^{\infty} ) = S \in \mathbb{R} $,
\begin{align*}
S &= 1 + 2 + 3 + 4 + 5 + \cdots \\
S &= 0 + 1 + 2 + 3 + 4 + \cdots && \text{(stability)} \\
&\implies 1 + 1 + 1 + \cdots = 0 && \text{(linearity)}
\end{align*}
Apply the same trick (shifting and subtraction) on $ 1 + 1 + 1 + \cdots $, one can arrive at
$$ 0 + 0 + 0 + 0 + \cdots = -1 $$
which is not compatible with linearity.
\end{block}
\end{frame}
%------------------------------------------------
\subsection{Ramanujan Summation}
\begin{frame}
\frametitle{$ 1 + 2 + 3 + 4 + \cdots $ is Ramanujan Summable}
\begin{block}{Ramanujan Summation}
For function $f$ with no divergence at zero
$$ C(a) = \int_0^a f(t) dt - \frac{1}{2} f(0) - \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!} f^{(2k-1)} (0) $$
where $B_{2k}$ is the $2k$-th Bernoulli number and $C(0)$ is used as the sum of the divergent sequence. Consider $ f(x) = x $ and $B_2 = \frac{1}{6}$,
$$ f(1) + f(2) + f(3) + \cdots = -\frac{1}{2} f(0) - \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!} f^{(2k-1)} (0) = - \frac{1}{12} $$
\end{block}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Why Bother?}
\begin{align*}
1 + 2 + 3 + 4 + 5 + 6 + \cdots &\stackrel{}{\longrightarrow} \text{undefined} \\
1 + 2 + 3 + 4 + 5 + 6 + \cdots &\stackrel{\mathfrak{R}}{\longrightarrow} -\frac{1}{12}
\end{align*}
\begin{columns}[c] % The "c" option specifies centered vertical alignment while the "t" option is used for top vertical alignment
\begin{column}{0.7\textwidth} % Left column and width
\begin{itemize}
\item Mathematically, it is interesting
\begin{itemize}
\item Any consistently defined mathematical object deserves study
\item Classification of ``convergence''
\end{itemize}
\item There are many use cases in physics
\begin{itemize}
\item String theory
\item Quantum Mechanics: Casimir Effect
\begin{itemize}
\item Attractive force between uncharged parallel metallic plates in a vacuum
\end{itemize}
\end{itemize}
\end{itemize}
\end{column}
\begin{column}{0.3\textwidth} % Right column and width
\begin{figure}
\includegraphics[width=\linewidth]{casimir.png}
\end{figure}
\end{column}
\end{columns}
\end{frame}
%------------------------------------------------
\section{Uniqueness}
%------------------------------------------------
\begin{frame}
\frametitle{Uniqueness}
\begin{figure}
\includegraphics[width=0.55\linewidth]{unique.jpeg}
\end{figure}
\end{frame}
%------------------------------------------------
\subsection{Analytic Continuation}
\begin{frame}
\frametitle{Going Above and Beyond}
\begin{block}{Analytic Function}
An analytic function is an infinitely differentiable function such that its Taylor series converges pointwise in the domain.
\end{block}
\begin{block}{Analytic Continuation}
Analytic continuation is a technique to extend the domain of a given analytic function while remaining analytic. If an analytic continuation exists, it is guaranteed to be unique. Therefore, surprisingly, knowing the value of a complex function in some finite complex domain uniquely determines the value of the function at every other point.
\end{block}
\end{frame}
%------------------------------------------------
\subsection{Riemann Zeta Function}
\begin{frame}
\frametitle{Analytic Continuation of Euler Zeta Function}
Amazing video from 3Blue1Brown: \href{https://www.youtube.com/watch?v=sD0NjbwqlYw}{Visualizing the Riemann hypothesis and analytic continuation}. (9 min 40 s -- 17 min 20 s) \\
\begin{itemize}
\item Euler established that
$$ E(s) = \sum_{n=1}^{\infty} \frac{1}{n^s} = \sum_{n=1}^{\infty} n^{-s} $$
converges in Cauchy sense for $ \forall s > 1 $ \\
(Hint: easy check using integral test.)
\item Riemann treated it as a complex function and found its analytic continuation $\zeta$ which satisfies the functional equation
$$ \zeta(s) = 2^s \pi^{s-1} \sin \big( \frac{\pi s}{2} \big) \Gamma (1-s) \zeta(1-s) $$
\end{itemize}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Sum of All Natural Numbers}
To find the sum of all natural numbers, it remains to find $\zeta(-1)$,
$$ \zeta(-1) = 2^{-1} \pi^{-2} \sin \big( -\frac{\pi}{2} \big) \Gamma (2) \zeta(2) = - \frac{1}{2 \pi^2} \zeta(2) $$
where
\begin{itemize}
\item $\Gamma(n) = (n-1)!$ for $ \forall n \in \mathbb{N} $, hence $ \Gamma(2) = 1 $
\end{itemize}
We will now compute $\zeta(2)$, i.e. $E(2)$, which is convergent in Cauchy sense
$$ \zeta(2) = \sum_{n=1}^{\infty} \frac{1}{n^2} = 1 + \frac{1}{4} + \frac{1}{9} + \frac{1}{16} + \cdots $$
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Basel Problem: $ \zeta(2) = E(2) = \frac{\pi^2}{6} $}
Consider $ p(x) = \frac{\sin(x)}{x} $,
\begin{itemize}
\item Power Series Expansion
$$ p(x) = 1 - \frac{x^2}{3!} + \frac{x^4}{5!} - \frac{x^6}{7!} + \cdots = \sum_{n=0}^{\infty} (-1)^n \frac{x^{2n}}{(2n+1)!} $$
\item Factorization (Formally, Weierstrass Factorization Theorem)
\begin{align*}
p(x) &= \big(1 - \frac{x}{\pi}\big)\big(1 + \frac{x}{\pi}\big)\big(1 - \frac{x}{2\pi}\big)\big(1 + \frac{x}{2\pi}\big)\big(1 - \frac{x}{3\pi}\big)\big(1 + \frac{x}{3\pi}\big)\cdots \\
&= \prod_{n=1}^{\infty} \big(1 - \frac{x^2}{n^2 \pi^2}\big)
= 1 - \big( \frac{1}{\pi^2} + \frac{1}{4\pi^2} + \frac{1}{9\pi^2} + \cdots \big) x^2 + \cdots
\end{align*}
\end{itemize}
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Basel Problem: $ \zeta(2) = E(2) = \frac{\pi^2}{6} $ (Cont.)}
The two expressions of $p(x)$ must match, i.e., the coefficients of each $x^k$ power must agree. In particular, let's look at the coefficients of the $x^2$ term. (This method works for any even order; Euler obtained up to 26.)
\begin{align*}
-\frac{1}{3!} &= - \big( \frac{1}{\pi^2} + \frac{1}{4\pi^2} + \frac{1}{9\pi^2} + \cdots \big) \\
-\frac{1}{6} &= - \frac{1}{\pi^2} \sum_{n=1}^{\infty} \frac{1}{n^2} \\
\implies \sum_{n=1}^{\infty} \frac{1}{n^2} &= \frac{\pi^2}{6}
\end{align*}
Consequently, we have \alert{ $ \zeta(-1) = -\frac{1}{12} $ }
\end{frame}
%------------------------------------------------
\begin{frame}
\frametitle{Bonus: Riemann Hypothesis}
Euler showed that (now known as Euler Product Formula)
$$ \zeta(s)
= \prod_{p \text{ prime}} \frac{1}{1 - p^{-s}}
= \frac{1}{1 - 2^{-s}} \cdot \frac{1}{1 - 3^{-s}} \cdot \frac{1}{1 - 5^{-s}} \cdot \cdots $$
In regards to zeros, Riemann hypothesized, for $ \forall s \in \mathbb{C} $ s.t. $ \zeta(s) = 0 $
\begin{itemize}
\item Either, $ s = -2n $ for some $ n \in \mathbb{N} $, known as \emph{trivial zeros}
\item Or, $ \text{Re}(s) = \frac{1}{2} $, forming a \emph{critical line}
\end{itemize}
\end{frame}
%------------------------------------------------
\section*{}
\begin{frame}
\Huge{\centerline{Questions?}}
\end{frame}
%----------------------------------------------------------------------------------------
\end{document}
|
{"hexsha": "c517ee1a3ab673a818fcaeac2fbf240f4865e2c1", "size": 29637, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sum-divergent-series/presentation.tex", "max_stars_repo_name": "Haoen-Cui/talks", "max_stars_repo_head_hexsha": "133d85d70cd467e5a0df19cc5bec2454e9388898", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sum-divergent-series/presentation.tex", "max_issues_repo_name": "Haoen-Cui/talks", "max_issues_repo_head_hexsha": "133d85d70cd467e5a0df19cc5bec2454e9388898", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sum-divergent-series/presentation.tex", "max_forks_repo_name": "Haoen-Cui/talks", "max_forks_repo_head_hexsha": "133d85d70cd467e5a0df19cc5bec2454e9388898", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4509569378, "max_line_length": 702, "alphanum_fraction": 0.5715153356, "num_tokens": 10373}
|
# date_heatmap and date_heatmap_demo are from an answer on stackoverflow
# here: https://stackoverflow.com/questions/32485907/matplotlib-and-numpy-create-a-calendar-heatmap/51977000#51977000
# by user cbarrick
# we updated it slightly to work with the most current pandas version and changed some of the parameters around to work
# better with discord
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import discord
DAYS = ['Sun.', 'Mon.', 'Tues.', 'Wed.', 'Thurs.', 'Fri.', 'Sat.']
MONTHS = ['Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'June', 'July', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.']
def date_heatmap(series, start=None, end=None, mean=False, ax=None, **kwargs):
'''Plot a calendar heatmap given a datetime series.
Arguments:
series (pd.Series):
A series of numeric values with a datetime index. Values occurring
on the same day are combined by sum.
start (Any):
The first day to be considered in the plot. The value can be
anything accepted by :func:`pandas.to_datetime`. The default is the
earliest date in the data.
end (Any):
The last day to be considered in the plot. The value can be
anything accepted by :func:`pandas.to_datetime`. The default is the
latest date in the data.
mean (bool):
Combine values occurring on the same day by mean instead of sum.
ax (matplotlib.Axes or None):
The axes on which to draw the heatmap. The default is the current
axes in the :module:`~matplotlib.pyplot` API.
**kwargs:
Forwarded to :meth:`~matplotlib.Axes.pcolormesh` for drawing the
heatmap.
Returns:
matplotlib.collections.Axes:
The axes on which the heatmap was drawn. This is set as the current
axes in the `~matplotlib.pyplot` API.
'''
# Combine values occurring on the same day.
dates = series.index.floor('D')
group = series.groupby(dates)
series = group.mean() if mean else group.sum()
# Parse start/end, defaulting to the min/max of the index.
start = pd.to_datetime(start or series.index.min())
end = pd.to_datetime(end or series.index.max())
# We use [start, end) as a half-open interval below.
end += np.timedelta64(1, 'D')
# Get the previous/following Sunday to start/end.
# Pandas and numpy day-of-week conventions are Monday=0 and Sunday=6.
start_sun = start - np.timedelta64((start.dayofweek + 1) % 7, 'D')
end_sun = end + np.timedelta64(7 - end.dayofweek - 1, 'D')
# Create the heatmap and track ticks.
num_weeks = (end_sun - start_sun).days // 7
heatmap = np.zeros((7, num_weeks))
ticks = {} # week number -> month name
for week in range(num_weeks):
for day in range(7):
date = start_sun + np.timedelta64(7 * week + day, 'D')
if date.day == 1:
ticks[week] = MONTHS[date.month - 1]
if date.dayofyear == 1:
ticks[week] += f'\n{date.year}'
if start <= date < end:
heatmap[day, week] = series.get(date, 0)
# Get the coordinates, offset by 0.5 to align the ticks.
y = np.arange(8) - .5
x = np.arange(num_weeks + 1) - 0.5
# Plot the heatmap. Prefer pcolormesh over imshow so that the figure can be
# vectorized when saved to a compatible format. We must invert the axis for
# pcolormesh, but not for imshow, so that it reads top-bottom, left-right.
ax = ax or plt.gca()
mesh = ax.pcolormesh(x, y, heatmap, **kwargs)
ax.invert_yaxis()
# Set the ticks.
ax.set_xticks(list(ticks.keys()))
ax.set_xticklabels(list(ticks.values()))
ax.set_yticks(np.arange(8) - .5)
ax.set_yticklabels(DAYS)
# Set the current image and axes in the pyplot API.
plt.sca(ax)
plt.sci(mesh)
return ax
def create_date_heatmap(data, date_range, colors, intensity):
'''An example for `date_heatmap`.
Most of the sizes here are chosen arbitrarily to look nice with 1yr of
data. You may need to fiddle with the numbers to look right on other data.
'''
# Get some data, a series of values with datetime index.
data.index = date_range
# Create the figure. For the aspect ratio, one year is 7 days by 53 weeks.
# We widen it further to account for the tick labels and color bar.
figsize = plt.figaspect(7 / 65)
fig = plt.figure(figsize=figsize)
# Plot the heatmap with a color bar.
ax = date_heatmap(data, edgecolor='black')
plt.colorbar(ticks=range(intensity), pad=0.02)
# Use a discrete color map with 5 colors (the data ranges from 0 to 4).
# Extending the color limits by 0.5 aligns the ticks in the color bar.
cmap = mpl.cm.get_cmap(colors, intensity)
plt.set_cmap(cmap)
plt.clim(0, intensity)
# Force the cells to be square. If this is set, the size of the color bar
# may look weird compared to the size of the heatmap. That can be corrected
# by the aspect ratio of the figure or scale of the color bar.
ax.set_aspect('equal')
# Save to a file. For embedding in a LaTeX doc, consider the PGF backend.
# http://sbillaudelle.de/2015/02/23/seamlessly-embedding-matplotlib-output-into-latex.html
fig.savefig('heatmap.png', bbox_inches='tight')
# The figure must be explicitly closed if it was not shown.
plt.close(fig)
def server_joined_data(date_range, players):
date_list = []
for day in date_range:
date_list.append(str(day).split()[0])
date_count = [0] * len(date_list)
# for each person in the server we get the date they joined and split it so it is just the day stamp
for guy in players:
temp_date = guy.joined_at
time = str(temp_date).split()[0]
# if the day stamp is in the the list of total days then increase that dates count by one
if time in date_list:
# date_list.index returns the index the time is found in so it increases the parallel arrays count at the
# correct spot
date_count[date_list.index(time)] += 1
return pd.Series(date_count, index=date_list)
def tweeted_date_data(date_range, tw):
date_list = []
for day in date_range:
date_list.append(str(day).split()[0])
date_count = [0] * len(date_list)
for tweet in tw.tweet_dates():
temp_date = str(tweet).split()[0]
time = str(temp_date).split()[0]
if time in date_list:
date_count[date_list.index(time)] += 1
return pd.Series(date_count, index=date_list)
def server_roles(players):
mpl.style.use('seaborn')
# total members in the server
print("There are " + str(len(players)) + " members in the server.")
# Tallies up date joined by day
date_list = []
date_count = []
for guy in players:
temp_date = guy.joined_at
time = str(temp_date).split()[0]
if time not in date_list:
date_list.append(time)
date_count.append(1)
else:
date_count[date_list.index(time)] += 1
x_pos = np.arange(len(date_list))
plt.yticks(np.arange(min(date_count), max(date_count) + 1, 1.0), fontsize=6)
plt.xlim(0, 96)
plt.xticks(x_pos, date_list, fontsize=3)
plt.xticks(rotation=90)
plt.plot(date_count)
plt.savefig('stats.pdf', bbox_inches='tight')
async def get_user_msg_count(channels):
# Create two parallel arrays to store users and their message counts
people = []
count = []
for channel in channels:
# If its a text channel, channels can be a few different types
if isinstance(channel, discord.channel.TextChannel):
# Creates a list containing every message from a specific channel
msgs = await channel.history(limit=5000).flatten()
# Goes through every message in the channel
for message in msgs:
# If author hasn't been recorded yet then add them on and make their count start at 1
if str(message.author) not in people:
people.append(str(message.author))
count.append(1)
# If they are already in the list we get their index in the list and increase their count by 1
else:
count[people.index(str(message.author))] += 1
# Not sure if this actually does anything but I haven't tested without it yet and it works as is
# Supposed to set the msgs back to nothing before we move on to the next channel
msgs = []
return pd.Series(count, index=people)
|
{"hexsha": "2a9ea095e88cb990526afa5e7627e938cd0b1af8", "size": 8930, "ext": "py", "lang": "Python", "max_stars_repo_path": "stocktonesportsbot/classes/Metrics.py", "max_stars_repo_name": "Dual-Exhaust/Stockton-Esports-Bot", "max_stars_repo_head_hexsha": "2ff02d210236f0436a28d0815a5321c4ee280e11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-28T21:00:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-28T21:00:13.000Z", "max_issues_repo_path": "stocktonesportsbot/classes/Metrics.py", "max_issues_repo_name": "Dual-Exhaust/Stockton-Esports-Bot", "max_issues_repo_head_hexsha": "2ff02d210236f0436a28d0815a5321c4ee280e11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-28T20:02:06.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-28T20:31:23.000Z", "max_forks_repo_path": "stocktonesportsbot/classes/Metrics.py", "max_forks_repo_name": "Dual-Exhaust/Stockton-Esports-Bot", "max_forks_repo_head_hexsha": "2ff02d210236f0436a28d0815a5321c4ee280e11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1666666667, "max_line_length": 120, "alphanum_fraction": 0.6255319149, "include": true, "reason": "import numpy", "num_tokens": 2145}
|
[STATEMENT]
lemma vcg_wp_conseq:
assumes "HT_mods \<pi> mods P c Q"
assumes "P s"
assumes "\<And>s'. \<lbrakk>modifies mods s' s; Q s s'\<rbrakk> \<Longrightarrow> Q' s'"
shows "wp \<pi> c Q' s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wp \<pi> c Q' s
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
HT_mods \<pi> mods P c Q
P s
\<lbrakk>modifies mods ?s' s; Q s ?s'\<rbrakk> \<Longrightarrow> Q' ?s'
goal (1 subgoal):
1. wp \<pi> c Q' s
[PROOF STEP]
unfolding HT_mods_def HT_def
[PROOF STATE]
proof (prove)
using this:
\<forall>s\<^sub>0. P s\<^sub>0 \<longrightarrow> wp \<pi> c (\<lambda>s. modifies mods s s\<^sub>0 \<and> Q s\<^sub>0 s) s\<^sub>0
P s
\<lbrakk>modifies mods ?s' s; Q s ?s'\<rbrakk> \<Longrightarrow> Q' ?s'
goal (1 subgoal):
1. wp \<pi> c Q' s
[PROOF STEP]
by (metis (no_types, lifting) wp_def)
|
{"llama_tokens": 393, "file": "IMP2_automation_IMP2_Program_Analysis", "length": 3}
|
#!/usr/bin/env python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
gis_file = 'Annual_Average_Daily_Traffic__AADT___Beginning_1977.csv'
df = pd.read_csv(gis_file)
print(df.head())
# remove spaces from column names
cols = df.columns
cols = cols.map(lambda x: x.replace(' ', '_') if isinstance(x, (str, unicode)) else x)
df.columns = cols
print(df.columns)
# Delete the columns we don't care about
df = df.drop(['RC_ID', 'GIS_Code'], axis=1)
# Aggregations
# Find total by year
df_grouped_year = df.groupby(df.Year)
print(df_grouped_year)
df_total_grouped_year = df_grouped_year.sum()
print(df_total_grouped_year)
df_total_grouped_year = df_grouped_year.aggregate({'AADT': np.sum})
print(df_total_grouped_year)
print(df_total_grouped_year.columns)
municipalities = ['NEW YORK CITY', 'TROY', 'CROTON-ON-HUDSON']
df_grouped_muni = df.loc[df.Municipality.isin(municipalities)]
df_total_muni_aadt_grouped = df_grouped_muni.groupby(['Year'])
df_total_muni_aadt = df_total_muni_aadt_grouped.agg({'AADT': np.sum})
print(df_total_muni_aadt.columns)
print(df_total_muni_aadt.head())
exclude_cols = ['Region', 'Begin_Milepoint', 'End_Milepoint']
df_total_muni_aadt.ix[:, df_total_muni_aadt.columns.difference(exclude_cols)].plot(kind='bar')
plt.legend(loc='best').get_texts()[0].set_text('Annual Average Daily Traffic for {}'.format(', '.join(map(str,municipalities))))
file_name = 'AADT_{}'.format('_'.join(map(str,municipalities)))
file_name = re.sub('\s+','_',file_name)
plt.savefig(file_name)
plt.show()
|
{"hexsha": "565fab35501b38d695024467dca8c7914da11937", "size": 1548, "ext": "py", "lang": "Python", "max_stars_repo_path": "course-2/session-7/pandas/process_traffic.py", "max_stars_repo_name": "robmarano/nyu-python", "max_stars_repo_head_hexsha": "4406f157e6d6a63e512ed1595f56dcb65c5d8526", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-06-01T12:51:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-02T15:55:00.000Z", "max_issues_repo_path": "course-2/session-7/pandas/process_traffic.py", "max_issues_repo_name": "robmarano/nyu-python", "max_issues_repo_head_hexsha": "4406f157e6d6a63e512ed1595f56dcb65c5d8526", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "course-2/session-7/pandas/process_traffic.py", "max_forks_repo_name": "robmarano/nyu-python", "max_forks_repo_head_hexsha": "4406f157e6d6a63e512ed1595f56dcb65c5d8526", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-06-21T23:18:51.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-21T23:18:51.000Z", "avg_line_length": 28.6666666667, "max_line_length": 128, "alphanum_fraction": 0.7622739018, "include": true, "reason": "import numpy", "num_tokens": 429}
|
(*
Copyright © 2006 Russell O’Connor
Permission is hereby granted, free of charge, to any person obtaining a copy of
this proof and associated documentation files (the "Proof"), to deal in
the Proof without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Proof, and to permit persons to whom the Proof is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Proof.
THE PROOF IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE PROOF OR THE USE OR OTHER DEALINGS IN THE PROOF.
*)
(* This file rexports all the transcendental functions defined on CR *)
Require Export CoRN.reals.fast.CRArith.
Require Export CoRN.reals.fast.Compress.
Require Export CoRN.reals.fast.CRpower.
Require Export CoRN.reals.fast.CRroot.
Require Export CoRN.reals.fast.CRexp.
Require Export CoRN.reals.fast.CRln.
Require Export CoRN.reals.fast.CRsin.
Require Export CoRN.reals.fast.CRcos.
Require Export CoRN.reals.fast.CRpi.
Require Export CoRN.reals.fast.CRarctan.
Require Export CoRN.reals.fast.CRabs.
|
{"author": "coq-community", "repo": "corn", "sha": "cfbf6b297643935f0fe7e22d2b14b462bf7e3095", "save_path": "github-repos/coq/coq-community-corn", "path": "github-repos/coq/coq-community-corn/corn-cfbf6b297643935f0fe7e22d2b14b462bf7e3095/reals/fast/CRtrans.v"}
|
'''@file trainer.py
neural network trainer environment'''
from abc import ABCMeta, abstractmethod
import tensorflow as tf
import numpy as np
from classifiers import seq_convertors
class Trainer(object):
'''General class for the training environment for a neural net graph'''
__metaclass__ = ABCMeta
def __init__(self, classifier, input_dim, max_input_length,
max_target_length, init_learning_rate, l1_penalty,
l2_penalty, momentum, minibatch_size, clip_grad):
'''
NnetTrainer constructor, creates the training graph
Args:
classifier: the neural net classifier that will be trained
input_dim: the input dimension to the nnnetgraph
max_input_length: the maximal length of the input sequences
max_target_length: the maximal length of the target sequences
init_learning_rate: the initial learning rate
l1_penalty: the penalty param for l1 regularization
l2_penalty: the penalty param for l2 regularization
momentum:
minibatch_size: determines how many frames are
processed at a time to limit memory usage
'''
self.minibatch_size = minibatch_size
self.l2_penalty = l2_penalty
self.l1_penalty = l1_penalty
self.momentum = momentum
#create the graph
self.graph = tf.Graph()
#define the placeholders in the graph
with self.graph.as_default():
#create the inputs placeholder
self.inputs = tf.placeholder(
tf.float32, shape=[minibatch_size, input_dim],
name='inputs')
inputs = self.inputs
#reference labels
self.targets = tf.placeholder(
tf.int32, shape=[minibatch_size,1],
name='targets')
targets = self.targets
#the length of the input sequences
self.input_seq_length = tf.placeholder(
tf.int32, shape=[minibatch_size],
name='input_seq_length')
#the length of all the output sequences
self.target_seq_length = tf.placeholder(
tf.int32, shape=[minibatch_size],
name='output_seq_length')
#compute the training outputs of the nnetgraph
trainlogits, logit_seq_length, self.modelsaver, self.control_ops = (
classifier(inputs, self.input_seq_length, is_training=True
, reuse=False, scope='Classifier'))
#compute the validation output of the nnetgraph
logits, _, _, _ = classifier(inputs, self.target_seq_length,
is_training=False, reuse=True,
scope='Classifier')
#get a list of trainable variables in the decoder graph
params = tf.trainable_variables()
#add the variables and operations to the graph that are used for
#training
#the total loss of the entire block
block_loss = tf.get_variable(
'block_loss', [], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
#the total frame acc of the entire block
block_acc = tf.get_variable(
'block_acc', [], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
with tf.variable_scope('train_variables'):
#a variable to scale the learning rate (used to reduce the
#learning rate in case validation performance drops)
learning_rate_fact = tf.get_variable(
'learning_rate_fact', [],
initializer=tf.constant_initializer(1.0), trainable=False)
#compute the learning rate with exponential decay and scale with the learning rate factor
learning_rate = tf.train.exponential_decay(
init_learning_rate, 0, 1,
1.0) * learning_rate_fact
#create the optimizer
optimizer = tf.train.AdadeltaOptimizer(learning_rate)
#for every parameter create a variable that holds its gradients
with tf.variable_scope('gradients'):
grads = [tf.get_variable(
param.op.name, param.get_shape().as_list(),
initializer=tf.constant_initializer(0),
trainable=False) for param in params]
with tf.name_scope('train'):
#the total number of frames that are used in the block
num_frames = tf.get_variable(
name='num_frames', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
#operation to update num_frames
#pylint: disable=E1101
update_num_frames = num_frames.assign_add(self.minibatch_size)
#compute the training loss
loss, acc = self.compute_loss(targets, trainlogits)
#operation to half the learning rate
self.halve_learningrate_op = learning_rate_fact.assign(
learning_rate_fact/2).op
#create an operation to initialise the gradients
self.init_grads = tf.variables_initializer(grads)
#the operation to initialise the block loss
self.init_loss = block_loss.initializer #pylint: disable=E1101
#the operation to initialise the block acc
self.init_acc = block_acc.initializer
#the operation to initialize the num_frames
#pylint: disable=E1101
self.init_num_frames = num_frames.initializer
#compute the gradients of the batch
batchgrads = tf.gradients(loss, params)
#create an operation to update the block loss
#pylint: disable=E1101
self.update_loss = block_loss.assign_add(loss)
#create an operation to update the block acc
self.update_acc = block_acc.assign_add(acc)
#create an operation to update the gradients, the block_loss
#and do all other update ops
#pylint: disable=E1101
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.update_gradients_op = tf.group(
*([grads[p].assign_add(batchgrads[p])
for p in range(len(grads)) if batchgrads[p] is not None]
+ [self.update_loss] + [self.update_acc] + update_ops + [update_num_frames]),
name='update_gradients')
#create an operation to apply the gradients
#average the gradients
meangrads = [tf.div(grad, tf.cast(num_frames, tf.float32),
name=grad.op.name) for grad in grads]
#clip the gradients
meangrads = [tf.clip_by_value(grad, -clip_grad, clip_grad)
for grad in meangrads]
#apply the gradients
self.apply_gradients_op = optimizer.apply_gradients(
[(meangrads[p], params[p]) for p in range(len(meangrads))])
with tf.name_scope('valid'):
#compute the validation loss
valid_loss, valid_acc = self.compute_loss(targets, logits)
#operation to update the validation loss
#pylint: disable=E1101
self.update_valid_loss= tf.group(*([block_loss.assign_add(valid_loss),
block_acc.assign_add(valid_acc), update_num_frames]))
#operation to compute the average loss in the batch
self.average_loss = block_loss/tf.cast(num_frames, tf.float32)
self.average_acc = block_acc/tf.cast(num_frames, tf.float32)
# add an operation to initialise all the variables in the graph
self.init_op = tf.global_variables_initializer()
#saver for the training variables
self.saver = tf.train.Saver(tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='train_variables'))
#create the summaries for visualisation
self.summary = tf.summary.merge(
[tf.summary.histogram(val.name, val)
for val in params+meangrads]
+ [tf.summary.scalar('loss', self.average_loss)])
#specify that the graph can no longer be modified after this point
self.graph.finalize()
#start without visualisation
self.summarywriter = None
@abstractmethod
def compute_loss(self, targets, logits):
'''
Compute the loss
Creates the operation to compute the loss, this is specific to each
trainer
Args:
targets: a list that contains a Bx1 tensor containing the targets
for eacht time step where B is the batch size
logits: a list that contains a BxO tensor containing the output
logits for eacht time step where O is the output dimension
Returns:
a scalar value containing the total loss
'''
raise NotImplementedError("Abstract method")
def initialize(self):
'''Initialize all the variables in the graph'''
self.init_op.run() #pylint: disable=E1101
def start_visualization(self, logdir):
'''
open a summarywriter for visualisation and add the graph
Args:
logdir: directory where the summaries will be written
'''
self.summarywriter = tf.summary.FileWriter(logdir=logdir,
graph=self.graph)
def update(self, dispenser):
'''
update the neural model with a batch or training data
Args:
dispenser: a batchdispenser for training data
Returns:
the loss of this epoch
'''
num_blocks = 0
epoch_loss = 0.0
epoch_acc = 0.0
while dispenser.have_feature():
#get partial of the taining data
inputs, targets = dispenser.get_feature()
#get a list of sequence lengths
input_seq_length = inputs.shape[0]
output_seq_length = targets.shape[0]
assert input_seq_length == output_seq_length
#fill the inputs to have a round number of minibatches
added_inputs = np.append(inputs, np.zeros(((self.minibatch_size - len(inputs)%self.minibatch_size),inputs.shape[1])),0)
added_targets = np.append(targets, np.zeros(self.minibatch_size - len(targets)%self.minibatch_size))
input_seq_length = input_seq_length + (self.minibatch_size - input_seq_length%self.minibatch_size)
output_seq_length = output_seq_length + (self.minibatch_size - output_seq_length%self.minibatch_size)
#feed in the batches one by one and accumulate the gradients and loss
for k in range(input_seq_length/self.minibatch_size):
batch_inputs = added_inputs[ k*self.minibatch_size:(k+1)*self.minibatch_size,:]
batch_targets = added_targets[ k*self.minibatch_size:(k+1)*self.minibatch_size]
batch_targets = np.reshape(batch_targets, (self.minibatch_size, 1))
#pylint: disable=E1101
self.update_gradients_op.run(feed_dict={self.inputs:batch_inputs, self.targets:batch_targets})
#apply the accumulated gradients to update the model parameters and
#evaluate the loss
if self.summarywriter is not None:
[summary, _] = tf.get_default_session().run(
[ self.summary, self.apply_gradients_op])
#pylint: disable=E1101
self.summarywriter.add_summary(summary)
else:
[_] = tf.get_default_session().run(
[self.apply_gradients_op])
#get the loss
loss = self.average_loss.eval()
acc = self.average_acc.eval()
num_blocks += 1
epoch_loss += loss
epoch_acc += acc
print "the block cross entroy loss is: ", loss, " the block Frame Accuracy is: ", acc
self.init_loss.run()
self.init_acc.run()
self.init_num_frames.run()
dispenser.init_feature()
#reinitialize the gradients and the loss
self.init_grads.run() #pylint: disable=E1101
#self.init_loss.run()
#self.init_num_frames.run()
return epoch_loss/num_blocks
def evaluate(self, dispenser_dev):
'''
Evaluate the performance of the neural net
Args:
dispenser_dev: a batchdispenser for dev data
Returns:
the loss of the dev data
'''
num_blocks = 0
epoch_loss = 0.0
epoch_acc = 0.0
while dispenser_dev.have_feature():
#get partial of the taining data
inputs, targets = dispenser_dev.get_feature()
#get a list of sequence lengths
input_seq_length = inputs.shape[0]
output_seq_length = targets.shape[0]
assert input_seq_length == output_seq_length
#fill the inputs to have a round number of minibatches
added_inputs = np.row_stack((inputs, np.zeros(((self.minibatch_size - len(inputs)%self.minibatch_size),inputs.shape[1]))))
added_targets = np.append(targets, np.zeros(self.minibatch_size - len(targets)%self.minibatch_size))
input_seq_length = input_seq_length + (self.minibatch_size - input_seq_length%self.minibatch_size)
output_seq_length = output_seq_length + (self.minibatch_size - output_seq_length%self.minibatch_size)
#feed in the batches one by one and accumulate the gradients and loss
for k in range(input_seq_length/self.minibatch_size):
batch_inputs = added_inputs[k*self.minibatch_size:
(k+1)*self.minibatch_size,
:]
batch_targets = added_targets[k*self.minibatch_size:
(k+1)*self.minibatch_size]
batch_targets = np.reshape(batch_targets, (self.minibatch_size, 1))
#pylint: disable=E1101
self.update_valid_loss.run(
feed_dict={self.inputs:batch_inputs,
self.targets:batch_targets})
#get the loss
loss = self.average_loss.eval()
acc = self.average_acc.eval()
num_blocks += 1
epoch_loss += loss
epoch_acc += acc
print "the block cross entroy loss is: ", loss, " the block Frame Accuracy is: ", acc
self.init_loss.run()
self.init_acc.run()
self.init_num_frames.run()
dispenser_dev.init_feature()
#reinitialize the loss
#self.init_loss.run()
#self.init_num_frames.run()
return epoch_loss/num_blocks
def halve_learning_rate(self):
'''halve the learning rate'''
self.halve_learningrate_op.run()
def save_learning_rate(self):
raise NotImplementedError("Abstract method")
def save_model(self, filename):
'''
Save the model
Args:
filename: path to the model file
'''
self.modelsaver.save(tf.get_default_session(), filename)
def restore_model(self, filename):
'''
Load the model
Args:
filename: path where the model will be saved
'''
self.modelsaver.restore(tf.get_default_session(), filename)
def save_trainer(self, filedir, filename):
'''
Save the training progress (including the model)
Args:
filename: path where the model will be saved
'''
self.modelsaver.save(tf.get_default_session(), filedir+filename)
self.saver.save(tf.get_default_session(), filedir+filename + '_trainvars')
File = filedir+'mlp_best'
model_file = open(File, 'w')
model_file.write(filename)
model_file.close()
def restore_trainer(self, filedir):
'''
Load the training progress (including the model)
Args:
filename: path where the model will be saved
'''
File = filedir + 'mlp_best'
model_file = open(File,'r')
filename = model_file.readline()
filename = filename.split('\n')
filename = filedir + str(filename[0])
self.modelsaver.restore(tf.get_default_session(), filename)
self.saver.restore(tf.get_default_session(), filename + '_trainvars')
class CrossEnthropyTrainer(Trainer):
    '''A trainer that minimises the cross-entropy loss, the output sequences
    must be of the same length as the input sequences'''
    def compute_loss(self, targets, logits):
        '''
        Compute the loss

        Creates the operation to compute the cross-entropy loss for every input
        frame (if you want to have a different loss function, overwrite this method)

        Args:
            targets: a Bx1 tensor containing the targets for each time step where B is the batch size
            logits: a BxO tensor containing the output logits for each time step where O is the output dimension

        Returns:
            a tuple (loss, acc): the summed cross-entropy loss and the number
            of correctly classified frames in the batch
        '''
        with tf.name_scope('cross_enthropy_loss'):
            targets = tf.reshape(targets,[-1])
            # a frame is correct when the target class is the top-1 logit
            correct_pred = tf.nn.in_top_k(logits, targets, 1)
            #compute the frame accuracy (count of correct frames)
            acc = tf.reduce_sum(tf.cast(correct_pred, tf.float32))
            #one hot encode the targets
            #pylint: disable=E1101
            targets = tf.one_hot(targets,int(logits.get_shape()[1]))
            #compute the cross-entropy loss, summed over the batch
            loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))
            return loss, acc
|
{"hexsha": "6662892ce7a9ded09a98df5fa6006570dd7f3869", "size": 18639, "ext": "py", "lang": "Python", "max_stars_repo_path": "neuralNetworks/trainer.py", "max_stars_repo_name": "waterxt/tensorflowkaldi", "max_stars_repo_head_hexsha": "981cfb2bb0a8adec45379cee2410ef166c24b7e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neuralNetworks/trainer.py", "max_issues_repo_name": "waterxt/tensorflowkaldi", "max_issues_repo_head_hexsha": "981cfb2bb0a8adec45379cee2410ef166c24b7e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neuralNetworks/trainer.py", "max_forks_repo_name": "waterxt/tensorflowkaldi", "max_forks_repo_head_hexsha": "981cfb2bb0a8adec45379cee2410ef166c24b7e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-07-21T08:30:19.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-18T14:05:54.000Z", "avg_line_length": 38.5103305785, "max_line_length": 134, "alphanum_fraction": 0.5896239069, "include": true, "reason": "import numpy", "num_tokens": 3674}
|
import pysam,sys
import numpy as np
from collections import Counter
from resources.extract import extractRegion,fqRec,rc
MINCLUSTERSIZE=5
def main(parser):
    '''Sample reads from a BAM or fastq input and write them out as fastq records.

    Args:
        parser: the configured argparse.ArgumentParser for this script

    Raises:
        SampleReads_Exception: on conflicting/missing inputs or too few reads
    '''
    args = parser.parse_args()
    if args.inBAM and args.inFastq:
        raise SampleReads_Exception('Only one input, either -b or -q')
    if args.inBAM:
        bam = pysam.AlignmentFile(args.inBAM,check_sq=False)
        names = getReadNamesBam(bam,HP=args.haplotag)
        if args.region:
            if not args.reference:
                raise SampleReads_Exception('Must pass reference for region extraction')
            recGen = extractRegion(args.inBAM,
                                   args.reference,
                                   region=args.region,
                                   flanksize=args.flanksize,
                                   revcomp=args.revcomp)
        else:
            recGen = recIterBam(bam,revcomp=args.revcomp)
    elif args.inFastq:
        names = getReadNamesFq(args.inFastq)
        recGen = recIterFq(args.inFastq,revcomp=args.revcomp)
    else:
        raise SampleReads_Exception('Must have input, either -b or -q')
    if not len(names):
        raise SampleReads_Exception('No reads returned')
    if len(names)<MINCLUSTERSIZE:
        raise SampleReads_Exception('Fewer than %i reads returned' % MINCLUSTERSIZE)
    np.random.seed(args.seed)
    size = args.nReads if args.nReads else len(names)
    # Counter so a read drawn k times (sampling with replacement) is written k times
    selected = Counter(np.random.choice(names,size=size,replace=args.replace))
    nrecs = 0
    # BUGFIX: the original used `with (open(...) if args.out else sys.stdout)`,
    # which closes the interpreter's stdout when the block exits.
    # Only close the file if we opened it ourselves.
    oFile = open(args.out,'w') if args.out else sys.stdout
    try:
        for name,seq,qual in recGen:
            cname = clipReadName(name)
            if cname in selected:
                rec = fqRec(cname,seq,qual)
                nrecs += 1
                for _ in range(selected[cname]):
                    oFile.write(rec)
                if nrecs == size:
                    break
    finally:
        if args.out:
            oFile.close()
    return None
def recIterBam(bam,revcomp=False):
    '''Yield (name, sequence, quality-string) for each primary record in a BAM.'''
    for rec in bam:
        # skip secondary (0x100) and supplementary (0x800) alignments
        if rec.flag & 0x900:
            continue
        seq = rec.query_sequence
        qual = ''.join(chr(q+33) for q in rec.query_qualities)
        if revcomp:
            seq, qual = rc(seq), qual[::-1]
        yield rec.query_name, seq, qual
def recIterFq(fastq,revcomp=False):
    '''Yield (name, sequence, quality) records from a fastq, optionally rev-comped.'''
    if revcomp:
        for rec in pysam.FastxFile(fastq):
            yield rec.name, rc(rec.sequence), rec.quality[::-1]
    else:
        for rec in pysam.FastxFile(fastq):
            yield rec.name, rec.sequence, rec.quality
def getReadNamesBam(bam,HP=None):
    '''Return the sorted, de-duplicated read names in a BAM, optionally restricted
    to records whose HP (haplotype) tag equals HP.'''
    try:
        if HP is None:
            names = set(rec.query_name for rec in bam)
        else:
            names = set(rec.query_name for rec in bam if rec.get_tag('HP') == HP)
        return sorted(names)
    except KeyError:
        # get_tag raises KeyError when the HP tag is absent from a record
        raise SampleReads_Exception('No HP tag in BAM')
    finally:
        # rewind so the caller can iterate the BAM again
        bam.reset()
def getReadNamesFq(fastq):
    '''Return the clipped read names of every record in a fastq file.'''
    names = []
    for rec in pysam.FastxFile(fastq):
        names.append(clipReadName(rec.name))
    return names
def clipReadName(name,nFields=3):
    '''Trim a '/'-separated read name down to its first nFields fields.'''
    fields = name.split('/')[:nFields]
    return '/'.join(fields)
class SampleReads_Exception(Exception):
    # Raised for all user-facing errors in this script (bad arguments,
    # empty input, too few reads, missing HP tags).
    pass
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(prog='sampleBam.py', description='export a random sample of reads in fastq format')
    # exactly one of -b / -q must be supplied; main() enforces this
    parser.add_argument('-b','--inBAM', dest='inBAM', type=str, default=None,
                    help='BAM containing reads to sample. Default None')
    parser.add_argument('-q','--inFastq', dest='inFastq', type=str, default=None,
                    help='fastq containing reads to sample. No region or HP filtering, use all reads. Default None')
    parser.add_argument('-n','--nReads', dest='nReads', type=int, default=0,
                    help='Reads to sample. To resample for bootstrapping, use default. Default 0 (all reads)')
    parser.add_argument('--reg', dest='region', type=str, default=None,
                    help='Target region to extract, format \'[chr]:[start]-[stop]\'. Example \'4:3076604-3076660\'. Default None.')
    parser.add_argument('--ref', dest='reference', type=str, default=None,
                    help='Reference fasta used for mapping BAM if extracting region. Must have .fai index. Default None')
    parser.add_argument('-f','--flanksize', dest='flanksize', type=int, default=100,
                    help='Size of flanking sequence mapped for extracting repeat region. Default 100')
    parser.add_argument('--rc', dest='revcomp', action='store_true', default=False,
                    help='Rev-comp extracted region. Default Reference Direction')
    parser.add_argument('-H','--haplotag', dest='haplotag', type=int, default=None,
                    help='Sample from one HP tag value. Default None (all reads)')
    parser.add_argument('-s','--seed', dest='seed', type=int, default=17,
                    help='Random seed. Default 17')
    parser.add_argument('-o','--out', dest='out', type=str, default=None,
                    help='Output file. Default stdout')
    parser.add_argument('-r','--replace', dest='replace', action='store_true', default=False,
                    help='Sample with replacement. default False')
    # all expected failures surface as SampleReads_Exception; print and exit non-zero
    try:
        main(parser)
    except SampleReads_Exception as e:
        print('ERROR: %s' % e)
        sys.exit(1)
|
{"hexsha": "78cee05a631527a668fe7ba0fadea5fd250c1d15", "size": 5217, "ext": "py", "lang": "Python", "max_stars_repo_path": "RepeatAnalysisTools/sampleReads.py", "max_stars_repo_name": "PacificBiosciences/apps-scripts", "max_stars_repo_head_hexsha": "dd741d72f6b5483eb6d1f9a7f33cd42f9b56b5a7", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2017-03-02T22:35:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:34:00.000Z", "max_issues_repo_path": "RepeatAnalysisTools/sampleReads.py", "max_issues_repo_name": "PacificBiosciences/apps-scripts", "max_issues_repo_head_hexsha": "dd741d72f6b5483eb6d1f9a7f33cd42f9b56b5a7", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2017-04-07T16:11:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-24T16:54:52.000Z", "max_forks_repo_path": "RepeatAnalysisTools/sampleReads.py", "max_forks_repo_name": "PacificBiosciences/apps-scripts", "max_forks_repo_head_hexsha": "dd741d72f6b5483eb6d1f9a7f33cd42f9b56b5a7", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2017-02-23T01:18:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T18:29:42.000Z", "avg_line_length": 41.0787401575, "max_line_length": 132, "alphanum_fraction": 0.614337742, "include": true, "reason": "import numpy", "num_tokens": 1276}
|
"""
Usage: main.py lookup <image>...
main.py insert <image>...
"""
import sys
import multiprocessing
from collections import Counter
from os import cpu_count
import cv2
import redis
import numpy as np
from .keypoints import compute_keypoints
from .phash import triangles_from_keypoints, hash_triangles
def phash_triangles(img, triangles, batch_size=None):
    """Hash all triangles of an image, distributing the work over all CPUs.

    Args:
        img: the image the triangles were extracted from
        triangles: sequence of triangles (keypoint coordinate triples)
        batch_size: triangles per worker task; defaults to an even split
            across the available CPUs

    Returns:
        A flat list with the hashes of all triangles.
    """
    n = len(triangles)
    if batch_size is None:
        # BUGFIX: max(1, ...) guards against a zero step when there are fewer
        # triangles than CPUs -- range() raises ValueError on a step of 0
        batch_size = max(1, n // cpu_count())
    array = np.asarray(triangles, dtype='d')
    tasks = [(img, array[i:i + batch_size]) for i in range(0, n, batch_size)]
    results = []
    with multiprocessing.Pool(processes=cpu_count()) as p:
        for result in p.starmap(hash_triangles, tasks):
            results += result
    return results
def pipeline(r, data, chunk_size):
    """Yield (redis-pipeline, chunk) pairs covering `data` in chunks of ~chunk_size keys.

    Args:
        r: redis client (anything exposing a .pipeline() method)
        data: sequence of keys to split
        chunk_size: approximate number of keys per chunk (callers pass 1e5, a float)
    """
    # BUGFIX: int(...) because chunk_size may be a float, in which case // yields
    # a float and np.array_split raises TypeError (it needs an integer section count)
    npartitions = int(len(data) // chunk_size)
    pipe = r.pipeline()
    for chunk in np.array_split(data, npartitions or 1):
        yield pipe, chunk
def insert(chunks, filename):
    """Register `filename` under every fragment-hash key via redis SADD.

    Args:
        chunks: iterable of (pipeline, keys) pairs as produced by pipeline()
        filename: the image file the hashes belong to
    """
    n = 0
    for pipe, keys in chunks:
        for key in keys:
            pipe.sadd(key, filename)
        # execute() returns 1 per key that was newly added to the set
        n += sum(pipe.execute())
    # BUGFIX: report the actual filename instead of a hard-coded placeholder
    print(f'added {n} fragments for {filename}')
def lookup(chunks, filename):
    """Query every fragment-hash key and print matching images by match count.

    Args:
        chunks: iterable of (pipeline, keys) pairs as produced by pipeline()
        filename: the query image file (used for the report header only)
    """
    count = Counter()
    for pipe, keys in chunks:
        for key in keys:
            pipe.smembers(key)
        # one set of member filenames (bytes) per queried key
        for result in pipe.execute():
            count.update(result)
    # BUGFIX: report the query filename instead of a hard-coded placeholder
    print(f'matches for {filename}:')
    for key, num in count.most_common():
        print(f'{num:<10d} {key.decode("utf-8")}')
def main():
    """CLI entry point: insert images into, or look images up in, the redis index."""
    if len(sys.argv) < 3:
        print(__doc__)
        exit(1)
    command, *filenames = sys.argv[1:]
    command = insert if command == 'insert' else lookup
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    try:
        # BUGFIX: ping() must be *called* -- the original bare `r.ping` only
        # referenced the method, so a missing redis server was never detected
        r.ping()
    except redis.ConnectionError:
        print('You need to install redis.')
        return
    for filename in filenames:
        print('loading', filename)
        img = cv2.imread(filename)
        keypoints = compute_keypoints(img)
        triangles = triangles_from_keypoints(keypoints, lower=50, upper=400)
        hashes = phash_triangles(img, triangles)
        # pass an int chunk size: pipeline() feeds it to np.array_split,
        # which rejects float section counts
        chunks = pipeline(r, hashes, chunk_size=100000)
        print()
        command(chunks, filename)
if __name__ == '__main__':
    # script entry point
    main()
|
{"hexsha": "84d20978c0d3107f8a1ca57aef72197febd6a1f8", "size": 2284, "ext": "py", "lang": "Python", "max_stars_repo_path": "transformation_invariant_image_search/main.py", "max_stars_repo_name": "xeddmc/transformationInvariantImageSearch", "max_stars_repo_head_hexsha": "10800ace74441382a41be1a48fe2e01cd8e89a9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 285, "max_stars_repo_stars_event_min_datetime": "2017-08-06T06:15:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T14:52:33.000Z", "max_issues_repo_path": "transformation_invariant_image_search/main.py", "max_issues_repo_name": "xeddmc/transformationInvariantImageSearch", "max_issues_repo_head_hexsha": "10800ace74441382a41be1a48fe2e01cd8e89a9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-08-13T11:10:28.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-25T10:22:26.000Z", "max_forks_repo_path": "transformation_invariant_image_search/main.py", "max_forks_repo_name": "xeddmc/transformationInvariantImageSearch", "max_forks_repo_head_hexsha": "10800ace74441382a41be1a48fe2e01cd8e89a9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2017-08-10T07:24:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-26T15:24:20.000Z", "avg_line_length": 22.6138613861, "max_line_length": 77, "alphanum_fraction": 0.6295971979, "include": true, "reason": "import numpy", "num_tokens": 561}
|
#include <boost/thread/thread.hpp>
#include <boost/lockfree/spsc_queue.hpp>
#include <iostream>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <cctype>
#include <string>
#include <boost/atomic.hpp>
#define PORT 6070
// fixed request size: every buffer is exactly 1 MiB
#define TILE (1 << 20)
// single-producer/single-consumer lock-free queues linking the three stages:
// gather -> input_queue -> compute -> output_queue -> scatter
boost::lockfree::spsc_queue<char*, boost::lockfree::capacity<32> > input_queue;
boost::lockfree::spsc_queue<char*, boost::lockfree::capacity<32> > output_queue;
// Stage 1: accept TCP connections on PORT, read one TILE-sized request per
// connection into a freshly allocated buffer, and hand the buffer to
// compute() through input_queue. Ownership passes down the pipeline;
// scatter() eventually frees the buffer.
void gather(void) {
  int server_fd;
  if ((server_fd = socket(AF_INET, SOCK_STREAM, 0)) == 0) {
    std::cerr << "Socket failed" << std::endl;
    exit(EXIT_FAILURE);
  }
  int opt = 1;
  // allow fast restarts of the server on the same port
  if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt))) {
    std::cerr << "Setsockopt failed" << std::endl;
    exit(EXIT_FAILURE);
  }
  sockaddr_in address;
  bzero(&address, sizeof(address));
  address.sin_family = AF_INET;
  address.sin_addr.s_addr = INADDR_ANY;
  address.sin_port = htons(PORT);
  if (bind(server_fd, (struct sockaddr *) &address, sizeof(address)) < 0) {
    std::cerr << "Bind failed" << std::endl;
    exit(EXIT_FAILURE);
  }
  if (listen(server_fd, 32) < 0) {
    std::cerr << "Listen failed" << std::endl;
    exit(EXIT_FAILURE);
  }
  int addrlen = sizeof(address);
  int num_gather = 0;
  while (true) {
    int instance = accept(server_fd, (struct sockaddr *) &address, (socklen_t *) &addrlen);
    if (instance < 0) {
      std::cerr << "Accept failed" << std::endl;
    }
    else {
      char* buffer = new char[TILE];
      int n;
      int total_size = TILE;
      char* p = buffer;
      // keep reading until the buffer is full or the peer closes/errors;
      // short reads advance p and shrink the remaining byte count
      while ((n = read(instance, p, total_size)) > 0) {
        if (n == total_size) break;
        p = p + n;
        total_size -= n;
      }
      close(instance);
      // busy-wait until compute() drains a queue slot
      while (!input_queue.push(buffer)) ;
      num_gather++;
      //if (num_gather % 10 == 0)
      //  std::cout << "Received " << num_gather << " requests" << std::endl;
    }
  }
}
// Stage 2: forward buffers from input_queue to output_queue.
// (The per-byte uppercase transform is left disabled, as in the original.)
void compute(void) {
  int processed = 0;
  while (true) {
    char* data = NULL;
    // spin until gather() delivers a buffer
    while (!input_queue.pop(data)) ;
    //for (int i=0; i<TILE; i++) data[i] = toupper(data[i]);
    // spin until scatter() has room for the buffer
    while (!output_queue.push(data)) ;
    ++processed;
    //if (processed % 10 == 0)
    //  std::cout << "Processed " << processed << " requests" << std::endl;
  }
}
// Stage 3: pop processed buffers from output_queue and send each one over a
// new TCP connection to the downstream service at 127.0.0.1:9520, then free
// the buffer (ownership originated in gather()).
void scatter(void) {
  sockaddr_in serv_addr;
  bzero(&serv_addr, sizeof(serv_addr));
  serv_addr.sin_family = AF_INET;
  serv_addr.sin_port = htons(9520);
  if (inet_pton(AF_INET, "127.0.0.1", &(serv_addr.sin_addr)) <= 0) {
    std::cerr << "Inet_pton failed" << std::endl;
    exit(EXIT_FAILURE);
  }
  int num_scatter = 0;
  while (true) {
    char* buffer = NULL;
    // busy-wait for the next processed buffer
    while (!output_queue.pop(buffer)) ;
    int sock;
    if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
      std::cerr << "Socket failed" << std::endl;
      exit(EXIT_FAILURE);
    }
    if (connect(sock, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) {
      std::cerr << "Connect failed" << std::endl;
    }
    write(sock, buffer, TILE);
    delete [] buffer;
    close(sock);
    num_scatter++;
    //if (num_scatter % 10 == 0)
    //  std::cout << "Scatter " << num_scatter << " requests" << std::endl;
  }
}
int main(int argc, char* argv[]) {
boost::thread gather_thread(gather);
boost::thread compute_thread(compute);
boost::thread scatter_thread(scatter);
gather_thread.join();
compute_thread.join();
scatter_thread.join();
return 0;
}
|
{"hexsha": "ef7c663311f7670c367705860e4750ae298d26ab", "size": 3635, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "aes/cpp/aesserver.cpp", "max_stars_repo_name": "peterpengwei/pipeline", "max_stars_repo_head_hexsha": "d1dc6534c92c2f377d9c0719347cc1d254d78f6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aes/cpp/aesserver.cpp", "max_issues_repo_name": "peterpengwei/pipeline", "max_issues_repo_head_hexsha": "d1dc6534c92c2f377d9c0719347cc1d254d78f6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aes/cpp/aesserver.cpp", "max_forks_repo_name": "peterpengwei/pipeline", "max_forks_repo_head_hexsha": "d1dc6534c92c2f377d9c0719347cc1d254d78f6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5378787879, "max_line_length": 95, "alphanum_fraction": 0.5768913343, "num_tokens": 985}
|
(** Generated by coq-of-ocaml *)
Require Import OCaml.OCaml.
Local Set Primitive Projections.
Local Open Scope string_scope.
Local Open Scope Z_scope.
Local Open Scope type_scope.
Import ListNotations.
Unset Positivity Checking.
Unset Guard Checking.
(* Peano-style naturals; shadows the standard nat (generated code). *)
Inductive nat : Set :=
| O : nat
| S : nat -> nat.
(* Unary naturals actually used by the arithmetic below. *)
Inductive natural : Set :=
| Zero : natural
| Succ : natural -> natural.
(* Addition by structural recursion on the first argument. *)
Fixpoint plus (plus_arg0 : natural) (plus_arg1 : natural) {struct plus_arg0}
: natural :=
  match plus_arg0 with
  | Zero => plus_arg1
  | Succ n => Succ (plus n plus_arg1)
  end.
(* Multiplication by structural recursion on the first argument. *)
Fixpoint mult (mult_arg0 : natural) (mult_arg1 : natural) {struct mult_arg0}
: natural :=
  match mult_arg0 with
  | Zero => Zero
  | Succ n => plus (mult n mult_arg1) mult_arg1
  end.
(* Synthesized candidate: ignores lf2/lf1 and reduces to plus y x. *)
Definition synth (lf2 : natural) (lf1 : natural) (y : natural) (x : natural)
: natural := plus Zero (plus y x).
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal33_mult_succ_82_plus_assoc/goal33conj275_coqofml_70YDls.v"}
|
/-
Copyright (c) 2022 Julian Kuelshammer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Julian Kuelshammer
-/
import easy_mode.sheet01
/-! Two-by-two matrices
This file defines two-by-two matrices and shows that they form a vector space.
-/
/- What do you want to be a 2x2-matrix. There are several possible approaches. I put some non-answer here to
make Lean stop complaining, replace it with a correct answer. -/
-- Exercise scaffold: the single real field is a placeholder to be replaced
-- by the student's chosen representation of a 2x2 matrix.
structure two_matrix : Type :=
(x : ℝ)
namespace two_matrix
notation `Mat₂` := two_matrix
-- Exercise sheet: every `sorry` below is intentionally left for the student.
/- Again we want to be able to write `A + B` if `A` and `B` are matrices without too complicated notation. -/
instance : has_add Mat₂ := sorry
lemma add_assoc (A B C : Mat₂) : A + B + C = A + (B + C) :=
begin
  sorry
end
lemma add_comm (A B : Mat₂) : A + B = B + A :=
begin
  sorry
end
def zero_matrix : Mat₂ := sorry
/- We even want to be able to write `0` for the zero matrix.-/
instance : has_zero Mat₂ := ⟨zero_matrix⟩
@[simp] lemma add_zero (A : Mat₂) : A + 0 = A :=
begin
  sorry
end
@[simp] lemma zero_add (A : Mat₂) : 0 + A = A :=
begin
  sorry
end
/- We want to define the negation of a matrix. -/
instance : has_neg Mat₂ := sorry
@[simp] lemma add_neg_self (A : Mat₂) : A + -A = 0 :=
begin
  sorry
end
@[simp] lemma neg_add_self (A : Mat₂) : -A + A = 0 :=
begin
  sorry
end
/- Finally we set up subtraction and scalar multiplication of matrices. -/
instance : has_sub Mat₂ := sorry
instance : has_scalar ℝ Mat₂ := sorry
lemma smul_assoc (a b : ℝ) (A : Mat₂) : (a * b) • A = a • (b • A) :=
begin
  sorry
end
@[simp] lemma one_smul (A : Mat₂) : (1 : ℝ) • A = A :=
begin
  sorry
end
@[simp] lemma smul_add (a : ℝ) (A B : Mat₂) : a • (A + B) = a • A + a • B :=
begin
  sorry
end
@[simp] lemma add_smul (a b : ℝ) (A : Mat₂) : (a + b) • A = a • A + b • A :=
begin
  sorry
end
end two_matrix
|
{"author": "Julian-Kuelshammer", "repo": "summer_maths_it_camp", "sha": "09b17b78de1c4cb3536649a6030fc14b60b08d24", "save_path": "github-repos/lean/Julian-Kuelshammer-summer_maths_it_camp", "path": "github-repos/lean/Julian-Kuelshammer-summer_maths_it_camp/summer_maths_it_camp-09b17b78de1c4cb3536649a6030fc14b60b08d24/src/hard_mode/sheet02.lean"}
|
///////////////////////////////////////////////////////////////////////////////
// calculator.hpp
//
// Copyright 2008 Eric Niebler. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/proto/core.hpp>
#include <boost/proto/context.hpp>
#include <boost/test/unit_test.hpp>
using namespace boost;
// The argument placeholder: _1 is a Proto terminal expression that stands for
// the runtime value supplied when the expression is evaluated.
struct placeholder {};
proto::terminal<placeholder>::type const _1 = {{}};
// Evaluation context for arithmetic Proto expressions: holds the value bound
// to the _1 placeholder and evaluates +, -, *, / recursively over integers.
struct calculator : proto::callable_context<calculator const>
{
    typedef int result_type;

    calculator(int i)
      : i_(i)
    {}

    // a placeholder terminal evaluates to the bound value
    int operator ()(proto::tag::terminal, placeholder) const
    {
        return this->i_;
    }

    // an integer literal terminal evaluates to itself
    int operator ()(proto::tag::terminal, int j) const
    {
        return j;
    }

    template<typename Left, typename Right>
    int operator ()(proto::tag::plus, Left const &left, Right const &right) const
    {
        return proto::eval(left, *this) + proto::eval(right, *this);
    }

    template<typename Left, typename Right>
    int operator ()(proto::tag::minus, Left const &left, Right const &right) const
    {
        return proto::eval(left, *this) - proto::eval(right, *this);
    }

    template<typename Left, typename Right>
    int operator ()(proto::tag::multiplies, Left const &left, Right const &right) const
    {
        return proto::eval(left, *this) * proto::eval(right, *this);
    }

    // note: integer division, so results truncate toward zero
    template<typename Left, typename Right>
    int operator ()(proto::tag::divides, Left const &left, Right const &right) const
    {
        return proto::eval(left, *this) / proto::eval(right, *this);
    }

private:
    int i_;
};
// Adapts a stored Proto expression into a unary function object: calling it
// constructs a Fun context from the argument and evaluates the expression.
// NOTE(review): holds the expression by reference -- the expression must
// outlive the functional object.
template<typename Fun, typename Expr>
struct functional
{
    typedef typename proto::result_of::eval<Expr, Fun>::type result_type;

    functional(Expr const &expr)
      : expr_(expr)
    {}

    template<typename T>
    result_type operator ()(T const &t) const
    {
        Fun fun(t);
        return proto::eval(this->expr_, fun);
    }

private:
    Expr const &expr_;
};
// Convenience factory: wrap a Proto expression as a callable evaluated with
// context Fun, e.g. as<calculator>(_1 + 1)(41) == 42.
template<typename Fun, typename Expr>
functional<Fun, Expr> as(Expr const &expr)
{
    return functional<Fun, Expr>(expr);
}
// Checks that direct proto::eval and the as<> functional wrapper agree on the
// same expression ((x + 42) - 3) / 4 for two placeholder bindings.
void test_calculator()
{
    BOOST_CHECK_EQUAL(10, proto::eval(((_1 + 42)-3)/4, calculator(1)));
    BOOST_CHECK_EQUAL(11, proto::eval(((_1 + 42)-3)/4, calculator(5)));

    BOOST_CHECK_EQUAL(10, as<calculator>(((_1 + 42)-3)/4)(1));
    BOOST_CHECK_EQUAL(11, as<calculator>(((_1 + 42)-3)/4)(5));
}
using namespace unit_test;
///////////////////////////////////////////////////////////////////////////////
// init_unit_test_suite
//
// Boost.Test entry point: registers the single calculator test case.
test_suite* init_unit_test_suite( int argc, char* argv[] )
{
    test_suite *test = BOOST_TEST_SUITE("test immediate evaluation of proto parse trees");

    test->add(BOOST_TEST_CASE(&test_calculator));

    return test;
}
|
{"hexsha": "a69480c9e5b51ac6ece9ff99f177734566965111", "size": 2955, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/proto/test/calculator.cpp", "max_stars_repo_name": "zyiacas/boost-doc-zh", "max_stars_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 198.0, "max_stars_repo_stars_event_min_datetime": "2015-01-13T05:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:46:46.000Z", "max_issues_repo_path": "libs/proto/test/calculator.cpp", "max_issues_repo_name": "sdfict/boost-doc-zh", "max_issues_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "libs/proto/test/calculator.cpp", "max_forks_repo_name": "sdfict/boost-doc-zh", "max_forks_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 139.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T20:09:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T15:21:16.000Z", "avg_line_length": 26.8636363636, "max_line_length": 91, "alphanum_fraction": 0.592893401, "num_tokens": 704}
|
[STATEMENT]
lemma mk_minsky_add1:
assumes "v \<noteq> w"
shows "mk_minsky (\<lambda>vs vs'. vs' = (\<lambda>x. if x = v then 0 else if x = w then vs v + vs w else vs x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mk_minsky (\<lambda>vs vs'. vs' = (\<lambda>x. if x = v then 0 else if x = w then vs v + vs w else vs x))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
v \<noteq> w
goal (1 subgoal):
1. mk_minsky (\<lambda>vs vs'. vs' = (\<lambda>x. if x = v then 0 else if x = w then vs v + vs w else vs x))
[PROOF STEP]
by (intro mk_minsky_loop[where v = v, OF \<comment> \<open> while v[v]$--$: \<close>
mk_minsky_inc[of w]]) auto \<comment> \<open> v[w]++ \<close>
|
{"llama_tokens": 294, "file": "Minsky_Machines_Minsky", "length": 2}
|
#ifndef INCLUDE_ASLAM_BACKEND_COMMON_HPP_
#define INCLUDE_ASLAM_BACKEND_COMMON_HPP_
#include <Eigen/Core>
#include <sm/timing/Timer.hpp>
// Branch-prediction hints: map to __builtin_expect on GCC-compatible
// compilers, and to plain pass-through everywhere else.
#if !defined(LIKELY) || !defined(UNLIKELY)
#if defined(__GNUC__) || defined(__GNUG__) /* GNU GCC/G++ */
#define LIKELY(x) __builtin_expect (!!(x), 1)
#define UNLIKELY(x) __builtin_expect (!!(x), 0)
#else
#define LIKELY(x) x
#define UNLIKELY(x) x
#endif
#endif
namespace aslam {
namespace backend {
// Timer is a real timer only when timing support is compiled in;
// otherwise it is a no-op dummy with the same interface.
#ifdef aslam_backend_ENABLE_TIMING
typedef sm::timing::Timer Timer;
#else
typedef sm::timing::DummyTimer Timer;
#endif
// Dynamically-sized Eigen column/row vectors used throughout the backend.
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> ColumnVectorType;
typedef Eigen::Matrix<double, 1, Eigen::Dynamic> RowVectorType;
}
}
#endif /* INCLUDE_ASLAM_BACKEND_COMMON_HPP_ */
|
{"hexsha": "3a553ee9b16a440bece880503b935a0517b543d0", "size": 782, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "aslam_backend/include/aslam/backend/util/CommonDefinitions.hpp", "max_stars_repo_name": "ethz-asl/aslam_optimizer", "max_stars_repo_head_hexsha": "8e9dd18f9f0d8af461e88e108a3beda2003daf11", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 33.0, "max_stars_repo_stars_event_min_datetime": "2017-04-26T13:30:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T01:52:22.000Z", "max_issues_repo_path": "aslam_backend/include/aslam/backend/util/CommonDefinitions.hpp", "max_issues_repo_name": "ethz-asl/aslam_optimizer", "max_issues_repo_head_hexsha": "8e9dd18f9f0d8af461e88e108a3beda2003daf11", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15.0, "max_issues_repo_issues_event_min_datetime": "2017-02-14T16:02:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-12T06:07:22.000Z", "max_forks_repo_path": "aslam_backend/include/aslam/backend/util/CommonDefinitions.hpp", "max_forks_repo_name": "ethz-asl/aslam_optimizer", "max_forks_repo_head_hexsha": "8e9dd18f9f0d8af461e88e108a3beda2003daf11", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2017-06-28T04:17:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T04:58:36.000Z", "avg_line_length": 23.696969697, "max_line_length": 68, "alphanum_fraction": 0.7173913043, "num_tokens": 221}
|
#coding=utf-8
import pandas as pd
from sklearn.metrics import log_loss, roc_auc_score
from sklearn .model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OneHotEncoder,LabelBinarizer
import warnings
from deepctr.models import DeepFM,DeepFMMTL
from deepctr.inputs import SparseFeat, DenseFeat, get_feature_names
import scipy as sp
from keras.utils import np_utils
import numpy as np
import matplotlib.pyplot as plt
def llfun(act, pred, idx):
    """Mean log-loss (binary cross entropy) of predictions at positions `idx`.

    Args:
        act: array of 0/1 ground-truth labels
        pred: array of predicted probabilities
        idx: indices (or boolean mask) selecting the positions to score

    Returns:
        The mean negative log-likelihood over the selected positions.
    """
    epsilon = 1e-15
    # clip predictions away from 0/1 so the logarithms stay finite;
    # use numpy directly -- the sp.maximum/sp.minimum/sp.log/sp.subtract
    # numpy aliases were removed from modern scipy
    pred = np.maximum(epsilon, pred[idx])
    pred = np.minimum(1 - epsilon, pred)
    ll = sum(act[idx] * np.log(pred) + np.subtract(1, act[idx]) * np.log(np.subtract(1, pred)))
    ll = ll * -1.0 / len(act[idx])
    return ll
def training_vis(hist,file_name):
    # Plot training/validation loss and accuracy curves from a Keras History
    # object (multi-task model: only the output2 head is plotted; the output1
    # lines are left commented out, matching the loss_weights of 0/1 used in
    # training).
    # NOTE(review): file_name is currently unused -- the figure is only shown,
    # never saved. TODO confirm whether saving was intended.
    loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    # acc = np.sum([hist.history['output1_acc'],hist.history['output2_acc']], axis = 0)
    # val_acc = np.sum([hist.history['val_output1_acc'],hist.history['val_output2_acc']], axis=0)
    acc = hist.history['output2_acc']
    val_acc = hist.history['val_output2_acc']
    # output1_loss = hist.history['output1_loss']
    output2_loss = hist.history['output2_loss']
    # val_output1_loss = hist.history['val_output1_loss']
    val_output2_loss = hist.history['val_output2_loss']
    # output1_acc = hist.history['output1_acc']
    output2_acc = hist.history['output2_acc']
    # val_output1_acc = hist.history['val_output1_acc']
    val_output2_acc = hist.history['val_output2_acc']
    # make a figure
    fig = plt.figure(figsize=(12,6))
    # subplot loss
    ax1 = fig.add_subplot(121)
    ax1.plot(loss,label='train_loss')
    ax1.plot(val_loss,label='val_loss')
    # ax1.plot(output1_loss,label='output1.loss')
    ax1.plot(output2_loss, label='output2.loss')
    # ax1.plot(val_output1_loss, label='val_output1.loss')
    ax1.plot(val_output2_loss, label='val_output2.loss')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Loss')
    ax1.set_title('Loss on Training and Validation Data')
    ax1.legend()
    # subplot acc
    ax2 = fig.add_subplot(122)
    ax2.plot(acc,label='train_acc')
    ax2.plot(val_acc,label='val_acc')
    # ax2.plot(val_output1_acc,label = 'val_output1_acc')
    ax2.plot(val_output2_acc, label='val_output2_acc')
    # ax2.plot(output1_acc,label='output1_acc')
    ax2.plot(output2_acc, label='output2_acc')
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('Accuracy')
    ax2.set_title('Accuracy on Training and Validation Data')
    ax2.legend()
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    use_plot = True
    warnings.filterwarnings("ignore")
    # 1. load the survey data and assemble the sparse/dense feature lists
    data = pd.read_csv('./test1.csv')
    sparse_features = ['Sparse' + str(i) for i in range(1, 348)]
    chinese_features = ['year', 'provname', 'urbcode', 'birthyear', 'marriage', 'houseowner', 'bathroom', 'familyincm',
                        'expenditure', 'worktype', 'industry']
    sparse_features = sparse_features + chinese_features
    dense_features1 = ['sex', 'eduyr','income', 'earnlvl', 'hauslvl', 'birthyear_1', 'urbcode_1', 'sex_1', 'marriage_1', 'houseowner_1', 'bathroom_1', 'education_1' ,'familyincm_1' ,'expenditure_1', 'worktype_1', 'industry_1', 'townincm', 'dispincm', 'workavgincm', 'villmean'
    ]
    # scale dense features into [0, 1]
    mms = MinMaxScaler(feature_range=(0, 1))
    # lb = LabelBinarizer()
    dense_features = dense_features1
    data[dense_features] = mms.fit_transform(data[dense_features])
    # two prediction targets for the multi-task model
    target_list = ['education','faminlvl']
    # NOTE(review): this locals() loop duplicates the explicit assignments
    # just below it and has no further effect
    for ii in range( 1,len(target_list)+1):
        name = 'target' + str(ii)
        locals() ['target' + str(ii) ]= target_list[ii - 1]
    target1 = 'education'
    target2 = 'faminlvl'
    # number of classes per task, passed to the model as label_number
    label_feature_number1 = len(data[target1].unique())
    label_feature_number2 = len(data[target2].unique())
    label_number = []
    label_number.append(label_feature_number1)
    label_number.append(label_feature_number2)
    Y1 = data[target1].values
    Y2 = data[target2].values
    encoder = LabelEncoder()
    encoded_Y1 = encoder.fit_transform(Y1)
    encoded_Y2 = encoder.fit_transform(Y2)
    # convert integers to dummy variables (one hot encoding)
    dummy_target1 = np_utils.to_categorical(encoded_Y1)
    dummy_target2 = np_utils.to_categorical(encoded_Y2)
    # data[target] = lb.fit_transform(data[target])
    # 2.count #unique features for each sparse field,and record dense feature field name
    fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique())
                           for feat in sparse_features] + [DenseFeat(feat, 1,)
                          for feat in dense_features]
    dnn_feature_columns = fixlen_feature_columns
    linear_feature_columns = fixlen_feature_columns
    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
    # 3.generate input data for model
    train, test, target1_train, target1_test,target2_train,target2_test = train_test_split(data, dummy_target1,dummy_target2, test_size=0.4, random_state=0)
    # train, test = train_test_split(data, test_size=0.2)
    train_model_input = {name:train[name] for name in feature_names}
    test_model_input = {name:test[name] for name in feature_names}
    # print(type(train_model_input))
    # 4.Define Model,train,predict and evaluate
    model = DeepFMMTL(linear_feature_columns, dnn_feature_columns,label_number = label_number)
    # compile(self, optimizer, loss, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None,
    #         target_tensors=None)
    # model.compile(optimizer='adam',loss={'output1': 'sparse_categorical_crossentropy', 'output2': 'sparse_categorical_crossentropy'}, \
    #               loss_weights={'output1': 1, 'output2': 1},metrics=['acc'])
    # loss_weights 0/1: only the second task (faminlvl) contributes to training
    model.compile(optimizer='adam',
                  loss={'output1': 'categorical_crossentropy', 'output2': 'categorical_crossentropy'}, \
                  loss_weights={'output1': 0, 'output2': 1}, metrics=['accuracy'])
    # model.compile("adam", "binary_crossentropy",
    #               metrics=['binary_crossentropy'], )
    # record = LossHistory()
    history = model.fit(train_model_input,
                        y=[target1_train, target2_train],
                        batch_size = 512 , epochs=300, steps_per_epoch=4,validation_split=0.2)
    # pred_ans = model.predict(test_model_input, batch_size=256)
    score = model.evaluate(x=test_model_input,
                           y=[target1_test, target2_test], steps=64)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    training_vis(history,file_name='img_output')
    # record.loss_plot('epoch')
    # print("test LogLoss", round(log_loss(test[target1].values, pred_ans), 4))
    # acc_score1 = []
    # acc_score2 = []
    # acc_scores = []
    # for index in range(0, len(pred_ans)):
    #     result1 = llfun(target1_test, pred_ans[0], index)
    #     result2 = llfun(target2_test, pred_ans[1], index)
    #     acc_score1.append(result1)
    #     acc_score2.append(result2)
    #     acc_scores.append(result1)
    #     acc_scores.append(result2)
    # print("task1 loss:", int(sum(acc_score1) / len(acc_score1)))
    # print("task2 loss",int(sum(acc_score2) / len(acc_score2)))
    # print("Total loss", int(sum(acc_scores) / len(acc_scores)))
    # print("test LogLoss", round(log_loss(test[target1].values, pred_ans), 4))
|
{"hexsha": "4f88c9bad92bc8b0f91942e6ffaa18747440fd18", "size": 7408, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/run_duorenwu_Deepfm.py", "max_stars_repo_name": "lora-chen/Deepfm", "max_stars_repo_head_hexsha": "5ccb93ec918d1b14fffba316c428fb8d23e6228e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-07T09:12:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-07T09:12:45.000Z", "max_issues_repo_path": "examples/run_duorenwu_Deepfm.py", "max_issues_repo_name": "lora-chen/Deepfm", "max_issues_repo_head_hexsha": "5ccb93ec918d1b14fffba316c428fb8d23e6228e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-12-16T22:22:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:37:34.000Z", "max_forks_repo_path": "examples/run_duorenwu_Deepfm.py", "max_forks_repo_name": "lora-chen/Deepfm", "max_forks_repo_head_hexsha": "5ccb93ec918d1b14fffba316c428fb8d23e6228e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-07T09:12:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-07T09:12:21.000Z", "avg_line_length": 40.2608695652, "max_line_length": 276, "alphanum_fraction": 0.682100432, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1948}
|
(* Title: Nominal2_Base
Authors: Christian Urban, Brian Huffman, Cezary Kaliszyk
Basic definitions and lemma infrastructure for
Nominal Isabelle.
*)
theory Nominal2_Base
imports Main
"~~/src/HOL/Library/Infinite_Set"
"~~/src/HOL/Quotient_Examples/FSet"
"GPerm"
"~~/src/HOL/Library/List_lexord"
"~~/src/HOL/Library/Product_ord"
"~~/src/HOL/Library/Efficient_Nat"
"~~/src/HOL/Library/Char_ord"
"~~/src/HOL/Library/Code_Char_chr"
"~~/src/HOL/Library/Code_Char_ord"
keywords
"atom_decl" "equivariance" :: thy_decl
uses ("nominal_basics.ML")
("nominal_thmdecls.ML")
("nominal_permeq.ML")
("nominal_library.ML")
("nominal_atoms.ML")
("nominal_eqvt.ML")
begin
section {* Atoms and Sorts *}
text {* A simple implementation for atom_sorts is strings. *}
(* types atom_sort = string *)
text {* To deal with Church-like binding we use trees of
strings as sorts. *}
(* An atom is a sort paired with a natural-number index, so every
sort has countably many atoms. *)
datatype atom_sort = Sort "string" "atom_sort list"
datatype atom = Atom atom_sort nat
text {* Basic projection function. *}
primrec
sort_of :: "atom \<Rightarrow> atom_sort"
where
"sort_of (Atom s n) = s"
primrec
nat_of :: "atom \<Rightarrow> nat"
where
"nat_of (Atom s n) = n"
text {* There are infinitely many atoms of each sort. *}
lemma INFM_sort_of_eq:
shows "INFM a. sort_of a = s"
proof -
have "INFM i. sort_of (Atom s i) = s" by simp
moreover have "inj (Atom s)" by (simp add: inj_on_def)
ultimately show "INFM a. sort_of a = s" by (rule INFM_inj)
qed
lemma infinite_sort_of_eq:
shows "infinite {a. sort_of a = s}"
using INFM_sort_of_eq unfolding INFM_iff_infinite .
lemma atom_infinite [simp]:
shows "infinite (UNIV :: atom set)"
using subset_UNIV infinite_sort_of_eq
by (rule infinite_super)
(* Fresh-atom choice: any finite set of atoms can be avoided while
staying within a given sort. *)
lemma obtain_atom:
fixes X :: "atom set"
assumes X: "finite X"
obtains a where "a \<notin> X" "sort_of a = s"
proof -
from X have "MOST a. a \<notin> X"
unfolding MOST_iff_cofinite by simp
with INFM_sort_of_eq
have "INFM a. sort_of a = s \<and> a \<notin> X"
by (rule INFM_conjI)
then obtain a where "a \<notin> X" "sort_of a = s"
by (auto elim: INFM_E)
then show ?thesis ..
qed
lemma atom_components_eq_iff:
fixes a b :: atom
shows "a = b \<longleftrightarrow> sort_of a = sort_of b \<and> nat_of a = nat_of b"
by (induct a, induct b, simp)
section {* Sort-Respecting Permutations *}
(* A generalised permutation is sort-respecting when it maps every
atom to an atom of the same sort; the perm type carves these out
of the gperm type. *)
definition "sort_respecting p \<longleftrightarrow> (\<forall>a. sort_of (gpermute p a) = sort_of a)"
lemma sort_respecting_0[simp]:
"sort_respecting (0\<Colon>atom gperm)"
by (simp add: sort_respecting_def)
typedef (open) perm = "{p::atom gperm. sort_respecting p}"
by (auto intro: exI[of _ "0"])
setup_lifting type_definition_perm
lemma perm_eq_rep:
"p = q \<longleftrightarrow> Rep_perm p = Rep_perm q"
by (simp add: Rep_perm_inject)
(* mk_perm coerces an arbitrary gperm into perm, falling back to the
identity when the argument is not sort-respecting. *)
lift_definition mk_perm :: "atom gperm \<Rightarrow> perm" is
"\<lambda>p. if sort_respecting p then p else 0" by simp
lemma Rep_perm_mk_perm [simp]:
"Rep_perm (mk_perm p) = (if sort_respecting p then p else 0)"
by (simp add: mk_perm_def Abs_perm_inverse)
instance perm :: size ..
subsection {* Permutations form a (multiplicative) group *}
instantiation perm :: group_add
begin
lift_definition zero_perm :: "perm" is "0" by simp
lift_definition uminus_perm :: "perm \<Rightarrow> perm" is "uminus"
unfolding sort_respecting_def
by transfer (auto, metis perm_apply_minus)
lift_definition plus_perm :: "perm \<Rightarrow> perm \<Rightarrow> perm" is "plus"
unfolding sort_respecting_def
by transfer (simp add: perm_add_apply)
definition "(p :: perm) - q = p + - q"
(* Code-generator equations exposing the underlying representation. *)
lemma Rep_perm_0 [simp, code abstract]:
"Rep_perm 0 = 0"
by (metis (mono_tags) zero_perm.rep_eq)
lemma Rep_perm_uminus [simp, code abstract]:
"Rep_perm (- p) = - (Rep_perm p)"
by (metis uminus_perm.rep_eq)
lemma Rep_perm_add [simp, code abstract]:
"Rep_perm (p + q) = (Rep_perm p) + (Rep_perm q)"
by (simp add: plus_perm.rep_eq)
instance
apply default
unfolding minus_perm_def
by (transfer, simp add: add_assoc)+
end
section {* Implementation of swappings *}
(* swap a b is the transposition of atoms a and b; it is defined as
the identity when a and b have different sorts. *)
lift_definition swap :: "atom \<Rightarrow> atom \<Rightarrow> perm" ("'(_ \<rightleftharpoons> _')")
is "(\<lambda>a b. (if sort_of a = sort_of b then mk_perm (gswap a b) else 0))" .
lemma sort_respecting_swap [simp]:
"sort_of a = sort_of b \<Longrightarrow> sort_respecting (gswap a b)"
unfolding sort_respecting_def
by transfer auto
lemma Rep_swap [simp, code abstract]:
"Rep_perm (swap a b) = (if sort_of a = sort_of b then gswap a b else 0)"
by (simp add: swap_def)
lemma swap_different_sorts [simp]:
"sort_of a \<noteq> sort_of b \<Longrightarrow> (a \<rightleftharpoons> b) = 0"
by (simp add: perm_eq_rep)
lemma swap_cancel:
shows "(a \<rightleftharpoons> b) + (a \<rightleftharpoons> b) = 0"
and "(a \<rightleftharpoons> b) + (b \<rightleftharpoons> a) = 0"
by (simp_all add: perm_eq_rep)
lemma swap_self [simp]:
"(a \<rightleftharpoons> a) = 0"
by (simp add: perm_eq_rep)
lemma minus_swap [simp]:
"- (a \<rightleftharpoons> b) = (a \<rightleftharpoons> b)"
by (simp add: perm_eq_rep)
lemma swap_commute:
"(a \<rightleftharpoons> b) = (b \<rightleftharpoons> a)"
by (simp add: perm_eq_rep swap_commute)
lemma swap_triple:
assumes "a \<noteq> b" and "c \<noteq> b"
assumes "sort_of a = sort_of b" "sort_of b = sort_of c"
shows "(a \<rightleftharpoons> c) + (b \<rightleftharpoons> c) + (a \<rightleftharpoons> c) = (a \<rightleftharpoons> b)"
using assms by (simp add: perm_eq_rep swap_triple)
section {* Permutation Types *}
text {*
Infix syntax for @{text permute} has higher precedence than
addition, but lower than unary minus.
*}
(* The pt class: types carrying a permutation action compatible with
the additive group structure of perm. *)
class pt =
fixes permute :: "perm \<Rightarrow> 'a \<Rightarrow> 'a" ("_ \<bullet> _" [76, 75] 75)
assumes permute_zero [simp]: "0 \<bullet> x = x"
assumes permute_plus [simp]: "(p + q) \<bullet> x = p \<bullet> (q \<bullet> x)"
begin
lemma permute_diff [simp]:
shows "(p - q) \<bullet> x = p \<bullet> - q \<bullet> x"
unfolding diff_minus by simp
lemma permute_minus_cancel [simp]:
shows "p \<bullet> - p \<bullet> x = x"
and "- p \<bullet> p \<bullet> x = x"
unfolding permute_plus [symmetric] by simp_all
lemma permute_swap_cancel [simp]:
shows "(a \<rightleftharpoons> b) \<bullet> (a \<rightleftharpoons> b) \<bullet> x = x"
unfolding permute_plus [symmetric]
by (simp add: swap_cancel)
lemma permute_swap_cancel2 [simp]:
shows "(a \<rightleftharpoons> b) \<bullet> (b \<rightleftharpoons> a) \<bullet> x = x"
unfolding permute_plus [symmetric]
by (simp add: swap_commute)
(* Every permutation acts bijectively on a pt type. *)
lemma inj_permute [simp]:
shows "inj (permute p)"
by (rule inj_on_inverseI)
(rule permute_minus_cancel)
lemma surj_permute [simp]:
shows "surj (permute p)"
by (rule surjI, rule permute_minus_cancel)
lemma bij_permute [simp]:
shows "bij (permute p)"
by (rule bijI [OF inj_permute surj_permute])
lemma inv_permute:
shows "inv (permute p) = permute (- p)"
by (rule inv_equality) (simp_all)
lemma permute_minus:
shows "permute (- p) = inv (permute p)"
by (simp add: inv_permute)
lemma permute_eq_iff [simp]:
shows "p \<bullet> x = p \<bullet> y \<longleftrightarrow> x = y"
by (rule inj_permute [THEN inj_eq])
end
subsection {* Permutations for atoms *}
instantiation atom :: pt
begin
definition
"p \<bullet> a = gpermute (Rep_perm p) a"
instance
by default (simp_all add: permute_atom_def)
end
lemma sort_of_permute [simp]:
shows "sort_of (p \<bullet> a) = sort_of a"
by (metis Rep_perm mem_Collect_eq sort_respecting_def permute_atom_def)
lemma swap_atom:
shows "(a \<rightleftharpoons> b) \<bullet> c =
(if sort_of a = sort_of b
then (if c = a then b else if c = b then a else c) else c)"
by (auto simp add: permute_atom_def)
lemma swap_atom_simps [simp]:
"sort_of a = sort_of b \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> a = b"
"sort_of a = sort_of b \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> b = a"
"c \<noteq> a \<Longrightarrow> c \<noteq> b \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> c = c"
unfolding swap_atom by simp_all
lemma perm_eq_iff:
fixes p q :: "perm"
shows "p = q \<longleftrightarrow> (\<forall>a::atom. p \<bullet> a = q \<bullet> a)"
unfolding permute_atom_def perm_eq_rep
by (simp add: gperm_eq)
subsection {* Permutations for permutations *}
(* perm acts on itself by conjugation: p acting on q is p + q - p. *)
instantiation perm :: pt
begin
definition
"p \<bullet> q = p + q - p"
instance
by default
(simp_all add: permute_perm_def diff_minus minus_add add_assoc)
end
lemma permute_self:
shows "p \<bullet> p = p"
unfolding permute_perm_def
by (simp add: diff_minus add_assoc)
lemma pemute_minus_self:
shows "- p \<bullet> p = p"
unfolding permute_perm_def
by (simp add: diff_minus add_assoc)
subsection {* Permutations for functions *}
(* Functions are permuted by conjugation: apply the inverse
permutation to the argument and the permutation to the result. *)
instantiation "fun" :: (pt, pt) pt
begin
definition
"p \<bullet> f = (\<lambda>x. p \<bullet> (f (- p \<bullet> x)))"
instance
by default
(simp_all add: permute_fun_def minus_add)
end
lemma permute_fun_app_eq:
shows "p \<bullet> (f x) = (p \<bullet> f) (p \<bullet> x)"
unfolding permute_fun_def by simp
subsection {* Permutations for booleans *}
(* Booleans carry the trivial (identity) permutation action. *)
instantiation bool :: pt
begin
definition "p \<bullet> (b::bool) = b"
instance
by (default)
(simp_all add: permute_bool_def)
end
lemma permute_boolE:
fixes P::"bool"
shows "p \<bullet> P \<Longrightarrow> P"
by (simp add: permute_bool_def)
lemma permute_boolI:
fixes P::"bool"
shows "P \<Longrightarrow> p \<bullet> P"
by(simp add: permute_bool_def)
subsection {* Permutations for sets *}
instantiation "set" :: (pt) pt
begin
definition
"p \<bullet> X = {p \<bullet> x | x. x \<in> X}"
instance
apply default
apply (auto simp add: permute_set_def)
done
end
(* Alternative characterisations of the set action: as a comprehension
over inverses, as an image, and as a vimage. *)
lemma permute_set_eq:
shows "p \<bullet> X = {x. - p \<bullet> x \<in> X}"
unfolding permute_set_def
by (auto) (metis permute_minus_cancel(1))
lemma permute_set_eq_image:
shows "p \<bullet> X = permute p ` X"
unfolding permute_set_def by auto
lemma permute_set_eq_vimage:
shows "p \<bullet> X = permute (- p) -` X"
unfolding permute_set_eq vimage_def
by simp
lemma permute_finite [simp]:
shows "finite (p \<bullet> X) = finite X"
unfolding permute_set_eq_vimage
using bij_permute by (rule finite_vimage_iff)
(* How a swapping acts on a set, by membership of the swapped atoms. *)
lemma swap_set_not_in:
assumes a: "a \<notin> S" "b \<notin> S"
shows "(a \<rightleftharpoons> b) \<bullet> S = S"
unfolding permute_set_def
using a by (auto simp add: swap_atom)
lemma swap_set_in:
assumes a: "a \<in> S" "b \<notin> S" "sort_of a = sort_of b"
shows "(a \<rightleftharpoons> b) \<bullet> S \<noteq> S"
unfolding permute_set_def
using a by (auto simp add: swap_atom)
lemma swap_set_in_eq:
assumes a: "a \<in> S" "b \<notin> S" "sort_of a = sort_of b"
shows "(a \<rightleftharpoons> b) \<bullet> S = (S - {a}) \<union> {b}"
unfolding permute_set_def
using a by (auto simp add: swap_atom)
lemma swap_set_both_in:
assumes a: "a \<in> S" "b \<in> S"
shows "(a \<rightleftharpoons> b) \<bullet> S = S"
unfolding permute_set_def
using a by (auto simp add: swap_atom)
lemma mem_permute_iff:
shows "(p \<bullet> x) \<in> (p \<bullet> X) \<longleftrightarrow> x \<in> X"
unfolding permute_set_def
by auto
lemma empty_eqvt:
shows "p \<bullet> {} = {}"
unfolding permute_set_def
by (simp)
lemma insert_eqvt:
shows "p \<bullet> (insert x A) = insert (p \<bullet> x) (p \<bullet> A)"
unfolding permute_set_eq_image image_insert ..
subsection {* Permutations for @{typ unit} *}
instantiation unit :: pt
begin
definition "p \<bullet> (u::unit) = u"
instance
by (default) (simp_all add: permute_unit_def)
end
subsection {* Permutations for products *}
(* Products, sums, lists and options are permuted componentwise. *)
instantiation prod :: (pt, pt) pt
begin
primrec
permute_prod
where
Pair_eqvt: "p \<bullet> (x, y) = (p \<bullet> x, p \<bullet> y)"
instance
by default auto
end
subsection {* Permutations for sums *}
instantiation sum :: (pt, pt) pt
begin
primrec
permute_sum
where
Inl_eqvt: "p \<bullet> (Inl x) = Inl (p \<bullet> x)"
| Inr_eqvt: "p \<bullet> (Inr y) = Inr (p \<bullet> y)"
instance
by (default) (case_tac [!] x, simp_all)
end
subsection {* Permutations for @{typ "'a list"} *}
instantiation list :: (pt) pt
begin
primrec
permute_list
where
Nil_eqvt: "p \<bullet> [] = []"
| Cons_eqvt: "p \<bullet> (x # xs) = p \<bullet> x # p \<bullet> xs"
instance
by (default) (induct_tac [!] x, simp_all)
end
lemma set_eqvt:
shows "p \<bullet> (set xs) = set (p \<bullet> xs)"
by (induct xs) (simp_all add: empty_eqvt insert_eqvt)
subsection {* Permutations for @{typ "'a option"} *}
instantiation option :: (pt) pt
begin
primrec
permute_option
where
None_eqvt: "p \<bullet> None = None"
| Some_eqvt: "p \<bullet> (Some x) = Some (p \<bullet> x)"
instance
by (default) (induct_tac [!] x, simp_all)
end
subsection {* Permutations for @{typ "'a multiset"} *}
instantiation multiset :: (pt) pt
begin
definition
"p \<bullet> M = {# p \<bullet> x. x :# M #}"
instance
proof
fix M :: "'a multiset" and p q :: "perm"
show "0 \<bullet> M = M"
unfolding permute_multiset_def
by (induct_tac M) (simp_all)
show "(p + q) \<bullet> M = p \<bullet> q \<bullet> M"
unfolding permute_multiset_def
by (induct_tac M) (simp_all)
qed
end
lemma permute_multiset [simp]:
fixes M N::"('a::pt) multiset"
shows "(p \<bullet> {#}) = ({#} ::('a::pt) multiset)"
and "(p \<bullet> {# x #}) = {# p \<bullet> x #}"
and "(p \<bullet> (M + N)) = (p \<bullet> M) + (p \<bullet> N)"
unfolding permute_multiset_def
by (simp_all)
subsection {* Permutations for @{typ "'a fset"} *}
(* The fset action is lifted from the list action via the quotient. *)
instantiation fset :: (pt) pt
begin
quotient_definition
"permute_fset :: perm \<Rightarrow> 'a fset \<Rightarrow> 'a fset"
is
"permute :: perm \<Rightarrow> 'a list \<Rightarrow> 'a list"
by (simp add: set_eqvt[symmetric])
instance
proof
fix x :: "'a fset" and p q :: "perm"
have lst: "\<And>l :: 'a list. 0 \<bullet> l = l" by simp
show "0 \<bullet> x = x" by (lifting lst)
have lst: "\<And>p q :: perm. \<And>x :: 'a list. (p + q) \<bullet> x = p \<bullet> q \<bullet> x" by simp
show "(p + q) \<bullet> x = p \<bullet> q \<bullet> x" by (lifting lst)
qed
end
lemma permute_fset [simp]:
fixes S::"('a::pt) fset"
shows "(p \<bullet> {||}) = ({||} ::('a::pt) fset)"
and "(p \<bullet> insert_fset x S) = insert_fset (p \<bullet> x) (p \<bullet> S)"
by (lifting permute_list.simps)
lemma fset_eqvt:
shows "p \<bullet> (fset S) = fset (p \<bullet> S)"
by (lifting set_eqvt)
subsection {* Permutations for @{typ char}, @{typ nat}, and @{typ int} *}
(* These base types all carry the trivial action. *)
instantiation char :: pt
begin
definition "p \<bullet> (c::char) = c"
instance
by (default) (simp_all add: permute_char_def)
end
instantiation nat :: pt
begin
definition "p \<bullet> (n::nat) = n"
instance
by (default) (simp_all add: permute_nat_def)
end
instantiation int :: pt
begin
definition "p \<bullet> (i::int) = i"
instance
by (default) (simp_all add: permute_int_def)
end
section {* Pure types *}
text {* Pure types will have always empty support. *}
class pure = pt +
assumes permute_pure: "p \<bullet> x = x"
text {* Types @{typ unit} and @{typ bool} are pure. *}
instance unit :: pure
proof qed (rule permute_unit_def)
instance bool :: pure
proof qed (rule permute_bool_def)
text {* Other type constructors preserve purity. *}
instance "fun" :: (pure, pure) pure
by default (simp add: permute_fun_def permute_pure)
instance set :: (pure) pure
by default (simp add: permute_set_def permute_pure)
instance prod :: (pure, pure) pure
by default (induct_tac x, simp add: permute_pure)
instance sum :: (pure, pure) pure
by default (induct_tac x, simp_all add: permute_pure)
instance list :: (pure) pure
by default (induct_tac x, simp_all add: permute_pure)
instance option :: (pure) pure
by default (induct_tac x, simp_all add: permute_pure)
subsection {* Types @{typ char}, @{typ nat}, and @{typ int} *}
instance char :: pure
proof qed (rule permute_char_def)
instance nat :: pure
proof qed (rule permute_nat_def)
instance int :: pure
proof qed (rule permute_int_def)
section {* Infrastructure for Equivariance and Perm_simp *}
subsection {* Basic functions about permutations *}
use "nominal_basics.ML"
subsection {* Eqvt infrastructure *}
text {* Setup of the theorem attributes @{text eqvt} and @{text eqvt_raw} *}
use "nominal_thmdecls.ML"
setup "Nominal_ThmDecls.setup"
lemmas [eqvt] =
(* pt types *)
permute_prod.simps
permute_list.simps
permute_option.simps
permute_sum.simps
(* sets *)
empty_eqvt insert_eqvt set_eqvt
(* fsets *)
permute_fset fset_eqvt
(* multisets *)
permute_multiset
subsection {* perm_simp infrastructure *}
(* unpermute marks arguments whose permutation the perm_simp tactic
has already pushed inside; it is just application of the inverse. *)
definition
"unpermute p = permute (- p)"
lemma eqvt_apply:
fixes f :: "'a::pt \<Rightarrow> 'b::pt"
and x :: "'a::pt"
shows "p \<bullet> (f x) \<equiv> (p \<bullet> f) (p \<bullet> x)"
unfolding permute_fun_def by simp
lemma eqvt_lambda:
fixes f :: "'a::pt \<Rightarrow> 'b::pt"
shows "p \<bullet> f \<equiv> (\<lambda>x. p \<bullet> (f (unpermute p x)))"
unfolding permute_fun_def unpermute_def by simp
lemma eqvt_bound:
shows "p \<bullet> unpermute p x \<equiv> x"
unfolding unpermute_def by simp
text {* provides perm_simp methods *}
use "nominal_permeq.ML"
method_setup perm_simp =
{* Nominal_Permeq.args_parser >> Nominal_Permeq.perm_simp_meth *}
{* pushes permutations inside. *}
method_setup perm_strict_simp =
{* Nominal_Permeq.args_parser >> Nominal_Permeq.perm_strict_simp_meth *}
{* pushes permutations inside, raises an error if it cannot solve all permutations. *}
subsubsection {* Equivariance for permutations and swapping *}
lemma permute_eqvt:
shows "p \<bullet> (q \<bullet> x) = (p \<bullet> q) \<bullet> (p \<bullet> x)"
unfolding permute_perm_def by simp
(* the normal version of this lemma would cause loops *)
lemma permute_eqvt_raw [eqvt_raw]:
shows "p \<bullet> permute \<equiv> permute"
apply(simp add: fun_eq_iff permute_fun_def)
apply(subst permute_eqvt)
apply(simp)
done
lemma zero_perm_eqvt [eqvt]:
shows "p \<bullet> (0::perm) = 0"
unfolding permute_perm_def by simp
lemma add_perm_eqvt [eqvt]:
fixes p p1 p2 :: perm
shows "p \<bullet> (p1 + p2) = p \<bullet> p1 + p \<bullet> p2"
unfolding permute_perm_def
by (simp add: perm_eq_iff)
lemma swap_eqvt [eqvt]:
shows "p \<bullet> (a \<rightleftharpoons> b) = (p \<bullet> a \<rightleftharpoons> p \<bullet> b)"
unfolding permute_perm_def
by (auto simp add: swap_atom perm_eq_iff)
lemma uminus_eqvt [eqvt]:
fixes p q::"perm"
shows "p \<bullet> (- q) = - (p \<bullet> q)"
unfolding permute_perm_def
by (simp add: diff_minus minus_add add_assoc)
subsubsection {* Equivariance of Logical Operators *}
lemma eq_eqvt [eqvt]:
shows "p \<bullet> (x = y) \<longleftrightarrow> (p \<bullet> x) = (p \<bullet> y)"
unfolding permute_eq_iff permute_bool_def ..
lemma Not_eqvt [eqvt]:
shows "p \<bullet> (\<not> A) \<longleftrightarrow> \<not> (p \<bullet> A)"
by (simp add: permute_bool_def)
lemma conj_eqvt [eqvt]:
shows "p \<bullet> (A \<and> B) \<longleftrightarrow> (p \<bullet> A) \<and> (p \<bullet> B)"
by (simp add: permute_bool_def)
lemma imp_eqvt [eqvt]:
shows "p \<bullet> (A \<longrightarrow> B) \<longleftrightarrow> (p \<bullet> A) \<longrightarrow> (p \<bullet> B)"
by (simp add: permute_bool_def)
declare imp_eqvt[folded induct_implies_def, eqvt]
lemma all_eqvt [eqvt]:
shows "p \<bullet> (\<forall>x. P x) = (\<forall>x. (p \<bullet> P) x)"
unfolding All_def
by (perm_simp) (rule refl)
declare all_eqvt[folded induct_forall_def, eqvt]
lemma ex_eqvt [eqvt]:
shows "p \<bullet> (\<exists>x. P x) = (\<exists>x. (p \<bullet> P) x)"
unfolding Ex_def
by (perm_simp) (rule refl)
lemma ex1_eqvt [eqvt]:
shows "p \<bullet> (\<exists>!x. P x) = (\<exists>!x. (p \<bullet> P) x)"
unfolding Ex1_def
by (perm_simp) (rule refl)
lemma if_eqvt [eqvt]:
shows "p \<bullet> (if b then x else y) = (if p \<bullet> b then p \<bullet> x else p \<bullet> y)"
by (simp add: permute_fun_def permute_bool_def)
lemma Let_eqvt [eqvt]:
shows "p \<bullet> Let x y = Let (p \<bullet> x) (p \<bullet> y)"
unfolding Let_def permute_fun_app_eq ..
lemma True_eqvt [eqvt]:
shows "p \<bullet> True = True"
unfolding permute_bool_def ..
lemma False_eqvt [eqvt]:
shows "p \<bullet> False = False"
unfolding permute_bool_def ..
lemma disj_eqvt [eqvt]:
shows "p \<bullet> (A \<or> B) \<longleftrightarrow> (p \<bullet> A) \<or> (p \<bullet> B)"
by (simp add: permute_bool_def)
(* The "2" variants push the permutation onto the bound variable
rather than onto the predicate. *)
lemma all_eqvt2:
shows "p \<bullet> (\<forall>x. P x) = (\<forall>x. p \<bullet> P (- p \<bullet> x))"
by (perm_simp add: permute_minus_cancel) (rule refl)
lemma ex_eqvt2:
shows "p \<bullet> (\<exists>x. P x) = (\<exists>x. p \<bullet> P (- p \<bullet> x))"
by (perm_simp add: permute_minus_cancel) (rule refl)
lemma ex1_eqvt2:
shows "p \<bullet> (\<exists>!x. P x) = (\<exists>!x. p \<bullet> P (- p \<bullet> x))"
by (perm_simp add: permute_minus_cancel) (rule refl)
lemma the_eqvt:
assumes unique: "\<exists>!x. P x"
shows "(p \<bullet> (THE x. P x)) = (THE x. (p \<bullet> P) x)"
apply(rule the1_equality [symmetric])
apply(rule_tac p="-p" in permute_boolE)
apply(perm_simp add: permute_minus_cancel)
apply(rule unique)
apply(rule_tac p="-p" in permute_boolE)
apply(perm_simp add: permute_minus_cancel)
apply(rule theI'[OF unique])
done
lemma the_eqvt2:
assumes unique: "\<exists>!x. P x"
shows "(p \<bullet> (THE x. P x)) = (THE x. p \<bullet> P (- p \<bullet> x))"
apply(rule the1_equality [symmetric])
apply(simp add: ex1_eqvt2[symmetric])
apply(simp add: permute_bool_def unique)
apply(simp add: permute_bool_def)
apply(rule theI'[OF unique])
done
subsubsection {* Equivariance of Set operators *}
lemma mem_eqvt [eqvt]:
shows "p \<bullet> (x \<in> A) \<longleftrightarrow> (p \<bullet> x) \<in> (p \<bullet> A)"
unfolding permute_bool_def permute_set_def
by (auto)
lemma Collect_eqvt [eqvt]:
shows "p \<bullet> {x. P x} = {x. (p \<bullet> P) x}"
unfolding permute_set_eq permute_fun_def
by (auto simp add: permute_bool_def)
lemma inter_eqvt [eqvt]:
shows "p \<bullet> (A \<inter> B) = (p \<bullet> A) \<inter> (p \<bullet> B)"
unfolding Int_def
by (perm_simp) (rule refl)
lemma Bex_eqvt [eqvt]:
shows "p \<bullet> (\<exists>x \<in> S. P x) = (\<exists>x \<in> (p \<bullet> S). (p \<bullet> P) x)"
unfolding Bex_def
by (perm_simp) (rule refl)
lemma Ball_eqvt [eqvt]:
shows "p \<bullet> (\<forall>x \<in> S. P x) = (\<forall>x \<in> (p \<bullet> S). (p \<bullet> P) x)"
unfolding Ball_def
by (perm_simp) (rule refl)
lemma image_eqvt [eqvt]:
shows "p \<bullet> (f ` A) = (p \<bullet> f) ` (p \<bullet> A)"
unfolding image_def
by (perm_simp) (rule refl)
lemma Image_eqvt [eqvt]:
shows "p \<bullet> (R `` A) = (p \<bullet> R) `` (p \<bullet> A)"
unfolding Image_def
by (perm_simp) (rule refl)
lemma UNIV_eqvt [eqvt]:
shows "p \<bullet> UNIV = UNIV"
unfolding UNIV_def
by (perm_simp) (rule refl)
lemma union_eqvt [eqvt]:
shows "p \<bullet> (A \<union> B) = (p \<bullet> A) \<union> (p \<bullet> B)"
unfolding Un_def
by (perm_simp) (rule refl)
lemma Diff_eqvt [eqvt]:
fixes A B :: "'a::pt set"
shows "p \<bullet> (A - B) = (p \<bullet> A) - (p \<bullet> B)"
unfolding set_diff_eq
by (perm_simp) (rule refl)
lemma Compl_eqvt [eqvt]:
fixes A :: "'a::pt set"
shows "p \<bullet> (- A) = - (p \<bullet> A)"
unfolding Compl_eq_Diff_UNIV
by (perm_simp) (rule refl)
lemma subset_eqvt [eqvt]:
shows "p \<bullet> (S \<subseteq> T) \<longleftrightarrow> (p \<bullet> S) \<subseteq> (p \<bullet> T)"
unfolding subset_eq
by (perm_simp) (rule refl)
lemma psubset_eqvt [eqvt]:
shows "p \<bullet> (S \<subset> T) \<longleftrightarrow> (p \<bullet> S) \<subset> (p \<bullet> T)"
unfolding psubset_eq
by (perm_simp) (rule refl)
lemma vimage_eqvt [eqvt]:
shows "p \<bullet> (f -` A) = (p \<bullet> f) -` (p \<bullet> A)"
unfolding vimage_def
by (perm_simp) (rule refl)
lemma Union_eqvt [eqvt]:
shows "p \<bullet> (\<Union> S) = \<Union> (p \<bullet> S)"
unfolding Union_eq
by (perm_simp) (rule refl)
lemma Inter_eqvt [eqvt]:
shows "p \<bullet> (\<Inter> S) = \<Inter> (p \<bullet> S)"
unfolding Inter_eq
by (perm_simp) (rule refl)
lemma foldr_eqvt[eqvt]:
"p \<bullet> foldr a b c = foldr (p \<bullet> a) (p \<bullet> b) (p \<bullet> c)"
apply (induct b)
apply simp_all
apply (perm_simp)
apply simp
done
(* FIXME: eqvt attribute *)
lemma Sigma_eqvt:
shows "(p \<bullet> (X \<times> Y)) = (p \<bullet> X) \<times> (p \<bullet> Y)"
unfolding Sigma_def
unfolding SUP_def
by (perm_simp) (rule refl)
text {*
In order to prove that lfp is equivariant we need two
auxiliary classes which specify that (op <=) and
Inf are equivariant. Instances for bool and fun are
given.
*}
class le_eqvt = order +
assumes le_eqvt [eqvt]: "p \<bullet> (x \<le> y) = ((p \<bullet> x) \<le> (p \<bullet> (y::('a::{pt, order}))))"
class inf_eqvt = complete_lattice +
assumes inf_eqvt [eqvt]: "p \<bullet> (Inf X) = Inf (p \<bullet> (X::('a::{pt, Inf}) set))"
instantiation bool :: le_eqvt
begin
instance
apply(default)
apply perm_simp
apply(rule refl)
done
end
instantiation "fun" :: (pt, le_eqvt) le_eqvt
begin
instance
apply(default)
unfolding le_fun_def
apply(perm_simp)
apply(rule refl)
done
end
instantiation bool :: inf_eqvt
begin
instance
apply(default)
apply(perm_simp)
apply(rule refl)
done
end
instantiation "fun" :: (pt, inf_eqvt) inf_eqvt
begin
instance
apply(default)
unfolding Inf_fun_def INF_def
apply(perm_simp)
apply(rule refl)
done
end
lemma lfp_eqvt [eqvt]:
fixes F::"('a \<Rightarrow> 'b) \<Rightarrow> ('a::pt \<Rightarrow> 'b::{inf_eqvt, le_eqvt})"
shows "p \<bullet> (lfp F) = lfp (p \<bullet> F)"
unfolding lfp_def
by (perm_simp) (rule refl)
lemma finite_eqvt [eqvt]:
shows "p \<bullet> finite A = finite (p \<bullet> A)"
unfolding finite_def
by (perm_simp) (rule refl)
subsubsection {* Equivariance for product operations *}
lemma fst_eqvt [eqvt]:
shows "p \<bullet> (fst x) = fst (p \<bullet> x)"
by (cases x) simp
lemma snd_eqvt [eqvt]:
shows "p \<bullet> (snd x) = snd (p \<bullet> x)"
by (cases x) simp
lemma split_eqvt [eqvt]:
shows "p \<bullet> (split P x) = split (p \<bullet> P) (p \<bullet> x)"
unfolding split_def
by (perm_simp) (rule refl)
subsubsection {* Equivariance for list operations *}
lemma append_eqvt [eqvt]:
shows "p \<bullet> (xs @ ys) = (p \<bullet> xs) @ (p \<bullet> ys)"
by (induct xs) auto
lemma rev_eqvt [eqvt]:
shows "p \<bullet> (rev xs) = rev (p \<bullet> xs)"
by (induct xs) (simp_all add: append_eqvt)
lemma map_eqvt [eqvt]:
shows "p \<bullet> (map f xs) = map (p \<bullet> f) (p \<bullet> xs)"
by (induct xs) (simp_all, simp only: permute_fun_app_eq)
lemma removeAll_eqvt [eqvt]:
shows "p \<bullet> (removeAll x xs) = removeAll (p \<bullet> x) (p \<bullet> xs)"
by (induct xs) (auto)
lemma filter_eqvt [eqvt]:
shows "p \<bullet> (filter f xs) = filter (p \<bullet> f) (p \<bullet> xs)"
apply(induct xs)
apply(simp)
apply(simp only: filter.simps permute_list.simps if_eqvt)
apply(simp only: permute_fun_app_eq)
done
lemma distinct_eqvt [eqvt]:
shows "p \<bullet> (distinct xs) = distinct (p \<bullet> xs)"
apply(induct xs)
apply(simp add: permute_bool_def)
apply(simp add: conj_eqvt Not_eqvt mem_eqvt set_eqvt)
done
lemma length_eqvt [eqvt]:
shows "p \<bullet> (length xs) = length (p \<bullet> xs)"
by (induct xs) (simp_all add: permute_pure)
subsubsection {* Equivariance for @{typ "'a option"} *}
lemma option_map_eqvt[eqvt]:
shows "p \<bullet> (Option.map f x) = Option.map (p \<bullet> f) (p \<bullet> x)"
by (cases x) (simp_all, simp add: permute_fun_app_eq)
subsubsection {* Equivariance for @{typ "'a fset"} *}
lemma in_fset_eqvt [eqvt]:
shows "(p \<bullet> (x |\<in>| S)) = ((p \<bullet> x) |\<in>| (p \<bullet> S))"
unfolding in_fset
by (perm_simp) (simp)
lemma union_fset_eqvt [eqvt]:
shows "(p \<bullet> (S |\<union>| T)) = ((p \<bullet> S) |\<union>| (p \<bullet> T))"
by (induct S) (simp_all)
lemma inter_list_eqvt [eqvt]:
shows "p \<bullet> (inter_list S T) = inter_list (p \<bullet> S) (p \<bullet> T)"
unfolding list_eq_def inter_list_def
by perm_simp simp
lemma inter_fset_eqvt [eqvt]:
shows "(p \<bullet> (S |\<inter>| T)) = ((p \<bullet> S) |\<inter>| (p \<bullet> T))"
by (lifting inter_list_eqvt)
lemma sub_list_eqvt [eqvt]:
shows "p \<bullet> (sub_list S T) = sub_list (p \<bullet> S) (p \<bullet> T)"
unfolding sub_list_def
by perm_simp simp
lemma subset_fset_eqvt [eqvt]:
shows "(p \<bullet> (S |\<subseteq>| T)) = ((p \<bullet> S) |\<subseteq>| (p \<bullet> T))"
by (lifting sub_list_eqvt)
lemma map_fset_eqvt [eqvt]:
shows "p \<bullet> (map_fset f S) = map_fset (p \<bullet> f) (p \<bullet> S)"
by (lifting map_eqvt)
section {* Supp, Freshness and Supports *}
context pt
begin
definition
supp :: "'a \<Rightarrow> atom set"
where
"supp x = {a. infinite {b. (a \<rightleftharpoons> b) \<bullet> x \<noteq> x}}"
definition
fresh :: "atom \<Rightarrow> 'a \<Rightarrow> bool" ("_ \<sharp> _" [55, 55] 55)
where
"a \<sharp> x \<equiv> a \<notin> supp x"
end
lemma supp_conv_fresh:
shows "supp x = {a. \<not> a \<sharp> x}"
unfolding fresh_def by simp
(* If swapping a and b each with a common atom c fixes x, then swapping
   a and b directly fixes x as well (uses the swap_triple identity). *)
lemma swap_rel_trans:
assumes "sort_of a = sort_of b"
assumes "sort_of b = sort_of c"
assumes "(a \<rightleftharpoons> c) \<bullet> x = x"
assumes "(b \<rightleftharpoons> c) \<bullet> x = x"
shows "(a \<rightleftharpoons> b) \<bullet> x = x"
proof (cases)
assume "a = b \<or> c = b"
with assms show "(a \<rightleftharpoons> b) \<bullet> x = x" by auto
next
assume *: "\<not> (a = b \<or> c = b)"
have "((a \<rightleftharpoons> c) + (b \<rightleftharpoons> c) + (a \<rightleftharpoons> c)) \<bullet> x = x"
using assms by simp
also have "(a \<rightleftharpoons> c) + (b \<rightleftharpoons> c) + (a \<rightleftharpoons> c) = (a \<rightleftharpoons> b)"
using assms * by (simp add: swap_triple)
finally show "(a \<rightleftharpoons> b) \<bullet> x = x" .
qed
(* Key property: swapping two fresh atoms leaves x unchanged.  The fresh
   hypotheses make the sets of "moving" swaps finite, so obtain_atom can
   pick a common witness c outside both. *)
lemma swap_fresh_fresh:
assumes a: "a \<sharp> x"
and b: "b \<sharp> x"
shows "(a \<rightleftharpoons> b) \<bullet> x = x"
proof (cases)
assume asm: "sort_of a = sort_of b"
have "finite {c. (a \<rightleftharpoons> c) \<bullet> x \<noteq> x}" "finite {c. (b \<rightleftharpoons> c) \<bullet> x \<noteq> x}"
using a b unfolding fresh_def supp_def by simp_all
then have "finite ({c. (a \<rightleftharpoons> c) \<bullet> x \<noteq> x} \<union> {c. (b \<rightleftharpoons> c) \<bullet> x \<noteq> x})" by simp
then obtain c
where "(a \<rightleftharpoons> c) \<bullet> x = x" "(b \<rightleftharpoons> c) \<bullet> x = x" "sort_of c = sort_of b"
by (rule obtain_atom) (auto)
then show "(a \<rightleftharpoons> b) \<bullet> x = x" using asm by (rule_tac swap_rel_trans) (simp_all)
next
(* a swap of differently-sorted atoms is the identity permutation *)
assume "sort_of a \<noteq> sort_of b"
then show "(a \<rightleftharpoons> b) \<bullet> x = x" by simp
qed
subsection {* supp and fresh are equivariant *}
lemma supp_eqvt [eqvt]:
shows "p \<bullet> (supp x) = supp (p \<bullet> x)"
unfolding supp_def
by (perm_simp)
(simp only: permute_eqvt[symmetric])
lemma fresh_eqvt [eqvt]:
shows "p \<bullet> (a \<sharp> x) = (p \<bullet> a) \<sharp> (p \<bullet> x)"
unfolding fresh_def
by (perm_simp) (rule refl)
(* freshness is invariant under applying the same permutation to both sides *)
lemma fresh_permute_iff:
shows "(p \<bullet> a) \<sharp> (p \<bullet> x) \<longleftrightarrow> a \<sharp> x"
by (simp only: fresh_eqvt[symmetric] permute_bool_def)
(* move a permutation from the object to the atom by inverting it *)
lemma fresh_permute_left:
shows "a \<sharp> p \<bullet> x \<longleftrightarrow> - p \<bullet> a \<sharp> x"
proof
assume "a \<sharp> p \<bullet> x"
then have "- p \<bullet> a \<sharp> - p \<bullet> p \<bullet> x" by (simp only: fresh_permute_iff)
then show "- p \<bullet> a \<sharp> x" by simp
next
assume "- p \<bullet> a \<sharp> x"
then have "p \<bullet> - p \<bullet> a \<sharp> p \<bullet> x" by (simp only: fresh_permute_iff)
then show "a \<sharp> p \<bullet> x" by simp
qed
section {* supports *}
(* S supports x iff any swap of two atoms outside S fixes x.  A finite
   supporting set always contains the support (supp_is_subset), and supp
   itself is the least finite supporting set (supp_is_least_supports). *)
definition
supports :: "atom set \<Rightarrow> 'a::pt \<Rightarrow> bool" (infixl "supports" 80)
where
"S supports x \<equiv> \<forall>a b. (a \<notin> S \<and> b \<notin> S \<longrightarrow> (a \<rightleftharpoons> b) \<bullet> x = x)"
lemma supp_is_subset:
fixes S :: "atom set"
and x :: "'a::pt"
assumes a1: "S supports x"
and a2: "finite S"
shows "(supp x) \<subseteq> S"
proof (rule ccontr)
assume "\<not> (supp x \<subseteq> S)"
then obtain a where b1: "a \<in> supp x" and b2: "a \<notin> S" by auto
from a1 b2 have "\<forall>b. b \<notin> S \<longrightarrow> (a \<rightleftharpoons> b) \<bullet> x = x" unfolding supports_def by auto
then have "{b. (a \<rightleftharpoons> b) \<bullet> x \<noteq> x} \<subseteq> S" by auto
with a2 have "finite {b. (a \<rightleftharpoons> b) \<bullet> x \<noteq> x}" by (simp add: finite_subset)
then have "a \<notin> (supp x)" unfolding supp_def by simp
with b1 show False by simp
qed
(* a finite supporting set forces the support itself to be finite *)
lemma supports_finite:
fixes S :: "atom set"
and x :: "'a::pt"
assumes a1: "S supports x"
and a2: "finite S"
shows "finite (supp x)"
proof -
have "(supp x) \<subseteq> S" using a1 a2 by (rule supp_is_subset)
then show "finite (supp x)" using a2 by (simp add: finite_subset)
qed
(* the support of x supports x (via swap_fresh_fresh) *)
lemma supp_supports:
fixes x :: "'a::pt"
shows "(supp x) supports x"
unfolding supports_def
proof (intro strip)
fix a b
assume "a \<notin> (supp x) \<and> b \<notin> (supp x)"
then have "a \<sharp> x" and "b \<sharp> x" by (simp_all add: fresh_def)
then show "(a \<rightleftharpoons> b) \<bullet> x = x" by (simp add: swap_fresh_fresh)
qed
lemma supports_fresh:
fixes x :: "'a::pt"
assumes a1: "S supports x"
and a2: "finite S"
and a3: "a \<notin> S"
shows "a \<sharp> x"
unfolding fresh_def
proof -
have "(supp x) \<subseteq> S" using a1 a2 by (rule supp_is_subset)
then show "a \<notin> (supp x)" using a3 by auto
qed
lemma supp_is_least_supports:
fixes S :: "atom set"
and x :: "'a::pt"
assumes a1: "S supports x"
and a2: "finite S"
and a3: "\<And>S'. finite S' \<Longrightarrow> (S' supports x) \<Longrightarrow> S \<subseteq> S'"
shows "(supp x) = S"
proof (rule equalityI)
show "(supp x) \<subseteq> S" using a1 a2 by (rule supp_is_subset)
with a2 have fin: "finite (supp x)" by (rule rev_finite_subset)
have "(supp x) supports x" by (rule supp_supports)
with fin a3 show "S \<subseteq> supp x" by blast
qed
(* introduction rule for subset proofs by contradiction on a witness *)
lemma subsetCI:
shows "(\<And>x. x \<in> A \<Longrightarrow> x \<notin> B \<Longrightarrow> False) \<Longrightarrow> A \<subseteq> B"
by auto
(* Uniqueness: if S supports x, S is finite, and every swap of an S-atom
   with an equally-sorted outside atom actually changes x, then supp x = S.
   This is the standard tool for computing supports exactly. *)
lemma finite_supp_unique:
assumes a1: "S supports x"
assumes a2: "finite S"
assumes a3: "\<And>a b. \<lbrakk>a \<in> S; b \<notin> S; sort_of a = sort_of b\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> x \<noteq> x"
shows "(supp x) = S"
using a1 a2
proof (rule supp_is_least_supports)
fix S'
assume "finite S'" and "S' supports x"
show "S \<subseteq> S'"
proof (rule subsetCI)
fix a
assume "a \<in> S" and "a \<notin> S'"
have "finite (S \<union> S')"
using `finite S` `finite S'` by simp
then obtain b where "b \<notin> S \<union> S'" and "sort_of b = sort_of a"
by (rule obtain_atom)
then have "b \<notin> S" and "b \<notin> S'" and "sort_of a = sort_of b"
by simp_all
then have "(a \<rightleftharpoons> b) \<bullet> x = x"
using `a \<notin> S'` `S' supports x` by (simp add: supports_def)
moreover have "(a \<rightleftharpoons> b) \<bullet> x \<noteq> x"
using `a \<in> S` `b \<notin> S` `sort_of a = sort_of b`
by (rule a3)
ultimately show "False" by simp
qed
qed
section {* Support w.r.t. relations *}
text {*
This definition is used for unquotient types, where
alpha-equivalence does not coincide with equality.
*}
(* supp generalised to an arbitrary relation R in place of equality *)
definition
"supp_rel R x = {a. infinite {b. \<not>(R ((a \<rightleftharpoons> b) \<bullet> x) x)}}"
section {* Finitely-supported types *}
(* Class fs: permutation types whose elements all have finite support. *)
class fs = pt +
assumes finite_supp: "finite (supp x)"
(* pure types are unaffected by permutations, hence have empty support *)
lemma pure_supp:
fixes x::"'a::pure"
shows "supp x = {}"
unfolding supp_def by (simp add: permute_pure)
lemma pure_fresh:
fixes x::"'a::pure"
shows "a \<sharp> x"
unfolding fresh_def by (simp add: pure_supp)
instance pure < fs
by default (simp add: pure_supp)
subsection {* Type @{typ atom} is finitely-supported. *}
(* an atom's support is exactly itself *)
lemma supp_atom:
shows "supp a = {a}"
by (rule finite_supp_unique)
(auto simp add: supports_def)
lemma fresh_atom:
shows "a \<sharp> b \<longleftrightarrow> a \<noteq> b"
unfolding fresh_def supp_atom by simp
instance atom :: fs
by default (simp add: supp_atom)
section {* Type @{typ perm} is finitely-supported. *}
lemma perm_swap_eq:
shows "(a \<rightleftharpoons> b) \<bullet> p = p \<longleftrightarrow> (p \<bullet> (a \<rightleftharpoons> b)) = (a \<rightleftharpoons> b)"
unfolding permute_perm_def
by (metis add_diff_cancel minus_perm_def)
(* a permutation is supported by the atoms it moves *)
lemma supports_perm:
shows "{a. p \<bullet> a \<noteq> a} supports p"
unfolding supports_def
unfolding perm_swap_eq
by (simp add: swap_eqvt)
lemma finite_perm_lemma:
shows "finite {a::atom. p \<bullet> a \<noteq> a}"
unfolding permute_atom_def
using finite_gpermute_neq .
(* the support of a permutation is exactly the set of atoms it moves *)
lemma supp_perm:
shows "supp p = {a. p \<bullet> a \<noteq> a}"
apply (rule finite_supp_unique)
apply (simp_all add: perm_swap_eq swap_eqvt supports_perm finite_perm_lemma)
apply (auto simp add: perm_eq_iff swap_atom perm_swap_eq swap_eqvt)
done
lemma fresh_perm:
shows "a \<sharp> p \<longleftrightarrow> p \<bullet> a = a"
unfolding fresh_def
by (simp add: supp_perm)
lemma supp_swap:
shows "supp (a \<rightleftharpoons> b) = (if a = b \<or> sort_of a \<noteq> sort_of b then {} else {a, b})"
by (auto simp add: supp_perm swap_atom)
lemma fresh_zero_perm:
shows "a \<sharp> (0::perm)"
unfolding fresh_perm by simp
lemma supp_zero_perm:
shows "supp (0::perm) = {}"
unfolding supp_perm by simp
lemma fresh_plus_perm:
fixes p q::perm
assumes "a \<sharp> p" "a \<sharp> q"
shows "a \<sharp> (p + q)"
using assms
unfolding fresh_def
by (auto simp add: supp_perm)
lemma supp_plus_perm:
fixes p q::perm
shows "supp (p + q) \<subseteq> supp p \<union> supp q"
by (auto simp add: supp_perm)
lemma fresh_minus_perm:
fixes p::perm
shows "a \<sharp> (- p) \<longleftrightarrow> a \<sharp> p"
unfolding fresh_def supp_perm
by (simp) (metis permute_minus_cancel(1))
lemma supp_minus_perm:
fixes p::perm
shows "supp (- p) = supp p"
unfolding supp_conv_fresh
by (simp add: fresh_minus_perm)
(* permutations with disjoint supports commute; the case analysis covers
   whether the atom a lies in supp p, supp q, or neither *)
lemma plus_perm_eq:
fixes p q::"perm"
assumes asm: "supp p \<inter> supp q = {}"
shows "p + q = q + p"
unfolding perm_eq_iff
proof
fix a::"atom"
show "(p + q) \<bullet> a = (q + p) \<bullet> a"
proof -
{ assume "a \<notin> supp p" "a \<notin> supp q"
then have "(p + q) \<bullet> a = (q + p) \<bullet> a"
by (simp add: supp_perm)
}
moreover
{ assume a: "a \<in> supp p" "a \<notin> supp q"
then have "p \<bullet> a \<in> supp p" by (simp add: supp_perm)
then have "p \<bullet> a \<notin> supp q" using asm by auto
with a have "(p + q) \<bullet> a = (q + p) \<bullet> a"
by (simp add: supp_perm)
}
moreover
{ assume a: "a \<notin> supp p" "a \<in> supp q"
then have "q \<bullet> a \<in> supp q" by (simp add: supp_perm)
then have "q \<bullet> a \<notin> supp p" using asm by auto
with a have "(p + q) \<bullet> a = (q + p) \<bullet> a"
by (simp add: supp_perm)
}
ultimately show "(p + q) \<bullet> a = (q + p) \<bullet> a"
using asm by blast
qed
qed
(* for disjoint supports the inclusion supp_plus_perm becomes an equality *)
lemma supp_plus_perm_eq:
fixes p q::perm
assumes asm: "supp p \<inter> supp q = {}"
shows "supp (p + q) = supp p \<union> supp q"
proof -
{ fix a::"atom"
assume "a \<in> supp p"
then have "a \<notin> supp q" using asm by auto
then have "a \<in> supp (p + q)" using `a \<in> supp p`
by (simp add: supp_perm)
}
moreover
{ fix a::"atom"
assume "a \<in> supp q"
then have "a \<notin> supp p" using asm by auto
then have "a \<in> supp (q + p)" using `a \<in> supp q`
by (simp add: supp_perm)
then have "a \<in> supp (p + q)" using asm plus_perm_eq
by metis
}
ultimately have "supp p \<union> supp q \<subseteq> supp (p + q)"
by blast
then show "supp (p + q) = supp p \<union> supp q" using supp_plus_perm
by blast
qed
instance perm :: fs
by default (simp add: supp_perm finite_perm_lemma)
section {* Finite Support instances for other types *}
subsection {* Type @{typ "'a \<times> 'b"} is finitely-supported. *}
(* support and freshness distribute componentwise over pairs *)
lemma supp_Pair:
shows "supp (x, y) = supp x \<union> supp y"
by (simp add: supp_def Collect_imp_eq Collect_neg_eq)
lemma fresh_Pair:
shows "a \<sharp> (x, y) \<longleftrightarrow> a \<sharp> x \<and> a \<sharp> y"
by (simp add: fresh_def supp_Pair)
lemma supp_Unit:
shows "supp () = {}"
by (simp add: supp_def)
lemma fresh_Unit:
shows "a \<sharp> ()"
by (simp add: fresh_def supp_Unit)
instance prod :: (fs, fs) fs
by default (auto simp add: supp_Pair finite_supp)
subsection {* Type @{typ "'a + 'b"} is finitely supported *}
(* sums: support of an injection is the support of the injected value *)
lemma supp_Inl:
shows "supp (Inl x) = supp x"
by (simp add: supp_def)
lemma supp_Inr:
shows "supp (Inr x) = supp x"
by (simp add: supp_def)
lemma fresh_Inl:
shows "a \<sharp> Inl x \<longleftrightarrow> a \<sharp> x"
by (simp add: fresh_def supp_Inl)
lemma fresh_Inr:
shows "a \<sharp> Inr y \<longleftrightarrow> a \<sharp> y"
by (simp add: fresh_def supp_Inr)
instance sum :: (fs, fs) fs
apply default
apply (case_tac x)
apply (simp_all add: supp_Inl supp_Inr finite_supp)
done
subsection {* Type @{typ "'a option"} is finitely supported *}
lemma supp_None:
shows "supp None = {}"
by (simp add: supp_def)
lemma supp_Some:
shows "supp (Some x) = supp x"
by (simp add: supp_def)
lemma fresh_None:
shows "a \<sharp> None"
by (simp add: fresh_def supp_None)
lemma fresh_Some:
shows "a \<sharp> Some x \<longleftrightarrow> a \<sharp> x"
by (simp add: fresh_def supp_Some)
instance option :: (fs) fs
apply default
apply (induct_tac x)
apply (simp_all add: supp_None supp_Some finite_supp)
done
subsubsection {* Type @{typ "'a list"} is finitely supported *}
(* lists: support is the union of the supports of the elements *)
lemma supp_Nil:
shows "supp [] = {}"
by (simp add: supp_def)
lemma fresh_Nil:
shows "a \<sharp> []"
by (simp add: fresh_def supp_Nil)
lemma supp_Cons:
shows "supp (x # xs) = supp x \<union> supp xs"
by (simp add: supp_def Collect_imp_eq Collect_neg_eq)
lemma fresh_Cons:
shows "a \<sharp> (x # xs) \<longleftrightarrow> a \<sharp> x \<and> a \<sharp> xs"
by (simp add: fresh_def supp_Cons)
lemma supp_append:
shows "supp (xs @ ys) = supp xs \<union> supp ys"
by (induct xs) (auto simp add: supp_Nil supp_Cons)
lemma fresh_append:
shows "a \<sharp> (xs @ ys) \<longleftrightarrow> a \<sharp> xs \<and> a \<sharp> ys"
by (induct xs) (simp_all add: fresh_Nil fresh_Cons)
lemma supp_rev:
shows "supp (rev xs) = supp xs"
by (induct xs) (auto simp add: supp_append supp_Cons supp_Nil)
lemma fresh_rev:
shows "a \<sharp> rev xs \<longleftrightarrow> a \<sharp> xs"
by (induct xs) (auto simp add: fresh_append fresh_Cons fresh_Nil)
lemma supp_removeAll:
fixes x::"atom"
shows "supp (removeAll x xs) = supp xs - {x}"
by (induct xs)
(auto simp add: supp_Nil supp_Cons supp_atom)
(* for atom lists, support coincides with the set of list elements *)
lemma supp_of_atom_list:
fixes as::"atom list"
shows "supp as = set as"
by (induct as)
(simp_all add: supp_Nil supp_Cons supp_atom)
instance list :: (fs) fs
apply default
apply (induct_tac x)
apply (simp_all add: supp_Nil supp_Cons finite_supp)
done
section {* Support and Freshness for Applications *}
(* freshness rephrased via the "for most atoms" quantifier MOST *)
lemma fresh_conv_MOST:
shows "a \<sharp> x \<longleftrightarrow> (MOST b. (a \<rightleftharpoons> b) \<bullet> x = x)"
unfolding fresh_def supp_def
unfolding MOST_iff_cofinite by simp
(* an atom fresh for both a function and its argument is fresh for the result *)
lemma fresh_fun_app:
assumes "a \<sharp> f" and "a \<sharp> x"
shows "a \<sharp> f x"
using assms
unfolding fresh_conv_MOST
unfolding permute_fun_app_eq
by (elim MOST_rev_mp) (simp)
lemma supp_fun_app:
shows "supp (f x) \<subseteq> (supp f) \<union> (supp x)"
using fresh_fun_app
unfolding fresh_def
by auto
subsection {* Equivariance Predicate @{text eqvt} and @{text eqvt_at}*}
(* f is equivariant iff it is fixed by every permutation *)
definition
"eqvt f \<equiv> \<forall>p. p \<bullet> f = f"
lemma eqvt_boolI:
fixes f::"bool"
shows "eqvt f"
unfolding eqvt_def
by (simp add: permute_bool_def)
text {* equivariance of a function at a given argument *}
definition
"eqvt_at f x \<equiv> \<forall>p. p \<bullet> (f x) = f (p \<bullet> x)"
lemma eqvtI:
shows "(\<And>p. p \<bullet> f \<equiv> f) \<Longrightarrow> eqvt f"
unfolding eqvt_def
by simp
(* pointwise equivariance at x is preserved when x is permuted *)
lemma eqvt_at_perm:
assumes "eqvt_at f x"
shows "eqvt_at f (q \<bullet> x)"
proof -
{ fix p::"perm"
have "p \<bullet> (f (q \<bullet> x)) = p \<bullet> q \<bullet> (f x)"
using assms by (simp add: eqvt_at_def)
also have "\<dots> = (p + q) \<bullet> (f x)" by simp
also have "\<dots> = f ((p + q) \<bullet> x)"
using assms by (simp add: eqvt_at_def)
finally have "p \<bullet> (f (q \<bullet> x)) = f (p \<bullet> q \<bullet> x)" by simp }
then show "eqvt_at f (q \<bullet> x)" unfolding eqvt_at_def
by simp
qed
(* equivariant functions have empty support, hence every atom is fresh *)
lemma supp_fun_eqvt:
assumes a: "eqvt f"
shows "supp f = {}"
using a
unfolding eqvt_def
unfolding supp_def
by simp
lemma fresh_fun_eqvt:
assumes a: "eqvt f"
shows "a \<sharp> f"
using a
unfolding fresh_def
by (simp add: supp_fun_eqvt)
lemma fresh_fun_eqvt_app:
assumes a: "eqvt f"
shows "a \<sharp> x \<Longrightarrow> a \<sharp> f x"
proof -
from a have "supp f = {}" by (simp add: supp_fun_eqvt)
then show "a \<sharp> x \<Longrightarrow> a \<sharp> f x"
unfolding fresh_def
using supp_fun_app by auto
qed
lemma supp_fun_app_eqvt:
assumes a: "eqvt f"
shows "supp (f x) \<subseteq> supp x"
using fresh_fun_eqvt_app[OF a]
unfolding fresh_def
by auto
(* for pointwise-equivariant f, the support of f x is bounded by supp x *)
lemma supp_eqvt_at:
assumes asm: "eqvt_at f x"
and fin: "finite (supp x)"
shows "supp (f x) \<subseteq> supp x"
apply(rule supp_is_subset)
unfolding supports_def
unfolding fresh_def[symmetric]
using asm
apply(simp add: eqvt_at_def swap_fresh_fresh)
apply(rule fin)
done
lemma finite_supp_eqvt_at:
assumes asm: "eqvt_at f x"
and fin: "finite (supp x)"
shows "finite (supp (f x))"
apply(rule finite_subset)
apply(rule supp_eqvt_at[OF asm fin])
apply(rule fin)
done
lemma fresh_eqvt_at:
assumes asm: "eqvt_at f x"
and fin: "finite (supp x)"
and fresh: "a \<sharp> x"
shows "a \<sharp> f x"
using fresh
unfolding fresh_def
using supp_eqvt_at[OF asm fin]
by auto
text {* for handling of freshness of functions *}
(* Simproc discharging goals "a \<sharp> f" for function terms f: closed terms
   are handled via fresh_fun_eqvt; a term with one free variable x is
   rewritten by instantiating fresh_fun_app with the abstraction over x.
   Terms containing schematic variables are left untouched. *)
simproc_setup fresh_fun_simproc ("a \<sharp> (f::'a::pt \<Rightarrow>'b::pt)") = {* fn _ => fn ss => fn ctrm =>
let
val Const(@{const_name fresh}, _) $ _ $ f = term_of ctrm
in
case (Term.add_frees f [], Term.add_vars f []) of
([], []) => SOME(@{thm fresh_fun_eqvt[simplified eqvt_def, THEN Eq_TrueI]})
| (x::_, []) => let
val thy = Proof_Context.theory_of (Simplifier.the_context ss)
val argx = Free x
val absf = absfree x f
val cty_inst = [SOME (ctyp_of thy (fastype_of argx)), SOME (ctyp_of thy (fastype_of f))]
val ctrm_inst = [NONE, SOME (cterm_of thy absf), SOME (cterm_of thy argx)]
val thm = Drule.instantiate' cty_inst ctrm_inst @{thm fresh_fun_app}
in
SOME(thm RS @{thm Eq_TrueI})
end
| (_, _) => NONE
end
*}
subsection {* helper functions for nominal_functions *}
lemma THE_defaultI2:
assumes "\<exists>!x. P x" "\<And>x. P x \<Longrightarrow> Q x"
shows "Q (THE_default d P)"
by (iprover intro: assms THE_defaultI')
(* THE_default commutes with permutations when the choice is unique *)
lemma the_default_eqvt:
assumes unique: "\<exists>!x. P x"
shows "(p \<bullet> (THE_default d P)) = (THE_default (p \<bullet> d) (p \<bullet> P))"
apply(rule THE_default1_equality [symmetric])
apply(rule_tac p="-p" in permute_boolE)
apply(simp add: ex1_eqvt)
apply(rule unique)
apply(rule_tac p="-p" in permute_boolE)
apply(rule subst[OF permute_fun_app_eq])
apply(simp)
apply(rule THE_defaultI'[OF unique])
done
(* A function defined from an equivariant graph G via THE_default is
   itself equivariant wherever its value is uniquely determined. *)
lemma fundef_ex1_eqvt:
fixes x::"'a::pt"
assumes f_def: "f == (\<lambda>x::'a. THE_default (d x) (G x))"
assumes eqvt: "eqvt G"
assumes ex1: "\<exists>!y. G x y"
shows "(p \<bullet> (f x)) = f (p \<bullet> x)"
apply(simp only: f_def)
apply(subst the_default_eqvt)
apply(rule ex1)
apply(rule THE_default1_equality [symmetric])
apply(rule_tac p="-p" in permute_boolE)
apply(perm_simp add: permute_minus_cancel)
using eqvt[simplified eqvt_def]
apply(simp)
apply(rule ex1)
apply(rule THE_defaultI2)
apply(rule_tac p="-p" in permute_boolE)
apply(perm_simp add: permute_minus_cancel)
apply(rule ex1)
apply(perm_simp)
using eqvt[simplified eqvt_def]
apply(simp)
done
lemma fundef_ex1_eqvt_at:
fixes x::"'a::pt"
assumes f_def: "f == (\<lambda>x::'a. THE_default (d x) (G x))"
assumes eqvt: "eqvt G"
assumes ex1: "\<exists>!y. G x y"
shows "eqvt_at f x"
unfolding eqvt_at_def
using assms
by (auto intro: fundef_ex1_eqvt)
(* any invariant of the graph G transfers to the defined function f *)
lemma fundef_ex1_prop:
fixes x::"'a::pt"
assumes f_def: "f \<equiv> (\<lambda>x::'a. THE_default (d x) (G x))"
assumes P_all: "\<And>x y. G x y \<Longrightarrow> P x y"
assumes ex1: "\<exists>!y. G x y"
shows "P x (f x)"
unfolding f_def
using ex1
apply(erule_tac ex1E)
apply(rule THE_defaultI2)
apply(blast)
apply(rule P_all)
apply(assumption)
done
section {* Support of Finite Sets of Finitely Supported Elements *}
text {* support and freshness for atom sets *}
(* a finite set of atoms is its own support *)
lemma supp_finite_atom_set:
fixes S::"atom set"
assumes "finite S"
shows "supp S = S"
apply(rule finite_supp_unique)
apply(simp add: supports_def)
apply(simp add: swap_set_not_in)
apply(rule assms)
apply(simp add: swap_set_in)
done
(* dually, a cofinite atom set is supported by its complement *)
lemma supp_cofinite_atom_set:
fixes S::"atom set"
assumes "finite (UNIV - S)"
shows "supp S = (UNIV - S)"
apply(rule finite_supp_unique)
apply(simp add: supports_def)
apply(simp add: swap_set_both_in)
apply(rule assms)
apply(subst swap_commute)
apply(simp add: swap_set_in)
done
lemma fresh_finite_atom_set:
fixes S::"atom set"
assumes "finite S"
shows "a \<sharp> S \<longleftrightarrow> a \<notin> S"
unfolding fresh_def
by (simp add: supp_finite_atom_set[OF assms])
lemma fresh_minus_atom_set:
fixes S::"atom set"
assumes "finite S"
shows "a \<sharp> S - T \<longleftrightarrow> (a \<notin> T \<longrightarrow> a \<sharp> S)"
unfolding fresh_def
by (auto simp add: supp_finite_atom_set assms)
(* the union of the element supports supports the set itself *)
lemma Union_supports_set:
shows "(\<Union>x \<in> S. supp x) supports S"
proof -
{ fix a b
have "\<forall>x \<in> S. (a \<rightleftharpoons> b) \<bullet> x = x \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> S = S"
unfolding permute_set_def by force
}
then show "(\<Union>x \<in> S. supp x) supports S"
unfolding supports_def
by (simp add: fresh_def[symmetric] swap_fresh_fresh)
qed
lemma Union_of_finite_supp_sets:
fixes S::"('a::fs set)"
assumes fin: "finite S"
shows "finite (\<Union>x\<in>S. supp x)"
using fin by (induct) (auto simp add: finite_supp)
(* conversely, each element's support is contained in the set's support *)
lemma Union_included_in_supp:
fixes S::"('a::fs set)"
assumes fin: "finite S"
shows "(\<Union>x\<in>S. supp x) \<subseteq> supp S"
proof -
have eqvt: "eqvt (\<lambda>S. \<Union> supp ` S)"
unfolding eqvt_def
by (perm_simp) (simp)
have "(\<Union>x\<in>S. supp x) = supp (\<Union>x\<in>S. supp x)"
by (rule supp_finite_atom_set[symmetric]) (rule Union_of_finite_supp_sets[OF fin])
also have "\<dots> = supp ((\<lambda>S. \<Union> supp ` S) S)" by simp
also have "\<dots> \<subseteq> supp S" using eqvt
by (rule supp_fun_app_eqvt)
finally show "(\<Union>x\<in>S. supp x) \<subseteq> supp S" .
qed
(* combining both inclusions: supp of a finite set is the union of the
   supports of its elements *)
lemma supp_of_finite_sets:
fixes S::"('a::fs set)"
assumes fin: "finite S"
shows "(supp S) = (\<Union>x\<in>S. supp x)"
apply(rule subset_antisym)
apply(rule supp_is_subset)
apply(rule Union_supports_set)
apply(rule Union_of_finite_supp_sets[OF fin])
apply(rule Union_included_in_supp[OF fin])
done
lemma finite_sets_supp:
fixes S::"('a::fs set)"
assumes "finite S"
shows "finite (supp S)"
using assms
by (simp only: supp_of_finite_sets Union_of_finite_supp_sets)
lemma supp_of_finite_union:
fixes S T::"('a::fs) set"
assumes fin1: "finite S"
and fin2: "finite T"
shows "supp (S \<union> T) = supp S \<union> supp T"
using fin1 fin2
by (simp add: supp_of_finite_sets)
lemma supp_of_finite_insert:
fixes S::"('a::fs) set"
assumes fin: "finite S"
shows "supp (insert x S) = supp x \<union> supp S"
using fin
by (simp add: supp_of_finite_sets)
lemma fresh_finite_insert:
fixes S::"('a::fs) set"
assumes fin: "finite S"
shows "a \<sharp> (insert x S) \<longleftrightarrow> a \<sharp> x \<and> a \<sharp> S"
using fin unfolding fresh_def
by (simp add: supp_of_finite_insert)
lemma supp_set_empty:
shows "supp {} = {}"
unfolding supp_def
by (simp add: empty_eqvt)
lemma fresh_set_empty:
shows "a \<sharp> {}"
by (simp add: fresh_def supp_set_empty)
(* converting a list to a set preserves support and freshness *)
lemma supp_set:
fixes xs :: "('a::fs) list"
shows "supp (set xs) = supp xs"
apply(induct xs)
apply(simp add: supp_set_empty supp_Nil)
apply(simp add: supp_Cons supp_of_finite_insert)
done
lemma fresh_set:
fixes xs :: "('a::fs) list"
shows "a \<sharp> (set xs) \<longleftrightarrow> a \<sharp> xs"
unfolding fresh_def
by (simp add: supp_set)
subsection {* Type @{typ "'a multiset"} is finitely supported *}
lemma set_of_eqvt[eqvt]:
shows "p \<bullet> (set_of M) = set_of (p \<bullet> M)"
by (induct M) (simp_all add: insert_eqvt empty_eqvt)
lemma supp_set_of:
shows "supp (set_of M) \<subseteq> supp M"
apply (rule supp_fun_app_eqvt)
unfolding eqvt_def
apply(perm_simp)
apply(simp)
done
lemma Union_finite_multiset:
fixes M::"'a::fs multiset"
shows "finite (\<Union>{supp x | x. x \<in># M})"
proof -
have "finite (\<Union>(supp ` {x. x \<in># M}))"
by (induct M) (simp_all add: Collect_imp_eq Collect_neg_eq finite_supp)
then show "finite (\<Union>{supp x | x. x \<in># M})"
by (simp only: image_Collect)
qed
(* a multiset is supported by the union of the supports of its members *)
lemma Union_supports_multiset:
shows "\<Union>{supp x | x. x :# M} supports M"
proof -
have sw: "\<And>a b. ((\<And>x. x :# M \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> x = x) \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> M = M)"
unfolding permute_multiset_def
apply(induct M)
apply(simp_all)
done
show "(\<Union>{supp x | x. x :# M}) supports M"
unfolding supports_def
apply(clarify)
apply(rule sw)
apply(rule swap_fresh_fresh)
apply(simp_all only: fresh_def)
apply(auto)
apply(metis neq0_conv)+
done
qed
lemma Union_included_multiset:
fixes M::"('a::fs multiset)"
shows "(\<Union>{supp x | x. x \<in># M}) \<subseteq> supp M"
proof -
have "(\<Union>{supp x | x. x \<in># M}) = (\<Union>{supp x | x. x \<in> set_of M})" by simp
also have "... \<subseteq> (\<Union>x \<in> set_of M. supp x)" by auto
also have "... = supp (set_of M)" by (simp add: subst supp_of_finite_sets)
also have " ... \<subseteq> supp M" by (rule supp_set_of)
finally show "(\<Union>{supp x | x. x \<in># M}) \<subseteq> supp M" .
qed
(* supp of a multiset equals the union of the supports of its members *)
lemma supp_of_multisets:
fixes M::"('a::fs multiset)"
shows "(supp M) = (\<Union>{supp x | x. x :# M})"
apply(rule subset_antisym)
apply(rule supp_is_subset)
apply(rule Union_supports_multiset)
apply(rule Union_finite_multiset)
apply(rule Union_included_multiset)
done
lemma multisets_supp_finite:
fixes M::"('a::fs multiset)"
shows "finite (supp M)"
by (simp only: supp_of_multisets Union_finite_multiset)
lemma supp_of_multiset_union:
fixes M N::"('a::fs) multiset"
shows "supp (M + N) = supp M \<union> supp N"
by (auto simp add: supp_of_multisets)
lemma supp_empty_mset [simp]:
shows "supp {#} = {}"
unfolding supp_def
by simp
instance multiset :: (fs) fs
apply (default)
apply (rule multisets_supp_finite)
done
subsection {* Type @{typ "'a fset"} is finitely supported *}
(* an fset has the same support as its underlying finite set *)
lemma supp_fset [simp]:
shows "supp (fset S) = supp S"
unfolding supp_def
by (simp add: fset_eqvt fset_cong)
lemma supp_empty_fset [simp]:
shows "supp {||} = {}"
unfolding supp_def
by simp
lemma fresh_empty_fset:
shows "a \<sharp> {||}"
unfolding fresh_def
by (simp)
lemma supp_insert_fset [simp]:
fixes x::"'a::fs"
and S::"'a fset"
shows "supp (insert_fset x S) = supp x \<union> supp S"
apply(subst supp_fset[symmetric])
apply(simp add: supp_of_finite_insert)
done
lemma fresh_insert_fset:
fixes x::"'a::fs"
and S::"'a fset"
shows "a \<sharp> insert_fset x S \<longleftrightarrow> a \<sharp> x \<and> a \<sharp> S"
unfolding fresh_def
by (simp)
lemma fset_finite_supp:
fixes S::"('a::fs) fset"
shows "finite (supp S)"
by (induct S) (simp_all add: finite_supp)
lemma supp_union_fset:
fixes S T::"'a::fs fset"
shows "supp (S |\<union>| T) = supp S \<union> supp T"
by (induct S) (auto)
lemma fresh_union_fset:
fixes S T::"'a::fs fset"
shows "a \<sharp> S |\<union>| T \<longleftrightarrow> a \<sharp> S \<and> a \<sharp> T"
unfolding fresh_def
by (simp add: supp_union_fset)
instance fset :: (fs) fs
apply (default)
apply (rule fset_finite_supp)
done
section {* Freshness and Fresh-Star *}
(* elimination rules that let the simplifier split freshness premises
   over units and pairs *)
lemma fresh_Unit_elim:
shows "(a \<sharp> () \<Longrightarrow> PROP C) \<equiv> PROP C"
by (simp add: fresh_Unit)
lemma fresh_Pair_elim:
shows "(a \<sharp> (x, y) \<Longrightarrow> PROP C) \<equiv> (a \<sharp> x \<Longrightarrow> a \<sharp> y \<Longrightarrow> PROP C)"
by rule (simp_all add: fresh_Pair)
(* this rule needs to be added before the fresh_prodD is *)
(* added to the simplifier with mksimps *)
lemma fresh_PairD:
shows "a \<sharp> (x, y) \<Longrightarrow> a \<sharp> x"
and "a \<sharp> (x, y) \<Longrightarrow> a \<sharp> y"
by (simp_all add: fresh_Pair)
(* register fresh_PairD with mksimps so simp decomposes pair freshness *)
declaration {* fn _ =>
let
val mksimps_pairs = (@{const_name Nominal2_Base.fresh}, @{thms fresh_PairD}) :: mksimps_pairs
in
Simplifier.map_ss (fn ss => Simplifier.set_mksimps (mksimps mksimps_pairs) ss)
end
*}
text {* The fresh-star generalisation of fresh is used in strong
induction principles. *}
definition
fresh_star :: "atom set \<Rightarrow> 'a::pt \<Rightarrow> bool" ("_ \<sharp>* _" [80,80] 80)
where
"as \<sharp>* x \<equiv> \<forall>a \<in> as. a \<sharp> x"
(* fresh-star is symmetric when stated between supports *)
lemma fresh_star_supp_conv:
shows "supp x \<sharp>* y \<Longrightarrow> supp y \<sharp>* x"
by (auto simp add: fresh_star_def fresh_def)
lemma fresh_star_perm_set_conv:
fixes p::"perm"
assumes fresh: "as \<sharp>* p"
and fin: "finite as"
shows "supp p \<sharp>* as"
apply(rule fresh_star_supp_conv)
apply(simp add: supp_finite_atom_set fin fresh)
done
lemma fresh_star_atom_set_conv:
assumes fresh: "as \<sharp>* bs"
and fin: "finite as" "finite bs"
shows "bs \<sharp>* as"
using fresh
unfolding fresh_star_def fresh_def
by (auto simp add: supp_finite_atom_set fin)
(* for finite atom sets, fresh-star is just disjointness *)
lemma atom_fresh_star_disjoint:
assumes fin: "finite bs"
shows "as \<sharp>* bs \<longleftrightarrow> (as \<inter> bs = {})"
unfolding fresh_star_def fresh_def
by (auto simp add: supp_finite_atom_set fin)
lemma fresh_star_Pair:
shows "as \<sharp>* (x, y) = (as \<sharp>* x \<and> as \<sharp>* y)"
by (auto simp add: fresh_star_def fresh_Pair)
lemma fresh_star_list:
shows "as \<sharp>* (xs @ ys) \<longleftrightarrow> as \<sharp>* xs \<and> as \<sharp>* ys"
and "as \<sharp>* (x # xs) \<longleftrightarrow> as \<sharp>* x \<and> as \<sharp>* xs"
and "as \<sharp>* []"
by (auto simp add: fresh_star_def fresh_Nil fresh_Cons fresh_append)
lemma fresh_star_set:
fixes xs::"('a::fs) list"
shows "as \<sharp>* set xs \<longleftrightarrow> as \<sharp>* xs"
unfolding fresh_star_def
by (simp add: fresh_set)
lemma fresh_star_singleton:
fixes a::"atom"
shows "as \<sharp>* {a} \<longleftrightarrow> as \<sharp>* a"
by (simp add: fresh_star_def fresh_finite_insert fresh_set_empty)
lemma fresh_star_fset:
fixes xs::"('a::fs) list"
shows "as \<sharp>* fset S \<longleftrightarrow> as \<sharp>* S"
by (simp add: fresh_star_def fresh_def)
lemma fresh_star_Un:
shows "(as \<union> bs) \<sharp>* x = (as \<sharp>* x \<and> bs \<sharp>* x)"
by (auto simp add: fresh_star_def)
lemma fresh_star_insert:
shows "(insert a as) \<sharp>* x = (a \<sharp> x \<and> as \<sharp>* x)"
by (auto simp add: fresh_star_def)
(* meta-level elimination rules for fresh-star premises *)
lemma fresh_star_Un_elim:
"((as \<union> bs) \<sharp>* x \<Longrightarrow> PROP C) \<equiv> (as \<sharp>* x \<Longrightarrow> bs \<sharp>* x \<Longrightarrow> PROP C)"
unfolding fresh_star_def
apply(rule)
apply(erule meta_mp)
apply(auto)
done
lemma fresh_star_insert_elim:
"(insert a as \<sharp>* x \<Longrightarrow> PROP C) \<equiv> (a \<sharp> x \<Longrightarrow> as \<sharp>* x \<Longrightarrow> PROP C)"
unfolding fresh_star_def
by rule (simp_all add: fresh_star_def)
lemma fresh_star_empty_elim:
"({} \<sharp>* x \<Longrightarrow> PROP C) \<equiv> PROP C"
by (simp add: fresh_star_def)
lemma fresh_star_Unit_elim:
shows "(a \<sharp>* () \<Longrightarrow> PROP C) \<equiv> PROP C"
by (simp add: fresh_star_def fresh_Unit)
lemma fresh_star_Pair_elim:
shows "(a \<sharp>* (x, y) \<Longrightarrow> PROP C) \<equiv> (a \<sharp>* x \<Longrightarrow> a \<sharp>* y \<Longrightarrow> PROP C)"
by (rule, simp_all add: fresh_star_Pair)
lemma fresh_star_zero:
shows "as \<sharp>* (0::perm)"
unfolding fresh_star_def
by (simp add: fresh_zero_perm)
lemma fresh_star_plus:
fixes p q::perm
shows "\<lbrakk>a \<sharp>* p; a \<sharp>* q\<rbrakk> \<Longrightarrow> a \<sharp>* (p + q)"
unfolding fresh_star_def
by (simp add: fresh_plus_perm)
lemma fresh_star_permute_iff:
shows "(p \<bullet> a) \<sharp>* (p \<bullet> x) \<longleftrightarrow> a \<sharp>* x"
unfolding fresh_star_def
by (metis mem_permute_iff permute_minus_cancel(1) fresh_permute_iff)
lemma fresh_star_eqvt [eqvt]:
shows "p \<bullet> (as \<sharp>* x) \<longleftrightarrow> (p \<bullet> as) \<sharp>* (p \<bullet> x)"
unfolding fresh_star_def
by (perm_simp) (rule refl)
section {* Induction principle for permutations *}
(* Composing p with the swap (p a, a) cancels p's action on a, strictly
   shrinking the support — the measure for the induction below. *)
lemma smaller_supp:
assumes a: "a \<in> supp p"
shows "supp ((p \<bullet> a \<rightleftharpoons> a) + p) \<subset> supp p"
proof -
have "supp ((p \<bullet> a \<rightleftharpoons> a) + p) \<subseteq> supp p"
unfolding supp_perm by (auto simp add: swap_atom)
moreover
have "a \<notin> supp ((p \<bullet> a \<rightleftharpoons> a) + p)" by (simp add: supp_perm)
then have "supp ((p \<bullet> a \<rightleftharpoons> a) + p) \<noteq> supp p" using a by auto
ultimately
show "supp ((p \<bullet> a \<rightleftharpoons> a) + p) \<subset> supp p" by auto
qed
(* Structural induction on permutations: every permutation supported by S
   is built from 0 by prepending swaps of atoms in S.  Proof is by
   well-founded induction on the (finite) support, via smaller_supp. *)
lemma perm_struct_induct[consumes 1, case_names zero swap]:
assumes S: "supp p \<subseteq> S"
and zero: "P 0"
and swap: "\<And>p a b. \<lbrakk>P p; supp p \<subseteq> S; a \<in> S; b \<in> S; a \<noteq> b; sort_of a = sort_of b\<rbrakk> \<Longrightarrow> P ((a \<rightleftharpoons> b) + p)"
shows "P p"
proof -
have "finite (supp p)" by (simp add: finite_supp)
then show "P p" using S
proof(induct A\<equiv>"supp p" arbitrary: p rule: finite_psubset_induct)
case (psubset p)
then have ih: "\<And>q. supp q \<subset> supp p \<Longrightarrow> P q" by auto
have as: "supp p \<subseteq> S" by fact
{ assume "supp p = {}"
then have "p = 0" by (simp add: supp_perm perm_eq_iff)
then have "P p" using zero by simp
}
moreover
{ assume "supp p \<noteq> {}"
then obtain a where a0: "a \<in> supp p" by blast
then have a1: "p \<bullet> a \<in> S" "a \<in> S" "sort_of (p \<bullet> a) = sort_of a" "p \<bullet> a \<noteq> a"
using as by (auto simp add: supp_atom supp_perm swap_atom)
let ?q = "(p \<bullet> a \<rightleftharpoons> a) + p"
have a2: "supp ?q \<subset> supp p" using a0 smaller_supp by simp
then have "P ?q" using ih by simp
moreover
have "supp ?q \<subseteq> S" using as a2 by simp
ultimately have "P ((p \<bullet> a \<rightleftharpoons> a) + ?q)" using as a1 swap by simp
moreover
have "p = (p \<bullet> a \<rightleftharpoons> a) + ?q" by (simp add: perm_eq_iff)
ultimately have "P p" by simp
}
ultimately show "P p" by blast
qed
qed
(* unconditional variant: instantiate S with supp p itself *)
lemma perm_simple_struct_induct[case_names zero swap]:
assumes zero: "P 0"
and swap: "\<And>p a b. \<lbrakk>P p; a \<noteq> b; sort_of a = sort_of b\<rbrakk> \<Longrightarrow> P ((a \<rightleftharpoons> b) + p)"
shows "P p"
by (rule_tac S="supp p" in perm_struct_induct)
(auto intro: zero swap)
(* variant with separate cases for single swaps and composition *)
lemma perm_struct_induct2[consumes 1, case_names zero swap plus]:
assumes S: "supp p \<subseteq> S"
assumes zero: "P 0"
assumes swap: "\<And>a b. \<lbrakk>sort_of a = sort_of b; a \<noteq> b; a \<in> S; b \<in> S\<rbrakk> \<Longrightarrow> P (a \<rightleftharpoons> b)"
assumes plus: "\<And>p1 p2. \<lbrakk>P p1; P p2; supp p1 \<subseteq> S; supp p2 \<subseteq> S\<rbrakk> \<Longrightarrow> P (p1 + p2)"
shows "P p"
using S
by (induct p rule: perm_struct_induct)
(auto intro: zero plus swap simp add: supp_swap)
lemma perm_simple_struct_induct2[case_names zero swap plus]:
assumes zero: "P 0"
assumes swap: "\<And>a b. \<lbrakk>sort_of a = sort_of b; a \<noteq> b\<rbrakk> \<Longrightarrow> P (a \<rightleftharpoons> b)"
assumes plus: "\<And>p1 p2. \<lbrakk>P p1; P p2\<rbrakk> \<Longrightarrow> P (p1 + p2)"
shows "P p"
by (rule_tac S="supp p" in perm_struct_induct2)
(auto intro: zero swap plus)
(* A permutation supported by a single atom must be the identity: a swap
   always has a two-element support. *)
lemma supp_perm_singleton:
fixes p::"perm"
shows "supp p \<subseteq> {b} \<longleftrightarrow> p = 0"
proof -
{ assume "supp p \<subseteq> {b}"
then have "p = 0"
by (induct p rule: perm_struct_induct) (simp_all)
}
then show "supp p \<subseteq> {b} \<longleftrightarrow> p = 0" by (auto simp add: supp_zero_perm)
qed
(* A permutation supported by two atoms is either the identity or the
   single swap of those two atoms. *)
lemma supp_perm_pair:
fixes p::"perm"
shows "supp p \<subseteq> {a, b} \<longleftrightarrow> p = 0 \<or> p = (b \<rightleftharpoons> a)"
proof -
{ assume "supp p \<subseteq> {a, b}"
then have "p = 0 \<or> p = (b \<rightleftharpoons> a)"
apply (induct p rule: perm_struct_induct)
apply (auto simp add: swap_cancel supp_zero_perm supp_swap)
apply (simp add: swap_commute)
done
}
then show "supp p \<subseteq> {a, b} \<longleftrightarrow> p = 0 \<or> p = (b \<rightleftharpoons> a)"
by (auto simp add: supp_zero_perm supp_swap split: if_splits)
qed
(* If every atom in the support of x is fresh for p, then p acts as the
   identity on x.  Proof: p decomposes into swaps of atoms fresh for x,
   and each such swap fixes x (swap_fresh_fresh). *)
lemma supp_perm_eq:
assumes "(supp x) \<sharp>* p"
shows "p \<bullet> x = x"
proof -
from assms have "supp p \<subseteq> {a. a \<sharp> x}"
unfolding supp_perm fresh_star_def fresh_def by auto
then show "p \<bullet> x = x"
proof (induct p rule: perm_struct_induct)
case zero
show "0 \<bullet> x = x" by simp
next
case (swap p a b)
then have "a \<sharp> x" "b \<sharp> x" "p \<bullet> x = x" by simp_all
then show "((a \<rightleftharpoons> b) + p) \<bullet> x = x" by (simp add: swap_fresh_fresh)
qed
qed
text {* same lemma as above, but proved with a different induction principle *}
lemma supp_perm_eq_test:
assumes "(supp x) \<sharp>* p"
shows "p \<bullet> x = x"
proof -
from assms have "supp p \<subseteq> {a. a \<sharp> x}"
unfolding supp_perm fresh_star_def fresh_def by auto
then show "p \<bullet> x = x"
proof (induct p rule: perm_struct_induct2)
case zero
show "0 \<bullet> x = x" by simp
next
case (swap a b)
then have "a \<sharp> x" "b \<sharp> x" by simp_all
then show "(a \<rightleftharpoons> b) \<bullet> x = x" by (simp add: swap_fresh_fresh)
next
case (plus p1 p2)
have "p1 \<bullet> x = x" "p2 \<bullet> x = x" by fact+
then show "(p1 + p2) \<bullet> x = x" by simp
qed
qed
(* Symmetric formulation: the support of p (rather than of x) is fresh
   for x.  The two freshness conditions are interchangeable here, and the
   proof is identical to supp_perm_eq_test. *)
lemma perm_supp_eq:
assumes a: "(supp p) \<sharp>* x"
shows "p \<bullet> x = x"
proof -
from assms have "supp p \<subseteq> {a. a \<sharp> x}"
unfolding supp_perm fresh_star_def fresh_def by auto
then show "p \<bullet> x = x"
proof (induct p rule: perm_struct_induct2)
case zero
show "0 \<bullet> x = x" by simp
next
case (swap a b)
then have "a \<sharp> x" "b \<sharp> x" by simp_all
then show "(a \<rightleftharpoons> b) \<bullet> x = x" by (simp add: swap_fresh_fresh)
next
case (plus p1 p2)
have "p1 \<bullet> x = x" "p2 \<bullet> x = x" by fact+
then show "(p1 + p2) \<bullet> x = x" by simp
qed
qed
(* Two permutations that agree on every atom in the support of x act the
   same on x.  Reduce to supp_perm_eq via the difference -q + p. *)
lemma supp_perm_perm_eq:
assumes a: "\<forall>a \<in> supp x. p \<bullet> a = q \<bullet> a"
shows "p \<bullet> x = q \<bullet> x"
proof -
from a have "\<forall>a \<in> supp x. (-q + p) \<bullet> a = a" by simp
then have "\<forall>a \<in> supp x. a \<notin> supp (-q + p)"
unfolding supp_perm by simp
then have "supp x \<sharp>* (-q + p)"
unfolding fresh_star_def fresh_def by simp
then have "(-q + p) \<bullet> x = x" by (simp only: supp_perm_eq)
then show "p \<bullet> x = q \<bullet> x"
by (metis permute_minus_cancel(1) permute_plus)
qed
text {* disagreement set *}
(* dset p q collects the atoms on which p and q disagree. *)
definition
dset :: "perm \<Rightarrow> perm \<Rightarrow> atom set"
where
"dset p q = {a::atom. p \<bullet> a \<noteq> q \<bullet> a}"
(* If the disagreement set of p and q is fresh for x, both permutations
   act identically on x (corollary of supp_perm_perm_eq). *)
lemma ds_fresh:
assumes "dset p q \<sharp>* x"
shows "p \<bullet> x = q \<bullet> x"
using assms
unfolding dset_def fresh_star_def fresh_def
by (auto intro: supp_perm_perm_eq)
(* A permutation fresh for an atom set leaves that set fixed pointwise;
   shown by structural induction, each swap moving only atoms outside as. *)
lemma atom_set_perm_eq:
assumes a: "as \<sharp>* p"
shows "p \<bullet> as = as"
proof -
from a have "supp p \<subseteq> {a. a \<notin> as}"
unfolding supp_perm fresh_star_def fresh_def by auto
then show "p \<bullet> as = as"
proof (induct p rule: perm_struct_induct)
case zero
show "0 \<bullet> as = as" by simp
next
case (swap p a b)
then have "a \<notin> as" "b \<notin> as" "p \<bullet> as = as" by simp_all
then show "((a \<rightleftharpoons> b) + p) \<bullet> as = as" by (simp add: swap_set_not_in)
qed
qed
section {* Avoiding of atom sets *}
text {*
For every set of atoms, there is another set of atoms
avoiding a finitely supported c and there is a permutation
which 'translates' between both sets.
*}
(* Workhorse: for finite Xs inside a finite As, build by induction on Xs a
   permutation p that moves Xs completely outside As, with support exactly
   Xs together with its image.  Each step picks a completely fresh atom y
   (via obtain_atom) and extends p with the swap (x \<rightleftharpoons> y). *)
lemma at_set_avoiding_aux:
fixes Xs::"atom set"
and As::"atom set"
assumes b: "Xs \<subseteq> As"
and c: "finite As"
shows "\<exists>p. (p \<bullet> Xs) \<inter> As = {} \<and> (supp p) = (Xs \<union> (p \<bullet> Xs))"
proof -
from b c have "finite Xs" by (rule finite_subset)
then show ?thesis using b
proof (induct rule: finite_subset_induct)
case empty
have "0 \<bullet> {} \<inter> As = {}" by simp
moreover
have "supp (0::perm) = {} \<union> 0 \<bullet> {}" by (simp add: supp_zero_perm)
ultimately show ?case by blast
next
case (insert x Xs)
then obtain p where
p1: "(p \<bullet> Xs) \<inter> As = {}" and
p2: "supp p = (Xs \<union> (p \<bullet> Xs))" by blast
from `x \<in> As` p1 have "x \<notin> p \<bullet> Xs" by fast
with `x \<notin> Xs` p2 have "x \<notin> supp p" by fast
hence px: "p \<bullet> x = x" unfolding supp_perm by simp
have "finite (As \<union> p \<bullet> Xs \<union> supp p)"
using `finite As` `finite Xs`
by (simp add: permute_set_eq_image finite_supp)
(* choose a same-sorted atom y untouched by everything so far *)
then obtain y where "y \<notin> (As \<union> p \<bullet> Xs \<union> supp p)" "sort_of y = sort_of x"
by (rule obtain_atom)
hence y: "y \<notin> As" "y \<notin> p \<bullet> Xs" "y \<notin> supp p" "sort_of y = sort_of x"
by simp_all
hence py: "p \<bullet> y = y" "x \<noteq> y" using `x \<in> As`
by (auto simp add: supp_perm)
let ?q = "(x \<rightleftharpoons> y) + p"
have q: "?q \<bullet> insert x Xs = insert y (p \<bullet> Xs)"
unfolding insert_eqvt
using `p \<bullet> x = x` `sort_of y = sort_of x`
using `x \<notin> p \<bullet> Xs` `y \<notin> p \<bullet> Xs`
by (simp add: swap_atom swap_set_not_in)
have "?q \<bullet> insert x Xs \<inter> As = {}"
using `y \<notin> As` `p \<bullet> Xs \<inter> As = {}`
unfolding q by simp
moreover
(* the new swap is disjoint from p's support, so supports simply unite *)
have "supp (x \<rightleftharpoons> y) \<inter> supp p = {}" using px py `sort_of y = sort_of x`
unfolding supp_swap by (simp add: supp_perm)
then have "supp ?q = (supp (x \<rightleftharpoons> y) \<union> supp p)"
by (simp add: supp_plus_perm_eq)
then have "supp ?q = insert x Xs \<union> ?q \<bullet> insert x Xs"
using p2 `sort_of y = sort_of x` `x \<noteq> y` unfolding q supp_swap
by auto
ultimately show ?case by blast
qed
qed
(* Main avoiding lemma: a finite Xs can be renamed by some p to become
   fresh for any finitely supported context c. *)
lemma at_set_avoiding:
assumes a: "finite Xs"
and b: "finite (supp c)"
obtains p::"perm" where "(p \<bullet> Xs)\<sharp>*c" and "(supp p) = (Xs \<union> (p \<bullet> Xs))"
using a b at_set_avoiding_aux [where Xs="Xs" and As="Xs \<union> supp c"]
unfolding fresh_star_def fresh_def by blast
(* Existential corollary, dropping the support characterisation. *)
lemma at_set_avoiding1:
assumes "finite xs"
and "finite (supp c)"
shows "\<exists>p. (p \<bullet> xs) \<sharp>* c"
using assms
apply(erule_tac c="c" in at_set_avoiding)
apply(auto)
done
(* If xs is already fresh for x, the avoiding permutation can be chosen
   with its support fresh for x (so it does not disturb x). *)
lemma at_set_avoiding2:
assumes "finite xs"
and "finite (supp c)" "finite (supp x)"
and "xs \<sharp>* x"
shows "\<exists>p. (p \<bullet> xs) \<sharp>* c \<and> supp x \<sharp>* p"
using assms
apply(erule_tac c="(c, x)" in at_set_avoiding)
apply(simp add: supp_Pair)
apply(rule_tac x="p" in exI)
apply(simp add: fresh_star_Pair)
apply(rule fresh_star_supp_conv)
apply(auto simp add: fresh_star_def)
done
(* As at_set_avoiding2, additionally pinning down supp p exactly. *)
lemma at_set_avoiding3:
assumes "finite xs"
and "finite (supp c)" "finite (supp x)"
and "xs \<sharp>* x"
shows "\<exists>p. (p \<bullet> xs) \<sharp>* c \<and> supp x \<sharp>* p \<and> supp p = xs \<union> (p \<bullet> xs)"
using assms
apply(erule_tac c="(c, x)" in at_set_avoiding)
apply(simp add: supp_Pair)
apply(rule_tac x="p" in exI)
apply(simp add: fresh_star_Pair)
apply(rule fresh_star_supp_conv)
apply(auto simp add: fresh_star_def)
done
(* Single-atom instance of at_set_avoiding2. *)
lemma at_set_avoiding2_atom:
assumes "finite (supp c)" "finite (supp x)"
and b: "a \<sharp> x"
shows "\<exists>p. (p \<bullet> a) \<sharp> c \<and> supp x \<sharp>* p"
proof -
have a: "{a} \<sharp>* x" unfolding fresh_star_def by (simp add: b)
obtain p where p1: "(p \<bullet> {a}) \<sharp>* c" and p2: "supp x \<sharp>* p"
using at_set_avoiding2[of "{a}" "c" "x"] assms a by blast
have c: "(p \<bullet> a) \<sharp> c" using p1
unfolding fresh_star_def Ball_def
by(erule_tac x="p \<bullet> a" in allE) (simp add: permute_set_def)
hence "p \<bullet> a \<sharp> c \<and> supp x \<sharp>* p" using p2 by blast
then show "\<exists>p. (p \<bullet> a) \<sharp> c \<and> supp x \<sharp>* p" by blast
qed
section {* Renaming permutations *}
(* For a finite set bs, find a permutation q that agrees with p on all of
   bs but whose support is confined to bs and its p-image.  Induction on
   bs: either q already agrees on the new element a, or we patch q with
   the swap (q \<bullet> a \<rightleftharpoons> p \<bullet> a). *)
lemma set_renaming_perm:
assumes b: "finite bs"
shows "\<exists>q. (\<forall>b \<in> bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> bs \<union> (p \<bullet> bs)"
using b
proof (induct)
case empty
have "(\<forall>b \<in> {}. 0 \<bullet> b = p \<bullet> b) \<and> supp (0::perm) \<subseteq> {} \<union> p \<bullet> {}"
by (simp add: permute_set_def supp_perm)
then show "\<exists>q. (\<forall>b \<in> {}. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> {} \<union> p \<bullet> {}" by blast
next
case (insert a bs)
then have " \<exists>q. (\<forall>b \<in> bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> bs \<union> p \<bullet> bs" by simp
then obtain q where *: "\<forall>b \<in> bs. q \<bullet> b = p \<bullet> b" and **: "supp q \<subseteq> bs \<union> p \<bullet> bs"
by auto
{ assume 1: "q \<bullet> a = p \<bullet> a"
have "\<forall>b \<in> (insert a bs). q \<bullet> b = p \<bullet> b" using 1 * by simp
moreover
have "supp q \<subseteq> insert a bs \<union> p \<bullet> insert a bs"
using ** by (auto simp add: insert_eqvt)
ultimately
have "\<exists>q. (\<forall>b \<in> insert a bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> insert a bs \<union> p \<bullet> insert a bs" by blast
}
moreover
{ assume 2: "q \<bullet> a \<noteq> p \<bullet> a"
(* repair q at a with one extra swap *)
def q' \<equiv> "((q \<bullet> a) \<rightleftharpoons> (p \<bullet> a)) + q"
have "\<forall>b \<in> insert a bs. q' \<bullet> b = p \<bullet> b" using 2 * `a \<notin> bs` unfolding q'_def
by (auto simp add: swap_atom)
moreover
{ have "{q \<bullet> a, p \<bullet> a} \<subseteq> insert a bs \<union> p \<bullet> insert a bs"
using **
apply (auto simp add: supp_perm insert_eqvt)
apply (subgoal_tac "q \<bullet> a \<in> bs \<union> p \<bullet> bs")
apply(auto)[1]
apply(subgoal_tac "q \<bullet> a \<in> {a. q \<bullet> a \<noteq> a}")
apply(blast)
apply(simp)
done
then have "supp (q \<bullet> a \<rightleftharpoons> p \<bullet> a) \<subseteq> insert a bs \<union> p \<bullet> insert a bs" by (simp add: supp_swap)
moreover
have "supp q \<subseteq> insert a bs \<union> p \<bullet> insert a bs"
using ** by (auto simp add: insert_eqvt)
ultimately
have "supp q' \<subseteq> insert a bs \<union> p \<bullet> insert a bs"
unfolding q'_def using supp_plus_perm by blast
}
ultimately
have "\<exists>q. (\<forall>b \<in> insert a bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> insert a bs \<union> p \<bullet> insert a bs" by blast
}
ultimately show "\<exists>q. (\<forall>b \<in> insert a bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> insert a bs \<union> p \<bullet> insert a bs"
by blast
qed
(* Drop the finiteness assumption: apply set_renaming_perm to the finite
   intersection bs \<inter> supp p; on bs - supp p both p and q act as identity. *)
lemma set_renaming_perm2:
shows "\<exists>q. (\<forall>b \<in> bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> bs \<union> (p \<bullet> bs)"
proof -
have "finite (bs \<inter> supp p)" by (simp add: finite_supp)
then obtain q
where *: "\<forall>b \<in> bs \<inter> supp p. q \<bullet> b = p \<bullet> b" and **: "supp q \<subseteq> (bs \<inter> supp p) \<union> (p \<bullet> (bs \<inter> supp p))"
using set_renaming_perm by blast
from ** have "supp q \<subseteq> bs \<union> (p \<bullet> bs)" by (auto simp add: inter_eqvt)
moreover
have "\<forall>b \<in> bs - supp p. q \<bullet> b = p \<bullet> b"
apply(auto)
apply(subgoal_tac "b \<notin> supp q")
apply(simp add: fresh_def[symmetric])
apply(simp add: fresh_perm)
apply(clarify)
apply(rotate_tac 2)
apply(drule subsetD[OF **])
apply(simp add: inter_eqvt supp_eqvt permute_self)
done
ultimately have "(\<forall>b \<in> bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> bs \<union> (p \<bullet> bs)" using * by auto
then show "\<exists>q. (\<forall>b \<in> bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> bs \<union> (p \<bullet> bs)" by blast
qed
(* List version of set_renaming_perm, by induction on the list; the Cons
   step mirrors the insert step above (duplicates handled in case 1). *)
lemma list_renaming_perm:
shows "\<exists>q. (\<forall>b \<in> set bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> set bs \<union> (p \<bullet> set bs)"
proof (induct bs)
case (Cons a bs)
then have " \<exists>q. (\<forall>b \<in> set bs. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> set bs \<union> p \<bullet> (set bs)" by simp
then obtain q where *: "\<forall>b \<in> set bs. q \<bullet> b = p \<bullet> b" and **: "supp q \<subseteq> set bs \<union> p \<bullet> (set bs)"
by (blast)
{ assume 1: "a \<in> set bs"
have "q \<bullet> a = p \<bullet> a" using * 1 by (induct bs) (auto)
then have "\<forall>b \<in> set (a # bs). q \<bullet> b = p \<bullet> b" using * by simp
moreover
have "supp q \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))" using ** by (auto simp add: insert_eqvt)
ultimately
have "\<exists>q. (\<forall>b \<in> set (a # bs). q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))" by blast
}
moreover
{ assume 2: "a \<notin> set bs"
def q' \<equiv> "((q \<bullet> a) \<rightleftharpoons> (p \<bullet> a)) + q"
have "\<forall>b \<in> set (a # bs). q' \<bullet> b = p \<bullet> b"
unfolding q'_def using 2 * `a \<notin> set bs` by (auto simp add: swap_atom)
moreover
{ have "{q \<bullet> a, p \<bullet> a} \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))"
using **
apply (auto simp add: supp_perm insert_eqvt)
apply (subgoal_tac "q \<bullet> a \<in> set bs \<union> p \<bullet> set bs")
apply(auto)[1]
apply(subgoal_tac "q \<bullet> a \<in> {a. q \<bullet> a \<noteq> a}")
apply(blast)
apply(simp)
done
then have "supp (q \<bullet> a \<rightleftharpoons> p \<bullet> a) \<subseteq> set (a # bs) \<union> p \<bullet> set (a # bs)" by (simp add: supp_swap)
moreover
have "supp q \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))"
using ** by (auto simp add: insert_eqvt)
ultimately
have "supp q' \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))"
unfolding q'_def using supp_plus_perm by blast
}
ultimately
have "\<exists>q. (\<forall>b \<in> set (a # bs). q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))" by blast
}
ultimately show "\<exists>q. (\<forall>b \<in> set (a # bs). q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> set (a # bs) \<union> p \<bullet> (set (a # bs))"
by blast
next
case Nil
have "(\<forall>b \<in> set []. 0 \<bullet> b = p \<bullet> b) \<and> supp (0::perm) \<subseteq> set [] \<union> p \<bullet> set []"
by (simp add: supp_zero_perm)
then show "\<exists>q. (\<forall>b \<in> set []. q \<bullet> b = p \<bullet> b) \<and> supp q \<subseteq> set [] \<union> p \<bullet> (set [])" by blast
qed
section {* Concrete Atoms Types *}
text {*
Class @{text at_base} allows types containing multiple sorts of atoms.
Class @{text at} only allows types with a single sort.
*}
(* at_base: permutation types with an injective, equivariant embedding
   into the generic atom type. *)
class at_base = pt +
fixes atom :: "'a \<Rightarrow> atom"
assumes atom_eq_iff [simp]: "atom a = atom b \<longleftrightarrow> a = b"
assumes atom_eqvt: "p \<bullet> (atom a) = atom (p \<bullet> a)"
declare atom_eqvt[eqvt]
(* at: all atoms of the type share one sort. *)
class at = at_base +
assumes sort_of_atom_eq [simp]: "sort_of (atom a) = sort_of (atom b)"
lemma sort_ineq [simp]:
assumes "sort_of (atom a) \<noteq> sort_of (atom b)"
shows "atom a \<noteq> atom b"
using assms by metis
(* The support of a concrete atom is the singleton of its embedding. *)
lemma supp_at_base:
fixes a::"'a::at_base"
shows "supp a = {atom a}"
by (simp add: supp_atom [symmetric] supp_def atom_eqvt)
lemma fresh_at_base:
shows "sort_of a \<noteq> sort_of (atom b) \<Longrightarrow> a \<sharp> b"
and "a \<sharp> b \<longleftrightarrow> a \<noteq> atom b"
unfolding fresh_def
apply(simp_all add: supp_at_base)
apply(metis)
done
lemma fresh_atom_at_base:
fixes b::"'a::at_base"
shows "a \<sharp> atom b \<longleftrightarrow> a \<sharp> b"
by (simp add: fresh_def supp_at_base supp_atom)
lemma fresh_star_atom_at_base:
fixes b::"'a::at_base"
shows "as \<sharp>* atom b \<longleftrightarrow> as \<sharp>* b"
by (simp add: fresh_star_def fresh_atom_at_base)
(* Freshness of atom a for x implies a \<noteq> x, letting conditionals on
   equality with x collapse to the else-branch. *)
lemma if_fresh_at_base [simp]:
shows "atom a \<sharp> x \<Longrightarrow> P (if a = x then t else s) = P s"
and "atom a \<sharp> x \<Longrightarrow> P (if x = a then t else s) = P s"
by (simp_all add: fresh_at_base)
(* Simproc: rewrites x \<noteq> y to True when a freshness premise in the
   simp context (after unfolding with fresh_Pair/fresh_at_base) already
   entails the disequality, in either orientation. *)
simproc_setup fresh_ineq ("x \<noteq> (y::'a::at_base)") = {* fn _ => fn ss => fn ctrm =>
let
fun first_is_neg lhs rhs [] = NONE
| first_is_neg lhs rhs (thm::thms) =
(case Thm.prop_of thm of
_ $ (@{term "HOL.Not"} $ (Const ("HOL.eq", _) $ l $ r)) =>
(if l = lhs andalso r = rhs then SOME(thm)
else if r = lhs andalso l = rhs then SOME(thm RS @{thm not_sym})
else first_is_neg lhs rhs thms)
| _ => first_is_neg lhs rhs thms)
val simp_thms = @{thms fresh_Pair fresh_at_base atom_eq_iff}
val prems = Simplifier.prems_of ss
|> filter (fn thm => case Thm.prop_of thm of
_ $ (Const (@{const_name fresh}, _) $ _ $ _) => true | _ => false)
|> map (simplify (HOL_basic_ss addsimps simp_thms))
|> map HOLogic.conj_elims
|> flat
in
case term_of ctrm of
@{term "HOL.Not"} $ (Const ("HOL.eq", _) $ lhs $ rhs) =>
(case first_is_neg lhs rhs prems of
SOME(thm) => SOME(thm RS @{thm Eq_TrueI})
| NONE => NONE)
| _ => NONE
end
*}
(* Every at_base type is finitely supported (singleton support). *)
instance at_base < fs
proof qed (simp add: supp_at_base)
(* at_base types are infinite: a finite universe would contradict the
   existence of a fresh same-sorted atom from obtain_atom. *)
lemma at_base_infinite [simp]:
shows "infinite (UNIV :: 'a::at_base set)" (is "infinite ?U")
proof
obtain a :: 'a where "True" by auto
assume "finite ?U"
hence "finite (atom ` ?U)"
by (rule finite_imageI)
then obtain b where b: "b \<notin> atom ` ?U" "sort_of b = sort_of (atom a)"
by (rule obtain_atom)
from b(2) have "b = atom ((atom a \<rightleftharpoons> b) \<bullet> a)"
unfolding atom_eqvt [symmetric]
by (simp add: swap_atom)
hence "b \<in> atom ` ?U" by simp
with b(1) show "False" by simp
qed
(* Simp rules lifting swap behaviour from atoms to at_base elements. *)
lemma swap_at_base_simps [simp]:
fixes x y::"'a::at_base"
shows "sort_of (atom x) = sort_of (atom y) \<Longrightarrow> (atom x \<rightleftharpoons> atom y) \<bullet> x = y"
and "sort_of (atom x) = sort_of (atom y) \<Longrightarrow> (atom x \<rightleftharpoons> atom y) \<bullet> y = x"
and "atom x \<noteq> a \<Longrightarrow> atom x \<noteq> b \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> x = x"
unfolding atom_eq_iff [symmetric]
unfolding atom_eqvt [symmetric]
by simp_all
(* Any finite atom set can be avoided by some concrete atom, since the
   preimage of a finite set under the injective atom map is finite while
   the type itself is infinite. *)
lemma obtain_at_base:
assumes X: "finite X"
obtains a::"'a::at_base" where "atom a \<notin> X"
proof -
have "inj (atom :: 'a \<Rightarrow> atom)"
by (simp add: inj_on_def)
with X have "finite (atom -` X :: 'a set)"
by (rule finite_vimageI)
with at_base_infinite have "atom -` X \<noteq> (UNIV :: 'a set)"
by auto
then obtain a :: 'a where "atom a \<notin> X"
by auto
thus ?thesis ..
qed
(* Fresh-atom chooser for finitely supported x. *)
lemma obtain_fresh':
assumes fin: "finite (supp x)"
obtains a::"'a::at_base" where "atom a \<sharp> x"
using obtain_at_base[where X="supp x"]
by (auto simp add: fresh_def fin)
lemma supp_finite_set_at_base:
assumes a: "finite S"
shows "supp S = atom ` S"
apply(simp add: supp_of_finite_sets[OF a])
apply(simp add: supp_at_base)
apply(auto)
done
(* FIXME
lemma supp_cofinite_set_at_base:
assumes a: "finite (UNIV - S)"
shows "supp S = atom ` (UNIV - S)"
apply(rule finite_supp_unique)
*)
lemma fresh_finite_set_at_base:
fixes a::"'a::at_base"
assumes a: "finite S"
shows "atom a \<sharp> S \<longleftrightarrow> a \<notin> S"
unfolding fresh_def
apply(simp add: supp_finite_set_at_base[OF a])
apply(subst inj_image_mem_iff)
apply(simp add: inj_on_def)
apply(simp)
done
lemma fresh_at_base_permute_iff [simp]:
fixes a::"'a::at_base"
shows "atom (p \<bullet> a) \<sharp> p \<bullet> x \<longleftrightarrow> atom a \<sharp> x"
unfolding atom_eqvt[symmetric]
by (simp add: fresh_permute_iff)
section {* Infrastructure for concrete atom types *}
(* flip (a \<leftrightarrow> b): the swap of two concrete atoms, lifted from the raw
   swap on the atom type.  The lemmas below transfer the standard swap
   properties (self, commute, involution, cancellation, equivariance). *)
definition
flip :: "'a::at_base \<Rightarrow> 'a \<Rightarrow> perm" ("'(_ \<leftrightarrow> _')")
where
"(a \<leftrightarrow> b) = (atom a \<rightleftharpoons> atom b)"
lemma flip_self [simp]: "(a \<leftrightarrow> a) = 0"
unfolding flip_def by (rule swap_self)
lemma flip_commute: "(a \<leftrightarrow> b) = (b \<leftrightarrow> a)"
unfolding flip_def by (rule swap_commute)
lemma minus_flip [simp]: "- (a \<leftrightarrow> b) = (a \<leftrightarrow> b)"
unfolding flip_def by (rule minus_swap)
lemma add_flip_cancel: "(a \<leftrightarrow> b) + (a \<leftrightarrow> b) = 0"
unfolding flip_def by (rule swap_cancel)
lemma permute_flip_cancel [simp]: "(a \<leftrightarrow> b) \<bullet> (a \<leftrightarrow> b) \<bullet> x = x"
unfolding permute_plus [symmetric] add_flip_cancel by simp
lemma permute_flip_cancel2 [simp]: "(a \<leftrightarrow> b) \<bullet> (b \<leftrightarrow> a) \<bullet> x = x"
by (simp add: flip_commute)
lemma flip_eqvt [eqvt]:
fixes a b c::"'a::at_base"
shows "p \<bullet> (a \<leftrightarrow> b) = (p \<bullet> a \<leftrightarrow> p \<bullet> b)"
unfolding flip_def
by (simp add: swap_eqvt atom_eqvt)
lemma flip_at_base_simps [simp]:
shows "sort_of (atom a) = sort_of (atom b) \<Longrightarrow> (a \<leftrightarrow> b) \<bullet> a = b"
and "sort_of (atom a) = sort_of (atom b) \<Longrightarrow> (a \<leftrightarrow> b) \<bullet> b = a"
and "\<lbrakk>a \<noteq> c; b \<noteq> c\<rbrakk> \<Longrightarrow> (a \<leftrightarrow> b) \<bullet> c = c"
and "sort_of (atom a) \<noteq> sort_of (atom b) \<Longrightarrow> (a \<leftrightarrow> b) \<bullet> x = x"
unfolding flip_def
unfolding atom_eq_iff [symmetric]
unfolding atom_eqvt [symmetric]
by simp_all
text {* the following two lemmas do not hold for at_base,
only for single sort atoms from at *}
lemma permute_flip_at:
fixes a b c::"'a::at"
shows "(a \<leftrightarrow> b) \<bullet> c = (if c = a then b else if c = b then a else c)"
unfolding flip_def
apply (rule atom_eq_iff [THEN iffD1])
apply (subst atom_eqvt [symmetric])
apply (simp add: swap_atom)
done
lemma flip_at_simps [simp]:
fixes a b::"'a::at"
shows "(a \<leftrightarrow> b) \<bullet> a = b"
and "(a \<leftrightarrow> b) \<bullet> b = a"
unfolding permute_flip_at by simp_all
(* A flip of two atoms fresh for x fixes x (via swap_fresh_fresh). *)
lemma flip_fresh_fresh:
fixes a b::"'a::at_base"
assumes "atom a \<sharp> x" "atom b \<sharp> x"
shows "(a \<leftrightarrow> b) \<bullet> x = x"
using assms
by (simp add: flip_def swap_fresh_fresh)
subsection {* Syntax for coercing at-elements to the atom-type *}
(* a:::'t parses as atom applied to a type-constrained a. *)
syntax
"_atom_constrain" :: "logic \<Rightarrow> type \<Rightarrow> logic" ("_:::_" [4, 0] 3)
translations
"_atom_constrain a t" => "CONST atom (_constrain a t)"
subsection {* A lemma for proving instances of class @{text at}. *}
(* Temporarily drop the sort constraints on permute and atom so the
   OFCLASS lemmas below can state them at unrestricted types; the
   constraints are restored by the two setup commands at the end. *)
setup {* Sign.add_const_constraint (@{const_name "permute"}, NONE) *}
setup {* Sign.add_const_constraint (@{const_name "atom"}, NONE) *}
text {*
New atom types are defined as subtypes of @{typ atom}.
*}
(* Non-emptiness witnesses for the typedef sets used below. *)
lemma exists_eq_simple_sort:
shows "\<exists>a. a \<in> {a. sort_of a = s}"
by (rule_tac x="Atom s 0" in exI, simp)
lemma exists_eq_sort:
shows "\<exists>a. a \<in> {a. sort_of a \<in> range sort_fun}"
by (rule_tac x="Atom (sort_fun x) y" in exI, simp)
(* Discharges OFCLASS('a, at_base_class) for a typedef over the atoms
   whose sort lies in the range of sort_fun, given the canonical
   definitions of atom and permute in terms of Rep/Abs. *)
lemma at_base_class:
fixes sort_fun :: "'b \<Rightarrow> atom_sort"
fixes Rep :: "'a \<Rightarrow> atom" and Abs :: "atom \<Rightarrow> 'a"
assumes type: "type_definition Rep Abs {a. sort_of a \<in> range sort_fun}"
assumes atom_def: "\<And>a. atom a = Rep a"
assumes permute_def: "\<And>p a. p \<bullet> a = Abs (p \<bullet> Rep a)"
shows "OFCLASS('a, at_base_class)"
proof
interpret type_definition Rep Abs "{a. sort_of a \<in> range sort_fun}" by (rule type)
have sort_of_Rep: "\<And>a. sort_of (Rep a) \<in> range sort_fun" using Rep by simp
fix a b :: 'a and p p1 p2 :: perm
show "0 \<bullet> a = a"
unfolding permute_def by (simp add: Rep_inverse)
show "(p1 + p2) \<bullet> a = p1 \<bullet> p2 \<bullet> a"
unfolding permute_def by (simp add: Abs_inverse sort_of_Rep)
show "atom a = atom b \<longleftrightarrow> a = b"
unfolding atom_def by (simp add: Rep_inject)
show "p \<bullet> atom a = atom (p \<bullet> a)"
unfolding permute_def atom_def by (simp add: Abs_inverse sort_of_Rep)
qed
(*
lemma at_class:
fixes s :: atom_sort
fixes Rep :: "'a \<Rightarrow> atom" and Abs :: "atom \<Rightarrow> 'a"
assumes type: "type_definition Rep Abs {a. sort_of a \<in> range (\<lambda>x::unit. s)}"
assumes atom_def: "\<And>a. atom a = Rep a"
assumes permute_def: "\<And>p a. p \<bullet> a = Abs (p \<bullet> Rep a)"
shows "OFCLASS('a, at_class)"
proof
interpret type_definition Rep Abs "{a. sort_of a \<in> range (\<lambda>x::unit. s)}" by (rule type)
have sort_of_Rep: "\<And>a. sort_of (Rep a) = s" using Rep by (simp add: image_def)
fix a b :: 'a and p p1 p2 :: perm
show "0 \<bullet> a = a"
unfolding permute_def by (simp add: Rep_inverse)
show "(p1 + p2) \<bullet> a = p1 \<bullet> p2 \<bullet> a"
unfolding permute_def by (simp add: Abs_inverse sort_of_Rep)
show "sort_of (atom a) = sort_of (atom b)"
unfolding atom_def by (simp add: sort_of_Rep)
show "atom a = atom b \<longleftrightarrow> a = b"
unfolding atom_def by (simp add: Rep_inject)
show "p \<bullet> atom a = atom (p \<bullet> a)"
unfolding permute_def atom_def by (simp add: Abs_inverse sort_of_Rep)
qed
*)
(* As at_base_class, but for a single-sorted typedef, discharging the
   stronger OFCLASS('a, at_class). *)
lemma at_class:
fixes s :: atom_sort
fixes Rep :: "'a \<Rightarrow> atom" and Abs :: "atom \<Rightarrow> 'a"
assumes type: "type_definition Rep Abs {a. sort_of a = s}"
assumes atom_def: "\<And>a. atom a = Rep a"
assumes permute_def: "\<And>p a. p \<bullet> a = Abs (p \<bullet> Rep a)"
shows "OFCLASS('a, at_class)"
proof
interpret type_definition Rep Abs "{a. sort_of a = s}" by (rule type)
have sort_of_Rep: "\<And>a. sort_of (Rep a) = s" using Rep by (simp add: image_def)
fix a b :: 'a and p p1 p2 :: perm
show "0 \<bullet> a = a"
unfolding permute_def by (simp add: Rep_inverse)
show "(p1 + p2) \<bullet> a = p1 \<bullet> p2 \<bullet> a"
unfolding permute_def by (simp add: Abs_inverse sort_of_Rep)
show "sort_of (atom a) = sort_of (atom b)"
unfolding atom_def by (simp add: sort_of_Rep)
show "atom a = atom b \<longleftrightarrow> a = b"
unfolding atom_def by (simp add: Rep_inject)
show "p \<bullet> atom a = atom (p \<bullet> a)"
unfolding permute_def atom_def by (simp add: Abs_inverse sort_of_Rep)
qed
(* Every element of a single-sorted atom typedef has the declared sort. *)
lemma at_class_sort:
fixes s :: atom_sort
fixes Rep :: "'a \<Rightarrow> atom" and Abs :: "atom \<Rightarrow> 'a"
fixes a::"'a"
assumes type: "type_definition Rep Abs {a. sort_of a = s}"
assumes atom_def: "\<And>a. atom a = Rep a"
shows "sort_of (atom a) = s"
using atom_def type
unfolding type_definition_def by simp
(* Restore the proper sort constraints removed at the start of this
   subsection. *)
setup {* Sign.add_const_constraint
(@{const_name "permute"}, SOME @{typ "perm \<Rightarrow> 'a::pt \<Rightarrow> 'a"}) *}
setup {* Sign.add_const_constraint
(@{const_name "atom"}, SOME @{typ "'a::at_base \<Rightarrow> atom"}) *}
section {* The freshness lemma according to Andy Pitts *}
lemma freshness_lemma:
fixes h :: "'a::at \<Rightarrow> 'b::pt"
assumes a: "\<exists>a. atom a \<sharp> (h, h a)"
shows "\<exists>x. \<forall>a. atom a \<sharp> h \<longrightarrow> h a = x"
proof -
from a obtain b where a1: "atom b \<sharp> h" and a2: "atom b \<sharp> h b"
by (auto simp add: fresh_Pair)
show "\<exists>x. \<forall>a. atom a \<sharp> h \<longrightarrow> h a = x"
proof (intro exI allI impI)
fix a :: 'a
assume a3: "atom a \<sharp> h"
show "h a = h b"
proof (cases "a = b")
assume "a = b"
thus "h a = h b" by simp
next
assume "a \<noteq> b"
hence "atom a \<sharp> b" by (simp add: fresh_at_base)
with a3 have "atom a \<sharp> h b"
by (rule fresh_fun_app)
with a2 have d1: "(atom b \<rightleftharpoons> atom a) \<bullet> (h b) = (h b)"
by (rule swap_fresh_fresh)
from a1 a3 have d2: "(atom b \<rightleftharpoons> atom a) \<bullet> h = h"
by (rule swap_fresh_fresh)
from d1 have "h b = (atom b \<rightleftharpoons> atom a) \<bullet> (h b)" by simp
also have "\<dots> = ((atom b \<rightleftharpoons> atom a) \<bullet> h) ((atom b \<rightleftharpoons> atom a) \<bullet> b)"
by (rule permute_fun_app_eq)
also have "\<dots> = h a"
using d2 by simp
finally show "h a = h b" by simp
qed
qed
qed
lemma freshness_lemma_unique:
fixes h :: "'a::at \<Rightarrow> 'b::pt"
assumes a: "\<exists>a. atom a \<sharp> (h, h a)"
shows "\<exists>!x. \<forall>a. atom a \<sharp> h \<longrightarrow> h a = x"
proof (rule ex_ex1I)
from a show "\<exists>x. \<forall>a. atom a \<sharp> h \<longrightarrow> h a = x"
by (rule freshness_lemma)
next
fix x y
assume x: "\<forall>a. atom a \<sharp> h \<longrightarrow> h a = x"
assume y: "\<forall>a. atom a \<sharp> h \<longrightarrow> h a = y"
from a x y show "x = y"
by (auto simp add: fresh_Pair)
qed
text {* packaging the freshness lemma into a function *}
definition
Fresh :: "('a::at \<Rightarrow> 'b::pt) \<Rightarrow> 'b"
where
"Fresh h = (THE x. \<forall>a. atom a \<sharp> h \<longrightarrow> h a = x)"
lemma Fresh_apply:
fixes h :: "'a::at \<Rightarrow> 'b::pt"
assumes a: "\<exists>a. atom a \<sharp> (h, h a)"
assumes b: "atom a \<sharp> h"
shows "Fresh h = h a"
unfolding Fresh_def
proof (rule the_equality)
show "\<forall>a'. atom a' \<sharp> h \<longrightarrow> h a' = h a"
proof (intro strip)
fix a':: 'a
assume c: "atom a' \<sharp> h"
from a have "\<exists>x. \<forall>a. atom a \<sharp> h \<longrightarrow> h a = x" by (rule freshness_lemma)
with b c show "h a' = h a" by auto
qed
next
fix fr :: 'b
assume "\<forall>a. atom a \<sharp> h \<longrightarrow> h a = fr"
with b show "fr = h a" by auto
qed
lemma Fresh_apply':
fixes h :: "'a::at \<Rightarrow> 'b::pt"
assumes a: "atom a \<sharp> h" "atom a \<sharp> h a"
shows "Fresh h = h a"
apply (rule Fresh_apply)
apply (auto simp add: fresh_Pair intro: a)
done
(* Fresh is equivariant: applying a permutation commutes with taking
   the fresh value. *)
lemma Fresh_eqvt:
  fixes h :: "'a::at \<Rightarrow> 'b::pt"
  assumes a: "\<exists>a. atom a \<sharp> (h, h a)"
  shows "p \<bullet> (Fresh h) = Fresh (p \<bullet> h)"
using a
apply (clarsimp simp add: fresh_Pair)
apply (subst Fresh_apply', assumption+)
apply (drule fresh_permute_iff [where p=p, THEN iffD2])
apply (drule fresh_permute_iff [where p=p, THEN iffD2])
apply (simp only: atom_eqvt permute_fun_app_eq [where f=h])
apply (erule (1) Fresh_apply' [symmetric])
done
(* The support of h supports Fresh h (a direct consequence of
   equivariance and swapping of fresh atoms). *)
lemma Fresh_supports:
  fixes h :: "'a::at \<Rightarrow> 'b::pt"
  assumes a: "\<exists>a. atom a \<sharp> (h, h a)"
  shows "(supp h) supports (Fresh h)"
apply (simp add: supports_def fresh_def [symmetric])
apply (simp add: Fresh_eqvt [OF a] swap_fresh_fresh)
done
(* Binder notation: FRESH x. e abbreviates Fresh (\<lambda>x. e). *)
notation Fresh (binder "FRESH " 10)
(* On pure types, FRESH commutes with applying a function. *)
lemma FRESH_f_iff:
  fixes P :: "'a::at \<Rightarrow> 'b::pure"
  fixes f :: "'b \<Rightarrow> 'c::pure"
  assumes P: "finite (supp P)"
  shows "(FRESH x. f (P x)) = f (FRESH x. P x)"
proof -
  obtain a::'a where "atom a \<sharp> P" using P by (rule obtain_fresh')
  show "(FRESH x. f (P x)) = f (FRESH x. P x)"
    apply (subst Fresh_apply' [where a=a, OF _ pure_fresh])
    apply (cut_tac `atom a \<sharp> P`)
    apply (simp add: fresh_conv_MOST)
    apply (elim MOST_rev_mp, rule MOST_I, clarify)
    apply (simp add: permute_fun_def permute_pure fun_eq_iff)
    apply (subst Fresh_apply' [where a=a, OF `atom a \<sharp> P` pure_fresh])
    apply (rule refl)
    done
qed
(* On pure types, FRESH commutes with any binary operation; the shared
   fresh atom a is chosen for the pair (P, Q). *)
lemma FRESH_binop_iff:
  fixes P :: "'a::at \<Rightarrow> 'b::pure"
  fixes Q :: "'a::at \<Rightarrow> 'c::pure"
  fixes binop :: "'b \<Rightarrow> 'c \<Rightarrow> 'd::pure"
  assumes P: "finite (supp P)"
  and     Q: "finite (supp Q)"
  shows "(FRESH x. binop (P x) (Q x)) = binop (FRESH x. P x) (FRESH x. Q x)"
proof -
  from assms have "finite (supp (P, Q))" by (simp add: supp_Pair)
  then obtain a::'a where "atom a \<sharp> (P, Q)" by (rule obtain_fresh')
  then have "atom a \<sharp> P" and "atom a \<sharp> Q" by (simp_all add: fresh_Pair)
  show ?thesis
    apply (subst Fresh_apply' [where a=a, OF _ pure_fresh])
    apply (cut_tac `atom a \<sharp> P` `atom a \<sharp> Q`)
    apply (simp add: fresh_conv_MOST)
    apply (elim MOST_rev_mp, rule MOST_I, clarify)
    apply (simp add: permute_fun_def permute_pure fun_eq_iff)
    apply (subst Fresh_apply' [where a=a, OF `atom a \<sharp> P` pure_fresh])
    apply (subst Fresh_apply' [where a=a, OF `atom a \<sharp> Q` pure_fresh])
    apply (rule refl)
    done
qed
(* Instances of FRESH_binop_iff for conjunction and disjunction. *)
lemma FRESH_conj_iff:
  fixes P Q :: "'a::at \<Rightarrow> bool"
  assumes P: "finite (supp P)" and Q: "finite (supp Q)"
  shows "(FRESH x. P x \<and> Q x) \<longleftrightarrow> (FRESH x. P x) \<and> (FRESH x. Q x)"
using P Q by (rule FRESH_binop_iff)
lemma FRESH_disj_iff:
  fixes P Q :: "'a::at \<Rightarrow> bool"
  assumes P: "finite (supp P)" and Q: "finite (supp Q)"
  shows "(FRESH x. P x \<or> Q x) \<longleftrightarrow> (FRESH x. P x) \<or> (FRESH x. Q x)"
using P Q by (rule FRESH_binop_iff)
section {* Library functions for the nominal infrastructure *}
use "nominal_library.ML"
section {* Automation for creating concrete atom types *}
text {* at the moment only single-sort concrete atoms are supported *}
use "nominal_atoms.ML"
section {* automatic equivariance procedure for inductive definitions *}
use "nominal_eqvt.ML"
(* A strict order on atom sorts, comparing the sort name first and then
   the argument list lexicographically. *)
instantiation atom_sort :: ord begin
fun less_atom_sort where
  "less_atom_sort (Sort s1 l1) (Sort s2 []) \<longleftrightarrow> s1 < s2"
| "less_atom_sort (Sort s1 []) (Sort s2 (h # t)) \<longleftrightarrow> s1 \<le> s2"
| "less_atom_sort (Sort s1 (h1 # t1)) (Sort s2 (h2 # t2)) \<longleftrightarrow> s1 < s2 \<or> s1 \<le> s2 \<and> ((less_atom_sort h1 h2) \<or> (h1 = h2 \<and> less_atom_sort (Sort s1 t1) (Sort s2 t2)))"
(* Non-strict order derived from the strict one in the usual way. *)
definition less_eq_atom_sort where
  less_eq_atom_sort_def: "less_eq_atom_sort (x :: atom_sort) y \<longleftrightarrow> x < y \<or> x = y"
instance ..
end
(* Characterisation of < on constructed sorts in terms of the list order. *)
lemma less_st_less: "(Sort s1 l1) < (Sort s2 l2) \<longleftrightarrow> s1 < s2 \<or> s1 \<le> s2 \<and> l1 < l2"
  by (induct l1 l2 rule: list_induct2') auto
(* Irreflexivity of the strict order on atom sorts. *)
lemma not_as_le_as: "\<not>((x :: atom_sort) < x)"
  apply (rule less_atom_sort.induct[of "\<lambda>x y. x = y \<longrightarrow> \<not>x < y" "x" "x", simplified]) ..
(* The order on atom sorts is total. *)
instance atom_sort :: linorder
proof (default, auto simp add: less_eq_atom_sort_def not_as_le_as)
  fix x y :: atom_sort
  assume x: "x < y" "y < x"
  then show False
    by (induct x y rule: less_atom_sort.induct) (case_tac l1, auto)
  with x show "x = y"
    by (induct x y rule: less_atom_sort.induct) (case_tac l1, auto)
next
  fix x y z :: atom_sort
  assume "x < y" "y < z"
  then show "x < z"
    apply (induct x z arbitrary: y rule: less_atom_sort.induct)
    apply (case_tac [!] y) apply auto
    apply (case_tac [!] list2) apply auto
    apply (case_tac l1) apply auto[2]
    done
next
  fix x y :: atom_sort
  assume x: "\<not>x < y" "y \<noteq> x"
  then show "y < x"
    apply (induct x y rule: less_atom_sort.induct)
    apply auto
    apply (case_tac [!] l1)
    apply auto
    done
qed
(* Atoms are ordered first by their sort, then by their index. *)
instantiation atom :: linorder begin
definition less_eq_atom where
  [simp]: "less_eq_atom x y \<longleftrightarrow> sort_of x < sort_of y \<or> sort_of x \<le> sort_of y \<and> nat_of x \<le> nat_of y"
definition less_atom where
  [simp]: "less_atom x y \<longleftrightarrow> sort_of x < sort_of y \<or> sort_of x \<le> sort_of y \<and> nat_of x < nat_of y"
instance apply default
  apply auto
  apply (case_tac x, case_tac y)
  apply auto
  done
end
(* Executable equality on permutations via their representations,
   enabling code generation (see the export_code test below). *)
instantiation perm :: equal begin
definition "equal_perm a b \<longleftrightarrow> Rep_perm a = Rep_perm b"
instance
  apply default
  unfolding equal_perm_def perm_eq_rep ..
end
(* Test: export_code swap in SML *)
end
|
{"author": "goodlyrottenapple", "repo": "Nominal2-Isabelle", "sha": "214274ed6db74c19b8694fc5c8dd9cafa13b056a", "save_path": "github-repos/isabelle/goodlyrottenapple-Nominal2-Isabelle", "path": "github-repos/isabelle/goodlyrottenapple-Nominal2-Isabelle/Nominal2-Isabelle-214274ed6db74c19b8694fc5c8dd9cafa13b056a/Nominal/Nominal2_Base_Exec.thy"}
|
\subsection{Write Math}
\begin{frame}{write-math.com}
\begin{itemize}
\item a website where users can add labeled training data
\item works with desktop computers and touch devices
\item symbol recognition can be done by multiple classifiers
\item users can contribute formulas
\item users can vote for formulas
\item the user who wrote the formula can accept one of them
\end{itemize}
\end{frame}
\framedgraphic{Classify}{../images/classify.png}
\framedgraphic{Workflow}{../images/workflow.png}
\framedgraphic{User page}{../images/user-page.png}
\framedgraphic{Information about handwritten-data}{../images/view.png}
\framedgraphic{Non-mathematical symbols}{../images/yinyang.png}
\framedgraphic{Training}{../images/train.png}
\framedgraphic{Ranking}{../images/ranking.png}
\framedgraphic{Symbol page}{../images/symbol.png}
\begin{frame}{Statistics}
\begin{itemize}
\item 40 users
\item 1076 symbols
\item 5519 handwritten symbols (e.g. 195 times the letter \enquote{A})
\begin{itemize}
\item only 264 have 4 lines
\item only 36 have 5 lines
\item only 16 have 6 lines
\item only 19 have 7 lines or more
\item none has more than 12 lines
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}{First classification worker}
\begin{itemize}
\item preprocessing: Scale to fit into unit square while keeping the aspect
ratio
\item applies dynamic time warping
\item compares a new handwritten symbol with every handwritten symbol
in the database
\item[$\Rightarrow$] Classification time is in $\mathcal{O}(\text{handwritten symbols})$,
but we rather would like $\mathcal{O}(\text{symbols})$
\item the current server / workflow can only handle about 4000 handwritten
symbols
\item[$\Rightarrow$] Another way to classify is necessary
\end{itemize}
\end{frame}
|
{"hexsha": "2cd43122b4982cb212791c16906ba10e785b7d93", "size": 2028, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "presentations/Bachelor-Short/LaTeX/work-done.tex", "max_stars_repo_name": "tungel/LaTeX-examples", "max_stars_repo_head_hexsha": "9558d8b3c19776cb068b9753dcd3f88645dd7134", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-04-23T17:20:42.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-23T17:20:42.000Z", "max_issues_repo_path": "presentations/Bachelor-Short/LaTeX/work-done.tex", "max_issues_repo_name": "everbot/LaTeX-examples", "max_issues_repo_head_hexsha": "9558d8b3c19776cb068b9753dcd3f88645dd7134", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "presentations/Bachelor-Short/LaTeX/work-done.tex", "max_forks_repo_name": "everbot/LaTeX-examples", "max_forks_repo_head_hexsha": "9558d8b3c19776cb068b9753dcd3f88645dd7134", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7647058824, "max_line_length": 97, "alphanum_fraction": 0.6711045365, "num_tokens": 505}
|
[STATEMENT]
theorem quot_rep: "\<exists>a. A = \<lfloor>a\<rfloor>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
proof (cases A)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>y. \<lbrakk>A = Abs_quot y; y \<in> quot\<rbrakk> \<Longrightarrow> \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
fix R
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>y. \<lbrakk>A = Abs_quot y; y \<in> quot\<rbrakk> \<Longrightarrow> \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
assume R: "A = Abs_quot R"
[PROOF STATE]
proof (state)
this:
A = Abs_quot R
goal (1 subgoal):
1. \<And>y. \<lbrakk>A = Abs_quot y; y \<in> quot\<rbrakk> \<Longrightarrow> \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
assume "R \<in> quot"
[PROOF STATE]
proof (state)
this:
R \<in> quot
goal (1 subgoal):
1. \<And>y. \<lbrakk>A = Abs_quot y; y \<in> quot\<rbrakk> \<Longrightarrow> \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
R \<in> quot
[PROOF STEP]
have "\<exists>a. R = {x. a \<sim> x}"
[PROOF STATE]
proof (prove)
using this:
R \<in> quot
goal (1 subgoal):
1. \<exists>a. R = {x. a \<sim> x}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>a. R = {x. a \<sim> x}
goal (1 subgoal):
1. \<And>y. \<lbrakk>A = Abs_quot y; y \<in> quot\<rbrakk> \<Longrightarrow> \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
with R
[PROOF STATE]
proof (chain)
picking this:
A = Abs_quot R
\<exists>a. R = {x. a \<sim> x}
[PROOF STEP]
have "\<exists>a. A = Abs_quot {x. a \<sim> x}"
[PROOF STATE]
proof (prove)
using this:
A = Abs_quot R
\<exists>a. R = {x. a \<sim> x}
goal (1 subgoal):
1. \<exists>a. A = Abs_quot {x. a \<sim> x}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>a. A = Abs_quot {x. a \<sim> x}
goal (1 subgoal):
1. \<And>y. \<lbrakk>A = Abs_quot y; y \<in> quot\<rbrakk> \<Longrightarrow> \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>a. A = Abs_quot {x. a \<sim> x}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<exists>a. A = Abs_quot {x. a \<sim> x}
goal (1 subgoal):
1. \<exists>a. A = \<lfloor>a\<rfloor>
[PROOF STEP]
by (unfold eqv_class_def)
[PROOF STATE]
proof (state)
this:
\<exists>a. A = \<lfloor>a\<rfloor>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1094, "file": null, "length": 14}
|
# -*- coding: utf-8 -*-
## all SI units
########################################################################################
## Plot the membrane potential for a leaky integrate and fire neuron with current injection
## Author: Aditya Gilra
## Creation Date: 2012-06-08
## Modification Date: 2012-06-08
########################################################################################
#import os
#os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.append('../../../python')
## simulation parameters
SIMDT = 5e-5 # seconds; integration timestep passed to resetSim()
PLOTDT = 5e-5 # seconds; sampling interval used for the plot time axis
RUNTIME = 2.0 # seconds of simulated time run by moose.start()
injectI = 1e-8#2.5e-12 # Amperes; current injected into the presynaptic soma
## moose imports
import moose
from moose.neuroml import *
from moose.utils import * # has setupTable(), resetSim() etc
import math
## import numpy and matplotlib in matlab style commands
from pylab import *
def create_twoLIFs():
    """Read twoLIFs.net.xml and build the two-LIF network in MOOSE.

    Returns (populationDict, projectionDict) where
    populationDict = { 'populationname1':(cellname,{instanceid1:moosecell, ... }) , ... }
    projectionDict = { 'projectionname1':(source,target,[(syn_name1,pre_seg_path,post_seg_path),...]) , ... }
    """
    reader = NetworkML({'temperature':37.0,'model_dir':'.'})
    populationDict, projectionDict = reader.readNetworkMLFromFile(
        'twoLIFs.net.xml', {}, params={})
    return populationDict, projectionDict
def run_twoLIFs():
    """Reset MOOSE's clocks and run the simulation for RUNTIME seconds."""
    ## reset and run the simulation
    print "Reinit MOOSE."
    ## from moose_utils.py sets clocks and resets; 'ee' selects the
    ## exponential-Euler integration method
    resetSim(['/cells[0]'], SIMDT, PLOTDT, simmethod='ee')
    print "Running now..."
    moose.start(RUNTIME)
if __name__ == '__main__':
    ## Build the two-LIF network described in twoLIFs.net.xml.
    populationDict,projectionDict = create_twoLIFs()
    ## element returns the right element and error if not present
    IF1Soma = moose.element(populationDict['LIFs'][1][0].path+'/soma_0')
    ## Inject current only into the first (presynaptic) neuron.
    IF1Soma.inject = injectI
    IF2Soma = moose.element(populationDict['LIFs'][1][1].path+'/soma_0')
    IF2Soma.inject = 0.0#injectI*2.0
    #IF2Soma.inject = injectI
    ## Record Vm of both somas into tables.
    IF1vmTable = setupTable("vmTableIF1",IF1Soma,'Vm')
    IF2vmTable = setupTable("vmTableIF2",IF2Soma,'Vm')
    table_path = moose.Neutral(IF1Soma.path+'/data').path
    IF1spikesTable = moose.Table(table_path+'/spikesTable')
    moose.connect(IF1Soma,'spikeOut',IF1spikesTable,'input') ## spikeGen gives spiketimes
    ## record Ik (channel current, via 'getIk' below) of the synapse on IF2
    #print IF2Soma.children
    IF2SynChanTable = moose.Table(table_path+'/synChanTable')
    moose.connect(IF2SynChanTable,'requestOut',IF2Soma.path+'/exc_syn','getIk')
    run_twoLIFs()
    print "Spiketimes :",IF1spikesTable.vector
    ## plot the membrane potential of the neuron
    ## time axis matches PLOTDT sampling of the tables
    timevec = arange(0.0,RUNTIME+PLOTDT/2.0,PLOTDT)
    ## presynaptic Vm, converted from V to mV
    figure(facecolor='w')
    plot(timevec, IF1vmTable.vector*1000,'r-')
    xlabel('time(s)')
    ylabel('Vm (mV)')
    title('Vm of presynaptic IntFire')
    ## postsynaptic Vm, converted from V to mV
    figure(facecolor='w')
    plot(timevec, IF2vmTable.vector*1000,'b-')
    xlabel('time(s)')
    ylabel('Vm (mV)')
    title('Vm of postsynaptic IntFire')
    ## synaptic current into the postsynaptic cell, converted from A to pA
    figure(facecolor='w')
    plot(timevec, IF2SynChanTable.vector*1e12,'b-')
    xlabel('time(s)')
    ylabel('Ik (pA)')
    title('Ik entering postsynaptic IntFire')
    show()
|
{"hexsha": "aed0ef29b58286a4c03737530955c3dcd0917be1", "size": 3082, "ext": "py", "lang": "Python", "max_stars_repo_path": "tutorials/chemical switches/moose/neuroml/LIF/twoLIFxml_firing.py", "max_stars_repo_name": "h-mayorquin/camp_india_2016", "max_stars_repo_head_hexsha": "a8bf8db7778c39c7ca959a7f876c1aa85f2cae8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-04-10T07:38:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-15T18:33:18.000Z", "max_issues_repo_path": "tutorials/chemical switches/moose/neuroml/LIF/twoLIFxml_firing.py", "max_issues_repo_name": "h-mayorquin/camp_india_2016", "max_issues_repo_head_hexsha": "a8bf8db7778c39c7ca959a7f876c1aa85f2cae8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/chemical switches/moose/neuroml/LIF/twoLIFxml_firing.py", "max_forks_repo_name": "h-mayorquin/camp_india_2016", "max_forks_repo_head_hexsha": "a8bf8db7778c39c7ca959a7f876c1aa85f2cae8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8372093023, "max_line_length": 116, "alphanum_fraction": 0.6492537313, "include": true, "reason": "import numpy", "num_tokens": 903}
|
import unittest
import random
import numpy as np
from src.data_arrays import DataArrays
from collections import Counter
class TestDataArrays(unittest.TestCase):
    """Unit tests for the DataArrays helper class."""

    data_arrays: DataArrays

    def setUp(self):
        # Fresh instance per test.
        self.data_arrays = DataArrays()

    def test_remove_duplicates(self):
        # After deduplication every value must occur exactly once.
        values = np.random.randint(0, 9, 200)
        deduped = self.data_arrays.remove_duplicates(values)
        counts = Counter(deduped)
        for count in counts.values():
            self.assertEqual(count, 1)

    def test_sort_with_duplicates(self):
        # sort() is expected to also drop duplicates, shrinking the input.
        values = np.random.randint(0, 9, 200)
        result = self.data_arrays.sort(values)
        self.assertLess(len(result), len(values))

    def test_sort_with_decimals(self):
        # Each element must be strictly smaller than its successor.
        values = np.random.rand(200)
        result = self.data_arrays.sort(values)
        for left, right in zip(result, result[1:]):
            self.assertLess(left, right)
|
{"hexsha": "75a51ffda84d039411028367487a0947265e74ee", "size": 911, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/test_data_arrays.py", "max_stars_repo_name": "marciojustino/data-arrays-lib", "max_stars_repo_head_hexsha": "d2495eb9d00d5ee3a885d6f215d9c28eba9fab66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test_data_arrays.py", "max_issues_repo_name": "marciojustino/data-arrays-lib", "max_issues_repo_head_hexsha": "d2495eb9d00d5ee3a885d6f215d9c28eba9fab66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test_data_arrays.py", "max_forks_repo_name": "marciojustino/data-arrays-lib", "max_forks_repo_head_hexsha": "d2495eb9d00d5ee3a885d6f215d9c28eba9fab66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6060606061, "max_line_length": 59, "alphanum_fraction": 0.6377607025, "include": true, "reason": "import numpy", "num_tokens": 215}
|
#This script normalizes reactivity (theta) values from a reactivities.out file produced by spats v 0.8.0.
#It does so following the method outlined in Lucks et al PNAS (2011).
#The top 2% of thetas are excluded.
#Then the 3-10th percentiles of thetas are averaged and all theta values are then normalized by this value.
#This script will also filter out any RNAs that have less than <filter> number of stops in
#the + channel. This is to remove RNAs for which there were very few reads.
#The reactivities.out file from spats supplied to this script must be sorted by RNA name (field 1)
#and nucleotide sequence (field 3). This is the default output.
#Also, when mode == splitfasta, this will split a fasta file of many sequences into many individual fasta
#files of one sequence each. This is useful in preparation for submitting them to the partition function
#in RNAstructure. If mode == SHAPEfiles, puts SHAPE reactivity files for use with RNAstructure into a separate
#directory. These fasta and SHAPEfile directories are then ready for use with shape_rnastructure.py
import argparse
from numpy import mean
import os
from Bio import SeqIO
import sys
def filterRNAbystops(reactivities, minstops):
    """Return the names of RNAs with enough stops in the treated channel.

    reactivities -- path to a tab-separated reactivities.out file from spats
    minstops -- minimum cumulative treated-channel (+) stops, summed over
                all nucleotide positions, for an RNA to pass (str or int)

    Returns a list of RNA names (field 1 of reactivities.out) passing the
    filter.
    """
    print 'Filtering RNAs by the number of stops in the + channel...'
    minstops = int(minstops)
    reactivitiesfh = open(reactivities, 'r')
    stopsdict = {} # {RNA : number of cumulative stops in + channel at all nucleotide positions}
    filteredRNAs = [] #list of RNAs that pass the stops filter
    # First pass: sum treated_mods (column 5) per RNA.
    for line in reactivitiesfh:
        line = line.strip().split('\t')
        if line[0] != 'sequence': #skip header
            sequence = line[0]
            treatedmods = int(line[4])
            if sequence in stopsdict:
                stopsdict[sequence] += treatedmods
            elif sequence not in stopsdict:
                stopsdict[sequence] = treatedmods
    # Second pass: keep RNAs meeting the threshold.
    for RNA in stopsdict:
        if stopsdict[RNA] >= minstops:
            filteredRNAs.append(RNA)
    print '{0} of {1} RNAs have at least {2} stops in the treated channel.'.format(len(filteredRNAs), len(stopsdict), minstops)
    reactivitiesfh.close()
    return filteredRNAs
def getThetanormfactor(reactivities):
    """Compute the 2%/8% normalization factor for theta values.

    Collects theta (column 8) for all RNAs, sorts in descending order,
    discards the top 2% and returns the mean of the following 8%
    (Lucks et al., PNAS 2011).

    NOTE(review): the slice below starts at top2percentile + 1, which
    skips one extra value (index top2percentile) beyond the top 2% --
    possibly an off-by-one; confirm against the intended 2%/8% rule.
    NOTE(review): positions are capped at < 109 per the comment below;
    confirm this matches the construct length of the experiment.
    """
    print 'Normalizing theta values according to the 2%/8% rule...'
    reactivitiesfh = open(reactivities, 'r')
    reactivities = [] #list of all reactivity values, unnormalized
    for line in reactivitiesfh:
        line = line.strip().split('\t')
        #skip header and the 5' most nucleotide of all RNAs, which has theta = '-' and any nucleotide after 108, which never has any stops in either channel
        if line[0] != 'sequence' and int(line[2]) > 0 and int(line[2]) < 109:
            theta = float(line[7])
            reactivities.append(theta)
    reactivities = sorted(reactivities, reverse=True)
    top2percentile = int(len(reactivities) * 0.02)
    top10percentile = int(len(reactivities) * 0.1)
    print 'There were {0} reactivities. The top 2% are therefore the first {1} and the top 10% are the first {2}.'.format(len(reactivities), top2percentile, top10percentile)
    #Calculate average of 3-10 percentile
    normfactor = float(mean(reactivities[top2percentile + 1 : top10percentile]))
    print 'After throwing out the top 2%, the average reactivity of the next 8% is {0}.'.format(normfactor)
    reactivitiesfh.close()
    return normfactor
def writeNormalizedreactivities(reactivities, normfactor, outfile):
    """Write a copy of reactivities.out with a normalized_theta column.

    reactivities -- path to the input reactivities.out (tab-separated)
    normfactor -- normalization factor (from getThetanormfactor); each
                  theta is divided by it
    outfile -- path of the normalized output file

    The 5'-most nucleotide of each RNA has theta == '-'; it is passed
    through with '-' in the normalized_theta column as well.
    """
    header = ['sequence', 'rt_start', 'five_prime_offset', 'nucleotide',
              'treated_mods', 'untreated_mods', 'beta', 'theta',
              'normalized_theta', 'c']
    # Context managers guarantee both handles are closed even on error
    # (the original implementation never closed the input handle).
    with open(reactivities, 'r') as reactivitiesfh, open(outfile, 'w') as outfh:
        outfh.write('\t'.join(header) + '\n')
        for line in reactivitiesfh:
            fields = line.strip().split('\t')
            if fields[0] == 'sequence':  # skip input header
                continue
            # theta == '-' marks the 5'-most nucleotide; it has no
            # meaningful reactivity and cannot be normalized.
            if fields[7] == '-':
                normalized = '-'
            else:
                normalized = str(float(fields[7]) / normfactor)
            outfh.write('\t'.join(fields[0:8] + [normalized, fields[8]]) + '\n')
def writeSHAPEfiles(reactivities, normfactor, filteredRNAs, outdir):
    """Write one RNAstructure .SHAPE file per RNA that passed the filter.

    reactivities -- path to reactivities.out from spats
    normfactor -- normalization factor; each theta is divided by it
    filteredRNAs -- RNA names from filterRNAbystops(); RNAs not in this
                    collection are skipped. (Bug fix: the original
                    accepted this argument but never used it, so
                    unfiltered RNAs were written too.)
    outdir -- directory for the .SHAPE files (created if missing)

    Only nucleotide positions 1-132 are written: position 0 has
    theta == '-' and positions >= 133 are zero due to RT primer binding.
    """
    wanted = set(filteredRNAs)  # O(1) membership tests
    outdir = os.path.abspath(outdir)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    outfh = None
    with open(reactivities, 'r') as reactivitiesfh:
        for line in reactivitiesfh:
            fields = line.strip().split('\t')
            if fields[0] == 'sequence':  # skip header
                continue
            RNAname = fields[0]
            if RNAname not in wanted:  # apply the stops filter
                continue
            nucleotide = int(fields[2])
            if nucleotide == 0:
                # First line of a new RNA: start its .SHAPE file,
                # closing the previous one (fixes a handle leak for
                # RNAs whose positions never reach 133).
                if outfh is not None:
                    outfh.close()
                outfh = open(os.path.join(outdir, RNAname + '.SHAPE'), 'w')
            elif 0 < nucleotide < 133 and outfh is not None:
                normalizedtheta = float(fields[7]) / normfactor
                outfh.write(str(nucleotide) + '\t' + str(normalizedtheta) + '\n')
    if outfh is not None:
        outfh.close()
def splitfasta(fasta, outdir):
    """Split a multi-sequence fasta file into single-sequence files.

    Each record becomes <record id>.fasta inside outdir (created if
    missing), ready for per-sequence submission to RNAstructure.
    """
    target = os.path.abspath(outdir)
    if not os.path.isdir(target):
        os.mkdir(target)
    for record in SeqIO.parse(fasta, 'fasta'):
        destination = os.path.join(target, record.id + '.fasta')
        outfh = open(destination, 'w')
        outfh.write('>' + str(record.id) + '\n')
        outfh.write(str(record.seq))
        outfh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--reactivities', type = str, help = 'reactivities.out from spats output.')
parser.add_argument('--outfile', type = str, help = 'Required if writing normalized reactivities.out.')
parser.add_argument('--minstops', type = str, help = 'Required if writing SHAPE files. Minimum number of stops required in the + channel for an RNA to be considered.')
parser.add_argument('--fasta', type = str, help = 'Required if using fastasplit mode. A fasta file of many sequences to be split to many small files of one sequence each.')
parser.add_argument('--outdir', type = str, help = 'Required if writing SHAPE reactivity files for RNAstructure OR splitting fasta file into many files with a single sequence.')
parser.add_argument('--mode', type = str, required = True, choices = ['normalizetheta', 'SHAPEfiles', 'splitfasta'],
help = 'Choose normalizetheta to output a reactivities.out file with normalized theta values. Choose SHAPEfiles to output many SHAPE reactivity files for use with RNA structure. Choose fastasplit to split a fasta file of many sequences into many files of one sequence each.')
args = parser.parse_args()
if args.mode == 'normalizetheta':
if args.reactivities == None:
print 'Error: must provide reactivities.out from spats output.'
sys.exit()
normfactor = getThetanormfactor(args.reactivities)
writeNormalizedreactivities(args.reactivities, normfactor, args.outfile)
elif args.mode == 'SHAPEfiles':
if args.reactivities == None:
print 'Error: must provide reactivities.out from spats output.'
sys.exit()
filteredRNAs = filterRNAbystops(args.reactivities, args.minstops)
normfactor = getThetanormfactor(args.reactivities)
writeSHAPEfiles(args.reactivities, normfactor, filteredRNAs, args.outdir)
elif args.mode == 'splitfasta':
if args.fasta == None or args.outdir == None:
print 'Error: must provide a fasta to split and a directory to put split files in.'
sys.exit()
splitfasta(args.fasta, args.outdir)
|
{"hexsha": "67ba85a5c2747423056da798897ef6bf54941a40", "size": 7986, "ext": "py", "lang": "Python", "max_stars_repo_path": "shape_normalizereactivities.py", "max_stars_repo_name": "TaliaferroLab/AnalysisScripts", "max_stars_repo_head_hexsha": "3df37d2f8fca9bc402afe5ea870c42200fca1ed3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shape_normalizereactivities.py", "max_issues_repo_name": "TaliaferroLab/AnalysisScripts", "max_issues_repo_head_hexsha": "3df37d2f8fca9bc402afe5ea870c42200fca1ed3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shape_normalizereactivities.py", "max_forks_repo_name": "TaliaferroLab/AnalysisScripts", "max_forks_repo_head_hexsha": "3df37d2f8fca9bc402afe5ea870c42200fca1ed3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-30T07:37:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T07:37:19.000Z", "avg_line_length": 52.5394736842, "max_line_length": 299, "alphanum_fraction": 0.6686701728, "include": true, "reason": "from numpy", "num_tokens": 1994}
|
classdef CEC2008_F4 < PROBLEM
% <single> <real> <large/none> <expensive/none>
% Shifted Rastrigin's function
%------------------------------- Reference --------------------------------
% K. Tang, X. Yao, P. N. Suganthan, C. MacNish, Y.-P. Chen, C.-M. Chen, and
% Z. Yang, Benchmark functions for the CEC'2008 special session and
% competition on large scale global optimization, Nature Inspired
% Computation and Applications Laboratory, USTC, China, 2007.
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
% for evolutionary multi-objective optimization [educational forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------

    properties
        O; % Optimal decision vector
    end
    methods
        %% Default settings of the problem
        function Setting(obj)
            % Load the shift vector for F4 from CEC2008.mat, located
            % next to this source file (found via the call stack).
            CallStack = dbstack('-completenames');
            load(fullfile(fileparts(CallStack(1).file),'CEC2008.mat'),'Data');
            obj.O = Data{4};
            % Single-objective problem; default 100 decision variables,
            % capped by the length of the stored shift vector.
            obj.M = 1;
            if isempty(obj.D); obj.D = 100; end
            obj.D = min(obj.D,length(obj.O));
            % Search range [-5, 5] per dimension, real encoding.
            obj.lower    = zeros(1,obj.D) - 5;
            obj.upper    = zeros(1,obj.D) + 5;
            obj.encoding = ones(1,obj.D);
        end
        %% Calculate objective values
        function PopObj = CalObj(obj,PopDec)
            % Shift by the optimum O, then apply Rastrigin's function
            % row-wise: sum(z^2 - 10*cos(2*pi*z) + 10).
            Z = PopDec - repmat(obj.O(1:size(PopDec,2)),size(PopDec,1),1);
            PopObj = sum(Z.^2-10*cos(2*pi*Z)+10,2);
        end
    end
end
|
{"author": "BIMK", "repo": "PlatEMO", "sha": "c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5", "save_path": "github-repos/MATLAB/BIMK-PlatEMO", "path": "github-repos/MATLAB/BIMK-PlatEMO/PlatEMO-c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5/PlatEMO/Problems/Single-objective optimization/CEC 2008/CEC2008_F4.m"}
|
#include "engine/oblique_engine.hpp"
#include <boost/scoped_array.hpp>
// Render one level chunk with an oblique projection: every visible block
// paints a "top" pixel at (px, py) and a "side" pixel directly below it
// at (px, py + 1) into oper.
void oblique_engine::render(level_ptr level, boost::shared_ptr<image_operations> oper)
{
  // Projection cube is one unit larger than the map in every dimension.
  Cube part_c(mc::MapX + 1, mc::MapY + 1, mc::MapZ + 1);
  pos_t iw, ih;  // projected image width/height
  part_c.get_oblique_limits(iw, ih);
  // Rotation-aware accessors over the chunk's block type, block data,
  // blocklight and skylight arrays (s presumably holds the engine's
  // settings, including the requested rotation -- member of the engine).
  BlockRotation b_r(s, level->get_blocks());
  BlockRotation b_d(s, level->get_data());
  BlockRotation bl_r(s, level->get_blocklight());
  BlockRotation sl_r(s, level->get_skylight());
  // Occlusion map, one flag per image pixel: once an opaque top pixel is
  // written at a position, later (deeper) blocks there are skipped.
  pos_t bmt = iw * ih;
  boost::scoped_array<bool> blocked(new bool[bmt]);
  memset(blocked.get(), 0x0, sizeof(bool) * bmt);
  oper->set_limits(iw + 1, ih);
  // Iterate columns from far to near so nearer blocks draw last.
  for (int z = mc::MapZ - 1; z >= 0; z--) {
    for (int x = mc::MapX - 1; x >= 0; x--) {
      bool cave_initial = true;
      bool hell_initial = true;
      bool hell_solid = true;
      b_r.set_xz(x, z);
      b_d.set_xz(x, z);
      bl_r.set_xz(x, z);
      sl_r.set_xz(x, z);
      if (s.hellmode) {
        // A column is "solid" when no open block exists in the vertical
        // range; non-solid columns go through hell_ignore_block below.
        for (int y = s.top; y >= s.bottom && hell_solid; y--) { hell_solid = !is_open(b_r.get8(y)); }
      }
      // Scan the column from top to bottom.
      for (int y = s.top; y >= s.bottom; y--) {
        int bt = b_r.get8(y);
        // Per-mode and per-settings filtering of individual blocks.
        if (s.cavemode && cave_ignore_block(s, y, bt, b_r, cave_initial)) {
          continue;
        }
        if (s.hellmode && !hell_solid && hell_ignore_block(s, y, bt, b_r, hell_initial)) {
          continue;
        }
        if (s.excludes[bt]) {
          continue;
        }
        // Project the world coordinate into image coordinates.
        point p(x, y, z);
        pos_t px, py;
        part_c.project_oblique(p, px, py);
        color top, side;
        if(bt == mc::Wool) {
          // Wool color depends on the block's data value, not its type.
          int md = b_d.get4(y);
          top = mc::WoolColor[md];
          side = mc::WoolColor[md];
        } else {
          top = mc::MaterialColor[bt];
          side = mc::MaterialSideColor[bt];
        }
        int bp = px + iw * py;
        if (blocked[bp]) {
          continue;  // already covered by an opaque pixel
        }
        blocked[bp] = top.is_opaque();
        // Shade with the light levels of the block directly above.
        int bl = bl_r.get4(y + 1);
        apply_shading(s, bl, sl_r.get4(y + 1), 0, y, top);
        oper->add_pixel(px, py, top);
        // -1 skylight for the side face (presumably "no skylight
        // contribution" -- confirm against apply_shading).
        apply_shading(s, bl, -1, 0, y, side);
        oper->add_pixel(px, py + 1, side);
      }
    }
  }
}
|
{"hexsha": "4d160c84858d3f9dae76da3ca7f1c481e4da5271", "size": 2263, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/engine/oblique_engine.cpp", "max_stars_repo_name": "eisbehr/c10t", "max_stars_repo_head_hexsha": "c30e55613fa0203cba84cb153392a55391279551", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2016-06-07T17:34:32.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-07T17:34:32.000Z", "max_issues_repo_path": "src/engine/oblique_engine.cpp", "max_issues_repo_name": "eisbehr/c10t", "max_issues_repo_head_hexsha": "c30e55613fa0203cba84cb153392a55391279551", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/engine/oblique_engine.cpp", "max_forks_repo_name": "eisbehr/c10t", "max_forks_repo_head_hexsha": "c30e55613fa0203cba84cb153392a55391279551", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7159090909, "max_line_length": 101, "alphanum_fraction": 0.5011047282, "num_tokens": 691}
|
# preprocess.r
# 20190319
message('Preprocessing: initial cleanup.')
# Remove columns that are unnecessary and/or artifacts of the merge process
# (per-instrument bookkeeping fields duplicated by the upstream merge).
abcd_frame <- abcd_frame %>%
  select(
    -contains('eventname'),
    -contains('collection_id'),
    -contains('collection_title'),
    -contains('study_cohort_name'),
    -contains('dataset_id'),
    -contains('visit'))
message('Preprocessing: transformations.')
# Recode family history of psychosis.
# 999 (presumably the instrument's missing/refused code -- confirm against
# the ABCD data dictionary) becomes NA; all other values also become NA
# because case_when has no catch-all branch.
abcd_frame <- abcd_frame %>%
  mutate(fh_psychosis = case_when(
    fam_history_8_yes_no == 1 ~ 1,
    fam_history_8_yes_no == 0 ~ 0,
    fam_history_8_yes_no == 999 ~ NA_real_))
# Recode sex; 3 is for the one intersex participant (who we'll later remove)
# NOTE(review): sex_index is derived from the original `sex` column *before*
# `sex` itself is overwritten from demo_sex_v2 on the following line.
abcd_frame <- abcd_frame %>%
  mutate(sex_index = recode(sex, M = 1, F = 0)) %>%
  mutate(sex = recode(demo_sex_v2, `1` = 1L, `2` = 0L, .default = 3L))
# Recode ethnicity; 3 is people without ethnicity information (to remove)
abcd_frame <- abcd_frame %>%
  mutate(ethnicity_latinx = recode(
    demo_ethn_v2, `1` = 1L, `2` = 0L, .default = 3L))
# Derive a "number of races" indicator to figure out whether someone's
# multiracial. Derive three mutually-exclusive categories: white, black, other.
abcd_frame <- abcd_frame %>%
  # Give the checkbox-style demo_race_a_p___* flags readable names.
  rename(race_white = demo_race_a_p___10) %>%
  rename(race_black = demo_race_a_p___11) %>%
  rename(race_native_american = demo_race_a_p___12) %>%
  rename(race_native_alaskan = demo_race_a_p___13) %>%
  rename(race_native_hawaiian = demo_race_a_p___14) %>%
  rename(race_guamanian = demo_race_a_p___15) %>%
  rename(race_samoan = demo_race_a_p___16) %>%
  rename(race_other_pacific_islander = demo_race_a_p___17) %>%
  rename(race_asian_indian = demo_race_a_p___18) %>%
  rename(race_chinese = demo_race_a_p___19) %>%
  rename(race_filipinx = demo_race_a_p___20) %>%
  rename(race_japanese = demo_race_a_p___21) %>%
  rename(race_korean = demo_race_a_p___22) %>%
  rename(race_vietnamese = demo_race_a_p___23) %>%
  rename(race_other_asian = demo_race_a_p___24) %>%
  rename(race_other_race = demo_race_a_p___25) %>%
  rename(race_refused_to_answer = demo_race_a_p___77) %>%
  rename(race_unknown = demo_race_a_p___99) %>%
  # Count of endorsed race flags.  Note that race_refused_to_answer is
  # excluded from the sum while race_unknown is included.
  mutate(n_races_raw =
    race_white +
    race_black +
    race_native_american +
    race_native_alaskan +
    race_native_hawaiian +
    race_guamanian +
    race_samoan +
    race_other_pacific_islander +
    race_asian_indian +
    race_chinese +
    race_filipinx +
    race_japanese +
    race_korean +
    race_vietnamese +
    race_other_asian +
    race_other_race +
    race_unknown) %>%
  # n_races is currently identical to n_races_raw; the zero -> 1 fixup
  # below is deliberately disabled.
  mutate(n_races = n_races_raw) %>%
  # if the person has zero races - we'll assume they have 1
  # mutate_at("n_races", function(x) if_else(x == 0, 1, x)) %>%
  mutate(multiracial =
    if_else(n_races > 1, 1, 0)) %>%
  # Mutually exclusive single-race indicators: each requires the flag to be
  # set AND multiracial == 0.
  mutate(race_white_only = if_else(
    race_white == 1 & multiracial == 0, 1, 0)) %>%
  mutate(race_black_only = if_else(
    race_black == 1 & multiracial == 0, 1, 0)) %>%
  mutate(race_asian_only = if_else(
    (race_asian_indian == 1 |
    race_chinese == 1 |
    race_filipinx == 1 |
    race_japanese == 1 |
    race_korean == 1 |
    race_vietnamese == 1 |
    race_other_asian == 1) & multiracial == 0, 1, 0)) %>%
  # "Other" bucket: any non-white/non-black/non-Asian endorsement, unknown
  # race, or multiracial status.
  mutate(race_other = if_else(
    race_native_american == 1 |
    race_native_alaskan == 1 |
    race_native_hawaiian == 1 |
    race_guamanian == 1 |
    race_samoan == 1 |
    race_other_pacific_islander == 1 |
    race_other_race == 1 |
    race_unknown == 1 |
    multiracial == 1, 1, 0)) %>%
  # Expanded "other" additionally folds in Asian-only participants.
  mutate(race_other_expanded = if_else(
    race_asian_only == 1 |
    race_native_american == 1 |
    race_native_alaskan == 1 |
    race_native_hawaiian == 1 |
    race_guamanian == 1 |
    race_samoan == 1 |
    race_other_pacific_islander == 1 |
    race_other_race == 1 |
    race_unknown == 1 |
    multiracial == 1, 1, 0))
# Lay the groundwork for deriving income-to-needs later
abcd_frame <- abcd_frame %>%
  rename(n_in_household = demo_roster_v2) %>%
  mutate(n_in_household_factor = n_in_household) %>%
  mutate(n_in_household_with_zero = n_in_household) %>%
  # Map each income category to (roughly) the upper bound of its bracket;
  # 999/777 are treated as missing.  Confirm the category meanings against
  # the ABCD data dictionary.
  mutate(household_income = case_when(
    demo_comb_income_v2 == 1 ~ 4999,
    demo_comb_income_v2 == 2 ~ 11999,
    demo_comb_income_v2 == 3 ~ 15999,
    demo_comb_income_v2 == 4 ~ 24999,
    demo_comb_income_v2 == 5 ~ 34999,
    demo_comb_income_v2 == 6 ~ 49999,
    demo_comb_income_v2 == 7 ~ 74999,
    demo_comb_income_v2 == 8 ~ 99999,
    demo_comb_income_v2 == 9 ~ 199999,
    demo_comb_income_v2 == 10 ~ 200000,
    demo_comb_income_v2 == 999 ~ NA_real_,
    demo_comb_income_v2 == 777 ~ NA_real_)) %>%
  rename(household_income_category = demo_comb_income_v2)
# NIH toolbox measures
abcd_frame <- abcd_frame %>%
  rename(tb_cardsort = nihtbx_cardsort_agecorrected) %>%
  rename(tb_pattern = nihtbx_pattern_agecorrected) %>%
  rename(tb_list = nihtbx_list_agecorrected) %>%
  rename(tb_picture = nihtbx_picture_agecorrected) %>%
  rename(tb_flanker = nihtbx_flanker_agecorrected) %>%
  rename(tb_fluid = nihtbx_fluidcomp_agecorrected)
# Country of origin
# NOTE(review): 189 is presumably the code for the USA -- confirm against
# the data dictionary before trusting the binary indicators.
abcd_frame <- abcd_frame %>%
  rename(country_of_origin_child = demo_origin_v2) %>%
  rename(country_of_origin_parent = demo_prnt_origin_v2) %>%
  mutate(binary_origin_child = if_else(
    country_of_origin_child == 189, 1, 0)) %>%
  mutate(binary_origin_parent = if_else(
    country_of_origin_parent == 189, 1, 0))
# Low birth weight/preterm status
# Weight is recorded as pounds + ounces; convert to grams via
# 0.45359237 kg/lb, then apply the standard <2500 g low-birth-weight cutoff.
abcd_frame <- abcd_frame %>%
  mutate(birth_weight_oz_decimal = birth_weight_oz/16) %>%
  mutate(birth_weight_grams =
    (birth_weight_lbs + birth_weight_oz_decimal)*0.45359237*1000) %>%
  mutate(lbw = if_else(birth_weight_grams < 2500, 1, 0)) %>%
  mutate(preterm = if_else(devhx_12a_p == 1, 1, 0))
# Other transformations
abcd_frame <- abcd_frame %>%
  rename(maternal_age_at_birth = devhx_3_p) %>%
  rename(paternal_age_at_birth = devhx_4_p) %>%
  mutate(age_years = interview_age/12) %>%
  # Trauma score: simple sum of the 17 KSADS PTSD raw items.
  mutate(trauma_score =
    ksads_ptsd_raw_754_p +
    ksads_ptsd_raw_755_p +
    ksads_ptsd_raw_756_p +
    ksads_ptsd_raw_757_p +
    ksads_ptsd_raw_758_p +
    ksads_ptsd_raw_759_p +
    ksads_ptsd_raw_760_p +
    ksads_ptsd_raw_761_p +
    ksads_ptsd_raw_762_p +
    ksads_ptsd_raw_763_p +
    ksads_ptsd_raw_764_p +
    ksads_ptsd_raw_765_p +
    ksads_ptsd_raw_766_p +
    ksads_ptsd_raw_767_p +
    ksads_ptsd_raw_768_p +
    ksads_ptsd_raw_769_p +
    ksads_ptsd_raw_770_p) %>%
  # treating no information about delayed speech as "no delayed speech"
  mutate(delayed_speech = if_else(devhx_21_p >= 4, 1, 0))
# If n_in_household is zero, replace with the modal value of 4
#abcd_frame <- abcd_frame %>%
#    mutate_at("n_in_household", function(x) if_else(x == 0, 4, x)) %>%
#    mutate_at("n_in_household_factor", function(x) if_else(x == 0, 4, x))
abcd_frame$interview_date <- mdy(abcd_frame$interview_date)
# Clean up and merge poverty threshold data: convert the written-out
# household sizes to integers, drop unmatched rows, and join the yearly
# threshold tables on household_size.
thresholds <- thresholds %>%
  map(select, -threshold) %>%
  map(mutate, household_size = case_when(
    grepl('One', household_size) ~ 1,
    grepl('Two', household_size) ~ 2,
    grepl('Three', household_size) ~ 3,
    grepl('Four', household_size) ~ 4,
    grepl('Five', household_size) ~ 5,
    grepl('Six', household_size) ~ 6,
    grepl('Seven', household_size) ~ 7,
    grepl('Eight', household_size) ~ 8,
    grepl('Nine', household_size) ~ 9)) %>%
  map(drop_na) %>%
  reduce(inner_join, by='household_size')
message('Preprocessing: ASD.')
# ASD as defined by CBCL profile.
# The 195 cutoff is the sum of three t-scores (i.e. an average of 65).
abcd_frame <- abcd_frame %>%
  mutate(cbcl_asd = if_else(
    cbcl_scr_syn_withdep_t +
    cbcl_scr_syn_thought_t +
    cbcl_scr_syn_social_t > 195, 1, 0))
# ASD as defined by short SRS score from 1-year follow-up.
# Simple sum of the 11 short-form SRS items; NA in any item makes the
# total NA.
abcd_frame <- abcd_frame %>%
  mutate(short_srs_total =
    ssrs_6_p +
    ssrs_15r_p +
    ssrs_16_p +
    ssrs_18_p +
    ssrs_24_p +
    ssrs_29_p +
    ssrs_35_p +
    ssrs_37_p +
    ssrs_39_p +
    ssrs_42_p +
    ssrs_58_p)
message('Preprocessing: psychosis.')
# This works but there's a lot of clumsy/inefficient/repetitious code here,
# particularly in the way 1 year follow-up scores are calculated.
construct_item <- function(number, type) {
  # Build the column name of one prodromal questionnaire item.
  #
  # `number`: item number (1-21).
  # `type`:
  #   1 = normal item                     -> prodromal_<n>_y
  #   2 = distress item                   -> prodromal_<n>b_y
  #   3 = normal item (1 year follow-up)  -> prodromal_<n>_y_1_yr
  #   4 = distress item (1 year follow-up)-> prodromal_<n>b_y_1_yr
  #
  # The original if/else chain silently returned NULL for any other
  # `type`, which would propagate NULLs into the generated score strings;
  # fail loudly instead.
  suffix <- switch(type,
    '_y',
    'b_y',
    '_y_1_yr',
    'b_y_1_yr')
  if (is.null(suffix)) {
    stop('construct_item: `type` must be 1, 2, 3 or 4')
  }
  paste0('prodromal_', number, suffix)
}
# Prodromal summary score: just the number of items endorsed
abcd_frame <- abcd_frame %>%
  mutate(prodromal_summary_score = rowSums(
    # Regex below will exclude variables ending in "r" so it doesn't pull in the
    # 1-year follow-up data
    select(., matches('prodromal_\\d{1,2}_y*[^r]$'))))
abcd_frame <- abcd_frame %>%
  mutate(prodromal_summary_score_1_yr = rowSums(
    select(., matches('prodromal_\\d{1,2}_y_1_yr'))))
# Prodromal distress score: severity-weighted version of summary score.
# Since prodromal distress items that were 0 are coded as NA, we need to fix
# this, but in a way that doesn't affect other NAs.
# NOTE(review): the replacement list is assembled as a string and run with
# eval(parse(...)).  This works but is fragile -- a typo in a column name
# only surfaces at runtime; consider replace_na(setNames(as.list(...), ...))
# instead.
distress_list <-
  1:20 %>%
  map(construct_item, 2) %>%
  paste0(' = 0, ') %>%
  paste0(collapse='') %>%
  paste0(construct_item(21, 2), ' = 0') %>%
  paste0('list(', ., ')')
distress_list_1_yr <-
  1:20 %>%
  map(construct_item, 4) %>%
  paste0(' = 0, ') %>%
  paste0(collapse='') %>%
  paste0(construct_item(21, 4), ' = 0') %>%
  paste0('list(', ., ')')
abcd_frame <- abcd_frame %>% replace_na(eval(parse(text=distress_list)))
abcd_frame <- abcd_frame %>% replace_na(eval(parse(text=distress_list_1_yr)))
construct_distress_score <- function(x) {
  # Expression "<item>*<distress item>" for prodromal item `x`:
  # endorsement weighted by its distress rating.
  paste0(construct_item(x, 1), '*', construct_item(x, 2))
}
construct_distress_score_1_yr <- function(x) {
  # Same as construct_distress_score, but for the 1-year follow-up columns.
  paste0(construct_item(x, 3), '*', construct_item(x, 4))
}
# Build "item1*item1b + item2*item2b + ..." as a string and evaluate it
# inside mutate().  NOTE(review): another eval(parse(...)) construction;
# errors in generated names only surface at runtime.
distress_scores <-
  1:20 %>%
  map(construct_distress_score) %>%
  paste0(' + ') %>%
  paste0(collapse='') %>%
  paste0(construct_distress_score(21))
distress_scores_1_yr <-
  1:20 %>%
  map(construct_distress_score_1_yr) %>%
  paste0(' + ') %>%
  paste0(collapse='') %>%
  paste0(construct_distress_score_1_yr(21))
abcd_frame <- abcd_frame %>%
  mutate(prodromal_distress_score = eval(parse(text=distress_scores)))
abcd_frame <- abcd_frame %>%
  mutate(prodromal_distress_score_1_yr = eval(parse(text=distress_scores_1_yr)))
# NOTE(review): `rescale` is assumed to standardize the score so that a
# value of 1 corresponds to 2 SD above the mean (per the comment below) --
# confirm which package provides the attached `rescale`.
abcd_frame <- abcd_frame %>%
  # These binary variables use a 2 SD cutoff
  mutate(binary_psychosis = if_else(
    rescale(prodromal_summary_score) >= 1, 1, 0)) %>%
  mutate(binary_distress_psychosis = if_else(
    rescale(prodromal_distress_score)>=1,1,0)) %>%
  # Progression from low to high symptom severity
  mutate(progression_12_months = if_else(
    rescale(prodromal_distress_score_1_yr) >= 1 &
    rescale(prodromal_distress_score) < 1, 1, 0))
message('Deriving population density.')
# Get density for location of most recent residence: take the density from
# the highest-numbered address with data, falling back to earlier ones.
# NOTE(review): equivalent to dplyr::coalesce(addr6, addr5, ..., addr1).
abcd_frame <- abcd_frame %>% mutate(pop_density =
  ifelse(!is.na(reshist_addr6_popdensity), reshist_addr6_popdensity,
  ifelse(!is.na(reshist_addr5_popdensity), reshist_addr5_popdensity,
  ifelse(!is.na(reshist_addr4_popdensity), reshist_addr4_popdensity,
  ifelse(!is.na(reshist_addr3_popdensity), reshist_addr3_popdensity,
  ifelse(!is.na(reshist_addr2_popdensity), reshist_addr2_popdensity,
  ifelse(!is.na(reshist_addr1_popdensity), reshist_addr1_popdensity,
  NA)))))))
# Factorize factors
factors <- c(
  'site_id_l',
  'rel_family_id',
  'scrn_asd',
  'cbcl_asd',
  'fh_psychosis',
  'sex',
  'binary_psychosis',
  'binary_distress_psychosis',
  'delayed_speech',
  'country_of_origin_child',
  'country_of_origin_parent',
  'binary_origin_child',
  'binary_origin_parent',
  'household_income_category',
  'n_in_household_factor',
  'race_white_only',
  'race_black_only',
  'race_other_expanded',
  'ethnicity_latinx')
for (factor in factors) {
  abcd_frame[[factor]] <- as.factor(abcd_frame[[factor]])
}
message('Deriving income-to-needs.')
derive_income_to_needs <- function(size, income, year) {
  # Income-to-needs ratio: household income divided by the federal poverty
  # threshold for a household of `size` persons in `year`.  The threshold
  # is looked up in the global `thresholds` table built above.
  #
  # Returns NA when the household size is missing -- the original relied
  # on filter()/as.integer() producing an NA (with a coercion warning) in
  # that case; make it explicit instead.
  if (is.na(size)) { return(NA_real_) }
  # The threshold table stops at household size 9, so larger households
  # are treated as size 9.
  if (size > 9) { size <- 9 }
  row <- thresholds %>% filter(household_size==size)
  column <- paste0('threshold_', year)
  threshold <- as.integer(row[column])
  return(income/threshold)
}
# Apply derive_income_to_needs row by row, using the interview year to pick
# the matching threshold column.
# NOTE(review): this per-row loop over a data frame is slow; a vectorized
# join/mapply would do the same work in one pass, behavior kept as-is.
abcd_frame$income_to_needs <- NA
for (row in 1:nrow(abcd_frame)) {
  abcd_frame[row, 'income_to_needs'] <-
    derive_income_to_needs(
      as.integer(abcd_frame[row, 'n_in_household']),
      as.integer(abcd_frame[row, 'household_income']),
      year(as.data.frame(abcd_frame)[row, 'interview_date']))
}
message('Identifying complete cases.')
# create a group for observations with complete data
abcd_frame <- abcd_frame %>%
  mutate(complete_data = ifelse(
    !is.na(scrn_asd) &
    sex != 3 & # excluding the single intersex participant
    ethnicity_latinx !=3 &
    !is.na(prodromal_summary_score) &
    !is.na(n_in_household) &
    n_in_household > 0 &
    !is.na(household_income) &
    !is.na(fh_psychosis) &
    !is.na(maternal_age_at_birth) &
    !is.na(paternal_age_at_birth) &
    paternal_age_at_birth != 332 & # implausible ages
    paternal_age_at_birth != 389 &
    devhx_21_p != 999 &
    !is.na(devhx_21_p) &
    devhx_12a_p != 999 &
    !is.na(devhx_12a_p) &
    !is.na(tb_cardsort) &
    !is.na(tb_pattern) &
    !is.na(tb_list) &
    # !is.na(binary_origin_child) &
    # !is.na(pop_density) &
    !is.na(trauma_score), 1, 0))
message('Subsetting data.')
# Keep the unfiltered frame around before restricting to complete cases.
abcd_frame_full <- abcd_frame
# Only complete observations
abcd_frame <- abcd_frame_full %>% filter(complete_data==1)
# Grouping label for the ASD x psychosis-like-symptom analyses.
# NOTE: case_when has no branch for ASD-/PLS-, so that group becomes NA.
abcd_frame <- abcd_frame %>%
  mutate(
    asd_pls_group = case_when(
      scrn_asd == 1 & binary_distress_psychosis == 1 ~ "ASD+/PLS+",
      scrn_asd == 1 & binary_distress_psychosis == 0 ~ "ASD+/PLS-",
      scrn_asd == 0 & binary_distress_psychosis == 1 ~ "ASD-/PLS+"))
# Analysis subsets.
# NOTE(review): trauma_score appears twice in the select() below; dplyr
# keeps the column once, so this is harmless but redundant.
abcd_continuous <- abcd_frame %>%
  select(
    scrn_asd, #not continuous but this is the grouping factor
    age_years,
    prodromal_summary_score,
    prodromal_distress_score,
    income_to_needs,
    trauma_score,
    maternal_age_at_birth,
    paternal_age_at_birth,
    tb_cardsort,
    tb_pattern,
    tb_list,
    trauma_score)
abcd_categorical <- abcd_frame %>%
  select(
    scrn_asd,
    sex,
    race_white_only,
    race_black_only,
    race_other_expanded,
    ethnicity_latinx,
    fh_psychosis,
    delayed_speech)
message('Preprocess step complete.')
|
{"hexsha": "efe5bf1e326efb5775080d1314d507afec1003ab", "size": 14811, "ext": "r", "lang": "R", "max_stars_repo_path": "r/2_preprocess.r", "max_stars_repo_name": "amandeepjutla/2019-abcd-asd", "max_stars_repo_head_hexsha": "06860acbc83af4ccccb41b8ba6bb3d0678a39a3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-23T19:56:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T19:56:58.000Z", "max_issues_repo_path": "r/2_preprocess.r", "max_issues_repo_name": "amandeepjutla/2019-abcd-asd", "max_issues_repo_head_hexsha": "06860acbc83af4ccccb41b8ba6bb3d0678a39a3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "r/2_preprocess.r", "max_forks_repo_name": "amandeepjutla/2019-abcd-asd", "max_forks_repo_head_hexsha": "06860acbc83af4ccccb41b8ba6bb3d0678a39a3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.767699115, "max_line_length": 80, "alphanum_fraction": 0.6878671258, "num_tokens": 4608}
|
function [ c, seed ] = c8vec_uniform_01 ( n, seed )
%*****************************************************************************80
%
%% C8VEC_UNIFORM_01 returns a unit pseudorandom C8VEC.
%
%  Discussion:
%
%    The angles should be uniformly distributed between 0 and 2 * PI,
%    the square roots of the radius uniformly distributed between 0 and 1.
%
%    This results in a uniform distribution of values in the unit circle.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    21 September 2006
%
%  Author:
%
%    John Burkardt
%
%  Reference:
%
%    Paul Bratley, Bennett Fox, Linus Schrage,
%    A Guide to Simulation,
%    Second Edition,
%    Springer, 1987,
%    ISBN: 0387964673,
%    LC: QA76.9.C65.B73.
%
%    Bennett Fox,
%    Algorithm 647:
%    Implementation and Relative Efficiency of Quasirandom
%    Sequence Generators,
%    ACM Transactions on Mathematical Software,
%    Volume 12, Number 4, December 1986, pages 362-376.
%
%    Pierre L'Ecuyer,
%    Random Number Generation,
%    in Handbook of Simulation,
%    edited by Jerry Banks,
%    Wiley, 1998,
%    ISBN: 0471134031,
%    LC: T57.62.H37.
%
%    Peter Lewis, Allen Goodman, James Miller,
%    A Pseudo-Random Number Generator for the System/360,
%    IBM Systems Journal,
%    Volume 8, Number 2, 1969, pages 136-143.
%
%  Parameters:
%
%    Input, integer N, the number of values to compute.
%
%    Input, integer SEED, a seed for the random number generator.
%
%    Output, complex C(N), the pseudorandom complex vector.
%
%    Output, integer SEED, a seed for the random number generator.
%
  c = zeros ( n, 1 );
%
%  2^31 - 1: modulus of the Lehmer "minimal standard" generator
%  (multiplier 16807); 127773 and 2836 are the quotient and remainder of
%  the modulus by 16807, used in Schrage's overflow-free update below.
%
  i4_huge = 2147483647;
  if ( seed == 0 )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'C8VEC_UNIFORM_01 - Fatal error!\n' );
    fprintf ( 1, '  Input SEED = 0!\n' );
    error ( 'C8VEC_UNIFORM_01 - Fatal error!' );
  end
  for j = 1 : n
%
%  First draw: uniform in (0,1), used as the squared radius.
%
    k = floor ( seed / 127773 );
    seed = 16807 * ( seed - k * 127773 ) - k * 2836;
    if ( seed < 0 )
      seed = seed + i4_huge;
    end
%
%  4.656612875E-10 is approximately 1 / (2^31 - 1), mapping seed to (0,1).
%
    r = sqrt ( seed * 4.656612875E-10 );
%
%  Second draw: uniform angle in (0, 2*pi).
%
    k = floor ( seed / 127773 );
    seed = 16807 * ( seed - k * 127773 ) - k * 2836;
    if ( seed < 0 )
      seed = seed + i4_huge;
    end
    theta = 2.0 * pi * seed * 4.656612875E-10;
%
%  Assemble r * e^{i*theta}.  NOTE(review): relies on `i` being the
%  builtin imaginary unit; would break if `i` were shadowed by a loop
%  variable elsewhere.
%
    c(j) = r * ( cos ( theta ) + sin ( theta ) * i );
  end
  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/uniform/c8vec_uniform_01.m"}
|
from __future__ import division
import numpy
from chainer.dataset import iterator
from chainer.iterators.order_samplers import ShuffleOrderSampler
class SerialIterator(iterator.Iterator):

    """Dataset iterator that serially reads the examples.

    This is a simple implementation of :class:`~chainer.dataset.Iterator`
    that just visits each example in either the order of indexes or a shuffled
    order.

    To avoid unintentional performance degradation, the ``shuffle`` option is
    set to ``True`` by default. For validation, it is better to set it to
    ``False`` when the underlying dataset supports fast slicing. If the
    order of examples has an important meaning and the updater depends on the
    original order, this option should be set to ``False``.

    This iterator saves ``-1`` instead of ``None`` in snapshots since some
    serializers do not support ``None``.

    Args:
        dataset: Dataset to iterate.
        batch_size (int): Number of examples within each batch.
        repeat (bool): If ``True``, it infinitely loops over the dataset.
            Otherwise, it stops iteration at the end of the first epoch.
        shuffle (bool): If ``True``, the order of examples is shuffled at the
            beginning of each epoch. Otherwise, examples are extracted in the
            order of indexes. If ``None`` and no ``order_sampler`` is given,
            the behavior is the same as the case with ``shuffle=True``.
        order_sampler (callable): A callable that generates the order
            of the indices to sample in the next epoch when a epoch finishes.
            This function should take two arguements: the current order
            and the current position of the iterator.
            This should return the next order. The size of the order
            should remain constant.
            This option cannot be used when ``shuffle`` is not ``None``.

    """

    def __init__(self, dataset, batch_size,
                 repeat=True, shuffle=None, order_sampler=None):
        self.dataset = dataset
        self.batch_size = batch_size
        self._repeat = repeat
        self._shuffle = shuffle

        # ``shuffle`` and ``order_sampler`` are mutually exclusive: an
        # explicit ``shuffle`` value wins and may itself create a sampler,
        # while ``shuffle=None`` defaults to shuffling unless a custom
        # sampler was supplied.
        if self._shuffle is not None:
            if order_sampler is not None:
                raise ValueError('`shuffle` is not `None` and a custom '
                                 '`order_sampler` is set. Please set '
                                 '`shuffle` to `None` to use the custom '
                                 'order sampler.')
            else:
                if self._shuffle:
                    order_sampler = ShuffleOrderSampler()
        else:
            if order_sampler is None:
                order_sampler = ShuffleOrderSampler()
        self.order_sampler = order_sampler

        self.reset()

    def __next__(self):
        """Return the next minibatch, updating epoch bookkeeping."""
        if not self._repeat and self.epoch > 0:
            raise StopIteration

        self._previous_epoch_detail = self.epoch_detail

        i = self.current_position
        i_end = i + self.batch_size
        N = self._epoch_size

        # When no order is defined, slice the dataset directly (fast path);
        # otherwise gather examples through the current index order.
        if self._order is None:
            batch = self.dataset[i:i_end]
        else:
            batch = [self.dataset[index] for index in self._order[i:i_end]]

        if i_end >= N:
            # Epoch boundary crossed.
            if self._repeat:
                rest = i_end - N
                if self._order is not None:
                    # Ask the sampler for next epoch's order; its length
                    # must not change, since _epoch_size depends on it.
                    new_order = self.order_sampler(self._order, i)
                    if len(self._order) != len(new_order):
                        raise ValueError('The size of order does not match '
                                         'the size of the previous order.')
                    self._order = new_order
                if rest > 0:
                    # Fill the tail of the batch from the new epoch.
                    if self._order is None:
                        batch.extend(self.dataset[:rest])
                    else:
                        batch.extend([self.dataset[index]
                                      for index in self._order[:rest]])
                self.current_position = rest
            else:
                self.current_position = 0

            self.epoch += 1
            self.is_new_epoch = True
        else:
            self.is_new_epoch = False
            self.current_position = i_end

        return batch

    next = __next__

    @property
    def epoch_detail(self):
        """Fractional epoch progress, e.g. ``1.5`` mid-way through epoch 2."""
        return self.epoch + self.current_position / self._epoch_size

    @property
    def previous_epoch_detail(self):
        """Epoch progress before the last ``__next__``; ``None`` initially."""
        # -1 is the internal stand-in for "no previous batch yet".
        if self._previous_epoch_detail < 0:
            return None
        return self._previous_epoch_detail

    def serialize(self, serializer):
        """Save/load the iterator state through ``serializer``."""
        self.current_position = serializer('current_position',
                                           self.current_position)
        self.epoch = serializer('epoch', self.epoch)
        self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
        if self._order is not None:
            # Older snapshots stored the order under '_order'.
            try:
                serializer('order', self._order)
            except KeyError:
                serializer('_order', self._order)
        try:
            self._previous_epoch_detail = serializer(
                'previous_epoch_detail', self._previous_epoch_detail)
        except KeyError:
            # guess previous_epoch_detail for older version
            self._previous_epoch_detail = self.epoch + \
                (self.current_position - self.batch_size) / self._epoch_size
            if self.epoch_detail > 0:
                self._previous_epoch_detail = max(
                    self._previous_epoch_detail, 0.)
            else:
                self._previous_epoch_detail = -1.

    def reset(self):
        """Reset position, epoch counters and (if sampled) the index order."""
        self.current_position = 0
        self.epoch = 0
        self.is_new_epoch = False

        # use -1 instead of None internally.
        self._previous_epoch_detail = -1.

        if self.order_sampler:
            self._order = self.order_sampler(
                numpy.arange(len(self.dataset)), 0)
        else:
            self._order = None

    @property
    def _epoch_size(self):
        # Number of examples per epoch: the order's length when sampling,
        # otherwise the dataset's length.
        if self._order is None:
            return len(self.dataset)
        else:
            return len(self._order)
|
{"hexsha": "ceb7b2c147a398386528d52e3ce53e77a4340137", "size": 6119, "ext": "py", "lang": "Python", "max_stars_repo_path": "chainer/iterators/serial_iterator.py", "max_stars_repo_name": "maomran/chainer", "max_stars_repo_head_hexsha": "a69103a4aa59d5b318f39b01dbcb858d465b89cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-12T20:23:51.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-12T20:23:51.000Z", "max_issues_repo_path": "chainer/iterators/serial_iterator.py", "max_issues_repo_name": "maomran/chainer", "max_issues_repo_head_hexsha": "a69103a4aa59d5b318f39b01dbcb858d465b89cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chainer/iterators/serial_iterator.py", "max_forks_repo_name": "maomran/chainer", "max_forks_repo_head_hexsha": "a69103a4aa59d5b318f39b01dbcb858d465b89cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3109756098, "max_line_length": 78, "alphanum_fraction": 0.582284687, "include": true, "reason": "import numpy", "num_tokens": 1209}
|
import pandas as pd
import numpy as np
import visualml as vml
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN
import matplotlib.pyplot as plt
import matplotlib
# Use the non-interactive Agg backend so tests never open a window.
# NOTE(review): matplotlib.use() is called *after* pyplot was imported
# above; depending on the matplotlib version the backend switch may not
# take effect -- consider selecting the backend before importing pyplot.
matplotlib.use('Agg')
# Activating interactive mode for closing the window automatically without
# displaying it. Source: https://community.esri.com/thread/185110-matplotlib-show-prevents-script-from-completing
plt.ion()
def test_decision_boundary_grid(n_feats=4, feat_list=None):
    """Smoke-test ``vml.decision_boundary_grid`` on a synthetic task.

    Builds an ``n_feats``-feature classification dataset, fits a random
    forest, draws the grid of decision boundaries, then closes all figures.
    """
    names = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
    features, labels = make_classification(n_features=n_feats, random_state=42)
    frame = pd.DataFrame(features, columns=names[:n_feats])
    model = RF(random_state=42).fit(frame, labels)
    vml.decision_boundary_grid(model, frame, labels, feat_list=feat_list)
    plt.close('all')
def test_plot_decision_boundary(input_dim='1D'):
    """Smoke-test ``vml.plot_decision_boundary`` for the supported specs.

    ``input_dim`` selects how the feature argument is passed: a bare name
    ('1D'), a one-element list ('1Dlist'), or a two-feature list ('2D').
    Any other value only prints a usage message.
    """
    feature_spec = {
        '1D': 'A',
        '1Dlist': ['A'],
        '2D': ['A', 'C'],
    }
    if input_dim not in feature_spec:
        print("The parameter's value input_dim has to be either '1D' or '2D'")
        return
    features, labels = make_classification(n_features=4, random_state=42)
    model = SVC(random_state=42).fit(features, labels)
    frame = pd.DataFrame(features, columns=['A', 'B', 'C', 'D'])
    vml.plot_decision_boundary(model, frame, labels, feature_spec[input_dim])
    plt.close('all')
#def test_create_X_grid():
# X = pd.DataFrame(np.ones([5,5]), columns=['A','B','C','D','E'])
# x = np.ones([5,2])
# X_map = vml._create_X_grid(X, x, ['B', 'D'])
# print("Input is {}, {} and ['B', 'D']".format(X, x))
# print("Output is {}".format(X_map))
#def test_get_mesh_coordinates(input_dim='1D'):
# if input_dim=='1D':
# X, y = make_classification(n_features=4, random_state=42)
# clf = SVC(random_state=42, probability=True).fit(X,y)
# X = pd.DataFrame(X, columns=['first','second','third','fourth'])
#
# print("Testing 1D input")
# print("input is {}, {}, {} and {}".format(clf, X, y, 'first'))
# xx, yy, Z = vml._get_mesh_coordinates(clf, X, y, 'first', [1, 2, 4])
# print("output is {}, {} and {}".format(xx, yy, Z))
# cm = plt.cm.RdBu
# plt.contourf(xx,yy,Z, cmap=cm)
# plt.show()
# elif input_dim=='2D':
# X, y = make_classification(n_features=4, random_state=42)
# clf = SVC(random_state=42, probability=True).fit(X,y)
# X = pd.DataFrame(X, columns=['A','B','C','D'])
# print("input is {}, {}, {} and {}".format(clf, X, y, ['A', 'B']))
# xx, yy, Z = vml._get_mesh_coordinates(clf, X, y, ['A', 'B'])
# print("output is {}, {} and {}".format(xx, yy, Z))
# cm = plt.cm.RdBu
# plt.contourf(xx,yy,Z, cmap=cm)
# plt.show()
# else:
# print("The parameter's value input_dim has to be either '1D' or '2D'")
def main():
    """Entry point; the individual smoke tests are invoked manually.

    Available checks:
        test_plot_decision_boundary(input_dim='1D')
        test_decision_boundary_grid(n_feats=5, feat_list=['D', 'B', 'A'])
    """
    pass


if __name__ == '__main__':
    main()
|
{"hexsha": "6199eca75213e786622562b8a2b86e6f8b0d5f25", "size": 3740, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_visualml.py", "max_stars_repo_name": "WittmannF/visual-ml", "max_stars_repo_head_hexsha": "f2e967688d2d2fe22c275eeee46d9f7132311fd5", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-05-15T16:46:16.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-18T20:53:45.000Z", "max_issues_repo_path": "tests/test_visualml.py", "max_issues_repo_name": "WittmannF/visual-ml", "max_issues_repo_head_hexsha": "f2e967688d2d2fe22c275eeee46d9f7132311fd5", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-04-06T21:11:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-09T22:43:14.000Z", "max_forks_repo_path": "tests/test_visualml.py", "max_forks_repo_name": "WittmannF/visual-ml", "max_forks_repo_head_hexsha": "f2e967688d2d2fe22c275eeee46d9f7132311fd5", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-02T01:07:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-02T01:07:12.000Z", "avg_line_length": 37.4, "max_line_length": 117, "alphanum_fraction": 0.6114973262, "include": true, "reason": "import numpy", "num_tokens": 1094}
|
#!/usr/bin/env python3
import sys
sys.path.append('../helper_utils')
sys.path.append('/home/kkalyan3/code/helper_utils')
import time
from sklearn.utils import shuffle
from utils import load_array, max_model, max_transform
from sklearn.svm import SVC
import logging
import numpy as np
from sklearn.metrics import accuracy_score
class UniformSvm(object):
    """Ensemble of RBF-kernel SVMs trained on uniformly random row subsets.

    Each training row is assigned to one of ``experts`` buckets uniformly
    at random; one SVC is fit per bucket, the experts' hard 0/1 predictions
    are averaged, and the ensemble prediction is a majority vote (> 0.5).
    Callers must set ``train_dim`` and ``experts`` before training.
    """

    def __init__(self):
        self.uniform_experts = 50
        # libsvm kernel cache size passed to SVC.  (The original assigned
        # this attribute twice with the same value; the duplicate was
        # removed.)
        self.cache_size = 50000
        self.C = 10
        self.gamma = 6
        # Shapes of the train/test/validation matrices; set by the caller.
        self.train_dim = None
        self.test_dim = None
        self.val_dim = None
        # Number of experts used for bucketing; set by the caller.
        self.experts = None

    def svc_model(self, X, y):
        """Fit one RBF-kernel SVC expert on a shuffled copy of (X, y)."""
        X, y = shuffle(X, y)
        clf = SVC(C=self.C, kernel='rbf', gamma=self.gamma,
                  cache_size=self.cache_size, verbose=True, probability=True)
        return clf.fit(X, y)

    def svc_eval(self, model, X, y):
        """Return the accuracy of ``model`` on (X, y)."""
        return accuracy_score(y, model.predict(X))

    def get_random(self):
        """Assign each training row to a uniformly random expert bucket.

        Returns:
            dict mapping expert id -> list of row indices; every index in
            ``range(self.train_dim[0])`` appears in exactly one bucket.
        """
        buckets = {}
        assignment = np.random.choice(self.experts, self.train_dim[0])
        for idx, expert in enumerate(assignment):
            buckets.setdefault(expert, []).append(idx)
        return buckets

    def train_model(self, x_train, y_train, x_test, y_test, x_val, y_val):
        """Train one expert per bucket and return error percentages.

        Returns:
            (train_error, val_error, test_error) as percentages in [0, 100].
        """
        split_buckets = self.get_random()
        y_hat_train = 0
        y_hat_test = 0
        y_hat_val = 0
        for key in sorted(split_buckets):
            X = x_train[split_buckets[key]]
            y = y_train[split_buckets[key]]
            model = self.svc_model(X, y)
            # Accumulate each expert's hard 0/1 votes.
            y_hat_train += model.predict(x_train)
            y_hat_test += model.predict(x_test)
            y_hat_val += model.predict(x_val)
        # Turn vote counts into the fraction of experts voting positive.
        # NOTE(review): this divides by self.experts, the number of
        # *possible* buckets; np.random.choice can leave buckets empty,
        # in which case fewer models actually voted -- confirm intended.
        y_hat_train *= (1 / self.experts)
        y_hat_test *= (1 / self.experts)
        y_hat_val *= (1 / self.experts)
        train_error = (1 - accuracy_score(y_train, y_hat_train > 0.5)) * 100
        test_error = (1 - accuracy_score(y_test, y_hat_test > 0.5)) * 100
        val_error = (1 - accuracy_score(y_val, y_hat_val > 0.5)) * 100
        return train_error, val_error, test_error
if __name__ == '__main__':
    # Experiment driver: loads pre-split train/test/validation arrays,
    # scales them, trains the uniform SVM ensemble and logs the errors.
    # Paths are cluster-specific.
    logging.basicConfig(filename='../log/forest_uniform_seq.txt', level=logging.INFO)
    start_time = time.time()
    logging.info('##### NEW EXPERIMENT #####')
    TRAIN = '/home/kkalyan3/data/train/100/'
    TEST = '/home/kkalyan3/data/test/100/'
    VAL = '/home/kkalyan3/data/val/100/'
    # load_array / max_model / max_transform come from the local
    # helper_utils package added to sys.path at the top of the file.
    x_train = load_array(TRAIN + 'X_train.bc/')
    y_train = load_array(TRAIN + 'y_train.bc/')
    x_test = load_array(TEST + 'X_test.bc/')
    y_test = load_array(TEST + 'y_test.bc/')
    x_val = load_array(VAL + 'X_val.bc/')
    y_val = load_array(VAL + 'y_val.bc/')
    # Scaling parameters are derived from the training set only and then
    # applied to all three splits (presumably max-scaling -- helper code
    # not shown here).
    max_values = max_model(x_train)
    x_train = max_transform(max_values, x_train)
    x_test = max_transform(max_values, x_test)
    x_val = max_transform(max_values, x_val)
    usvm = UniformSvm()
    usvm.train_dim = x_train.shape
    usvm.test_dim = x_test.shape
    usvm.val_dim = x_val.shape
    usvm.experts = 50
    logging.info('C : {}, gamma : {}'.format(usvm.C, usvm.gamma))
    train_error, val_error, test_error = usvm.train_model(x_train, y_train, x_test, y_test, x_val, y_val)
    # Wall-clock duration in minutes.
    total_time = (time.time() - start_time) / 60
    logging.info('Train Error, Validation Error, Test Error, Time Taken')
    logging.info('{}, {}, {}, {}'.format(train_error, val_error, test_error, total_time))
    logging.info('##### EXPERIMENT COMPLETE #####')
|
{"hexsha": "e1d8504123824e81db586d5018b704ec573d37ba", "size": 3594, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/forest/uniform_seq.py", "max_stars_repo_name": "krishnakalyan3/MixtureOfExperts", "max_stars_repo_head_hexsha": "ec43e312b3b3abddf0bd7281535842e73268b771", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-08-10T04:22:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T14:00:19.000Z", "max_issues_repo_path": "code/forest/uniform_seq.py", "max_issues_repo_name": "krishnakalyan3/MixtureOfExperts", "max_issues_repo_head_hexsha": "ec43e312b3b3abddf0bd7281535842e73268b771", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/forest/uniform_seq.py", "max_forks_repo_name": "krishnakalyan3/MixtureOfExperts", "max_forks_repo_head_hexsha": "ec43e312b3b3abddf0bd7281535842e73268b771", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-08-27T20:11:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T00:38:36.000Z", "avg_line_length": 31.8053097345, "max_line_length": 105, "alphanum_fraction": 0.6271563717, "include": true, "reason": "import numpy", "num_tokens": 948}
|
[STATEMENT]
lemma sorted_inorder_induct_last: "sorted_less (inorder (Node ts t)) \<Longrightarrow> sorted_less (inorder t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sorted_less (inorder (Node ts t)) \<Longrightarrow> sorted_less (inorder t)
[PROOF STEP]
by (simp add: sorted_wrt_append)
|
{"llama_tokens": 106, "file": "BTree_BPlusTree", "length": 1}
|
'''Backtest Moving Average (MA) crossover strategies
'''
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from MA import MABacktester
class MADelayBacktester(MABacktester):
    '''Backtest a Moving Average (MA) crossover strategy with a confirmation delay.

    When you get a crossover signal you wait one more day to see if the
    signal is confirmed before acting on it.

    Parameters:
    series: (Panda Series) a list of CLOSE prices by date
    ms: (int) short moving average
    ml: (int) long moving average
    long_only: (boolean) True if the strategy can only go long
    ema: (boolean) True if you want exponential MA's
    '''

    def __init__(self, series, ms=1, ml=10, long_only=False, ema=False):
        super(MADelayBacktester,self).__init__(series,ms=ms, ml=ml, long_only=long_only, ema=ema)

    def __str__(self):
        return "MA Delay Backtest Strategy (ms=%d, ml=%d, ema=%s, long_only=%s, start=%s, end=%s)" % (
            self._ms, self._ml, str(self._ema), str(self._long_only), str(self._start_date), str(self._end_date))

    def _trade_logic(self):
        '''Implements the trade logic in order to come up with
        a set of stances.

        Go long (stance 1) only when the MA difference is non-negative
        both today and yesterday (the one-day confirmation delay).
        Otherwise stay flat (0) for long-only strategies, or go
        short (-1) when shorting is allowed.
        '''
        self._indicators()
        # Fix: the original computed the stance column twice when shorting
        # was allowed, discarding the first result entirely. Build the
        # confirmation mask once and pick the "no signal" stance up front.
        no_signal = 0 if self._long_only else -1
        confirmed = (self._df['mdiff'] >= 0) & (self._df['mdiff'].shift(1) >= 0)
        self._df['stance'] = np.where(confirmed, 1, no_signal)
|
{"hexsha": "a912a45e345dd86e2ba818df1607fce8ad09db1a", "size": 1485, "ext": "py", "lang": "Python", "max_stars_repo_path": "backtesters/MA_delay.py", "max_stars_repo_name": "learn-crypto-trading/crypto-price-analysis", "max_stars_repo_head_hexsha": "70618ecf296e40404f3ebaa2e640c90097c227cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2018-07-06T18:05:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T18:22:36.000Z", "max_issues_repo_path": "backtesters/MA_delay.py", "max_issues_repo_name": "dcimring/crypto-price-analysis", "max_issues_repo_head_hexsha": "70618ecf296e40404f3ebaa2e640c90097c227cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backtesters/MA_delay.py", "max_forks_repo_name": "dcimring/crypto-price-analysis", "max_forks_repo_head_hexsha": "70618ecf296e40404f3ebaa2e640c90097c227cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2018-06-15T17:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T07:27:47.000Z", "avg_line_length": 33.0, "max_line_length": 113, "alphanum_fraction": 0.6424242424, "include": true, "reason": "import numpy", "num_tokens": 431}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.