# ==== qatoolspython/checkSNR.py (AhmedFaisal95/qatools-python) ====
"""
This module provides a function to check the SNR of the white and gray matter
"""
# -----------------------------------------------------------------------------
def checkSNR(subjects_dir, subject, nb_erode=3, ref_image="norm.mgz", aparc_image="aparc+aseg.mgz"):
"""
    A function to check the SNR of the white and gray matter.
    The white matter segmentation is taken from the aparc+aseg image and the
    gray matter from the aseg image. The white matter mask is eroded by three
    voxels (by default) in order to ignore partial volume effects. For the
    gray matter this is not possible, because the cortical layer is already
    very thin; an erosion would eliminate nearly the whole signal.
Required arguments:
- subjects_dir : path to the subjects directory
- subject : subject ID
Optional arguments:
- nb_erode : the number of erosions, default = 3
- ref_image : the reference image, default = "norm.mgz", can be changed
to "orig.mgz"
- aparc_image : the aparc+aseg image, default = "aparc+aseg.mgz", can
be changed to "aparc+aseg.orig.mgz" for FastSurfer output
Returns:
- wm_snr, gm_snr
Requires valid mri/norm.mgz, mri/aseg.mgz, and mri/aparc+aseg.mgz files for
FreeSurfer output, and valid mri/norm.mgz, mri/aseg.mgz, and
mri/aparc+aseg.orig.mgz files for FastSurfer output.
If not found, NaNs will be returned.
"""
# Imports
import os
import numpy as np
import nibabel as nib
from skimage.morphology import binary_erosion
# Message
print("Computing white and gray matter SNR for "+ref_image+" ...")
# Get data
try:
path_reference_image = os.path.join(subjects_dir,subject,"mri",ref_image)
norm = nib.load(path_reference_image)
norm_data = norm.get_fdata()
except FileNotFoundError:
print("WARNING: could not open "+path_reference_image+", returning NaNs.")
return np.nan, np.nan
try:
path_aseg = os.path.join(subjects_dir,subject,"mri","aseg.mgz")
aseg = nib.load(path_aseg)
data_aseg = aseg.get_fdata()
except FileNotFoundError:
print("WARNING: could not open "+path_aseg+", returning NaNs.")
return np.nan, np.nan
try:
path_aparc_aseg = os.path.join(subjects_dir,subject,"mri",aparc_image)
inseg = nib.load(path_aparc_aseg)
data_aparc_aseg = inseg.get_fdata()
except FileNotFoundError:
print("WARNING: could not open "+path_aparc_aseg+", returning NaNs.")
return np.nan, np.nan
# Process white matter image
# Create 3D binary data where the white matter locations are encoded with 1, all the others with zero
b_wm_data = np.zeros((256,256,256))
# The following keys represent the white matter labels in the aparc+aseg image
wm_labels = [2, 41, 7, 46, 251, 252, 253, 254, 255, 77, 78, 79]
# Find the wm labels in the aparc+aseg image and set the locations in the binary image to one
for i in wm_labels:
x, y, z = np.where(data_aparc_aseg == i)
b_wm_data[x,y,z] = 1
    # Erode the white matter mask to avoid partial-volume voxels at the boundary
    b_wm_data = binary_erosion(b_wm_data, np.ones((nb_erode, nb_erode, nb_erode)))
# Computation of the SNR of the white matter
x, y, z = np.where(b_wm_data == 1)
signal_wm = norm_data[x,y,z]
signal_wm_mean = np.mean(signal_wm)
signal_wm_std = np.std(signal_wm)
wm_snr = signal_wm_mean/signal_wm_std
print("White matter signal to noise ratio:", '{:.4}'.format(wm_snr))
# Process gray matter image
# Create 3D binary data where the gray matter locations are encoded with 1, all the others with zero
b_gm_data = np.zeros((256,256,256))
# The following keys represent the gray matter labels in the aseg image
gm_labels = [ 3, 42 ]
# Find the gm labels in the aseg image and set the locations in the binary image to one
for i in gm_labels:
x, y, z = np.where(data_aseg == i)
b_gm_data[x, y, z] = 1
# Computation of the SNR of the gray matter
x, y, z = np.where(b_gm_data == 1)
signal_gm = norm_data[x,y,z]
signal_gm_mean = np.mean(signal_gm)
signal_gm_std = np.std(signal_gm)
    gm_snr = signal_gm_mean/signal_gm_std
    print("Gray matter signal to noise ratio:", '{:.4}'.format(gm_snr))
# Return
return wm_snr, gm_snr
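# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the subjects
# directory and subject ID below are hypothetical placeholders; adjust them to
# your own FreeSurfer / FastSurfer setup.
if __name__ == "__main__":
    # FreeSurfer output (uses mri/aparc+aseg.mgz by default)
    wm_snr, gm_snr = checkSNR("/data/subjects", "bert")
    # FastSurfer output names its segmentation differently
    wm_snr, gm_snr = checkSNR("/data/subjects", "bert", aparc_image="aparc+aseg.orig.mgz")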
# ==== utils.py (adamivora/pairing_friendly_curves_generation) ====
from sage.all import EllipticCurve
def is_embedding_degree(E: EllipticCurve, k):
    # True iff q^k ≡ 1 (mod #E), where q is the base-field order, i.e. the
    # full group order of E divides q^k - 1.
    return (E.base_field().order() ** k - 1) % E.order() == 0
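# Usage sketch (illustrative): y^2 = x^3 + x over GF(11) is supersingular with
# 11 + 1 = 12 points, and 12 divides 11^2 - 1 = 120, so k = 2 passes the check.
# Note that this check uses the full group order rather than a prime subgroup order.
if __name__ == "__main__":
    from sage.all import GF
    E = EllipticCurve(GF(11), [1, 0])
    print(is_embedding_degree(E, 2))  # True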
# ==== demo.py (JackHunt/GP-LVM) ====
'''
BSD 3-Clause License
Copyright (c) 2017, Jack Miles Hunt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
sys.path.insert(0, './gplvm_lib')
import urllib.request
import os.path
import csv
import numpy as np
import matplotlib.pyplot as plt
import gplvm_lib as gp
iris_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_fname = 'iris.data'
#Control plotting here.
show_plots = True
save_plots = False
def get_iris(use_colouring = True):
"""
Loads the four dimensional Fisher Iris dataset.
If the 'iris.data' file is not present in the working directory,
this function attempts to download it.
    The last column of the dataset (the text labels) is omitted from the
    feature vectors, but is used for class colouring if requested.
"""
iris = []
colours = []
if not os.path.isfile(iris_fname):
print("Attempting to download the iris dataset.")
try:
urllib.request.urlretrieve(iris_url, iris_fname)
except urllib.request.URLError:
sys.exit("Unable to download iris dataset. Quitting.")
with open(iris_fname, newline='') as file:
reader = csv.reader(file, delimiter = ',')
for line in reader:
if len(line) != 0:
#Extract feature vector.
iris.append(list(map(float, line[0:4])))
#Extract class label and assign colour, if necessary.
if use_colouring:
if line[4] == "Iris-setosa":
colours.append("red")
elif line[4] == "Iris-versicolor":
colours.append("green")
elif line[4] == "Iris-virginica":
colours.append("blue")
else:
sys.exit("Error reading class assignments. Check iris.data")
    #Randomise order: apply one random permutation to features and colours,
    #so feature vectors and their class colours stay aligned.
    perm = np.random.permutation(len(iris))
    iris = [iris[i] for i in perm]
    if use_colouring:
        colours = [colours[i] for i in perm]
return {'features' : np.asarray(iris), 'colours' : colours}
def plot(data, colours, dimensionality, title, method):
"""
Helper function to reduce code duplication.
"""
if dimensionality == 1:
gp.plot_1D(data, title, method, save_plots)
elif dimensionality == 2:
gp.plot_2D(data, title, method, colours, save_plots)
elif dimensionality == 3:
gp.plot_3D(data, title, method, colours, save_plots)
else:
return None
def run_pca(data, reduced_dimensions, show_scree):
"""
    Runs standard PCA on the given dataset, optionally showing the associated
    Scree plot (normalised eigenvalues).
"""
print("-->Running PCA.")
latent = gp.pca(data['features'], reduced_dimensions, show_scree, save_plots)
plot(latent, data['colours'], reduced_dimensions, "Iris Dataset", "PCA")
def run_linear_gplvm(data, reduced_dimensions, beta):
"""
Runs the Linear Gaussian Process Latent Variable Model on the given dataset.
    The resultant data are plotted if the latent space is 1, 2 or 3 dimensional.
"""
print("-->Running Linear GP-LVM.")
gplvm = gp.LinearGPLVM(data['features'])
gplvm.compute(reduced_dimensions, beta)
latent = gplvm.get_latent_space_representation()
plot(latent, data['colours'], reduced_dimensions, "Iris Dataset", "Linear GP-LVM")
def run_nonlinear_gplvm(data, reduced_dimensions):
"""
Runs the Nonlinear Gaussian Process Latent Variable Model on the given dataset,
for a given covariance matrix generating kernel.
    The resultant data are plotted if the latent space is 1, 2 or 3 dimensional.
"""
print("-->Running Nonlinear GP-LVM.")
gplvm = gp.NonlinearGPLVM(data['features'])
gplvm.compute(reduced_dimensions, 50, max_iterations = 50, jitter = 4, learn_rate = 0.01, momentum = 0.01, verbose = True)
latent = gplvm.get_latent_space_representation()
plot(latent, data['colours'], reduced_dimensions, "Iris Dataset", "Nonlinear GP-LVM")
if __name__ == "__main__":
"""
Parameters of the algorithms may be tweaked here.
"""
#Dimension to reduce to.
new_dimensionality = 2
#Beta parameter for Linear GP-LVM.
beta = 2.0
#Whether to display the Scree plot for PCA.
scree = True
data = get_iris()
run_pca(data, new_dimensionality, scree)
run_linear_gplvm(data, new_dimensionality, beta)
run_nonlinear_gplvm(data, new_dimensionality)
if show_plots:
plt.show()
# ==== src/test_cases.jl (GillesBareilles/SDCO.jl) ====
export testcase1a, testcase1a_point, testcase1b, testcase1c
export testcase1abis, testcase1ater
export testcase2
export testcase4a, testcase4b, testcase4c
# Test case 1.a
function testcase1a_point()
x = PointE([Matrix{Float64}(I, 3, 3)], Float64[])
end
function testcase1a(; symmetric=false)
if symmetric
cmat = [Symmetric(sparse([1, 2, 3], [1, 2, 3], [1., 1., 1.]))]
a1mats = [Symmetric(sparse([1], [1], [1.], 3, 3), :L)]
a2mats = [Symmetric(sparse([2, 3], [2, 1], [1., 1.], 3, 3), :L)]
a3mats = [Symmetric(sparse([2, 3], [1, 3], [1., 1.], 3, 3), :L)]
else
cmat = [sparse([1, 2, 3], [1, 2, 3], [1., 1., 1.])]
a1mats = [sparse([1], [1], [1.], 3, 3)]
a2mats = [sparse([1, 2, 3], [3, 2, 1], [1., 1., 1.], 3, 3)]
a3mats = [sparse([1, 2, 3], [2, 1, 3], [1., 1., 1.], 3, 3)]
end
c = PointE(cmat, Vector{Float64}())
a1 = PointE(a1mats, Vector{Float64}())
a2 = PointE(a2mats, Vector{Float64}())
a3 = PointE(a3mats, Vector{Float64}())
A = [a1, a2, a3]
b = Vector{Float64}([1, 1, 1])
problem = SDCOContext(c, A, b)
x = PointE([Matrix{Float64}(I, 3, 3)], Float64[])
s = PointE([Matrix{Float64}(I, 3, 3)], Float64[])
y = zeros(Float64, length(b))
return problem, PointPrimalDual(x, y, s)
end
function testcase1abis()
cmat = [sparse([1, 2, 3], [1, 2, 3], [1., 1., 1.])]
c = PointE(cmat, Vector{Float64}())
a1mats = [sparse([1], [1], [1.], 3, 3)]
a1 = PointE(a1mats, Vector{Float64}())
a2mats = [sparse([2, 3], [2, 1], [1., 1.], 3, 3)]
a2 = PointE(a2mats, Vector{Float64}())
a3mats = [sparse([2, 3], [1, 3], [1., 1.], 3, 3)]
a3 = PointE(a3mats, Vector{Float64}())
A = [a1, a2, a3]
b = Vector{Float64}([1, 1, 1])
problem = SDCOContext(c, A, b)
x = PointE([Matrix{Float64}(I, 3, 3)], Float64[])
y = ones(Float64, length(b)) * -1 * (1+sqrt(17)) / 4
s = c - evaluate(A, y)
return problem, PointPrimalDual(x, y, s)
end
function testcase1ater()
cmat = [sparse([1, 2, 3], [1, 2, 3], [1., 1., 1.])]
c = PointE(cmat, Vector{Float64}())
a1mats = [sparse([1], [1], [1.], 3, 3)]
a1 = PointE(a1mats, Vector{Float64}())
a2mats = [sparse([2, 3], [2, 1], [1., 1.], 3, 3)]
a2 = PointE(a2mats, Vector{Float64}())
a3mats = [sparse([2, 3], [1, 3], [1., 1.], 3, 3)]
a3 = PointE(a3mats, Vector{Float64}())
A = [a1, a2, a3]
b = Vector{Float64}([1, 1, 1])
problem = SDCOContext(c, A, b)
x = PointE([Matrix{Float64}(I, 3, 3)], Float64[])
y = Float64[0, -1, -0.5]
s = c - evaluate(A, y)
return problem, PointPrimalDual(x, y, s)
end
function testcase1b()
c = PointE(Dense{Float64}[], Float64[1., 1.])
A = [PointE(Dense{Float64}[], Float64[1., 2.])]
b = [1.]
problem = SDCOContext(c, A, b)
x = PointE(Dense{Float64}[], Float64[1/3, 1/3])
y = [0.]
s = PointE(Dense{Float64}[], Float64[1., 1.])
return problem, PointPrimalDual(x, y, s)
end
function testcase1c(; symmetric=false)
if symmetric
cmat = [Symmetric(sparse(1.0I, 3, 3), :L), Symmetric(sparse(1.0I, 3, 3), :L)]
mat1 = Symmetric(sparse([1], [1], [1.], 3, 3), :L)
mat2 = Symmetric(sparse([2, 3], [2, 1], [1., 1.], 3, 3), :L)
mat3 = Symmetric(sparse([2, 3], [1, 3], [1., 1.], 3, 3), :L)
matnull = Symmetric(spzeros(3,3), :L)
else
cmat = [sparse(1.0I, 3, 3), sparse(1.0I, 3, 3)]
mat1 = sparse([1], [1], [1.], 3, 3)
mat2 = sparse([1, 2, 3], [3, 2, 1], [1., 1., 1.], 3, 3)
mat3 = sparse([1, 2, 3], [2, 1, 3], [1., 1., 1.], 3, 3)
matnull = spzeros(3,3)
end
c = PointE(cmat, Float64[1., 1.])
a1 = PointE([mat1, matnull], Float64[0, 0])
a2 = PointE([mat2, matnull], Float64[0, 0])
a3 = PointE([mat3, matnull], Float64[0, 0])
a4 = PointE([matnull, mat1], Float64[0, 0])
a5 = PointE([matnull, mat2], Float64[0, 0])
a6 = PointE([matnull, mat3], Float64[0, 0])
a7 = PointE([matnull, matnull], Float64[1., 2.])
A = [a1, a2, a3, a4, a5, a6, a7]
b = Vector{Float64}([1, 1, 1, 1, 1, 1, 1])
problem = SDCOContext(c, A, b)
y0 = -(1+sqrt(17)) / 4
y = y0 * ones(7)
x = PointE([Matrix{Float64}(I, 3, 3), Matrix([1 0 0.25; 0 0.5 0; 0.25 0 1])], Float64[(5-sqrt(17))/2, (-3+sqrt(17))/4])
s = c - evaluate(A, y)
return problem, PointPrimalDual(x, y, s)
end
function testcase2(p, q, r; storage=:sparsefull)
## Objective
B0 = rand(q, r)
cmat::Sparse{Float64} = spzeros(q+r, q+r)
if storage == :sparsesym
for i=1:r, j=1:q
cmat[q+i, j] = B0[j, i]
end
c = PointE([Symmetric(cmat, :L)], Float64[])
elseif storage == :sparsefull
for i=1:r, j=1:q
cmat[q+i, j] = B0[j, i]
cmat[j, i+q] = B0[j, i]
end
c = PointE([cmat], Float64[])
else
error("Unknown parameter $storage, choose from :sparsesym, :sparsefull")
end
if storage == :sparsesym
A = PointE{Float64, SparseSym{Float64}}[]
elseif storage == :sparsefull
A = PointE{Float64, Sparse{Float64}}[]
end
b = Float64[]
if storage == :sparsesym
push!(A, PointE([Symmetric(sparse(-1.0I, q+r, q+r), :L)], Float64[]))
elseif storage == :sparsefull
push!(A, PointE([sparse(-1.0I, q+r, q+r)], Float64[]))
end
push!(b, -1.)
for ctrind=1:p
aimat = spzeros(q+r, q+r)
Bi = rand(q, r)
if storage == :sparsesym
for i=1:r, j=1:q
aimat[q+i, j] = Bi[j, i]
end
push!(A, PointE([Symmetric(aimat, :L)], Float64[]))
elseif storage == :sparsefull
for i=1:r, j=1:q
aimat[q+i, j] = Bi[j, i]
aimat[j, q+i] = Bi[j, i]
end
push!(A, PointE([aimat], Float64[]))
end
push!(b, 0.)
end
problem = SDCOContext(c, A, b)
y = zeros(p+1)
y[1] = norm(B0) + 1
x = PointE([1/(q+r) * Matrix(1.0I, q+r, q+r)], Float64[])
s = c - evaluate(A, y)
z = PointPrimalDual(x, y, s)
return problem, z
end
function testcase4a()
cmat = [sparse([3], [3], [1.], 3, 3)]
a1mats = [sparse([1, 2, 3], [2, 1, 3], [1., 1., 1.], 3, 3)]
a2mats = [sparse([2], [2], [1.], 3, 3)]
c = PointE(cmat, Vector{Float64}())
a1 = PointE(a1mats, Vector{Float64}())
a2 = PointE(a2mats, Vector{Float64}())
A = [a1, a2]
b = Vector{Float64}([-1, 0])
return SDCOContext(c, A, b)
end
function testcase4b()
cmat = [sparse(Float64[0 1; 1 0])]
a1mats = [sparse(Float64[-1 0; 0 0])]
    a2mats = [sparse(Float64[0 0; 0 -1])]
c = PointE(cmat, Vector{Float64}())
a1 = PointE(a1mats, Vector{Float64}())
a2 = PointE(a2mats, Vector{Float64}())
A = [a1, a2]
b = Vector{Float64}([-1, 0])
return SDCOContext(c, A, b)
end
function testcase4c()
cmat = [sparse(Float64[0 0; 0 0])]
a1mats = [sparse(Float64[1 0; 0 0])]
    a2mats = [sparse(Float64[0 1; 1 0])]
c = PointE(cmat, Vector{Float64}())
a1 = PointE(a1mats, Vector{Float64}())
a2 = PointE(a2mats, Vector{Float64}())
A = [a1, a2]
b = Vector{Float64}([0, 2])
return SDCOContext(c, A, b)
end
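# Usage sketch (illustrative; PointE, SDCOContext and PointPrimalDual come from
# the surrounding SDCO.jl module, so this is meant to be run after `using SDCO`):
#
#   problem, z0 = testcase1a(symmetric=true)  # SDP instance plus a primal-dual start
#   problem4 = testcase4a()                   # instance only, no starting point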
// ==== boost/assign/v2/support/check/equal_container/deduce.hpp (rogard/assign_v2) ====
//////////////////////////////////////////////////////////////////////////////
// Boost.Assign v2 //
// //
// Copyright (C) 2003-2004 Thorsten Ottosen //
// Copyright (C) 2011 Erwann Rogard //
// Use, modification and distribution are subject to the //
// Boost Software License, Version 1.0. (See accompanying file //
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) //
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_ASSIGN_V2_SUPPORT_CHECK_EQUAL_CONTAINER_DEDUCE_ER_2011_HPP
#define BOOST_ASSIGN_V2_SUPPORT_CHECK_EQUAL_CONTAINER_DEDUCE_ER_2011_HPP
#include <boost/assign/v2/support/traits/container.hpp>
#include <boost/assign/v2/support/check/equal_container/array.hpp>
#include <boost/assign/v2/support/check/equal_container/fifo.hpp>
#include <boost/assign/v2/support/check/equal_container/lifo.hpp>
#include <boost/assign/v2/support/check/equal_container/range.hpp>
#include <boost/assign/v2/support/check/equal_container/sorted.hpp>
#include <boost/assign/v2/support/switch.hpp>
namespace boost{
namespace assign{
namespace v2{
namespace switch_tag{
struct check_container{};
}// switch_tag
namespace switch_aux{
template<>
struct case_<switch_tag::check_container, 0> :
switch_aux::helper<
check_aux::fifo,
container_aux::is_fifo
>{};
template<>
struct case_<switch_tag::check_container, 1> :
switch_aux::helper<
check_aux::lifo,
container_aux::is_lifo
>{};
template<>
struct case_<switch_tag::check_container, 2> :
switch_aux::helper<
check_aux::sorted,
container_aux::is_sorted
>{};
template<>
struct case_<switch_tag::check_container, 3> :
switch_aux::helper<
check_aux::array,
container_aux::is_array
>{};
template<>
struct case_<switch_tag::check_container, 4> :
switch_aux::helper<
check_aux::range
>{};
}// switch_aux
namespace check_aux{
template<typename T>
struct deduce_equal_container_tag : switch_aux::result<
v2::switch_tag::check_container,
T
>
{};
}// check_aux
}// v2
}// assign
}// boost
#endif // BOOST_ASSIGN_V2_SUPPORT_CHECK_EQUAL_CONTAINER_DEDUCE_ER_2011_HPP
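// Usage sketch (illustrative): deduce the tag selecting the equality checker
// for a container type. A plain std::vector matches none of cases 0-3 (fifo,
// lifo, sorted, array), so the deduction falls through to the range case:
//
//   typedef boost::assign::v2::check_aux::deduce_equal_container_tag<
//       std::vector<int>
//   >::type tag_; // a hierarchy tag selecting check_aux::range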
# ==== sspinn/__main__.py (awild82/SSPINN) ====
import sys
import numpy as np
from . import nn_translator
from . import predict
from . import visualizer
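# Usage sketch (illustrative): this file is the package entry point, so it is
# typically invoked as `python -m sspinn path/to/input_file`.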
inputfile = sys.argv[1]
nn_input, input_mat, empirical \
= nn_translator.nn_translator(inputfile, train=True)
nn_input = np.array(nn_input).reshape((1, predict._N_DIMS_IN))
p = predict.Predictor(net='hyak_long.h5')
nn_output = p._nn.predict(nn_input)
nn_output = np.reshape(nn_output, (432, 432))
# Create Visual output
real = visualizer.Visualizer(nn_output, empirical)
visual = visualizer.Visualizer(np.array(input_mat), empirical)
visual.draw2Dstructure()
real.draw2Dstructure()
# ==== svca_limix/demos/demo_gp_regression.py (DenisSch/svca) ====
import sys
sys.path.insert(0,'./../../..')
from limix.core.mean.mean_base import MeanBase as lin_mean
from limix.core.covar import SQExpCov
from limix.core.covar import FixedCov
from limix.core.covar import SumCov
from limix.core.gp import GP
import pdb
import scipy as sp
import scipy.linalg as LA
import time as TIME
import copy
import pylab as pl
sp.random.seed(1)
if __name__ == "__main__":
# generate data
N = 400
X = sp.linspace(0,2,N)[:,sp.newaxis]
v_noise = 0.01
Y = sp.sin(X) + sp.sqrt(v_noise) * sp.randn(N, 1)
    # for out-of-sample predictions
Xstar = sp.linspace(0,2,1000)[:,sp.newaxis]
# define mean term
W = 1. * (sp.rand(N, 2) < 0.2)
mean = lin_mean(Y, W)
# define covariance matrices
sqexp = SQExpCov(X, Xstar = Xstar)
noise = FixedCov(sp.eye(N))
covar = SumCov(sqexp, noise)
# define gp
gp = GP(covar=covar,mean=mean)
# initialize params
sqexp.scale = 1e-4
sqexp.length = 1
noise.scale = Y.var()
# optimize
gp.optimize(calc_ste=True)
# predict out-of-sample
Ystar = gp.predict()
# print optimized values and standard errors
print('weights of fixed effects')
print(mean.b[0, 0], '+/-', mean.b_ste[0, 0])
print(mean.b[1, 0], '+/-', mean.b_ste[1, 0])
print('scale of sqexp')
print(sqexp.scale, '+/-', sqexp.scale_ste)
print('length of sqexp')
print(sqexp.length, '+/-', sqexp.length_ste)
print('scale of fixed')
print(noise.scale, '+/-', noise.scale_ste)
# plot
pl.subplot(111)
pl.title('GP regression with SQExp')
pl.plot(X.ravel(),Y.ravel(), 'xk', label = 'Data points')
pl.plot(Xstar.ravel(),Ystar.ravel(),'FireBrick',lw=2, label = 'GP')
pl.xlabel('x')
pl.ylabel('y')
pl.legend(loc = 4)
pl.tight_layout()
pl.show()
// ==== modules/core/trigonometric/include/nt2/trigonometric/constants/radindeg.hpp (psiha/nt2) ====
//==============================================================================
// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef NT2_TRIGONOMETRIC_CONSTANTS_RADINDEG_HPP_INCLUDED
#define NT2_TRIGONOMETRIC_CONSTANTS_RADINDEG_HPP_INCLUDED
#include <nt2/include/functor.hpp>
#include <boost/simd/constant/hierarchy.hpp>
#include <boost/simd/constant/register.hpp>
namespace nt2
{
namespace tag
{
/*!
@brief Radindeg generic tag
Represents the Radindeg constant in generic contexts.
@par Models:
Hierarchy
**/
BOOST_SIMD_CONSTANT_REGISTER( Radindeg, double
, 57, 0x42652ee1
, 0x404ca5dc1a63c1f8ll
)
}
namespace ext
{
template<class Site, class... Ts>
BOOST_FORCEINLINE generic_dispatcher<tag::Radindeg, Site> dispatching_Radindeg(adl_helper, boost::dispatch::meta::unknown_<Site>, boost::dispatch::meta::unknown_<Ts>...)
{
return generic_dispatcher<tag::Radindeg, Site>();
}
template<class... Args>
struct impl_Radindeg;
}
/*!
    Constant Radindeg : radian-to-degree conversion factor, \f$\frac{180}{\pi}\f$.
@par Semantic:
For type T0:
@code
T0 r = Radindeg<T0>();
@endcode
is similar to:
@code
T0 r = _180<T0>()/Pi<T0>();
@endcode
@see @funcref{inrad}, @funcref{indeg}, @funcref{Radindegr}, @funcref{Deginrad}
@return a value of type T0
**/
BOOST_SIMD_CONSTANT_IMPLEMENTATION(tag::Radindeg, Radindeg);
}
namespace nt2
{
/// INTERNAL ONLY
}
#endif
// ==== src/settings/settings.cpp (ZCube/OverlayProc) ====
#include <json/json.h>
#include <thread>
#include <mutex>
#include <list>
#include "settings.h"
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <boost/lexical_cast.hpp>
OverlaySettingsWithDirtyFlag globalSettings;
//OverlaySettingsServer settingServer;
//OverlaySettingsManager settingManager;
bool OverlaySettingsManager::UpdateSettings(const boost::uuids::uuid& id, const Json::Value& root, bool apply)
{
bool ret = false;
{
		std::lock_guard<std::recursive_mutex> w(m);
		this->tag = id;
UpdatePositions();
nextSettings.useResizeGrip = root.get("useResizeGrip", settings.useResizeGrip).asBool();
nextSettings.useDragFilter = root.get("useDragFilter", settings.useDragFilter).asBool();
nextSettings.useAppRegion = root.get("useAppRegion", settings.useAppRegion).asBool();
nextSettings.useDragMove = root.get("useDragMove", settings.useDragMove).asBool();
nextSettings.title = root.get("title", settings.title).asString();
nextSettings.url = root.get("url", settings.url).asString();
nextSettings.width = root.get("width", settings.width).asInt();
nextSettings.height = root.get("height", settings.height).asInt();
nextSettings.x = root.get("x", settings.x).asInt();
nextSettings.y = root.get("y", settings.y).asInt();
nextSettings.useHide = root.get("hide", settings.useHide).asBool();
nextSettings.zoom = root.get("zoom", settings.zoom).asDouble();
nextSettings.opacity = root.get("opacity", settings.opacity).asDouble();
nextSettings.fps = root.get("fps", settings.fps).asDouble();
nextSettings.useTransparent = root.get("Transparent", settings.useTransparent).asBool();
nextSettings.useNoActivate = root.get("NoActivate", settings.useNoActivate).asBool();
CheckDirty();
ret = true;
}
if (apply)
{
ApplySettings(root);
}
return ret;
}
void OverlaySettingsWithDirtyFlag::CheckDirty()
{
settingsDirty =
nextSettings.useResizeGrip != settings.useResizeGrip ||
nextSettings.useDragFilter != settings.useDragFilter ||
nextSettings.useAppRegion != settings.useAppRegion ||
nextSettings.useDragMove != settings.useDragMove ||
nextSettings.title != settings.title ||
nextSettings.url != settings.url ||
nextSettings.width != settings.width ||
nextSettings.height != settings.height ||
nextSettings.x != settings.x ||
nextSettings.y != settings.y ||
nextSettings.useHide != settings.useHide ||
nextSettings.zoom != settings.zoom ||
nextSettings.opacity != settings.opacity ||
nextSettings.useTransparent != settings.useTransparent ||
nextSettings.useNoActivate != settings.useNoActivate ||
false
;
}
Json::Value OverlaySettingsManager::GetSettings(bool next)
{
OverlaySettings* s = nullptr;
s = next ? &nextSettings : &settings;
UpdatePositions();
Json::Value root;
{
std::lock_guard<std::recursive_mutex> r(m);
root["useResizeGrip"] = s->useResizeGrip;
root["useDragFilter"] = s->useDragFilter;
root["useAppRegion"] = s->useAppRegion;
root["useDragMove"] = s->useDragMove;
root["title"] = s->title;
root["url"] = s->url;
root["width"] = s->width;
root["height"] = s->height;
root["x"] = s->x;
root["y"] = s->y;
root["hide"] = s->useHide;
root["zoom"] = s->zoom;
root["opacity"] = s->opacity;
root["fps"] = s->fps;
root["Transparent"] = s->useTransparent;
root["NoActivate"] = s->useNoActivate;
}
return root;
}
Json::Value OverlaySettingsManager::GetPositions()
{
OverlaySettings* s = &settings;
Json::Value root;
{
root["width"] = s->width;
root["height"] = s->height;
root["x"] = s->x;
root["y"] = s->y;
}
return root;
}
void OverlaySettingsServer::UpdateTitle(const boost::uuids::uuid& id, const std::string& title)
{
std::lock_guard<std::recursive_mutex> l(m);
Json::Value value;
{
value["id"] = boost::uuids::to_string(id);
value["title"] = title;
}
settings[id].UpdateSettings(id, value, true);
}
bool OverlaySettingsServer::UpdateOverlaySettings(const boost::uuids::uuid& id, const Json::Value& value, bool apply) {
std::lock_guard<std::recursive_mutex> l(m);
return settings[id].UpdateSettings(id, value, apply);
}
Json::Value OverlaySettingsServer::GetOverlaySettings(const boost::uuids::uuid& id, bool next) {
std::lock_guard<std::recursive_mutex> l(m);
Json::Value val = settings[id].GetSettings(next);
val["id"] = boost::uuids::to_string(id);
return val;
}
Json::Value OverlaySettingsServer::GetOverlaySettings() {
std::lock_guard<std::recursive_mutex> l(m);
	Json::Value root;
for (auto i = settings.begin(); i != settings.end(); ++i) {
root[boost::uuids::to_string(i->first)] = i->second.GetSettings(false);
}
Json::StyledWriter writer;
return writer.write(root);
}
Json::Value OverlaySettingsServer::SetOverlaySettings(const Json::Value& root) {
std::lock_guard<std::recursive_mutex> l(m);
// TODO : CloseAll
settings.clear();
for (auto i = root.begin(); i != root.end(); ++i) {
auto id = boost::lexical_cast<boost::uuids::uuid>(i.key().asString());
// TODO : Create
settings[id].UpdateSettings(id, *i, true);
}
return root;
}
Json::Value OverlaySettingsServer::set(const Json::Value& value)
{
Json::Value null;
Json::Value id;
try {
if ((id = value.get("id", null)) != null) {
boost::uuids::uuid id_ = boost::lexical_cast<boost::uuids::uuid>(id.asString());
UpdateOverlaySettings(id_, value, true);
return GetOverlaySettings(id_, true);
}
else {
boost::uuids::uuid id_ = boost::uuids::random_generator()();
UpdateOverlaySettings(id_, value, true);
return GetOverlaySettings(id_, true);
}
}
catch (std::runtime_error& e)
{
Json::Value error;
error["error"] = e.what();
return error;
}
}
Json::Value OverlaySettingsServer::set_all(const Json::Value& value) {
std::lock_guard<std::recursive_mutex> l(m);
try {
std::set<boost::uuids::uuid> current_ids, new_ids, intersection_ids;
std::list<boost::uuids::uuid> temp_ids;
{
for (auto i = settings.begin(); i != settings.end(); ++i) {
current_ids.insert(i->first);
}
}
{
for (auto i = value.begin(); i != value.end(); ++i) {
new_ids.insert(boost::lexical_cast<boost::uuids::uuid>(i.key().asString()));
}
}
std::set_intersection(current_ids.begin(), current_ids.end(), new_ids.begin(), new_ids.end(), std::back_inserter(temp_ids));
{
for (auto i = temp_ids.begin(); i != temp_ids.end(); ++i) {
intersection_ids.insert(*i);
}
}
for (auto i = current_ids.begin(); i != current_ids.end(); ++i)
{
if (intersection_ids.find(*i) == intersection_ids.end())
{
CloseOverlayWindow(*i);
}
}
for (auto i = value.begin(); i != value.end(); ++i) {
boost::uuids::uuid id_ = boost::lexical_cast<boost::uuids::uuid>(i.key().asString());
UpdateOverlaySettings(id_, *i, true);
}
return value;
}
catch (std::runtime_error& e)
{
Json::Value error;
error["error"] = e.what();
return error;
}
}
Json::Value OverlaySettingsServer::get(const Json::Value& value) {
Json::Value null;
Json::Value id;
if ((id = value.get("id", null)) != null) {
boost::uuids::uuid id_ = boost::lexical_cast<boost::uuids::uuid>(id.asString());
return GetOverlaySettings(id_);
}
else {
Json::Value error;
error["error"] = "NotFound";
return error;
}
}
Json::Value OverlaySettingsServer::get_all() {
std::lock_guard<std::recursive_mutex> l(m);
Json::Value root;
for (auto i = settings.begin(); i != settings.end(); ++i) {
root[boost::uuids::to_string(i->first)] = GetOverlaySettings(i->first);
}
return root;
}
Json::Value OverlaySettingsServer::close(const Json::Value& value) {
Json::Value null;
Json::Value id;
if ((id = value.get("id", null)) != null) {
boost::uuids::uuid id_ = boost::lexical_cast<boost::uuids::uuid>(id.asString());
CloseOverlayWindow(id_);
return null;
}
else {
Json::Value error;
error["error"] = "NotFound";
return error;
}
}
Json::Value OverlaySettingsServer::close_all() {
Json::Value null;
{
std::lock_guard<std::recursive_mutex> l(m);
Json::Value root;
while (settings.begin() != settings.end()) {
CloseOverlayWindow(settings.begin()->first);
}
settings.clear();
}
return null;
}
Json::Value OverlaySettingsServer::capture(const Json::Value& value) {
Json::Value null;
Json::Value id;
if ((id = value.get("id", null)) != null) {
boost::uuids::uuid id_ = boost::lexical_cast<boost::uuids::uuid>(id.asString());
return CaptureOverlayWindow(id_);
}
else {
Json::Value error;
error["error"] = "NotFound";
return error;
}
}
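// Example settings payload accepted by OverlaySettingsServer::set (illustrative,
// derived from the keys parsed in UpdateSettings; "id" is optional, and a random
// UUID is generated when it is absent):
//
//   {
//     "title": "overlay", "url": "http://localhost/",
//     "width": 640, "height": 360, "x": 0, "y": 0,
//     "zoom": 1.0, "opacity": 1.0, "fps": 30.0,
//     "hide": false, "Transparent": true, "NoActivate": true,
//     "useResizeGrip": false, "useDragFilter": false,
//     "useAppRegion": false, "useDragMove": true
//   }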
# ==== windows_code/merge_data.py (daniel-luper/self-driving-picar) ====
import os
import numpy as np
# Ask user which folder we want to merge with
print("Merge 'processed_data' with: ")
print(" 1. 'train_data'")
print(" 2. 'test_data'")
while True:
val = input("Enter '1' or '2': ")
if val == "1":
path = "train_data\\"
break
elif val == "2":
path = "test_data\\"
break
else:
print("Invalid input\n")
# Load 'processed_file\y.txt'
processed_file = open("processed_data\\y.txt", "r")
# ndmin=1 so a file with a single entry still yields a 1-D array
directions = np.loadtxt(processed_file, dtype=int, ndmin=1)
processed_length = directions.shape[0]
processed_file.close()
# Create file if it doesn't exist yet
if not os.path.isfile(path + "y.txt"):
os.system("type nul > " + path + "y.txt")
# Find the next index that hasn't been used in the final file's contents.
# This is the index at which our incoming data should start.
final_file = open(path + "y.txt", "r")
start_index = np.loadtxt(final_file, ndmin=1).shape[0]
final_file.close()
# Append text from 'processed_data' to our final text file
final_file = open(path + "y.txt", "a")
for direction in directions:
final_file.write(str(direction) + "\n")
final_file.close()
# Copy images over to our final folder
for i in range(0, processed_length):
os.system("copy processed_data\\"+str(i)+".png "+path+str(i + start_index)+".png")
# ==== seir/seir.jl (carlos-hernani/julia101) ====
cd(@__DIR__) # change the working directory to this file's directory
using Pkg; Pkg.activate("."); Pkg.instantiate()
#=
Pkg is Julia's built-in package manager; it handles operations such as installing,
updating and removing packages. Like cargo, it records the exact package versions
and dependencies in TOML files (Project.toml / Manifest.toml).
=#
Pkg.add("OrdinaryDiffEq")
Pkg.add("ModelingToolkit")
Pkg.add("DataDrivenDiffEq")
Pkg.add("LinearAlgebra")
Pkg.add("DiffEqSensitivity")
Pkg.add("Optim")
Pkg.add("DiffEqFlux")
Pkg.add("Flux")
Pkg.add("Plots")
using OrdinaryDiffEq
using ModelingToolkit
using DataDrivenDiffEq
using LinearAlgebra, DiffEqSensitivity, Optim
using DiffEqFlux, Flux
using Plots
gr() # function from Plots
function corona!(du,u,p,t)
S,E,I,R,N,D,C = u
F, β0,α,κ,μ,σ,γ,d,λ = p
dS = -β0*S*F/N - β(t,β0,D,N,κ,α)*S*I/N -μ*S # susceptible
dE = β0*S*F/N + β(t,β0,D,N,κ,α)*S*I/N -(σ+μ)*E # exposed
dI = σ*E - (γ+μ)*I # infected
dR = γ*I - μ*R # removed (recovered + dead)
dN = -μ*N # total population
dD = d*γ*I - λ*D # severe, critical cases, and deaths
dC = σ*E # +cumulative cases
du[1] = dS; du[2] = dE; du[3] = dI; du[4] = dR
du[5] = dN; du[6] = dD; du[7] = dC
end
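# Time-varying transmission rate: the baseline β0 is damped by the strength of
# governmental action (α) and by the public's reaction to the severe-case count
# D relative to the population N, with reaction intensity κ.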
β(t,β0,D,N,κ,α) = β0*(1-α)*(1-D/N)^κ
S0 = 14e6
u0 = [0.9*S0, 0.0, 0.0, 0.0, S0, 0.0, 0.0]
p_ = [10.0, 0.5944, 0.4239, 1117.3, 0.02, 1/3, 1/5,0.2, 1/11.2]
R0 = p_[2]/p_[7]*p_[6]/(p_[6]+p_[5]) # basic reproduction number: (β0/γ)*(σ/(σ+μ))
tspan = (0.0, 21.0)
prob = ODEProblem(corona!, u0, tspan, p_)
solution = solve(prob, Vern7(), abstol=1e-12, reltol=1e-12, saveat = 1)
tspan2 = (0.0,60.0)
prob = ODEProblem(corona!, u0, tspan2, p_)
solution_extrapolate = solve(prob, Vern7(), abstol=1e-12, reltol=1e-12, saveat = 1)
# Ideal data
tsdata = Array(solution)
# Add noise to the data
noisy_data = tsdata + Float32(1e-5)*randn(eltype(tsdata), size(tsdata))
plot(abs.(tsdata-noisy_data)')
### Neural ODE
ann_node = FastChain(FastDense(7, 64, tanh),FastDense(64, 64, tanh), FastDense(64, 64, tanh), FastDense(64, 7))
p = Float64.(initial_params(ann_node))
function dudt_node(u,p,t)
S,E,I,R,N,D,C = u
F,β0,α,κ,μ,σ,γ,d,λ = p_
dS,dE,dI,dR,dD = ann_node([S/N,E,I,R,N,D/N,C],p)
dN = -μ*N # total population
dC = σ*E # +cumulative cases
[dS,dE,dI,dR,dN,dD,dC]
end
prob_node = ODEProblem(dudt_node, u0, tspan, p)
s = concrete_solve(prob_node, Tsit5(), u0, p, saveat = solution.t)
function predict(θ)
Array(concrete_solve(prob_node, Vern7(), u0, θ, saveat = 1,
abstol=1e-6, reltol=1e-6,
sensealg = InterpolatingAdjoint(autojacvec=ReverseDiffVJP())))
end
# No regularisation right now
function loss(θ)
pred = predict(θ)
sum(abs2, (noisy_data[2:4,:] .- pred[2:4,:])), pred # + 1e-5*sum(sum.(abs, params(ann)))
end
loss(p)
const losses = []
callback(θ,l,pred) = begin
push!(losses, l)
if length(losses)%50==0
println(losses[end])
end
false
end
res1_node = DiffEqFlux.sciml_train(loss, p, ADAM(0.01), cb=callback, maxiters = 500)
res2_node = DiffEqFlux.sciml_train(loss, res1_node.minimizer, BFGS(initial_stepnorm=0.01), cb=callback, maxiters = 10000)
prob_node2 = ODEProblem(dudt_node, u0, tspan, res2_node.minimizer)
s = solve(prob_node2, Tsit5(), saveat = 1)
scatter(solution, vars=[2,3,4], label=["True Exposed" "True Infected" "True Recovered"])
plot!(s, vars=[2,3,4], label=["Estimated Exposed" "Estimated Infected" "Estimated Recovered"])
# Plot the losses
plot(losses, yaxis = :log, xaxis = :log, xlabel = "Iterations", ylabel = "Loss")
# Extrapolate out
prob_node_extrapolate = ODEProblem(dudt_node,u0, tspan2, res2_node.minimizer)
_sol_node = solve(prob_node_extrapolate, Vern7(), abstol=1e-12, reltol=1e-12, saveat = 1)
p_node = scatter(solution_extrapolate, vars=[2,3,4], legend = :topleft, label=["True Exposed" "True Infected" "True Recovered"], title="Neural ODE Extrapolation")
plot!(p_node,_sol_node, lw=5, vars=[2,3,4], label=["Estimated Exposed" "Estimated Infected" "Estimated Recovered"])
plot!(p_node,[20.99,21.01],[0.0,maximum(hcat(Array(solution_extrapolate[2:4,:]),Array(_sol_node[2:4,:])))],lw=5,color=:black,label="Training Data End")
savefig("neuralode_extrapolation.png")
savefig("neuralode_extrapolation.pdf")
### Universal ODE Part 1
ann = FastChain(FastDense(3, 64, tanh),FastDense(64, 64, tanh), FastDense(64, 1))
p = Float64.(initial_params(ann))
function dudt_(u,p,t)
S,E,I,R,N,D,C = u
F, β0,α,κ,μ,σ,γ,d,λ = p_
z = ann([S/N,I,D/N],p) # Exposure does not depend on exposed, removed, or cumulative!
dS = -β0*S*F/N - z[1] -μ*S # susceptible
dE = β0*S*F/N + z[1] -(σ+μ)*E # exposed
dI = σ*E - (γ+μ)*I # infected
dR = γ*I - μ*R # removed (recovered + dead)
dN = -μ*N # total population
dD = d*γ*I - λ*D # severe, critical cases, and deaths
dC = σ*E # +cumulative cases
[dS,dE,dI,dR,dN,dD,dC]
end
prob_nn = ODEProblem(dudt_,u0, tspan, p)
s = concrete_solve(prob_nn, Tsit5(), u0, p, saveat = 1)
plot(solution, vars=[2,3,4])
plot!(s[2:4,:]')
function predict(θ)
Array(concrete_solve(prob_nn, Vern7(), u0, θ, saveat = solution.t,
abstol=1e-6, reltol=1e-6,
sensealg = InterpolatingAdjoint(autojacvec=ReverseDiffVJP())))
end
# No regularisation right now
function loss(θ)
pred = predict(θ)
sum(abs2, noisy_data[2:4,:] .- pred[2:4,:]), pred # + 1e-5*sum(sum.(abs, params(ann)))
end
loss(p)
empty!(losses) # reuse the loss log defined above instead of redefining the const
callback(θ,l,pred) = begin
push!(losses, l)
if length(losses)%50==0
println(losses[end])
end
false
end
res1_uode = DiffEqFlux.sciml_train(loss, p, ADAM(0.01), cb=callback, maxiters = 500)
res2_uode = DiffEqFlux.sciml_train(loss, res1_uode.minimizer, BFGS(initial_stepnorm=0.01), cb=callback, maxiters = 10000)
loss(res2_uode.minimizer)
prob_nn2 = ODEProblem(dudt_,u0, tspan, res2_uode.minimizer)
uode_sol = solve(prob_nn2, Tsit5(), saveat = 1)
plot(solution, vars=[2,3,4])
plot!(uode_sol, vars=[2,3,4])
# Plot the losses
plot(losses, yaxis = :log, xaxis = :log, xlabel = "Iterations", ylabel = "Loss")
# Collect the state trajectory and the derivatives
X = noisy_data
# Ideal derivatives
DX = Array(solution(solution.t, Val{1}))
# Extrapolate out
prob_nn2 = ODEProblem(dudt_,u0, tspan2, res2_uode.minimizer)
_sol_uode = solve(prob_nn2, Vern7(), abstol=1e-12, reltol=1e-12, saveat = 1)
p_uode = scatter(solution_extrapolate, vars=[2,3,4], legend = :topleft, label=["True Exposed" "True Infected" "True Recovered"], title="Universal ODE Extrapolation")
plot!(p_uode,_sol_uode, lw = 5, vars=[2,3,4], label=["Estimated Exposed" "Estimated Infected" "Estimated Recovered"])
plot!(p_uode,[20.99,21.01],[0.0,maximum(hcat(Array(solution_extrapolate[2:4,:]),Array(_sol_uode[2:4,:])))],lw=5,color=:black,label="Training Data End")
savefig("universalode_extrapolation.png")
savefig("universalode_extrapolation.pdf")
### Universal ODE Part 2: SInDy to Equations
# Create a Basis
@variables u[1:3]
# Lots of polynomials
polys = Operation[]
for i ∈ 0:2, j ∈ 0:2, k ∈ 0:2
push!(polys, u[1]^i * u[2]^j * u[3]^k)
end
# And some other stuff
h = [cos.(u)...; sin.(u)...; unique(polys)...]
basis = Basis(h, u)
X = noisy_data
# Ideal derivatives
DX = Array(solution(solution.t, Val{1}))
S,E,I,R,N,D,C = eachrow(X)
F,β0,α,κ,μ,_,γ,d,λ = p_
L = β.(0:tspan[end],β0,D,N,κ,α).*S.*I./N
L̂ = vec(ann([S./N I D./N]',res2_uode.minimizer))
X̂ = [S./N I D./N]'
scatter(L,title="Estimated vs Expected Exposure Term",label="True Exposure")
plot!(L̂,label="Estimated Exposure")
savefig("estimated_exposure.png")
savefig("estimated_exposure.pdf")
# Create an optimizer for the SINDY problem
opt = SR3()
# Create the thresholds which should be used in the search process
thresholds = exp10.(-6:0.1:1)
# Test on original data and without further knowledge
Ψ_direct = SInDy(X[2:4, :], DX[2:4, :], basis, thresholds, opt = opt, maxiter = 50000) # Fail
println(Ψ_direct.basis)
# Test on ideal derivative data ( not available )
Ψ_ideal = SInDy(X[2:4, 5:end], L[5:end], basis, thresholds, opt = opt, maxiter = 50000) # Succeed
println(Ψ_ideal.basis)
# Test on uode derivative data
Ψ = SInDy(X̂[:, 2:end], L̂[2:end], basis, thresholds, opt = opt, maxiter = 10000, normalize = true, denoise = true) # Succeed
println(Ψ.basis)
# Build a ODE for the estimated system
function approx(u,p,t)
S,E,I,R,N,D,C = u
F, β0,α,κ,μ,σ,γ,d,λ = p_
z = Ψ([S/N,I,D/N]) # Exposure does not depend on exposed, removed, or cumulative!
dS = -β0*S*F/N - z[1] -μ*S # susceptible
dE = β0*S*F/N + z[1] -(σ+μ)*E # exposed
dI = σ*E - (γ+μ)*I # infected
dR = γ*I - μ*R # removed (recovered + dead)
dN = -μ*N # total population
dD = d*γ*I - λ*D # severe, critical cases, and deaths
dC = σ*E # +cumulative cases
[dS,dE,dI,dR,dN,dD,dC]
end
# Create the approximated problem and solution
a_prob = ODEProblem{false}(approx, u0, tspan2, p_)
a_solution = solve(a_prob, Tsit5())
p_uodesindy = scatter(solution_extrapolate, vars=[2,3,4], legend = :topleft, label=["True Exposed" "True Infected" "True Recovered"])
plot!(p_uodesindy,a_solution, lw = 5, vars=[2,3,4], label=["Estimated Exposed" "Estimated Infected" "Estimated Recovered"])
plot!(p_uodesindy,[20.99,21.01],[0.0,maximum(hcat(Array(solution_extrapolate[2:4,:]),Array(_sol_uode[2:4,:])))],lw=5,color=:black,label="Training Data End")
savefig("universalodesindy_extrapolation.png")
savefig("universalodesindy_extrapolation.pdf")
# ==== particle_filter.py (cohnt/Deformable-Object-Manifold-Learning) ====
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
class ParticleFilter():
def __init__(self, dimension, n_particles, exploration_factor, keep_best, RandomSampler, Likelihood, Diffuser, n_jobs=-1, joblib_backend="loky"):
# Particles will simply be stored as numpy arrays (of size "dimension").
# exploration_factor is the fraction (between 0 and 1) of particles which are randomly sampled at each iteration.
# keep_best is a boolean that determines whether or not the best particle is kept without any diffusion noise.
# RandomSampler is a function which returns a list of n random valid particles.
# Likelihood is a function which takes in a particle, and returns the likelihood that the particle is the ground truth.
# Diffuser is a function which takes in a particle, and returns a new particle with added noise.
# n_jobs is the number of processors to use for multithreaded components. -1 means use all processors.
self.dimension = dimension
self.n_particles = n_particles
self.exploration_factor = exploration_factor
self.keep_best = keep_best
self.RandomSampler = RandomSampler
self.SingleSample = lambda : self.RandomSampler(1)[0]
self.Likelihood = Likelihood
self.Diffuser = Diffuser
self.parallel = Parallel(n_jobs=n_jobs, verbose=0, backend=joblib_backend)
self.init_particles()
def init_particles(self):
# Create random initial particles.
self.particles = self.RandomSampler(self.n_particles)
self.weights = np.zeros(self.n_particles)
self.max_weight_ind = -1
def weight(self):
# Compute weights for all particles, and normalize weights so their sum is 1.
# Determine the highest weight particle.
        self.weights = np.asarray(self.parallel(delayed(self.Likelihood)(self.particles[i]) for i in tqdm(range(self.n_particles))))
        self.normalization_factor = np.sum(self.weights)
        if self.normalization_factor == 0:
            # Degenerate case: all likelihoods are zero, so fall back to uniform weights.
            self.weights = np.ones(self.n_particles)
            self.normalization_factor = float(self.n_particles)
        self.weights = self.weights / self.normalization_factor
        self.max_weight_ind = np.argmax(self.weights)
def predict_mle(self):
# Return highest likelihood particle
return self.particles[self.max_weight_ind]
def predict_mean(self):
# Return weighted average of all particles
return np.average(self.particles, weights=self.weights, axis=0)
def resample(self):
# Perform importance resampling, while keeping the best estimate if specified.
# Also add in the specified number of random exploration particles.
new_particles = []
# Number of particles produced by importance resampling (the remainder are
# random exploration particles; one slot is reserved for the best particle).
n_importance_resampling = int(self.n_particles * (1-self.exploration_factor))
if self.keep_best:
n_importance_resampling = n_importance_resampling - 1
new_particles.append(self.particles[self.max_weight_ind].copy())
# Determine step size
step_size = 1/float(n_importance_resampling+1)
# Importance resampling
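# (Systematic, low-variance scheme: step through the cumulative weight
# distribution at evenly spaced points and copy the particle whose CDF bin
# contains each point.)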
chkVal = step_size
chkIdx = 0
cs = np.cumsum(self.weights)
for i in range(int(n_importance_resampling)):
# Find the next sample
while cs[chkIdx] < chkVal:
chkIdx = chkIdx + 1
chkVal = chkVal + step_size
new_particles.append(self.particles[chkIdx].copy())
# Exploration particles
while(len(new_particles) < self.n_particles):
new_particles.append(self.SingleSample())
self.particles = np.array(new_particles)
def diffuse(self):
# Diffuse every particle except (optionally) the best one, which resample()
# placed at index 0 when keep_best is set.
if self.keep_best:
start = 1
else:
start = 0
old_particles = self.particles.copy()
diffused = np.asarray(self.parallel(delayed(self.Diffuser)(old_particles[i]) for i in range(start, self.n_particles)))
self.particles = np.concatenate((old_particles[:start], diffused))
def test_particle_filter():
dimension = 2
n_particles = 25
exploration_factor = 0.1
keep_best = True
def RS(n):
return (np.random.rand(n,2) * 2) - 1
def L(x):
return 1 / (1 + np.linalg.norm(x))
eps = 0.05
def D(x):
return x + ((np.random.rand(2) * 2 * eps) - eps)
pf = ParticleFilter(dimension, n_particles, exploration_factor, keep_best, RS, L, D)
import matplotlib.pyplot as plt
while True:
pf.weight()
mle = pf.predict_mle()
mean = pf.predict_mean()
plt.cla()
plt.scatter(pf.particles[:,0], pf.particles[:,1], c=pf.weights)
plt.scatter([mle[0]], [mle[1]], marker="D", color="black")
plt.scatter([mean[0]], [mean[1]], marker="X", color="black")
plt.scatter([0], [0], marker="*", color="red")
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.draw()
plt.pause(1)
pf.resample()
pf.diffuse()
if __name__ == "__main__":
test_particle_filter()
|
{"hexsha": "5b3ef2368038b4af512fa64fa7fab588bdd4e80d", "size": 4419, "ext": "py", "lang": "Python", "max_stars_repo_path": "particle_filter.py", "max_stars_repo_name": "cohnt/Deformable-Object-Manifold-Learning", "max_stars_repo_head_hexsha": "81b6d757df78fbd4427db7ab87051ed1514180ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "particle_filter.py", "max_issues_repo_name": "cohnt/Deformable-Object-Manifold-Learning", "max_issues_repo_head_hexsha": "81b6d757df78fbd4427db7ab87051ed1514180ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "particle_filter.py", "max_forks_repo_name": "cohnt/Deformable-Object-Manifold-Learning", "max_forks_repo_head_hexsha": "81b6d757df78fbd4427db7ab87051ed1514180ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.825, "max_line_length": 146, "alphanum_fraction": 0.7377234668, "include": true, "reason": "import numpy", "num_tokens": 1157}
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
import os
from tuframework.evaluation.model_selection.summarize_results_in_one_json import summarize
from tuframework.paths import network_training_output_dir
import numpy as np
def list_to_string(l, delim=","):
st = "%03.3f" % l[0]
for i in l[1:]:
st += delim + "%03.3f" % i
return st
def write_plans_to_file(f, plans_file, stage=0, do_linebreak_at_end=True, override_name=None):
a = load_pickle(plans_file)
stages = list(a['plans_per_stage'].keys())
stages.sort()
patch_size_in_mm = [i * j for i, j in zip(a['plans_per_stage'][stages[stage]]['patch_size'],
a['plans_per_stage'][stages[stage]]['current_spacing'])]
median_patient_size_in_mm = [i * j for i, j in zip(a['plans_per_stage'][stages[stage]]['median_patient_size_in_voxels'],
a['plans_per_stage'][stages[stage]]['current_spacing'])]
if override_name is None:
f.write(plans_file.split("/")[-2] + "__" + plans_file.split("/")[-1])
else:
f.write(override_name)
f.write(";%d" % stage)
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['batch_size']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['num_pool_per_axis']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['patch_size']))
f.write(";%s" % list_to_string(patch_size_in_mm))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['median_patient_size_in_voxels']))
f.write(";%s" % list_to_string(median_patient_size_in_mm))
f.write(";%s" % list_to_string(a['plans_per_stage'][stages[stage]]['current_spacing']))
f.write(";%s" % list_to_string(a['plans_per_stage'][stages[stage]]['original_spacing']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['pool_op_kernel_sizes']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['conv_kernel_sizes']))
if do_linebreak_at_end:
f.write("\n")
if __name__ == "__main__":
summarize((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 24, 27), output_dir=join(network_training_output_dir, "summary_fold0"), folds=(0,))
base_dir = os.environ['RESULTS_FOLDER']
tuframeworks = ['tuframeworkV2', 'tuframeworkV2_zspacing']
task_ids = list(range(99))
with open("summary.csv", 'w') as f:
f.write("identifier;stage;batch_size;num_pool_per_axis;patch_size;patch_size(mm);median_patient_size_in_voxels;median_patient_size_in_mm;current_spacing;original_spacing;pool_op_kernel_sizes;conv_kernel_sizes;patient_dc;global_dc\n")
for i in task_ids:
for tuframework in tuframeworks:
try:
summary_folder = join(base_dir, tuframework, "summary_fold0")
if isdir(summary_folder):
summary_files = subfiles(summary_folder, join=False, prefix="Task%03.0d_" % i, suffix=".json", sort=True)
for s in summary_files:
tmp = s.split("__")
trainer = tmp[2]
expected_output_folder = join(base_dir, tuframework, tmp[1], tmp[0], tmp[2].split(".")[0])
name = tmp[0] + "__" + tuframework + "__" + tmp[1] + "__" + tmp[2].split(".")[0]
global_dice_json = join(base_dir, tuframework, tmp[1], tmp[0], tmp[2].split(".")[0], "fold_0", "validation_tiledTrue_doMirror_True", "global_dice.json")
if not isdir(expected_output_folder) or len(tmp) > 3:
if len(tmp) == 2:
continue
expected_output_folder = join(base_dir, tuframework, tmp[1], tmp[0], tmp[2] + "__" + tmp[3].split(".")[0])
name = tmp[0] + "__" + tuframework + "__" + tmp[1] + "__" + tmp[2] + "__" + tmp[3].split(".")[0]
global_dice_json = join(base_dir, tuframework, tmp[1], tmp[0], tmp[2] + "__" + tmp[3].split(".")[0], "fold_0", "validation_tiledTrue_doMirror_True", "global_dice.json")
assert isdir(expected_output_folder), "expected output dir not found"
plans_file = join(expected_output_folder, "plans.pkl")
assert isfile(plans_file)
plans = load_pickle(plans_file)
num_stages = len(plans['plans_per_stage'])
if num_stages > 1 and tmp[1] == "3d_fullres":
stage = 1
elif (num_stages == 1 and tmp[1] == "3d_fullres") or tmp[1] == "3d_lowres":
stage = 0
else:
print("skipping", s)
continue
g_dc = load_json(global_dice_json)
mn_glob_dc = np.mean(list(g_dc.values()))
write_plans_to_file(f, plans_file, stage, False, name)
# now read and add result to end of line
results = load_json(join(summary_folder, s))
mean_dc = results['results']['mean']['mean']['Dice']
f.write(";%03.3f" % mean_dc)
f.write(";%03.3f\n" % mn_glob_dc)
print(name, mean_dc)
except Exception as e:
print(e)
|
{"hexsha": "f22162f5212c320959db19cb6ae001a4e8cb5fd5", "size": 6277, "ext": "py", "lang": "Python", "max_stars_repo_path": "tuframework/evaluation/model_selection/summarize_results_with_plans.py", "max_stars_repo_name": "Magnety/tuFramework", "max_stars_repo_head_hexsha": "b31cb34d476ef306b52da955021f93c91c14ddf4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tuframework/evaluation/model_selection/summarize_results_with_plans.py", "max_issues_repo_name": "Magnety/tuFramework", "max_issues_repo_head_hexsha": "b31cb34d476ef306b52da955021f93c91c14ddf4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tuframework/evaluation/model_selection/summarize_results_with_plans.py", "max_forks_repo_name": "Magnety/tuFramework", "max_forks_repo_head_hexsha": "b31cb34d476ef306b52da955021f93c91c14ddf4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.5495495495, "max_line_length": 241, "alphanum_fraction": 0.5698582125, "include": true, "reason": "import numpy", "num_tokens": 1508}
|
!NORMAL, REAL GREEN'S FUNCTION
subroutine vca_get_gimp_real_full(Greal)
complex(8),dimension(Nlat,Nlat,Nspin,Nspin,Norb,Norb,Lreal),intent(inout) :: Greal
Greal = impGreal
end subroutine vca_get_gimp_real_full
subroutine vca_get_gimp_real_ij(Greal,ilat,jlat)
integer :: ilat,jlat
complex(8),dimension(Nspin,Nspin,Norb,Norb,Lreal),intent(inout) :: Greal
Greal = impGreal(ilat,jlat,:,:,:,:,:)
end subroutine vca_get_gimp_real_ij
|
{"hexsha": "c1dc6dcc13cdac5815e19c37a1abc7fcbc11ee07", "size": 514, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "VCA_IO/get_gimp_realaxis.f90", "max_stars_repo_name": "QcmPlab/VCA", "max_stars_repo_head_hexsha": "01f5001db0ab0016043fb990a56b381858f7f9e0", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-10T08:01:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T08:01:29.000Z", "max_issues_repo_path": "VCA_IO/get_gimp_realaxis.f90", "max_issues_repo_name": "QcmPlab/VCA", "max_issues_repo_head_hexsha": "01f5001db0ab0016043fb990a56b381858f7f9e0", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VCA_IO/get_gimp_realaxis.f90", "max_forks_repo_name": "QcmPlab/VCA", "max_forks_repo_head_hexsha": "01f5001db0ab0016043fb990a56b381858f7f9e0", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7142857143, "max_line_length": 86, "alphanum_fraction": 0.6634241245, "num_tokens": 145}
|
#to add support for Python 3.x
from __future__ import division
from __future__ import print_function
import os, sys
import matplotlib
# import gtk  # unused here; only needed for the GTKAgg backend
# gtk.set_interactive(False)
matplotlib.use('TkAgg') # use WXAgg for smoother graphs; GTKAgg is faster
import matplotlib.pyplot as plt
import numpy as np
import xml.etree.ElementTree as ET
from collections import OrderedDict
from pprint import pprint
pltFont = {'fontname':'Vera Sans'}
colors = ['b', 'r', 'g', 'y']
styles = ['', '', '', '']
markers = ['', '', '', '']
last_color_index=0
last_style_index=0
last_marker_index=0
cafont = {'fontname':'Cambria'}
def debug_list_comprehension(x):
x = x.strip() # remove leading and trailing white spaces
try:
result = float(x) # try to convert
except ValueError:
print("ValueError: could not convert string to float:")
print(x)
return None
return result
def iter_color():
global last_color_index, colors
last_color_index = (last_color_index + 1) % len(colors)
return colors[last_color_index]
def iter_style():
global last_style_index, styles
last_style_index = (last_style_index + 1) % len(styles)
return styles[last_style_index]
def iter_markers():
global last_marker_index, markers
last_marker_index = (last_marker_index + 1) % len(markers)
return markers[last_marker_index]
# execute this if py interpreter is running this module as the main program (interpreter will set name to "main")
if __name__ == "__main__":
print("Number of arguments: ", len(sys.argv))
print("Argument List:", str(sys.argv))
if len(sys.argv) >= 2:
os.chdir(sys.argv[1])
else:
os.chdir(".")
######## READ DATA FROM LOGFILE ########
# os.listdir will get everything that's inside specified directory
# str.endswith(suffix [, start, end]) - returns True if the string ends with the specified suffix
stat_files = [ f for f in os.listdir(".") if f.endswith(".stats") ]
data = {}
# go through all the log files found in the current directory
for stat_file in stat_files:
print("loading", stat_file)
# go through the lines of the current stat file
#for line in file(stat_file):
# splittedDatasets = [x for x in line.split(';') if x]
# print("amount of splitted data sets: ", str(len(splittedDatasets)))
file = open(stat_file, 'r')
curDataset = file.read()
file.close()
# go through all the data fields separated by '\n'
# for curDataset in file(stat_file):
print("content: ", curDataset[0:400])
TITLE, SLOTNUMBER, X_LABEL, Y_LABEL, DATASET_NAME, COLOR, LINESTYLE, MARKER, XY_TAG, DATA_LIST = curDataset.split('\n', 9) # stat file format: TITLE | SLOTNUMBER | X_LABEL | Y_LABEL | DATASET_NAME | COLOR | LINESTYLE | MARKER | XY_TAG | DATA_LIST
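# Each .stats file is expected to begin with nine "NAME | value" header lines
# in the order above, followed by the DATA_LIST; only the text after the first
# '|' of each line is kept.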
TITLE = (TITLE.split('|', 1))[1].strip()
SLOTNUMBER = (SLOTNUMBER.split('|', 1))[1].strip()
X_LABEL = (X_LABEL.split('|', 1))[1].strip()
Y_LABEL = (Y_LABEL.split('|', 1))[1].strip()
DATASET_NAME = (DATASET_NAME.split('|', 1))[1].strip()
COLOR = (COLOR.split('|', 1))[1].strip()
LINESTYLE = (LINESTYLE.split('|', 1))[1].strip()
MARKER = (MARKER.split('|', 1))[1].strip()
XY_TAG = (XY_TAG.split('|', 1))[1].strip()
DATA_LIST = (DATA_LIST.split('|', 1))[1].strip()
#TITLE = TITLE.strip() # -> strip returns a copy of the string in which all white spaces have been stripped of
#if not stat_file in data.keys():
# print("new name: " + stat_file)
# data[stat_file] = []
# data[stat_file].append( (name, amount, iTimestamps) )
# print(infoData)
slotNumber = int(SLOTNUMBER)
print("----------------")
print("dataset_name: ", DATASET_NAME)
print("slotnumber: ", slotNumber)
if TITLE not in data.keys():
# print("new name: " + titlename)
data[TITLE] = {}
#slot = data[titlename]
if slotNumber not in data[TITLE].keys():
data[TITLE][slotNumber] = []
if COLOR == "":
COLOR = iter_color()
if LINESTYLE == "":
LINESTYLE = iter_style()
if MARKER == "":
MARKER = iter_markers()
data[TITLE][slotNumber].append((X_LABEL, Y_LABEL, DATASET_NAME, COLOR, LINESTYLE, MARKER, XY_TAG, DATA_LIST)) # DATA_LIST is alternating between x and y
#print("DATA::::::::", data)
######## PLOT ONE FIGURE ########
print("data_len: ",str(len(data)))
for titlename, innerDict in data.items(): # data.items() returns the dict's (key, value) pairs; new: titlename, previously: stat_file_name
print("############################")
fig = plt.figure('Figure of ' + titlename)
fig.set_size_inches(15, 10, forward=True)
fig.suptitle(r'' + titlename + r'', fontsize=20, verticalalignment='top', horizontalalignment='center') # titlename.replace('_', '\_')
fig.subplots_adjust(top=0.2)
for slotNumber, datasets in innerDict.items(): # iterate over the subplot slots of this figure
print("############################")
#::::::::plot:::::::::
print("plotting ", titlename, " with slotNumber ", slotNumber)
print("datasets_len: ",str(len(datasets)))
for dataset in datasets:
print("dataset_len: ",str(len(dataset)))
xAxisDescription, yAxisDescription, dataset_name, color, linestyle, marker, XY_TAG, values = dataset[0], dataset[1], dataset[2], dataset[3], dataset[4], dataset[5], dataset[6], dataset[7]
print("plotting ", dataset_name)
print("slotNumber: ", slotNumber)
print("XY_TAG: ", XY_TAG)
# plt.hold was removed in Matplotlib 3.0; adding artists without clearing the
# axes is the default behavior, so no call is needed here.
# cur_axes = plt.subplot(len(datasets), 1, slotNumber)
# adding a subplot
if slotNumber == 1:
cur_axes = plt.subplot(len(innerDict), 1, slotNumber)
else:
cur_axes = plt.subplot(len(innerDict), 1, slotNumber, sharex=cur_axes)
# configure the labels
#matplotlib.rcParams.update({'fontname':'Cambria'})
# fig.title (r'' + titlename.replace('_', '\_') + r'') # title above the graph
#plt.rcParams["font.family"] = pltFont
plt.xlabel (r'' + xAxisDescription.replace('_', '\_') + r'', fontsize=14, fontweight='medium') # xlabel of the current graph
yLabelHandle = plt.ylabel (r'' + yAxisDescription.replace('_', '\_') + r'', fontsize=14, horizontalalignment='left', fontweight='medium') # ylabel of the current graph
yLabelHandle.set_rotation(0)
cur_axes.yaxis.set_label_coords(-0.1, 1.1)
cur_axes.ticklabel_format(useOffset=False, style='plain') # disable scientific notation on both axis (useOffset for x, style for y axis)
# plt.xticks(rotation=70) # You can specify a rotation for the tick labels in degrees or with keywords.
cur_axes.xaxis.grid(True)
cur_axes.yaxis.grid(True)
print("prepare data values and plot them")
print("values: ", values[0:300])
values = values.strip()
if XY_TAG == 'x':
fxvals = np.fromstring(values, dtype='float', sep=' ')
for ax in fxvals:
cur_axes.axvline(x=ax, color=color, linestyle=linestyle, marker=marker, alpha=0.7, zorder=5) # , label=dataset_name)
elif XY_TAG == 'y':
fyvals = np.fromstring(values, dtype='float', sep=' ')
for ay in fyvals:
cur_axes.axhline(y=ay, color=color, linestyle=linestyle, marker=marker, alpha=0.7, zorder=4, label=dataset_name)
elif XY_TAG == 'xy':
fValueList = [float(x) for x in values.split(' ')] # use of list comprehension to convert strings from split func
#print("fValueList: ", fValueList)
fxvals = np.array(fValueList[0::2], dtype='float') # get all the even indexed elements inside the log after :
fyvals = np.array(fValueList[1::2], dtype='float') # get all the uneven indexed elements inside the log after :
print("fxvals: ", fxvals)
print("fyvals: ", fyvals)
print("len of fxvals: ", str(len(fxvals)))
print("len of fyvals: ", str(len(fyvals)))
#print("...........DEBUG.................");
#print("dataset_name: ", dataset_name);
#print("datavalues: ", fyvals[0:100]);
#print(".................................");
cur_axes.plot(fxvals, fyvals, color=color, linestyle=linestyle, marker=marker, label=dataset_name)
else:
print("skipping because of undefined plotOnAxis value...")
continue
# Put a legend below current axis
legend = plt.legend(loc='best', fancybox=True, shadow=False, ncol=1, fontsize=12) #bbox_to_anchor=(0, 0),
legend.get_frame().set_alpha(0.5)
fig.tight_layout(h_pad=0.7) # add some padding between the subplots (left, bottom, right, top)
fig.savefig(titlename + '_stats.png', bbox_inches='tight', transparent=False)
print("############################")
plt.show()
|
{"hexsha": "cfc7e960f63be95144e9e96d3f83842ef5bac16c", "size": 9800, "ext": "py", "lang": "Python", "max_stars_repo_path": "figures/results/BckUpPlotStats.py", "max_stars_repo_name": "RobertHue/LatexBeamerRosenheim", "max_stars_repo_head_hexsha": "dbe568806053c8b4ca3a5470a011822f6220d511", "max_stars_repo_licenses": ["MIT-0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figures/results/BckUpPlotStats.py", "max_issues_repo_name": "RobertHue/LatexBeamerRosenheim", "max_issues_repo_head_hexsha": "dbe568806053c8b4ca3a5470a011822f6220d511", "max_issues_repo_licenses": ["MIT-0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figures/results/BckUpPlotStats.py", "max_forks_repo_name": "RobertHue/LatexBeamerRosenheim", "max_forks_repo_head_hexsha": "dbe568806053c8b4ca3a5470a011822f6220d511", "max_forks_repo_licenses": ["MIT-0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9541284404, "max_line_length": 218, "alphanum_fraction": 0.5771428571, "include": true, "reason": "import numpy", "num_tokens": 2332}
|
!========================================================================
!
! T o m o f a s t - x
! -----------------------
!
! Authors: Vitaliy Ogarko, Jeremie Giraud, Roland Martin.
!
! (c) 2021 The University of Western Australia.
!
! The full text of the license is available in file "LICENSE".
!
!========================================================================
!==========================================================================
! A class to work with 3D vectors.
!
! Vitaliy Ogarko, UWA, CET, Australia, 2015.
!==========================================================================
module vector
implicit none
private
public :: operator(+)
public :: operator(-)
public :: operator(*)
public :: assignment(=)
integer, parameter :: CUSTOM_REAL = 8
!-----------------------------------------------
! A main class (floating point vectors).
type, public :: t_vector
real(kind=CUSTOM_REAL) :: x, y, z
contains
private
procedure, public, pass :: cross_product => vector_cross_product
procedure, public, pass :: dot_product => vector_dot_product
procedure, public, pass :: get_norm => vector_get_norm
end type t_vector
interface t_vector
module procedure vector_constructor
end interface t_vector
interface operator(+)
module procedure vector_add
end interface
interface operator(-)
module procedure vector_subtract
end interface
interface operator(*)
module procedure vector_mult
end interface
interface assignment(=)
module procedure vector_assign
end interface
!-----------------------------------
! A class for integer vectors.
type, public :: t_ivector
integer :: x, y, z
end type t_ivector
interface t_ivector
module procedure ivector_constructor
end interface t_ivector
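!-----------------------------------
! Example usage (a sketch; double-precision literals match CUSTOM_REAL = 8):
!   type(t_vector) :: a, b, c
!   a = t_vector(1.d0, 0.d0, 0.d0)
!   b = t_vector(0.d0, 1.d0, 0.d0)
!   c = a%cross_product(b)   ! c = (0, 0, 1)
!   print *, c%get_norm()    ! prints 1.0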
contains
!=======================================================================
! Constructor for t_ivector type.
!=======================================================================
function ivector_constructor(i, j, k) result(res)
integer, intent(in) :: i, j, k
type(t_ivector) :: res
res%x = i
res%y = j
res%z = k
end function ivector_constructor
!=======================================================================
! Constructor for t_vector type.
!=======================================================================
function vector_constructor(x, y, z) result(res)
real(kind=CUSTOM_REAL), intent(in) :: x, y, z
type(t_vector) :: res
res%x = x
res%y = y
res%z = z
end function vector_constructor
!=======================================================================
! Returns cross-product between vectors.
!=======================================================================
pure function vector_cross_product(this, vec) result(res)
class(t_vector), intent(in) :: this
type(t_vector), intent(in) :: vec
type(t_vector) :: res
res%x = this%y * vec%z - this%z * vec%y
res%y = this%z * vec%x - this%x * vec%z
res%z = this%x * vec%y - this%y * vec%x
end function vector_cross_product
!=======================================================================
! Returns dot-product between vectors.
!=======================================================================
pure function vector_dot_product(this, vec) result(res)
class(t_vector), intent(in) :: this
type(t_vector), intent(in) :: vec
real(kind=CUSTOM_REAL) :: res
res = this%x * vec%x + this%y * vec%y + this%z * vec%z
end function vector_dot_product
!=======================================================================
! Returns the norm of a vector.
!=======================================================================
pure function vector_get_norm(this) result(res)
class(t_vector), intent(in) :: this
real(kind=CUSTOM_REAL) :: res
res = sqrt(this%x**2 + this%y**2 + this%z**2)
end function vector_get_norm
!=======================================================================
! Returns the sum of two vectors.
!=======================================================================
pure function vector_add(v1, v2) result(res)
type(t_vector), intent(in) :: v1, v2
type(t_vector) :: res
res%x = v1%x + v2%x
res%y = v1%y + v2%y
res%z = v1%z + v2%z
end function vector_add
!=======================================================================
! Returns the subtraction of two vectors.
!=======================================================================
pure function vector_subtract(v1, v2) result(res)
type(t_vector), intent(in) :: v1, v2
type(t_vector) :: res
res%x = v1%x - v2%x
res%y = v1%y - v2%y
res%z = v1%z - v2%z
end function vector_subtract
!=======================================================================
! Assigns a scalar to a vector (sets all components to the scalar value).
!=======================================================================
subroutine vector_assign(lhs, rhs)
type(t_vector), intent(out) :: lhs
real(kind=CUSTOM_REAL), intent(in) :: rhs
lhs%x = rhs
lhs%y = rhs
lhs%z = rhs
end subroutine vector_assign
!=======================================================================
! Returns a scalar times vector.
!=======================================================================
pure function vector_mult(const, vec) result(res)
real(kind=CUSTOM_REAL), intent(in) :: const
type(t_vector), intent(in) :: vec
type(t_vector) :: res
res%x = const * vec%x
res%y = const * vec%y
res%z = const * vec%z
end function vector_mult
end module vector
|
{"hexsha": "5097f695f5c9d3da32cbaf7e267a581ebf186a9e", "size": 5539, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/utils/vector.f90", "max_stars_repo_name": "RichardScottOZ/Tomofast-x", "max_stars_repo_head_hexsha": "af2d0b8ad59cdc18d9c348bec274ca4371ae94c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-07-22T02:49:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T07:11:30.000Z", "max_issues_repo_path": "src/utils/vector.f90", "max_issues_repo_name": "RichardScottOZ/Tomofast-x", "max_issues_repo_head_hexsha": "af2d0b8ad59cdc18d9c348bec274ca4371ae94c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-08T01:04:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T21:40:37.000Z", "max_forks_repo_path": "src/utils/vector.f90", "max_forks_repo_name": "RichardScottOZ/Tomofast-x", "max_forks_repo_head_hexsha": "af2d0b8ad59cdc18d9c348bec274ca4371ae94c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-08-20T06:14:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T04:59:32.000Z", "avg_line_length": 28.4051282051, "max_line_length": 75, "alphanum_fraction": 0.4654269724, "num_tokens": 1175}
|
#!/usr/bin/env python
from pyrosetta import *
import re,sys
import os, shutil
import random
import numpy as np
import pickle
import math
os.environ["OPENBLAS_NUM_THREADS"] = "1"
phi=[]
psi=[]
phi_prob=[]
psi_prob=[]
#exit()
pcut = float(sys.argv[1])
k=int(sys.argv[2])
if(os.path.isfile("phipsi.npz")):
npz=np.load("phipsi.npz")
phi=npz['phi']
phi_prob=npz['phi_prob']
psi=npz['psi']
psi_prob=npz['psi_prob']
def main():
#print pcut, k
init('-mute all -hb_cen_soft -relax:default_repeats 5 -default_max_cycles 200')
#read fasta sequence
sequence=read_fasta("seq.fasta")
nres=len(sequence)
scorefxn=ScoreFunction()
scorefxn.set_weight(rosetta.core.scoring.cen_hb, 5.0) # short-range hbonding
scorefxn.set_weight(rosetta.core.scoring.rama, 1.0) # ramachandran score
scorefxn.set_weight(rosetta.core.scoring.omega, 0.5) # omega torsion score
scorefxn.set_weight(rosetta.core.scoring.vdw, 1.0)
scorefxn.set_weight(rosetta.core.scoring.atom_pair_constraint, 5)
scorefxn.set_weight(rosetta.core.scoring.dihedral_constraint, 4)
scorefxn.set_weight(rosetta.core.scoring.angle_constraint, 4)
scorefxn1=ScoreFunction()
scorefxn1.set_weight(rosetta.core.scoring.cen_hb, 5.0) # short-range hbonding
scorefxn1.set_weight(rosetta.core.scoring.rama, 1.0) # ramachandran score
scorefxn1.set_weight(rosetta.core.scoring.omega, 0.5) # omega torsion score
scorefxn1.set_weight(rosetta.core.scoring.vdw, 3.0)
scorefxn1.set_weight(rosetta.core.scoring.atom_pair_constraint, 3)
scorefxn1.set_weight(rosetta.core.scoring.dihedral_constraint, 1)
scorefxn1.set_weight(rosetta.core.scoring.angle_constraint, 1)
scorefxn_vdw=ScoreFunction()
scorefxn_vdw.set_weight(rosetta.core.scoring.vdw, 1.0)
scorefxn_vdw.set_weight(rosetta.core.scoring.rama, 1.0)
scorefxn_cart=ScoreFunction()
#scorefxn_cart.set_weight(rosetta.core.scoring.cen_hb, 5.0) # short-range hbonding
scorefxn_cart.set_weight(rosetta.core.scoring.hbond_sr_bb, 3.0) # short-range hbonding
scorefxn_cart.set_weight(rosetta.core.scoring.hbond_lr_bb, 3.0) # long-range hbonding
scorefxn_cart.set_weight(rosetta.core.scoring.rama, 1.0) # ramachandran score
scorefxn_cart.set_weight(rosetta.core.scoring.omega, 0.5) # omega torsion score
scorefxn_cart.set_weight(rosetta.core.scoring.vdw, 0.5)
scorefxn_cart.set_weight(rosetta.core.scoring.cart_bonded, 0.1)
scorefxn_cart.set_weight(rosetta.core.scoring.atom_pair_constraint, 5)
scorefxn_cart.set_weight(rosetta.core.scoring.dihedral_constraint, 4)
scorefxn_cart.set_weight(rosetta.core.scoring.angle_constraint, 4)
mmap = MoveMap()
mmap.set_bb(True)
mmap.set_chi(False)
mmap.set_jump(True)
n_iter = 1000 + nres
print(n_iter)
min_mover = rosetta.protocols.minimization_packing.MinMover(mmap, scorefxn, 'lbfgs_armijo_nonmonotone', 0.0001, True)
min_mover.max_iter(n_iter)
min_mover1 = rosetta.protocols.minimization_packing.MinMover(mmap, scorefxn1, 'lbfgs_armijo_nonmonotone', 0.0001, True)
min_mover1.max_iter(n_iter)
min_mover_vdw = rosetta.protocols.minimization_packing.MinMover(mmap, scorefxn_vdw, 'lbfgs_armijo_nonmonotone', 0.0001, True)
min_mover_vdw.max_iter(500)
min_mover_cart=rosetta.protocols.minimization_packing.MinMover(mmap, scorefxn_cart, 'lbfgs_armijo_nonmonotone', 0.0001, True)
min_mover_cart.max_iter(n_iter)
min_mover_cart.cartesian(True)
repeat_mover = RepeatMover(min_mover, 3)
count=0
score_file="cen_"+str(pcut)+"_"+str(k)+".sc"
score_file_r="score_r.sc"
# ref(score_file, score_file_r, nres)
# select_model(score_file_r)
# exit()
SS = open(score_file, "w")
SS.close()
rep_all = []
if(os.path.isfile("repul_cst")):
rep_all=read_cst("repul_cst")
cst_all = read_cst("cst.txt")
topcut=0.65
cst_for_score=[]
while(len(cst_for_score)<nres and topcut>0):
topcut -= 0.05
cst_for_score=fetch_cst_high(cst_all, nres, 1, 10000, topcut, 1)
sep1=1
sep2=10000
cst_top=[]
pcut1=0.85 #used for generating the starting conformation
while(len(cst_top)<nres and pcut1>0.1):
pcut1 -= 0.05
cst_top=fetch_cst_high(cst_all, nres, sep1, sep2, pcut1, 1)
if(len(cst_for_score)<nres):
print("warning: not enough reliable constraints to use: ", len(cst_for_score))
print("num of top cst:", len(cst_for_score))
print("k, pcut:", k, pcut)
pose0=make_start(k, sequence, scorefxn_vdw, min_mover_vdw)
#0.0, equals to the paper version
probCuts=[0.0, 0.05, 0.1, 0.2] #probability cutoffs for selecting the low-probability restraints, useful for avoiding clashes in big proteins
totcut=len(probCuts)
for it in range(0, 4):
low_cut=probCuts[it]
if(pcut<low_cut):
print("warning: pcut<low_cut, ", pcut, low_cut)
if(it>0):
print("skip iteration with low_cut", it)
continue
print("low_cut=", low_cut)
pose=Pose()
pose.assign(pose0)
sep1=1
sep2=10000
cst_rep_low = fetch_cst_low(cst_all, sep1, sep2, 0.0, low_cut)
print("repulsive cst: ", len(cst_rep_low))
add_cst(pose, cst_rep_low)
print("\nminimize with short cst...\n")
#short
sep1=1
sep2=12
cst_short=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
run_min(cst_short, 1, pose, repeat_mover, min_mover_cart)
clash_score=remove_clash(scorefxn_vdw, min_mover1, pose)
pose.dump_pdb("s.pdb")
#medm
print("\nminimize with medm cst...\n")
sep1=12
sep2=24
cst_medm=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
run_min(cst_medm, 1, pose, repeat_mover, min_mover_cart)
clash_score=remove_clash(scorefxn_vdw, min_mover1, pose)
pose.dump_pdb("m.pdb")
#long
print("\nminimize with long cst...\n")
sep1=24
sep2=10000
#add_cst(pose, cst_rep_low)
cst_long=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
run_min(cst_long, 1, pose, repeat_mover, min_mover_cart)
clash_score=remove_clash(scorefxn_vdw, min_mover1, pose)
name="pose" + str(k) + "_" + str(pcut) + "_" + str(it) + ".pdb"
output_data(pose, cst_for_score, scorefxn, name, score_file)
#generate alternative models for the first start
print("generate alternative model with short+medium, then long")
if(k==0):
pose.assign(pose0)
else:
pose=make_start(k, sequence, scorefxn_vdw, min_mover_vdw) #use new conformation to increase diversity
#print "prepare start conformation...\n"
sep1=1
sep2=10000
print("prepare start conformation with top restraints, and repulsive restraints")
pose0_with_topcst=Pose()
pose0_with_topcst.assign(pose0)
add_cst(pose0_with_topcst, cst_rep_low)
run_min(cst_top, 1, pose0_with_topcst, repeat_mover, min_mover_cart)
remove_clash(scorefxn_vdw, min_mover1, pose0_with_topcst)
pose0_with_topcst.dump_pdb("ini.pdb")
pose0_with_topcst.remove_constraints() #remove constraints to avoid duplicated ones
pose.assign(pose0_with_topcst)
add_cst(pose, cst_rep_low)
#short+medm first, and then replusive+long
sep1=1
sep2=24
cst=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
run_min(cst, 1, pose, repeat_mover, min_mover_cart)
remove_clash(scorefxn_vdw, min_mover1, pose)
pose.dump_pdb("ini_m.pdb")
#long
sep1=24
sep2=10000
cst=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
run_min(cst, 1, pose, repeat_mover, min_mover_cart)
clash_score=remove_clash(scorefxn_vdw, min_mover1, pose)
#if(clash_score>100):
# clash_score=remove_clash_with_repusive_cst(scorefxn_vdw, cst_rep, cst_rep_low, min_mover1, repeat_mover, min_mover_cart, pose)
name="pose" + str(k) + "_" + str(pcut) + "_0_" + str(it)+ ".pdb"
output_data(pose, cst_for_score, scorefxn, name, score_file)
#using all cst
print("generate alternative model1 with all cst together")
pose.assign(pose0_with_topcst)
sep1=1
sep2=10000
cst=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
add_cst(pose, cst_rep_low)
#add_cst(pose, rep_all)
run_min(cst, 1, pose, repeat_mover, min_mover_cart)
clash_score=remove_clash(scorefxn_vdw, min_mover1, pose)
#if(clash_score>100):
# clash_score=remove_clash_with_repusive_cst(scorefxn_vdw, cst_rep, cst_rep_low, min_mover1, repeat_mover, min_mover_cart, pose)
name="pose" + str(k) + "_" + str(pcut) + "_1_" + str(it) + ".pdb"
output_data(pose, cst_for_score, scorefxn, name, score_file)
#it
# ref(score_file, score_file_r, nres)
# select_model(score_file_r)
#end of main
def ref(filename, filename_r, nres):
probCuts=[0.15, 0.1, 0.05] #probability cutoff for selecting the restraints
sc=[]
decoy=[]
i=0
with open(filename, "r") as f:
for line in f:
line=line.rstrip()
if(line[0] == "#"):
continue
else:
b=re.split("\t", line)
sc.append(float(b[1]))
decoy.append(b[0])
#print b[0], b[1]
topmodels=[]
for prob in probCuts:
selected=fetch_subset(decoy, sc, prob)
topmodels += selected
##start refinement
#print "here";
SS = open(filename_r, "w")
SS.close()
scorefxn_fa=create_score_function('ref2015')
scorefxn_fa.set_weight(rosetta.core.scoring.atom_pair_constraint, 3)
mmap = MoveMap()
mmap.set_bb(True)
mmap.set_chi(True)
mmap.set_jump(True)
relax=rosetta.protocols.relax.FastRelax()
relax.set_scorefxn(scorefxn_fa)
relax.max_iter(200)
relax.dualspace(True)
relax.set_movemap(mmap)
cstname="cst_good.txt"
if(not os.path.isfile(cstname)): cstname="cst.txt"
cst_all = read_cst(cstname)
sep1=3
sep2=10000
pcut=0.15
cst=fetch_cst(cst_all, nres, sep1, sep2, pcut, 1)
for model in topmodels:
pose = pose_from_file(model + ".pdb")
add_cst(pose, cst)
print("refine model", model)
relax.apply(pose)
SS = open(filename_r, "a")
name=model + "_ref.pdb";
inf=name + "\t" + str(scorefxn_fa(pose))
SS.write(inf)
SS.write("\n")
pose.dump_pdb(name)
SS.close()
def select_model(filename):
sc=[]
decoy=[]
i=0
with open(filename, "r") as f:
for line in f:
line=line.rstrip()
if(line[0] == "#"):
continue
else:
b=re.split("\t", line)
sc.append(float(b[1]))
decoy.append(b[0])
#print b[0], b[1]
idx=np.argsort(sc)
topN=5
if(topN>len(idx)): topN=len(idx)
final=[]
k=1
for i in idx:
#print decoy[i]
if(not os.path.isfile(decoy[i])): continue
name="model"+str(k)+".pdb"
shutil.copy(decoy[i], name)
k +=1
if(k>5): break
def fetch_subset(decoys, score, prob):
nd=len(decoys)
selected_decoys=[]
selected_scores=[]
for i in range(0, nd):
d=decoys[i]
Reg=re.compile(r'(.+).pdb')
mo=Reg.search(d)
#print mo.group(1)
rst=re.split("_", mo.group(1))
if(float(rst[1])==prob):
selected_decoys.append(mo.group(1))
selected_scores.append(score[i])
idx=np.argsort(selected_scores)
topN=10
if(topN>len(idx)): topN=len(idx)
final=[]
for j in range(0,topN):
i=idx[j]
final.append(selected_decoys[i])
#print selected_decoys[i], selected_scores[i]
return final
def output_data(pose, cst, scorefxn, name, filename):
pose.remove_constraints()
add_cst(pose, cst)
inf=name + "\t" + str(scorefxn(pose))
SS = open(filename, "a")
SS.write(inf)
SS.write("\n")
SS.close()
pose.dump_pdb(name)
def remove_clash(scorefxn, mover, pose):
clash_score=float(scorefxn(pose))
print("clash_score=", clash_score)
if(clash_score>10):
for nm in range(0, 2):
mover.apply(pose)
clash_score=float(scorefxn(pose))
print("clash_score=", clash_score)
if(clash_score<10): break
return clash_score
def remove_clash_with_repusive_cst(scorefxn, cst1, cst2, mover_vdw, mover_min, mover_cart, pose):
print("heavy clash, add regular repulsive restraints and minimize structure")
add_cst(pose, cst1)
mover_min.apply(pose)
mover_cart.apply(pose)
clash_score=remove_clash(scorefxn, mover_vdw, pose)
if(clash_score>100):
print("heavy clash still there, add strong repulsive restraints and minimize structure", clash_score)
add_cst(pose, cst2)
mover_min.apply(pose)
mover_cart.apply(pose)
clash_score=remove_clash(scorefxn, mover_vdw, pose)
if(clash_score>100):
print("heavy clash still there, please check the folding by hand", clash_score)
return clash_score
def run_min(cst_all, n_sets, pose, mover1, mover2):
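# Shuffle the constraints, split them into n_sets batches, and minimize after
# adding each batch to the pose (mover1, then mover2; in this script a repeated
# torsion-space MinMover followed by a Cartesian MinMover).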
if(len(cst_all)==0):
print("warning: empty constraint set")
return
random.shuffle(cst_all)
b_size=int(len(cst_all)/n_sets)
for i in range(0, len(cst_all), b_size):
batch=cst_all[i:i+b_size]
add_cst(pose, batch)
mover1.apply(pose)
mover2.apply(pose)
def fetch_cst(cst, nres, sep1, sep2, cut, flag):
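# Select restraints whose sequence separation |i-j| falls in [sep1, sep2) and
# whose probability (parsed from the trailing '#' annotation) passes the
# cutoff: >= cut when flag==1, < cut otherwise. Dihedral and Angle restraints
# use a stricter cutoff of cut+0.5, capped at 0.9.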
pcut=cut
array=[]
for line in cst:
#print line
line=line.rstrip()
b=re.split("\s+", line)
#print b[1],b[2],b[3],b[4]
m=re.search('(?<=#).+', line)
dcst=re.split("\s+", m.group(0))
i=int(b[2])
j=int(b[4])
if(j==i):j=int(b[6]) #omega, phi
if(j==i):j=int(b[8]) #theta
if(b[0]=="Dihedral"): pcut=cut+0.5
if(b[0]=="Angle"): pcut=cut+0.5
if(b[0]=="AtomPair"): pcut=cut
if(pcut>0.9): pcut=0.9
sep=abs(j-i)
if(sep<sep1 or sep >=sep2): continue
#print(dcst[3])
if(flag==1):
if(float(dcst[3])>=pcut):
array.append(line)
#print line
else:
if(float(dcst[3])<pcut):
array.append(line)
return array
def fetch_cst_high(cst, nres, sep1, sep2, cut, flag):
pcut=cut
array=[]
for line in cst:
#print line
line=line.rstrip()
b=re.split("\s+", line)
if(b[0]!="AtomPair"): continue
#print b[1],b[2],b[3],b[4]
m=re.search('(?<=#).+', line)
dcst=re.split("\s+", m.group(0))
i=int(b[2])
j=int(b[4])
if(j==i):j=int(b[6]) #omega, phi
if(j==i):j=int(b[8]) #theta
sep=abs(j-i)
if(sep<sep1 or sep >=sep2): continue
#print(dcst[3])
if(flag==1):
if(float(dcst[3])>=pcut):
array.append(line)
#print line
else:
if(float(dcst[3])<pcut):
array.append(line)
return array
def fetch_cst_low(cst, sep1, sep2, lb, ub):
array=[]
for line in cst:
#print line
line=line.rstrip()
b=re.split("\s+", line)
#print b[1],b[2],b[3],b[4]
if(b[0]!="AtomPair"): continue
m=re.search('(?<=#).+', line)
dcst=re.split("\s+", m.group(0))
i=int(b[2])
j=int(b[4])
if(j==i):j=int(b[6]) #omega, phi
if(j==i):j=int(b[8]) #theta
sep=abs(j-i)
if(sep<sep1 or sep >=sep2): continue
#print(dcst[3])
if(float(dcst[3])>=lb and float(dcst[3])<ub):
array.append(line)
#print line
return array
def compute_dist(pose, i, atmi, j, atmj):
aai=pose.residue(i)
xyz_i=aai.xyz(atmi)
aaj=pose.residue(j)
xyz_j=aaj.xyz(atmj)
d=dist(xyz_i, xyz_j)
return d
def get_pairwise_dist(pose):
nres=pose.total_residue()
for i in range(1, nres-2):
aai=pose.residue(i)
resi=aai.name()
resi=resi[0:3]
atmi="CB"
if(resi == "GLY"):atmi="CA"
xyz_i=aai.xyz(atmi)
for j in range(i+3, nres+1):
aaj=pose.residue(j)
resj=pose.residue(j).name()
resj=resj[0:3]
atmj="CB"
if(resj == "GLY"):atmj="CA"
xyz_j=aaj.xyz(atmj)
d=dist(xyz_i, xyz_j)
if(d<20):
pass
#print i, j, d
#end j
#end i
def dist(x, y):
# Early exit: if any single coordinate difference exceeds the cutoff,
# return a large sentinel distance instead of computing the full norm.
cut=20
d=100
a=abs(x[0]-y[0])
b=abs(x[1]-y[1])
c=abs(x[2]-y[2])
if(a>cut or b>cut or c>cut):
return d
else:
d=math.sqrt(a**2+b**2+c**2)
return d
def apply_cst(cst_file, b_size, pose, mover1, mover2):
cst_all=read_cst(cst_file)
if(b_size==0):
b_size=len(cst_all)
else:
random.shuffle(cst_all)
for i in range(0, len(cst_all), b_size):
batch=cst_all[i:i+b_size]
add_cst(pose, batch)
#print(pose.constraint_set())
mover1.apply(pose)
mover2.apply(pose)
#print(mover.num_accepts(), mover.acceptance_rate())
return cst_all
def make_start(k, sequence, scorefxn, mover):
b=np.shape(phi)
pose=pose_from_sequence(sequence, 'centroid' )
if(k<=1 and b[0]>0):
set_predicted_dihedral(pose, k)
else:
set_random_dihedral(pose)
clash_score=float(scorefxn(pose))
if(clash_score>10):
for nm in range(0, 5):
mover.apply(pose)
clash_score=float(scorefxn(pose))
print("clash_score=", clash_score)
if(clash_score<10): break
return pose
def set_predicted_dihedral(pose, k):
b=np.shape(phi)
#print phi
#print b[0]
for i in range(0, b[0]):
prob=phi_prob[i]
dih=phi[i]
m=select_phipsi(prob, k)
#print m
v=float(dih[m])
pose.set_phi(i+1, v)
prob=psi_prob[i]
dih=psi[i]
m=select_phipsi(prob, k)
v=float(dih[m])
pose.set_psi(i+1, v)
#exit()
return(pose)
def select_phipsi_(prob, k): # unused variant of select_phipsi, apparently kept for reference
n=len(prob)
m=0
if(k==0):return 0
else:
r=random.random()
ub=0
lb=ub
for i in range(0, n):
ub += prob[i]
if(r>=lb and r<ub):
m=i
break
return m
def select_phipsi(prob, k):
n=len(prob)
m=0
if(k==0):return 0
else:
r=random.random()
if(r>0.8): m=1
return m
def convert_spx(filename):
out=open("tor_ss.dat", "w")
out.write("#ID\tSS\tPHI\tPSI\n")
with open(filename, "r") as f:
for line in f:
if(line[0] == "#"):
continue
else:
b=re.split("\s+", line)
out.write("%d\t%c\t%.1f\t%.1f\n" %(int(b[1]), b[3], float(b[4]), float(b[5])))
out.close()
def set_random_dihedral(pose):
nres = pose.total_residue()
for i in range(1, nres):
#pick phi/psi randomly from:
#-140 153 180 0.135 B
# -72 145 180 0.155 B
#-122 117 180 0.073 B
# -82 -14 180 0.122 A
# -61 -41 180 0.497 A
# 57 39 180 0.018 L
phi,psi=random_dihedral()
pose.set_phi(i,phi)
pose.set_psi(i,psi)
pose.set_omega(i,180)
return(pose)
def random_dihedral():
phi=0
psi=0
r=random.random()
if(r<=0.135):
phi=-140
psi=153
elif(r>0.135 and r<=0.29):
phi=-72
psi=145
elif(r>0.29 and r<=0.363):
phi=-122
psi=117
elif(r>0.363 and r<=0.485):
phi=-82
psi=-14
elif(r>0.485 and r<=0.982):
phi=-61
psi=-41
else:
phi=57
psi=39
return(phi, psi)
def read_cst(file):
array=[]
with open(file, "r") as f:
for line in f:
#print line
line=line.rstrip()
array.append(line)
return array
def add_cst(pose, array):
constraints = rosetta.protocols.constraint_movers.ConstraintSetMover()
tmpname=str(pcut)+"_"+str(k)+"_"+"tmp.sp"
F = open(tmpname, "w")
for a in array:
F.write(a)
F.write("\n")
F.close()
constraints.constraint_file(tmpname)
constraints.add_constraints(True)
constraints.apply(pose)
os.remove(tmpname)
def read_fasta(file):
fasta=""
with open(file, "r") as f:
for line in f:
if(line[0] == ">"):
continue
else:
line=line.rstrip()
fasta = fasta + line
return fasta
if __name__ == '__main__':
main()
|
{"hexsha": "e60f034cb2003ac578e6e662cd96eda478657657", "size": 21845, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/TrRosetta/TR/fold_from_tor_split.py", "max_stars_repo_name": "ruiyangsong/mCNN", "max_stars_repo_head_hexsha": "889f182245f919fb9c7a8d97965b11576b01a96c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TrRosetta/TR/fold_from_tor_split.py", "max_issues_repo_name": "ruiyangsong/mCNN", "max_issues_repo_head_hexsha": "889f182245f919fb9c7a8d97965b11576b01a96c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TrRosetta/TR/fold_from_tor_split.py", "max_forks_repo_name": "ruiyangsong/mCNN", "max_forks_repo_head_hexsha": "889f182245f919fb9c7a8d97965b11576b01a96c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1029776675, "max_line_length": 151, "alphanum_fraction": 0.5718471046, "include": true, "reason": "import numpy", "num_tokens": 6540}
|
from sklearn.model_selection import StratifiedKFold, KFold
from reval.relative_validation import RelativeValidation
from collections import namedtuple
from scipy import stats
import numpy as np
import math
class FindBestClustCV(RelativeValidation):
"""Child class of :class:`reval.relative_validation.RelativeValidation`.
It performs cross validation on the training set to
select the best number of clusters, i.e., the number that minimizes the
misclassification error.
:param nfold: number of CV folds
:type nfold: int
:param nclust_range: list with minimum (inclusive) and maximum (exclusive) number of clusters to look for
:type nclust_range: list ([int, int])
:param s: classification object inherited from :class:`reval.relative_validation.RelativeValidation`
:type s: class
:param c: clustering object inherited from :class:`reval.relative_validation.RelativeValidation`
:type c: class
:param nrand: number of iterations for normalized misclassification error, inherited from
:class:`reval.relative_validation.RelativeValidation` class
"""
def __init__(self, nfold, nclust_range, s, c, nrand):
"""Construct method
"""
super().__init__(s, c, nrand)
self.nfold = nfold
self.nclust_range = nclust_range
def best_nclust(self, data, strat_vect=None):
"""This method takes as input the training dataset and the
stratification vector (if available) and performs a
CV to select the best number of clusters that minimizes
normalized misclassification error.
:param data: training dataset
:type data: ndarray, (n_samples, n_features)
:param strat_vect: vector for stratification, defaults to None
:type strat_vect: ndarray, (n_samples,)
:return: CV metrics for training and validation sets, best number of clusters
:rtype: dictionary, int
"""
data_array = np.array(data)
reval = RelativeValidation(self.class_method, self.clust_method, self.nrand)
metrics = {'train': {}, 'val': {}}
check_dist = {'train': {}, 'val': {}}
for ncl in range(self.nclust_range[0], self.nclust_range[1]):
if strat_vect is not None:
kfold = StratifiedKFold(n_splits=self.nfold)
fold_gen = kfold.split(data_array, strat_vect)
else:
kfold = KFold(n_splits=self.nfold)
fold_gen = kfold.split(data_array)
norm_stab_tr, norm_stab_val = [], []
for tr_idx, val_idx in fold_gen:
tr_set, val_set = data_array[tr_idx], data_array[val_idx]
reval.clust_method.n_clusters = ncl
miscl_tr, modelfit, tr_labels = reval.train(tr_set)
miscl_val, val_labels = reval.test(val_set, modelfit)
rndmisc_mean_val = reval.rndlabels_traineval(tr_set, val_set,
tr_labels,
val_labels)
ms_val = miscl_val / rndmisc_mean_val
norm_stab_tr.append(miscl_tr)
norm_stab_val.append(ms_val)
check_dist['train'].setdefault(ncl, list()).append(miscl_tr)
check_dist['val'].setdefault(ncl, list()).append(ms_val)
metrics['train'][ncl] = (np.mean(norm_stab_tr), _confint(norm_stab_tr))
metrics['val'][ncl] = (np.mean(norm_stab_val), _confint(norm_stab_val))
val_score = np.array([val[0] for val in metrics['val'].values()])
bestscore = min(val_score)
# select the cluster with the minimum misclassification error
# and the maximum number of clusters
bestncl = np.flatnonzero(val_score == bestscore)[-1] + self.nclust_range[0]
return metrics, bestncl, check_dist
def evaluate(self, data_tr, data_ts, nclust):
"""Method that applies clustering algorithm with the best number of clusters
to the test set. It returns the clustering labels.
:param data_tr: training dataset
:type data_tr: ndarray, (n_samples, n_features)
:param data_ts: test dataset
:type data_ts: ndarray, (n_samples, n_features)
:param nclust: best number of clusters
:type nclust: int
:return: labels and accuracy for both training and test sets
:rtype: namedtuple, (train_cllab, train_acc, test_cllab, test_acc)
"""
self.clust_method.n_clusters = nclust
tr_misc, modelfit, labels_tr = super().train(data_tr)
ts_misc, labels_ts = super().test(data_ts, modelfit)
Eval = namedtuple('Eval',
['train_cllab', 'train_acc', 'test_cllab', 'test_acc'])
out = Eval(labels_tr, 1 - tr_misc, labels_ts, 1 - ts_misc)
return out
def _confint(vect):
"""
Private function to compute confidence interval.
:param vect: performance scores
:type vect: list
:return: mean and error
:rtype: tuple
"""
error = stats.t.ppf(1 - (0.05 / 2), len(vect) - 1) * (np.std(vect) / math.sqrt(len(vect)))
mean = np.mean(vect)
return mean, error
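# Minimal usage sketch (illustrative; assumes scikit-learn-style estimators and
# toy arrays X_tr, X_ts, which are not part of this module):
#   from sklearn.neighbors import KNeighborsClassifier
#   from sklearn.cluster import KMeans
#   findbest = FindBestClustCV(nfold=5, nclust_range=[2, 7],
#                              s=KNeighborsClassifier(), c=KMeans(), nrand=10)
#   metrics, best_ncl, _ = findbest.best_nclust(X_tr)
#   out = findbest.evaluate(X_tr, X_ts, best_ncl)
#   print(out.test_acc)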
|
{"hexsha": "785ad29203b2cc3fe2438a1d361280ac03e24fed", "size": 5206, "ext": "py", "lang": "Python", "max_stars_repo_path": "reval/best_nclust_cv.py", "max_stars_repo_name": "landiisotta/relative_validation_clustering", "max_stars_repo_head_hexsha": "8842abd1674d899eee9997ea4f0cbe2429df0732", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reval/best_nclust_cv.py", "max_issues_repo_name": "landiisotta/relative_validation_clustering", "max_issues_repo_head_hexsha": "8842abd1674d899eee9997ea4f0cbe2429df0732", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reval/best_nclust_cv.py", "max_forks_repo_name": "landiisotta/relative_validation_clustering", "max_forks_repo_head_hexsha": "8842abd1674d899eee9997ea4f0cbe2429df0732", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2695652174, "max_line_length": 104, "alphanum_fraction": 0.6390703035, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1235}
|
from .context import skip_if_no_cuda_device
import numpy as np
import os
from scipy.sparse import csr_matrix
from km3net.util import *
#this test verifies that we are testing
#the current repository package rather than the installed package
def test_get_kernel_path():
path = "/".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])
reference = path+'/km3net/kernels/'
print(reference)
answer = get_kernel_path()
print(answer)
assert reference == answer
#reading or generating input
def test_get_real_input_data():
pass
def test_generate_input_data():
pass
def test_generate_correlations_table():
pass
def test_generate_large_correlations_table():
pass
def test_get_slice():
pass
def test_insert_clique():
pass
#handling matrices
def test_create_sparse_matrix():
skip_if_no_cuda_device()
correlations = np.zeros((4,6), dtype=np.uint8)
correlations[0,1] = 1
correlations[1,2] = 1
correlations[2,1] = 1
dense_matrix = get_full_matrix(correlations)
sums = np.sum(dense_matrix, axis=1)
print("correlations table:")
print(correlations)
print("matrix:")
print(dense_matrix)
print("sums:")
print(sums)
print(np.sum(sums.sum()))
row_idx, col_idx, prefix_sums = create_sparse_matrix(correlations.T, sums)
reference = csr_matrix(dense_matrix)
ref_row_idx = reference.nonzero()[0]
ref_col_idx = reference.nonzero()[1]
ref_prefix_sums = np.cumsum(sums)
print(row_idx)
print(ref_row_idx)
print(col_idx)
print(ref_col_idx)
print(prefix_sums)
print(ref_prefix_sums)
assert all([a==b for a,b in zip(row_idx, ref_row_idx)])
assert all([a==b for a,b in zip(col_idx, ref_col_idx)])
assert all([a==b for a,b in zip(prefix_sums, ref_prefix_sums)])
def test_get_full_matrix():
correlations = np.zeros((3,5), dtype=np.uint8)
correlations[0,1] = 1
correlations[1,2] = 1
correlations[2,1] = 1
answer = get_full_matrix(correlations)
reference = np.zeros((5,5), dtype=np.uint8)
row_idx = [1, 1, 2, 2, 4, 4]
col_idx = [2, 4, 1, 4, 1, 2]
reference[row_idx,col_idx] = 1
print(answer)
print(reference)
assert all([a==b for a,b in zip(answer.flatten(),reference.flatten())])
def test_sparse_to_dense():
pass
def test_dense_to_sparse():
dense_matrix = np.zeros((5,5), dtype=np.uint8)
dense_matrix[1,1] = 1
dense_matrix[1,2] = 1
dense_matrix[2,3] = 1
dense_matrix[3,4] = 1
dense_matrix[2,1] = 1
col_idx, prefix_sum, degrees = dense_to_sparse(dense_matrix)
print(dense_matrix)
print(col_idx)
print(prefix_sum)
print(degrees)
ref_col_idx = [1, 2, 1, 3, 4]
ref_prefix_sum = [0, 2, 4, 5, 5]
ref_degrees = [0, 2, 2, 1, 0]
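# Expected sparse layout: degrees[i] counts the nonzeros in row i of the dense
# matrix, prefix_sum is the running total of those counts, and col_idx lists
# the column indices of the nonzeros row by row.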
assert all([a==b for a,b in zip(col_idx, ref_col_idx)])
assert all([a==b for a,b in zip(prefix_sum, ref_prefix_sum)])
assert all([a==b for a,b in zip(degrees, ref_degrees)])
#cpu versions of algorithms
def test_correlations_cpu_3B():
pass
def test_correlations_cpu():
pass
#cuda helper functions
def test_init_pycuda():
pass
def test_allocate_and_copy():
pass
def test_ready_input():
pass
def test_memcpy_dtoh():
pass
|
{"hexsha": "4d28f2b23bdfe5fee000a3943c69f16a434459f0", "size": 3218, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_util.py", "max_stars_repo_name": "remenska/KM3Net", "max_stars_repo_head_hexsha": "4c175662465b9a880fc1864f62219ce9702311f1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-23T23:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T23:54:08.000Z", "max_issues_repo_path": "test/test_util.py", "max_issues_repo_name": "nlesc-km3net/KM3NeT", "max_issues_repo_head_hexsha": "4c175662465b9a880fc1864f62219ce9702311f1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-07-01T12:19:15.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-04T09:01:51.000Z", "max_forks_repo_path": "test/test_util.py", "max_forks_repo_name": "nlesc-km3net/KM3NeT", "max_forks_repo_head_hexsha": "4c175662465b9a880fc1864f62219ce9702311f1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9857142857, "max_line_length": 80, "alphanum_fraction": 0.6780609074, "include": true, "reason": "import numpy", "num_tokens": 929}
|
// Copyright (C) 2021 Christian Brommer, Control of Networked Systems, University of Klagenfurt, Austria.
//
// All rights reserved.
//
// This software is licensed under the terms of the BSD-2-Clause-License with
// no commercial use allowed, the full terms of which are made available
// in the LICENSE file. No license in patents is granted.
//
// You can contact the author at <christian.brommer@ieee.org>
#ifndef MARS_TYPE_ERASURE_CPP
#define MARS_TYPE_ERASURE_CPP
#include <gmock/gmock.h>
#include <mars/sensors/imu/imu_measurement_type.h>
#include <mars/sensors/imu/imu_sensor_class.h>
#include <Eigen/Dense>
#include <memory>
class mars_type_erasure_test : public testing::Test
{
public:
};
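// This test demonstrates the type-erasure pattern: a concrete measurement is
// stored behind a std::shared_ptr<void> and later recovered by a static_cast
// back to its concrete type, which must match the stored one exactly.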
TEST_F(mars_type_erasure_test, IMU_MEASUREMENT)
{
Eigen::Vector3d linear_acceleration = { 1, 2, 3 };
Eigen::Vector3d angular_velocity = { 4, 5, 6 };
mars::IMUMeasurementType imu_measurement(linear_acceleration, angular_velocity);
std::shared_ptr<void> test = std::make_shared<mars::IMUMeasurementType>(imu_measurement);
mars::IMUMeasurementType resolved_void = *(static_cast<mars::IMUMeasurementType*>(test.get()));
ASSERT_EQ(imu_measurement, resolved_void);
}
#endif // MARS_TYPE_ERASURE_CPP
|
{"hexsha": "2ebe568f7ebaff89bba3c6c338c4f66ce7b31546", "size": 1212, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/tests/mars-test/mars_type_erasure.cpp", "max_stars_repo_name": "eallak/mars_lib", "max_stars_repo_head_hexsha": "9657fb669c48be39471e7504c3648319126c020b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/tests/mars-test/mars_type_erasure.cpp", "max_issues_repo_name": "eallak/mars_lib", "max_issues_repo_head_hexsha": "9657fb669c48be39471e7504c3648319126c020b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/tests/mars-test/mars_type_erasure.cpp", "max_forks_repo_name": "eallak/mars_lib", "max_forks_repo_head_hexsha": "9657fb669c48be39471e7504c3648319126c020b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8947368421, "max_line_length": 105, "alphanum_fraction": 0.7698019802, "num_tokens": 313}
|
import os
import sys
from glob import glob
import numpy as np
import h5py
import freqent.freqentn as fen
import multiprocessing
import argparse
def calc_epr_spectral(file):
    '''
    Function passed to the multiprocessing pool to compute the EPR of a
    single file in parallel.
    '''
    print('Reading {f}'.format(f=file.split(os.path.sep)[-2]))
    with h5py.File(file, 'r+') as d:  # read/write: datasets below are replaced
t_points = d['data']['t_points'][:]
t_epr = np.where(t_points > 10)[0]
dt = np.diff(t_points)[0]
dx = d['params']['lCompartment'][()]
# nCompartments = d['params']['nCompartments'][()]
nSim = d['params']['nSim'][()]
s = np.zeros(nSim)
nt, nx = d['data']['trajs'][0, 0, t_epr, :].shape
rhos = np.zeros((nSim, nt - (nt + 1) % 2, nx - (nx + 1) % 2))
for ind, traj in enumerate(d['data']['trajs'][..., t_epr, :]):
s[ind], rhos[ind], w = fen.entropy(traj, sample_spacing=[dt, dx],
window='boxcar', detrend='constant',
smooth_corr=True, nfft=None,
sigma=sigma,
subtract_bias=True,
many_traj=False,
return_epf=True)
if '/data/s' in d:
del d['data']['s']
d['data'].create_dataset('s', data=s)
if '/data/rhos' in d:
del d['data']['rhos']
d['data'].create_dataset('rhos', data=rhos)
if '/data/omega' in d:
del d['data']['omega']
d['data'].create_dataset('omega', data=w[0])
if '/data/k' in d:
del d['data']['k']
d['data'].create_dataset('k', data=w[1])
        if '/params/sigma' in d:
del d['params']['sigma']
d['params'].create_dataset('sigma', data=sigma)
return s, rhos, w
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f', type=str, nargs='+',
help='files to calculate entropy for')
parser.add_argument('--sigma', '-sig', type=float, nargs=2, default=[1, 1],
help='size of Gaussian to smooth correlation functions with')
args = parser.parse_args()
files = args.files
sigma = args.sigma
print('Calculating eprs...')
with multiprocessing.Pool(processes=4) as pool:
result = pool.map(calc_epr_spectral, files)
print('Done.')
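# Usage sketch (file names and sigma values below are illustrative, not from
# the repo):
#   python calculate_epr.py --files data/sim_*.hdf5 --sigma 2 2
# Each worker processes one HDF5 file and writes its results back into that
# file under /data/{s,rhos,omega,k} and /params/sigma.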
|
{"hexsha": "92f11bd80af8b148edd61a56297d37faef18124c", "size": 2463, "ext": "py", "lang": "Python", "max_stars_repo_path": "freqent/tests/brussfield/calculations/calculate_epr.py", "max_stars_repo_name": "lab-of-living-matter/freqent", "max_stars_repo_head_hexsha": "210d8f25a59894d903c42d52e5475900303f9631", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-01-16T01:39:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T00:23:38.000Z", "max_issues_repo_path": "freqent/tests/brussfield/calculations/calculate_epr.py", "max_issues_repo_name": "lab-of-living-matter/freqent", "max_issues_repo_head_hexsha": "210d8f25a59894d903c42d52e5475900303f9631", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "freqent/tests/brussfield/calculations/calculate_epr.py", "max_forks_repo_name": "lab-of-living-matter/freqent", "max_forks_repo_head_hexsha": "210d8f25a59894d903c42d52e5475900303f9631", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-20T15:03:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-20T15:03:30.000Z", "avg_line_length": 33.7397260274, "max_line_length": 83, "alphanum_fraction": 0.5144133171, "include": true, "reason": "import numpy", "num_tokens": 605}
|
import face_recognition
from scipy import misc
import numpy as np
from skimage import transform
import os.path
for i in range(1200):
image_numpy = misc.imread('/media/rob/Ma Book1/mugshots/aligned/alignedFace'+str(i)+'.jpg')
image_numpy = np.flip(image_numpy, axis=1)
    misc.imsave('/media/rob/Ma Book1/mugshots/aligned/alignedFace'+str(1200+i)+'.jpg', image_numpy)
if i%100 == 0:
print("done with "+str(i))
print("all done")
|
{"hexsha": "01d6b3ac13622fe1f8a78bc4a04ae265422f6916", "size": 462, "ext": "py", "lang": "Python", "max_stars_repo_path": "datavis/faceFlipper.py", "max_stars_repo_name": "carykh/celebrityFaces", "max_stars_repo_head_hexsha": "7513ae9562a51e89e0ae5ec33db309e2cb16192b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-12-14T20:06:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-26T23:50:17.000Z", "max_issues_repo_path": "datavis/faceFlipper.py", "max_issues_repo_name": "carykh/celebrityFaces", "max_issues_repo_head_hexsha": "7513ae9562a51e89e0ae5ec33db309e2cb16192b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datavis/faceFlipper.py", "max_forks_repo_name": "carykh/celebrityFaces", "max_forks_repo_head_hexsha": "7513ae9562a51e89e0ae5ec33db309e2cb16192b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-02-07T03:08:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-03T21:25:26.000Z", "avg_line_length": 33.0, "max_line_length": 112, "alphanum_fraction": 0.7164502165, "include": true, "reason": "import numpy,from scipy", "num_tokens": 134}
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Unrestricted Dirac Hartree-Fock g-tensor
(In testing)
Refs: TCA, 129, 715
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.prop.nmr import dhf as dhf_nmr
from pyscf.data import nist
# TODO: 3 SCF for sx, sy, sz
def kernel(gobj, gauge_orig=None, mb='RKB', with_gaunt=False, verbose=None):
log = lib.logger.new_logger(gobj, verbose)
mf = gobj._scf
mol = mf.mol
# Add finite field to remove degeneracy
mag_field = numpy.ones(3) * 1e-6
h10 = dhf_nmr.make_h10rkb(mol, None, None, False, log)
sc = numpy.dot(mf.get_ovlp(), mf.mo_coeff)
h0 = reduce(numpy.dot, (sc*mf.mo_energy, sc.conj().T))
h10b = h0 + numpy.einsum('xij,x->ij', h10, mag_field)
h10b = reduce(numpy.dot, (mf.mo_coeff.conj().T, h10b, mf.mo_coeff))
mo_energy, v = numpy.linalg.eigh(h10b)
mo_coeff = numpy.dot(mf.mo_coeff, v)
mo_occ = mf.get_occ(mo_energy, mo_coeff)
occidx = mo_occ > 0
orbo = mo_coeff[:,occidx]
dm0 = numpy.dot(orbo, orbo.T.conj())
dme = numpy.dot(orbo * mo_energy[occidx], orbo.conj().T)
h10 = dhf_nmr.make_h10(mol, dm0, gauge_orig, mb, with_gaunt, log)
s10 = dhf_nmr.make_s10(mol, gauge_orig, mb)
# Intrinsic muB = eh/2mc
# First order Dirac operator is 1/c * h10 => g ~ Tr(h10,DM)/c / mu_B = 2 Tr(h10,DM)
muB = .5 # Bohr magneton
g = (numpy.einsum('xij,ji->x', h10, dm0) -
numpy.einsum('xij,ji->x', s10, dme)) / muB
c = lib.param.LIGHT_SPEED
n4c = dm0.shape[0]
n2c = n4c // 2
Sigma = numpy.zeros_like(s10)
Sigma[:,:n2c,:n2c] = mol.intor('int1e_sigma_spinor', comp=3)
Sigma[:,n2c:,n2c:] = .25/c**2 * mol.intor('int1e_spsigmasp_spinor', comp=3)
effspin = numpy.einsum('xij,ji->x', Sigma, dm0) * .5
log.debug('Eff-spin %s', effspin.real)
g = (g / effspin).real
facppt = 1e3
gshift = (g - nist.G_ELECTRON) * facppt
log.note('G shift (ppt) %s', gshift)
return g
class GTensor(dhf_nmr.NMR):
kernel = kernel
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.M(
atom = [['Ne', (0.,0.,0.)],
#['He', (.4,.7,0.)],
],
basis = 'ccpvdz', spin=2, charge=2)
mf = scf.DHF(mol).run()
print(GTensor(mf).kernel((0,0,0)))
print(GTensor(mf).kernel(mb='RMB'))
|
{"hexsha": "a7d3910d21a71c536578588f608ef48af95ac53c", "size": 2989, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyscf/prop/gtensor/dhf.py", "max_stars_repo_name": "y-yao/pyscf_arrow", "max_stars_repo_head_hexsha": "079088a5d92af1570167004f411207deb104a1bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-01T12:39:45.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-01T12:39:45.000Z", "max_issues_repo_path": "pyscf/prop/gtensor/dhf.py", "max_issues_repo_name": "y-yao/pyscf_arrow", "max_issues_repo_head_hexsha": "079088a5d92af1570167004f411207deb104a1bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyscf/prop/gtensor/dhf.py", "max_forks_repo_name": "y-yao/pyscf_arrow", "max_forks_repo_head_hexsha": "079088a5d92af1570167004f411207deb104a1bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8461538462, "max_line_length": 83, "alphanum_fraction": 0.6477082636, "include": true, "reason": "import numpy", "num_tokens": 1010}
|
#!/usr/bin/env python
import numpy as np
import os
import torch
from torch import nn
import warnings
import models
from scipy.signal import resample
import math
import pandas as pd
import shutil
from get_12ECG_features import get_12ECG_features
#os.environ['CUDA_VISIBLE_DEVICES'] = '0'
if torch.cuda.is_available():
device = torch.device("cuda")
device_count = torch.cuda.device_count()
else:
warnings.warn("gpu is not available")
device = torch.device("cpu")
def Resample(input_signal, src_fs, tar_fs):
    '''
    :param input_signal: input signal
    :param src_fs: sampling rate of the input signal
    :param tar_fs: sampling rate of the output signal
    :return: resampled output signal
    '''
dtype = input_signal.dtype
audio_len = input_signal.shape[1]
audio_time_max = 1.0 * (audio_len) / src_fs
src_time = 1.0 * np.linspace(0, audio_len, audio_len) / src_fs
    tar_time = 1.0 * np.linspace(0, int(audio_time_max * tar_fs), int(audio_time_max * tar_fs)) / tar_fs
for i in range(input_signal.shape[0]):
if i == 0:
output_signal = np.interp(tar_time, src_time, input_signal[i, :]).astype(dtype)
output_signal = output_signal.reshape(1, len(output_signal))
else:
tmp = np.interp(tar_time, src_time, input_signal[i, :]).astype(dtype)
tmp = tmp.reshape(1, len(tmp))
output_signal = np.vstack((output_signal, tmp))
return output_signal
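# Example (sketch): resample a 2-lead, 500 Hz signal to the 257 Hz used below.
#   sig = np.random.randn(2, 5000).astype(np.float32)
#   sig_257 = Resample(sig, src_fs=500, tar_fs=257)   # shape -> (2, 2570)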
def processing_data(data, win_length, src_fs, tar_fs):
"""
Add any preprocessing at here
"""
data = Resample(data, src_fs, tar_fs)
num = data.shape[1]
if num < win_length:
zeros_padding = np.zeros(shape=(data.shape[0], win_length - num), dtype=np.float32)
data = np.hstack((data, zeros_padding))
data = data.astype(np.float32)
data = torch.from_numpy(data)
data = torch.unsqueeze(data, 0)
return data
def prepare_data(age, gender):
data = np.zeros(5,)
if age >= 0:
data[0] = age / 100
if 'F' in gender:
data[2] = 1
data[4] = 1
elif gender == 'Unknown':
data[4] = 0
elif 'f' in gender:
data[2] = 1
data[4] = 1
else:
data[3] = 1
data[4] = 1
return data
def read_ag(header_data):
for lines in header_data:
if lines.startswith('#Age'):
tmp = lines.split(': ')[1].strip()
if tmp == 'NaN':
age = -1
else:
age = int(tmp)
if lines.startswith('#Sex'):
tmp = lines.split(': ')[1].strip()
if tmp == 'NaN':
gender = 'Unknown'
else:
gender = tmp
data = prepare_data(age, gender)
data = torch.from_numpy(data).float()
data = torch.unsqueeze(data, 0)
return data
def output_label(logits_prob, threshold, num_classes):
pred_label = np.zeros(num_classes, dtype=int)
_, y_pre_label = torch.max(logits_prob, 1)
y_pre_label = y_pre_label.cpu().detach().numpy()
pred_label[y_pre_label] = 1
score_tmp = logits_prob.cpu().detach().numpy()
y_pre = (score_tmp - threshold) >= 0
pred_label = pred_label + y_pre
pred_label[pred_label > 1.1] = 1
return score_tmp, pred_label
def run_12ECG_classifier(data, header_data, model):
weight_list = ['./magic_weight0.npz', './magic_weight1.npz', './magic_weight2.npz',
'./magic_weight3.npz', './magic_weight4.npz']
num_classes = 24
tar_fs = 257
src_fs = int(header_data[0].split(' ')[2].strip())
ag = read_ag(header_data)
ag = ag.to(device)
win_length = 4096
m = nn.Sigmoid()
data = processing_data(data, win_length, src_fs, tar_fs)
inputs = data.to(device)
# Use your classifier here to obtain a label and score for each class.
val_length = inputs.shape[2]
overlap = 256
patch_number = math.ceil(abs(val_length - win_length) / (win_length - overlap)) + 1
if patch_number > 1:
start = int((val_length - win_length) / (patch_number - 1))
score = 0
combined_label = 0
for j in range(len(model)):
model_one = model[j]
for i in range(patch_number):
if i == 0:
logit = model_one(inputs[:, :, 0: val_length], ag)
logits_prob = m(logit)
elif i == patch_number - 1:
logit = model_one(inputs[:, :, val_length - win_length: val_length], ag)
logits_prob_tmp = m(logit)
logits_prob = (logits_prob + logits_prob_tmp) / patch_number
else:
logit = model_one(inputs[:, :, i * start:i * start + win_length], ag)
logits_prob_tmp = m(logit)
logits_prob = logits_prob + logits_prob_tmp
# using the threshold to check each model
A = np.load(weight_list[j])
threshold = A['arr_0']
score_tmp, pred_label = output_label(logits_prob, threshold, num_classes)
# the label
combined_label = combined_label + pred_label
# The probability
score = score + score_tmp
score = score / len(model)
combined_label = combined_label / len(model)
max_index = np.argmax(combined_label, 1)
combined_label[0, max_index] = 1
threshold_tmp = 0.5
combined_label[combined_label >= threshold_tmp] = 1
combined_label[combined_label < threshold_tmp] = 0
    current_label = np.squeeze(combined_label.astype(int))
current_score = np.squeeze(score)
# Get the label
label_file_dir = './utils/dx_mapping_scored.csv'
label_file = pd.read_csv(label_file_dir)
equivalent_classes = ['59118001', '63593006', '17338001']
classes = sorted(list(set([str(name) for name in label_file['SNOMED CT Code']]) - set(equivalent_classes)))
return current_label, current_score, classes
def load_12ECG_model(model_input):
    # load the model from disk
model_list = ['./load_model/48-0.6740-split0.pth',
'./load_model/42-0.6701-split1.pth',
'./load_model/40-0.6777-split2.pth',
'./load_model/42-0.6749-split3.pth',
'./load_model/47-0.6791-split4.pth']
for i in range(5):
shutil.copy(model_list[i], model_input)
model_list = lsdir(rootdir=model_input, suffix=".pth")
split_list = ['split0', 'split1', 'split2', 'split3', 'split4']
resumes = []
for split in split_list:
sub_list = [i for i in model_list if split in i]
accuracy = np.array([float(i.split('-')[-2]) for i in sub_list])
resumes.append(sub_list[int(np.argmax(accuracy))])
model_all = []
for resume in resumes:
model = getattr(models, 'seresnet18_1d_ag')(in_channel=12, out_channel=24)
# Consider the gpu or cpu condition
if torch.cuda.is_available():
if device_count > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(resume))
else:
model.load_state_dict(torch.load(resume, map_location=device))
model.to(device)
model.eval()
model_all.append(model)
return model_all
def lsdir(rootdir="", suffix=".png"):
file_list = []
assert os.path.exists(rootdir)
for r, y, names in os.walk(rootdir):
for name in names:
if str(name).endswith(suffix):
file_list.append(os.path.join(r, name))
return file_list
|
{"hexsha": "c13ebc85125eb130063052fdaa1265dc4a5647ab", "size": 7427, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_12ECG_classifier.py", "max_stars_repo_name": "ZhaoZhibin/Physionet2020model", "max_stars_repo_head_hexsha": "ea7379bd1e4c145c84fd254faa0d5d1330cd2f6e", "max_stars_repo_licenses": ["BSD-2-Clause", "MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-12T19:01:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T07:40:34.000Z", "max_issues_repo_path": "run_12ECG_classifier.py", "max_issues_repo_name": "ZhaoZhibin/Physionet2020model", "max_issues_repo_head_hexsha": "ea7379bd1e4c145c84fd254faa0d5d1330cd2f6e", "max_issues_repo_licenses": ["BSD-2-Clause", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_12ECG_classifier.py", "max_forks_repo_name": "ZhaoZhibin/Physionet2020model", "max_forks_repo_head_hexsha": "ea7379bd1e4c145c84fd254faa0d5d1330cd2f6e", "max_forks_repo_licenses": ["BSD-2-Clause", "MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-10T11:24:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T11:24:01.000Z", "avg_line_length": 32.718061674, "max_line_length": 111, "alphanum_fraction": 0.6134374579, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1928}
|
//
// The MIT License(MIT)
//
// Copyright(c) 2014 Demonsaw LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <boost/lexical_cast.hpp>
#include <QCheckBox>
#include <QComboBox>
#include <QFileDialog>
#include <QGroupBox>
#include <QLineEdit>
#include <QListView>
#include <QMediaPlayer>
#include <QSpinBox>
#include <QToolButton>
#include <QVBoxLayout>
#include "client_communication_pane.h"
#include "component/chat_component.h"
#include "component/client/client_option_component.h"
#include "pane/pane.h"
#include "resource/resource.h"
#include "utility/std.h"
namespace eja
{
// Constructor
client_communication_pane::client_communication_pane(entity::ptr entity, QWidget* parent /*= 0*/) : entity_pane(entity, parent)
{
create();
layout();
signal();
}
// Interface
void client_communication_pane::create()
{
// Player
m_player = new QMediaPlayer(this);
// Chat
m_chat_audio = new QCheckBox("Sound", this);
m_chat_audio->setFixedWidth(73);
m_chat_visual = new QCheckBox("Taskbar", this);
m_chat_visual->setFixedWidth(72);
m_chat_sound = new QLineEdit(this);
m_chat_sound->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_chat_sound->setFixedWidth(230);
m_chat_sound->setMaxLength(128);
m_chat_sound_play = new QToolButton(this);
m_chat_sound_play->setObjectName("pane");
m_chat_sound_play->setIcon(QIcon(resource::menu::sound));
m_chat_sound_play->setToolTip("Play");
m_chat_sound_edit = new QToolButton(this);
m_chat_sound_edit->setObjectName("pane");
m_chat_sound_edit->setIcon(QIcon(resource::menu::edit));
m_chat_sound_edit->setToolTip("Edit");
m_chat_sound_clear = new QToolButton(this);
m_chat_sound_clear->setObjectName("pane");
m_chat_sound_clear->setIcon(QIcon(resource::menu::clear));
m_chat_sound_clear->setToolTip("Clear");
m_chat_timestamp = new QComboBox(this);
m_chat_timestamp->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_chat_timestamp->setFixedWidth(230);
m_chat_timestamp->setEditable(true);
QListView* chat_timestamp_view = new QListView(this);
chat_timestamp_view->setObjectName("pane");
m_chat_timestamp->setView(chat_timestamp_view);
m_chat_timestamp->addItem(default_chat::timestamp);
m_chat_timestamp->addItem("%m-%d-%Y %I:%M:%S");
m_chat_timestamp->addItem("%Y-%m-%d %H:%M:%S");
m_chat_timestamp->addItem("%H:%M:%S");
m_chat_timestamp_refresh = new QToolButton(this);
m_chat_timestamp_refresh->setObjectName("pane");
m_chat_timestamp_refresh->setIcon(QIcon(resource::menu::reset));
m_chat_timestamp_refresh->setToolTip("Reset");
m_chat_volume = new QSpinBox(this);
m_chat_volume->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_chat_volume->setFixedWidth(112);
m_chat_volume->setRange(default_chat::min_volume, default_chat::max_volume);
m_chat_volume->setSuffix("%");
m_chat_volume_refresh = new QToolButton(this);
m_chat_volume_refresh->setObjectName("pane");
m_chat_volume_refresh->setIcon(QIcon(resource::menu::reset));
m_chat_volume_refresh->setToolTip("Reset");
m_chat_history = new QSpinBox(this);
m_chat_history->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_chat_history->setFixedWidth(112);
m_chat_history->setRange(default_chat::min_history, default_chat::max_history);
m_chat_history->setSuffix(" entries");
m_chat_history_refresh = new QToolButton(this);
m_chat_history_refresh->setObjectName("pane");
m_chat_history_refresh->setIcon(QIcon(resource::menu::reset));
m_chat_history_refresh->setToolTip("Reset");
// Message
m_message_audio = new QCheckBox("Sound", this);
m_message_audio->setFixedWidth(73);
m_message_visual = new QCheckBox("Taskbar", this);
m_message_visual->setFixedWidth(72);
m_message_sound = new QLineEdit(this);
m_message_sound->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_message_sound->setFixedWidth(230);
m_message_sound->setMaxLength(128);
m_message_sound_play = new QToolButton(this);
m_message_sound_play->setObjectName("pane");
m_message_sound_play->setIcon(QIcon(resource::menu::sound));
m_message_sound_play->setToolTip("Play");
m_message_sound_edit = new QToolButton(this);
m_message_sound_edit->setObjectName("pane");
m_message_sound_edit->setIcon(QIcon(resource::menu::edit));
m_message_sound_edit->setToolTip("Edit");
m_message_sound_clear = new QToolButton(this);
m_message_sound_clear->setObjectName("pane");
m_message_sound_clear->setIcon(QIcon(resource::menu::clear));
m_message_sound_clear->setToolTip("Clear");
m_message_timestamp = new QComboBox(this);
m_message_timestamp->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_message_timestamp->setFixedWidth(230);
m_message_timestamp->setEditable(true);
		QListView* message_timestamp_view = new QListView(this);
		message_timestamp_view->setObjectName("pane");
		m_message_timestamp->setView(message_timestamp_view);
m_message_timestamp->addItem(default_message::timestamp);
m_message_timestamp->addItem("%m-%d-%Y %I:%M:%S");
m_message_timestamp->addItem("%Y-%m-%d %H:%M:%S");
m_message_timestamp->addItem("%H:%M:%S");
m_message_timestamp_refresh = new QToolButton(this);
m_message_timestamp_refresh->setObjectName("pane");
m_message_timestamp_refresh->setIcon(QIcon(resource::menu::reset));
m_message_timestamp_refresh->setToolTip("Reset");
m_message_volume = new QSpinBox(this);
m_message_volume->setAttribute(Qt::WA_MacShowFocusRect, 0);
m_message_volume->setFixedWidth(112);
m_message_volume->setRange(default_message::min_volume, default_message::max_volume);
m_message_volume->setSuffix("%");
m_message_volume_refresh = new QToolButton(this);
m_message_volume_refresh->setObjectName("pane");
m_message_volume_refresh->setIcon(QIcon(resource::menu::reset));
m_message_volume_refresh->setToolTip("Reset");
// Default
const auto option = m_entity->get<client_option_component>();
if (option)
{
// Chat
m_chat_visual->setChecked(option->has_chat_visual());
m_chat_sound->setText(QString::fromStdString(option->get_chat_sound()));
m_chat_volume->setValue(option->get_chat_volume());
m_chat_history->setValue(option->get_chat_history());
const auto chat_timestamp = option->get_chat_timestamp();
const auto chat_qtimestamp = QString::fromStdString(chat_timestamp);
const auto chat_timestamp_index = m_chat_timestamp->findText(chat_qtimestamp);
if (chat_timestamp_index == -1)
m_chat_timestamp->addItem(chat_qtimestamp);
m_chat_timestamp->setCurrentText(chat_qtimestamp);
const auto chat_audio = option->has_chat_audio();
m_chat_audio->setChecked(chat_audio);
m_chat_sound->setEnabled(chat_audio);
m_chat_volume->setEnabled(chat_audio);
m_chat_sound_play->setEnabled(chat_audio);
m_chat_sound_edit->setEnabled(chat_audio);
m_chat_sound_clear->setEnabled(chat_audio);
m_chat_volume_refresh->setEnabled(chat_audio);
// Message
m_message_visual->setChecked(option->has_message_visual());
m_message_sound->setText(QString::fromStdString(option->get_message_sound()));
m_message_volume->setValue(option->get_message_volume());
const auto message_timestamp = option->get_message_timestamp();
const auto message_qtimestamp = QString::fromStdString(message_timestamp);
const auto message_timestamp_index = m_message_timestamp->findText(message_qtimestamp);
if (message_timestamp_index == -1)
m_message_timestamp->addItem(message_qtimestamp);
m_message_timestamp->setCurrentText(message_qtimestamp);
const auto message_audio = option->has_message_audio();
m_message_audio->setChecked(message_audio);
m_message_sound->setEnabled(message_audio);
m_message_volume->setEnabled(message_audio);
m_message_sound_play->setEnabled(message_audio);
m_message_sound_edit->setEnabled(message_audio);
m_message_sound_clear->setEnabled(message_audio);
m_message_volume_refresh->setEnabled(message_audio);
}
}
void client_communication_pane::layout()
{
// Chat
QVBoxLayout* chat_layout = new QVBoxLayout(this);
chat_layout->setSpacing(0);
chat_layout->setMargin(6);
pane::add_row(chat_layout, m_chat_audio, m_chat_visual);
pane::add_spacing(chat_layout, 2);
pane::add_row(chat_layout, "Audio File", m_chat_sound, m_chat_sound_play, m_chat_sound_edit, m_chat_sound_clear);
pane::add_spacing(chat_layout, 2);
pane::add_row(chat_layout, "Volume", m_chat_volume, m_chat_volume_refresh, "History", m_chat_history, m_chat_history_refresh);
pane::add_spacing(chat_layout, 2);
pane::add_row(chat_layout, "Timestamp", m_chat_timestamp, m_chat_timestamp_refresh);
QGroupBox* chat_group = new QGroupBox("Chat", this);
chat_group->setLayout(chat_layout);
// Message
QVBoxLayout* message_layout = new QVBoxLayout(this);
message_layout->setSpacing(0);
message_layout->setMargin(6);
pane::add_row(message_layout, m_message_audio, m_message_visual);
pane::add_spacing(message_layout, 2);
pane::add_row(message_layout, "Audio File", m_message_sound, m_message_sound_play, m_message_sound_edit, m_message_sound_clear);
pane::add_spacing(message_layout, 2);
pane::add_row(message_layout, "Volume", m_message_volume, m_message_volume_refresh);
pane::add_spacing(message_layout, 2);
pane::add_row(message_layout, "Timestamp", m_message_timestamp, m_message_timestamp_refresh);
QGroupBox* message_group = new QGroupBox("Message", this);
message_group->setLayout(message_layout);
// Layout
QVBoxLayout* layout = new QVBoxLayout(this);
layout->setContentsMargins(6, 0, 0, 0);
layout->setSpacing(0);
layout->addWidget(chat_group);
layout->addSpacing(6);
layout->addWidget(message_group);
layout->addStretch(1);
setLayout(layout);
}
void client_communication_pane::signal()
{
// Chat
connect(m_chat_audio, &QCheckBox::stateChanged, [=](int state)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto audio = (state == Qt::Checked);
option->set_chat_audio(audio);
m_chat_sound->setEnabled(audio);
m_chat_volume->setEnabled(audio);
m_chat_sound_play->setEnabled(audio);
m_chat_sound_edit->setEnabled(audio);
m_chat_sound_clear->setEnabled(audio);
m_chat_volume_refresh->setEnabled(audio);
}
});
connect(m_chat_visual, &QCheckBox::stateChanged, [=](int state)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto visual = (state == Qt::Checked);
option->set_chat_visual(visual);
}
});
connect(m_chat_timestamp, static_cast<void (QComboBox::*)(const QString&)>(&QComboBox::currentTextChanged), [=](const QString& str)
{
const auto option = m_entity->get<client_option_component>();
if (!option)
return;
const auto& timestamp = option->get_chat_timestamp();
const auto qtimestamp = str.toStdString();
if (timestamp != qtimestamp)
option->set_chat_timestamp(qtimestamp);
});
connect(m_chat_timestamp_refresh, &QToolButton::clicked, [this]()
{
if (m_chat_timestamp->currentIndex())
m_chat_timestamp->setCurrentIndex(0);
});
connect(m_chat_sound, &QLineEdit::textChanged, [=](const QString& str)
{
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_chat_sound(str.toStdString());
});
connect(m_chat_sound_play, &QToolButton::clicked, [this]()
{
if (m_player->state() == QMediaPlayer::PlayingState)
return;
const auto option = m_entity->get<client_option_component>();
if (option && option->has_chat_audio())
{
if (!option->has_chat_sound())
m_player->setMedia(QUrl(resource::audio::chat));
else
m_player->setMedia(QUrl::fromLocalFile(QString::fromStdString(option->get_chat_sound())));
m_player->setVolume(option->get_chat_volume());
m_player->play();
}
});
connect(m_chat_sound_edit, &QToolButton::clicked, [this]()
{
QFileDialog dialog;
dialog.setWindowTitle("Chat Alert");
dialog.setFileMode(QFileDialog::ExistingFile);
dialog.setOptions(QFileDialog::ReadOnly);
dialog.setViewMode(QFileDialog::ViewMode::Detail);
//"aac", "flac", "m4a", "mp3", "wav", "wma", "cue"
QStringList filters;
filters << "WAV (*.wav)" << "M4A (*.m4a)" << "MP3 (*.mp3)" << "WMA (*.wma)" << "All Files (*)";
dialog.setNameFilters(filters);
dialog.selectNameFilter("WAV (*.wav)");
if (dialog.exec())
{
const auto qpaths = dialog.selectedFiles();
const auto& qpath = qpaths.at(0);
if (qpath != m_chat_sound->text())
{
m_chat_sound->setText(qpath);
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_chat_sound(qpath.toStdString());
}
}
});
connect(m_chat_sound_clear, &QToolButton::clicked, [this]()
{
const auto qsound = m_chat_sound->text();
if (qsound.isEmpty())
return;
m_chat_sound->clear();
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_chat_sound();
});
connect(m_chat_volume, static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged), [=](int value)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto& volume = option->get_chat_volume();
const auto qvolume = static_cast<size_t>(value);
if (volume != qvolume)
option->set_chat_volume(qvolume);
}
});
connect(m_chat_volume_refresh, &QToolButton::clicked, [this]()
{
m_chat_volume->setValue(default_chat::num_volume);
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_chat_volume(default_chat::num_volume);
});
connect(m_chat_history, static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged), [=](int value)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto history = option->get_chat_history();
const auto qhistory = static_cast<size_t>(value);
if (history != qhistory)
{
option->set_chat_history(qhistory);
if (qhistory < history)
m_entity->call(function_type::chat, function_action::clear, m_entity);
}
}
});
connect(m_chat_history_refresh, &QToolButton::clicked, [this]()
{
m_chat_history->setValue(default_chat::num_history);
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_chat_history(default_chat::num_history);
});
// Message
connect(m_message_audio, &QCheckBox::stateChanged, [=](int state)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto audio = (state == Qt::Checked);
option->set_message_audio(audio);
m_message_sound->setEnabled(audio);
m_message_volume->setEnabled(audio);
m_message_sound_play->setEnabled(audio);
m_message_sound_edit->setEnabled(audio);
m_message_sound_clear->setEnabled(audio);
m_message_volume_refresh->setEnabled(audio);
}
});
connect(m_message_visual, &QCheckBox::stateChanged, [=](int state)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto visual = (state == Qt::Checked);
option->set_message_visual(visual);
}
});
connect(m_message_timestamp, static_cast<void (QComboBox::*)(const QString&)>(&QComboBox::currentTextChanged), [=](const QString& str)
{
const auto option = m_entity->get<client_option_component>();
if (!option)
return;
const auto& timestamp = option->get_message_timestamp();
const auto qtimestamp = str.toStdString();
if (timestamp != qtimestamp)
option->set_message_timestamp(qtimestamp);
});
connect(m_message_timestamp_refresh, &QToolButton::clicked, [this]()
{
if (m_message_timestamp->currentIndex())
m_message_timestamp->setCurrentIndex(0);
});
connect(m_message_sound, &QLineEdit::textChanged, [=](const QString& str)
{
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_message_sound(str.toStdString());
});
connect(m_message_sound_play, &QToolButton::clicked, [this]()
{
if (m_player->state() == QMediaPlayer::PlayingState)
return;
const auto option = m_entity->get<client_option_component>();
if (option && option->has_message_audio())
{
if (!option->has_message_sound())
m_player->setMedia(QUrl(resource::audio::message));
else
m_player->setMedia(QUrl::fromLocalFile(QString::fromStdString(option->get_message_sound())));
m_player->setVolume(option->get_message_volume());
m_player->play();
}
});
connect(m_message_sound_edit, &QToolButton::clicked, [this]()
{
QFileDialog dialog;
			dialog.setWindowTitle("Message Alert");
dialog.setFileMode(QFileDialog::ExistingFile);
dialog.setOptions(QFileDialog::ReadOnly);
dialog.setViewMode(QFileDialog::ViewMode::Detail);
//"aac", "flac", "m4a", "mp3", "wav", "wma", "cue"
QStringList filters;
filters << "WAV (*.wav)" << "M4A (*.m4a)" << "MP3 (*.mp3)" << "WMA (*.wma)" << "All Files (*)";
dialog.setNameFilters(filters);
dialog.selectNameFilter("WAV (*.wav)");
if (dialog.exec())
{
const auto qpaths = dialog.selectedFiles();
const auto& qpath = qpaths.at(0);
if (qpath != m_message_sound->text())
{
m_message_sound->setText(qpath);
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_message_sound(qpath.toStdString());
}
}
});
connect(m_message_sound_clear, &QToolButton::clicked, [this]()
{
const auto qsound = m_message_sound->text();
if (qsound.isEmpty())
return;
m_message_sound->clear();
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_message_sound();
});
connect(m_message_volume, static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged), [=](int value)
{
const auto option = m_entity->get<client_option_component>();
if (option)
{
const auto& volume = option->get_message_volume();
const auto qvolume = static_cast<size_t>(value);
if (volume != qvolume)
option->set_message_volume(qvolume);
}
});
connect(m_message_volume_refresh, &QToolButton::clicked, [this]()
{
m_message_volume->setValue(default_message::num_volume);
const auto option = m_entity->get<client_option_component>();
if (option)
option->set_message_volume(default_message::num_volume);
});
}
}
|
{"hexsha": "ec245c7b234d40842ca6ed275b92666e1745b42d", "size": 19455, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ds2/qt_demonsaw/pane/client/client_communication_pane.cpp", "max_stars_repo_name": "demonsaw/Code", "max_stars_repo_head_hexsha": "b036d455e9e034d7fd178e63d5e992242d62989a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 132.0, "max_stars_repo_stars_event_min_datetime": "2017-03-22T03:46:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T15:08:16.000Z", "max_issues_repo_path": "ds2/qt_demonsaw/pane/client/client_communication_pane.cpp", "max_issues_repo_name": "demonsaw/Code", "max_issues_repo_head_hexsha": "b036d455e9e034d7fd178e63d5e992242d62989a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2017-04-06T17:46:10.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-08T18:27:59.000Z", "max_forks_repo_path": "ds2/qt_demonsaw/pane/client/client_communication_pane.cpp", "max_forks_repo_name": "demonsaw/Code", "max_forks_repo_head_hexsha": "b036d455e9e034d7fd178e63d5e992242d62989a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30.0, "max_forks_repo_forks_event_min_datetime": "2017-03-26T22:38:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T20:50:17.000Z", "avg_line_length": 33.4278350515, "max_line_length": 136, "alphanum_fraction": 0.7263428425, "num_tokens": 4839}
|
import theano
import theano.tensor as T
import theano.tensor.nlinalg as nlinalg
import theano.gof as gof
import numpy as np
import numerical.numpyext.linalg as ntl
class CholeskyInvJitterOp(theano.Op):
__props__ = ('lower', 'destructive')
def __init__(self, lower=True, maxiter=10):
self.lower = lower
self.maxiter = maxiter
self.destructive = False
def infer_shape(self, node, shapes):
return [shapes[0]]
def make_node(self, x):
x = T.as_tensor_variable(x)
assert x.ndim == 2
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
x = inputs[0]
z = outputs[0]
z[0] = self._cholesky_inv_jitter(x).astype(x.dtype)
def grad(self, inputs, gradients):
x, = inputs
xi = self(x)
gz, = gradients
return [-nlinalg.matrix_dot(xi, gz.T, xi).T]
def _cholesky_inv_jitter(self, x):
return ntl.cholesky_inv_jitter(x, self.maxiter)
inv_jitter = CholeskyInvJitterOp()
class CholeskyLogDetJitterOp(theano.Op):
__props__ = ('lower', 'destructive')
def __init__(self, lower=True, maxiter=10):
self.lower = lower
self.maxiter = maxiter
self.destructive = False
def infer_shape(self, node, shapes):
return [()]
def make_node(self, x):
x = T.as_tensor_variable(x)
assert x.ndim == 2
o = theano.tensor.scalar(dtype=x.dtype)
return gof.Apply(self, [x], [o])
def perform(self, node, inputs, outputs):
x = inputs[0]
z = outputs[0]
L = ntl.cholesky_jitter(x, self.maxiter).astype(x.dtype)
z[0] = np.asarray(2.0 * np.sum(np.log(np.diag(L))), dtype=x.dtype)
#print("CholeskyLogDetJitterOp.perform", z[0])
if np.isnan(z[0]):
print("Error: CholeskyLogDetJitterOp.perform(...) returns NaN")
print("X: {}".format(x))
print("X.shape: {}".format(x.shape))
def grad(self, inputs, gradients):
x, = inputs
xi = inv_jitter(x)
gz, = gradients
return [gz * xi.T]
log_det_jitter = CholeskyLogDetJitterOp()
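# Usage sketch (symbolic graph; the variable names are assumptions):
#   X = T.dmatrix('X')
#   f = theano.function([X], log_det_jitter(X))
#   f(np.eye(3))   # -> 0.0, since log|I| = 0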
|
{"hexsha": "498b3a070ca9b24d836c22aec91d2aab63ba84de", "size": 2169, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/numerical/theanoext/operations/cholesky.py", "max_stars_repo_name": "dmytrov/gaussianprocess", "max_stars_repo_head_hexsha": "7044bd2d66f44e10656fee17e94fdee0c24c70bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/numerical/theanoext/operations/cholesky.py", "max_issues_repo_name": "dmytrov/gaussianprocess", "max_issues_repo_head_hexsha": "7044bd2d66f44e10656fee17e94fdee0c24c70bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/numerical/theanoext/operations/cholesky.py", "max_forks_repo_name": "dmytrov/gaussianprocess", "max_forks_repo_head_hexsha": "7044bd2d66f44e10656fee17e94fdee0c24c70bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8076923077, "max_line_length": 75, "alphanum_fraction": 0.602120793, "include": true, "reason": "import numpy,import theano", "num_tokens": 599}
|
import shapely.geometry
import numpy as np
import fiona.crs
import pyproj
from shapely.geometry.point import Point
UTM_ZONE30 = pyproj.Proj(
proj='utm',
zone=30,
datum='WGS84',
units='m',
errcheck=True)
schema = {'geometry': 'LineString', 'properties': {'PhysID': 'int'}}
crs = fiona.crs.from_string(UTM_ZONE30.srs)
x0, y0, x1, y1 = 0, 0, 640, 320
features = \
[shapely.geometry.LineString([(x0, y0), (x1, y0)]),
shapely.geometry.LineString([(x1, y0), (x1, y1)]),
shapely.geometry.LineString([(x1, y1), (x0, y1)]),
shapely.geometry.LineString([(x0, y1), (x0, y0)])]
with fiona.collection("outline_2.shp", "w", "ESRI Shapefile", schema, crs=crs) as output:
for i in range(len(features)):
output.write({'geometry': shapely.geometry.mapping(features[i]), 'properties': {'PhysID': i}})
# Array coordinates
array_list = np.zeros((7, 2))
array_1 = np.arange(64, 320, 64)
array_2 = np.arange(64 + 32, 320-64, 64)
array_list[0:4, 0] = 640 / 3
array_list[4:, 0] = 640 / 3 + 64
array_list[0:4, 1] = array_1
array_list[4:, 1] = array_2
np.save("Turbine_coords.npy", array_list)
features2 = []
for x, y in array_list:
p = Point(x, y)
circle = shapely.geometry.LineString(list(p.buffer(10).exterior.coords))
features2.append(circle)
with fiona.collection("turbine_circles.shp", "w", "ESRI Shapefile", schema, crs=crs) as output:
for i in range(len(features2)):
output.write({'geometry': shapely.geometry.mapping(features2[i]), 'properties': {'PhysID': 100}})
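# Sanity-check sketch (assumes the shapefiles above were written):
#   with fiona.open("outline_2.shp") as src:
#       assert len(src) == 4   # the four outline segments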
|
{"hexsha": "1a41d2112e579b449fc47301d00b17852994dafa", "size": 1535, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/discrete_turbines/qmesh/shapefile_generation.py", "max_stars_repo_name": "jrper/thetis", "max_stars_repo_head_hexsha": "3c08a2e6947552119232fefd7380fa61b2a9b84b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/discrete_turbines/qmesh/shapefile_generation.py", "max_issues_repo_name": "jrper/thetis", "max_issues_repo_head_hexsha": "3c08a2e6947552119232fefd7380fa61b2a9b84b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/discrete_turbines/qmesh/shapefile_generation.py", "max_forks_repo_name": "jrper/thetis", "max_forks_repo_head_hexsha": "3c08a2e6947552119232fefd7380fa61b2a9b84b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3695652174, "max_line_length": 106, "alphanum_fraction": 0.6599348534, "include": true, "reason": "import numpy", "num_tokens": 519}
|
module Endpoints
using ..Pages
import HTTP  # HTTP.RequestHandlerFunction and HTTP.Request are referenced below
export Endpoint, endpoints, method, servefile, servefolder
export GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH
struct Method{M} end
struct Endpoint
handlers::Dict{Symbol,HTTP.RequestHandlerFunction}
route::String
function Endpoint(handle,route,method::Method{M}=GET) where M
route = lowercase(route)
if haskey(endpoints,route)
e = endpoints[route]
e.handlers[M] = HTTP.RequestHandlerFunction(handle)
return e
else
handlers = Dict(M=>HTTP.RequestHandlerFunction(handle))
e = new(handlers,route)
endpoints[route] = e
return e
end
end
end
const endpoints = Dict{String,Endpoint}()
symbol(m::Type{Method{M}}) where M = M
Base.show(io::IO,m::Method{M}) where M = print(io,M)
Base.show(io::IO,::MIME"text/plain",m::Method{M}) where M = print(io,M)
methods = ["GET","HEAD","POST","PUT","DELETE","CONNECT","OPTIONS","TRACE","PATCH"]
for method in methods
@eval Endpoints $(Symbol(method)) = Method{Symbol($(method))}()
end
method(m::Method{S}) where {S} = S
function servefile(filepath,root="/")
if isfile(filepath)
file = basename(filepath)
if isequal(lowercase(file),"index.html") || isequal(lowercase(file),"index.htm")
Endpoint(root) do request::HTTP.Request
read(filepath,String)
end
end
Endpoint("$(root)/$(file)") do request::HTTP.Request
read(filepath,String)
end
else
@warn "$(filepath) not found."
end
end
function servefolder(folder,root="/")
for file in readdir(folder)
filepath = joinpath(folder,file)
servefile(filepath,root)
end
end
end
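# Usage sketch (assumes HTTP and the parent Pages module are loaded):
#   Endpoint("/hello") do request::HTTP.Request
#       "Hello, world"
#   end
#   servefolder("public")   # serve every file in ./public under "/"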
|
{"hexsha": "9d43397a9564deb15745e93c383958b0f6350b42", "size": 1774, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Endpoints.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Pages.jl-7c165e09-dada-5b64-9fdc-39b801c58527", "max_stars_repo_head_hexsha": "b626454e82da21659e9ca822e94dbdf220652917", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2016-05-29T10:05:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:34:24.000Z", "max_issues_repo_path": "src/Endpoints.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Pages.jl-7c165e09-dada-5b64-9fdc-39b801c58527", "max_issues_repo_head_hexsha": "b626454e82da21659e9ca822e94dbdf220652917", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2016-07-31T09:39:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-30T01:45:13.000Z", "max_forks_repo_path": "src/Endpoints.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Pages.jl-7c165e09-dada-5b64-9fdc-39b801c58527", "max_forks_repo_head_hexsha": "b626454e82da21659e9ca822e94dbdf220652917", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:15:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T12:42:34.000Z", "avg_line_length": 27.2923076923, "max_line_length": 88, "alphanum_fraction": 0.6245772266, "num_tokens": 432}
|
# -*- coding: utf-8 -*-
__all__ = ["USE_AESARA", "aesara", "sparse", "change_flags", "ifelse"]
USE_AESARA = False
try:
import aesara
except ImportError:
aesara = None
else:
try:
import pymc3.theanof # noqa
except ImportError:
USE_AESARA = True
if aesara is None or not USE_AESARA:
try:
import theano.graph
except ImportError:
try:
import theano.gof
except ImportError:
raise ImportError(
"None of 'aesara', 'theano-pymc', or 'theano' are installed"
)
# General imports: these are the same for both theano and theano-pymc
import theano as aesara
from theano import sparse
from theano.ifelse import ifelse
try:
change_flags = theano.config.change_flags
except (ImportError, AttributeError):
from theano.configparser import change_flags
else:
# Aesara is installed
from aesara import sparse
from aesara.configparser import change_flags
from aesara.ifelse import ifelse
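# Usage sketch: downstream code imports the resolved symbols from this module
# rather than importing theano/aesara directly, e.g. (module path assumed):
#   from aesara_theano_fallback.compat import aesara, sparse, change_flags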
|
{"hexsha": "665f49940ae0710f5643411d2b8753087e00a6bb", "size": 1050, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/aesara_theano_fallback/compat.py", "max_stars_repo_name": "dfm/aesara-theano-fallback", "max_stars_repo_head_hexsha": "9b7ba725ed9c25fa0ec457b183d4dfa3ad6874ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/aesara_theano_fallback/compat.py", "max_issues_repo_name": "dfm/aesara-theano-fallback", "max_issues_repo_head_hexsha": "9b7ba725ed9c25fa0ec457b183d4dfa3ad6874ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-10T19:41:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-15T18:32:18.000Z", "max_forks_repo_path": "src/aesara_theano_fallback/compat.py", "max_forks_repo_name": "dfm/aesara-theano-fallback", "max_forks_repo_head_hexsha": "9b7ba725ed9c25fa0ec457b183d4dfa3ad6874ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3404255319, "max_line_length": 76, "alphanum_fraction": 0.6466666667, "include": true, "reason": "import theano,from theano,import pymc3", "num_tokens": 254}
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from ccgnet import experiment as exp
from ccgnet import layers
import tensorflow as tf
import numpy as np
import time
from sklearn.metrics import balanced_accuracy_score
from ccgnet.Dataset import Dataset, DataLoader
def build_model(
graphcnn_layer_1_size,
graphcnn_layer_2_size,
graphcnn_layer_3_size,
graphcnn_act_fun,
graph_pool_1_size,
graph_pool_2_size,
graph_pool_3_size,
graph_pool_act_fun,
dense_layer_1_size,
dense_layer_2_size,
dense_layer_3_size,
dense_act_func,
dense_dropout
):
mask_judge = (graph_pool_1_size, graph_pool_2_size, graph_pool_3_size)
print(mask_judge)
class Model(object):
def build_model(self, inputs, is_training, global_step):
V = inputs[0]
A = inputs[1]
labels = inputs[2]
mask = inputs[3]
graph_size = inputs[4]
tags = inputs[5]
# Graph-CNN stage
V = layers.make_graphcnn_layer(V, A, graphcnn_layer_1_size)
V = layers.make_bn(V, is_training, mask=mask, num_updates=global_step)
V = graphcnn_act_fun(V)
if graph_pool_1_size != None:
V_pool, A = layers.make_graph_embed_pooling(V, A, mask=mask, no_vertices=graph_pool_1_size)
V = layers.make_bn(V_pool, is_training, mask=None, num_updates=global_step)
V = graph_pool_act_fun(V)
if graphcnn_layer_2_size != None:
if mask_judge[0] != None:
m = None
else:
m = mask
V = layers.make_graphcnn_layer(V, A, graphcnn_layer_2_size)
V = layers.make_bn(V, is_training, mask=m, num_updates=global_step)
V = graphcnn_act_fun(V)
if graph_pool_2_size != None:
if mask_judge[0] != None:
m = None
else:
m = mask
V_pool, A = layers.make_graph_embed_pooling(V, A, mask=m, no_vertices=graph_pool_2_size)
V = layers.make_bn(V_pool, is_training, mask=None, num_updates=global_step)
V = graph_pool_act_fun(V)
if graphcnn_layer_3_size != None:
if mask_judge[1] != None or mask_judge[0] != None:
m = None
else:
m = mask
V = layers.make_graphcnn_layer(V, A, graphcnn_layer_3_size)
V = layers.make_bn(V, is_training, mask=m, num_updates=global_step)
V = graphcnn_act_fun(V)
if mask_judge[1] != None or mask_judge[0] != None:
m = None
else:
m = mask
V_pool, A = layers.make_graph_embed_pooling(V, A, mask=m, no_vertices=graph_pool_3_size)
V = layers.make_bn(V_pool, is_training, mask=None, num_updates=global_step)
V = graph_pool_act_fun(V)
# Predictive Stage
no_input_features = int(np.prod(V.get_shape()[1:]))
V = tf.reshape(V, [-1, no_input_features])
V = layers.make_embedding_layer(V, dense_layer_1_size, name='FC-1')
V = layers.make_bn(V, is_training, mask=None, num_updates=global_step)
V = dense_act_func(V)
V = tf.compat.v1.layers.dropout(V, dense_dropout, training=is_training)
if dense_layer_2_size != None:
V = layers.make_embedding_layer(V, dense_layer_2_size, name='FC-2')
V = layers.make_bn(V, is_training, mask=None, num_updates=global_step)
V = dense_act_func(V)
V = tf.compat.v1.layers.dropout(V, dense_dropout, training=is_training)
if dense_layer_3_size != None:
V = layers.make_embedding_layer(V, dense_layer_3_size, name='FC-3')
V = layers.make_bn(V, is_training, mask=None, num_updates=global_step)
V = dense_act_func(V)
V = tf.compat.v1.layers.dropout(V, dense_dropout, training=is_training)
out = layers.make_embedding_layer(V, 2, name='final')
return out, labels
return Model()
def black_box_function(args_dict):
    tf.compat.v1.reset_default_graph()
batch_size = args_dict['batch_size']
graphcnn_layer_1_size = args_dict['graphcnn_layer_1_size']
graphcnn_layer_2_size = args_dict['graphcnn_layer_2_size']
graphcnn_layer_3_size = args_dict['graphcnn_layer_3_size']
graphcnn_act_fun = args_dict['graphcnn_act_fun']
graph_pool_1_size = args_dict['graph_pool_1_size']
graph_pool_2_size = args_dict['graph_pool_2_size']
graph_pool_3_size = args_dict['graph_pool_3_size']
graph_pool_act_fun = args_dict['graph_pool_act_fun']
dense_layer_1_size = args_dict['dense_layer_1_size']
dense_layer_2_size = args_dict['dense_layer_2_size']
dense_layer_3_size = args_dict['dense_layer_3_size']
dense_act_func = args_dict['dense_act_func']
dense_dropout = args_dict['dense_dropout']
# make save dir
snapshot_path = abs_path+'/bayes_snapshot/'
model_name = 'BayesOpt-GraphCNN/'
verify_dir_exists(snapshot_path+model_name)
if os.listdir(snapshot_path+model_name) == []:
dataset_name = 'Step_0/'
else:
l_ = [int(i.split('_')[1]) for i in os.listdir(snapshot_path+model_name) if 'Step_' in i]
dataset_name = 'Step_{}/'.format(max(l_)+1)
model = build_model(graphcnn_layer_1_size,
graphcnn_layer_2_size,
graphcnn_layer_3_size,
graphcnn_act_fun,
graph_pool_1_size,
graph_pool_2_size,
graph_pool_3_size,
graph_pool_act_fun,
dense_layer_1_size,
dense_layer_2_size,
dense_layer_3_size,
dense_act_func,
dense_dropout)
model = exp.Model(model, train_data, valid_data, with_test=False, snapshot_path=snapshot_path, use_subgraph=False, use_desc=False, build_fc=False,
model_name=model_name, dataset_name=dataset_name+'/time_0')
history = model.fit(num_epoch=100, save_info=True, save_att=False, silence=False, train_batch_size=batch_size,
max_to_keep=1, metric='loss')
loss = min(history['valid_cross_entropy'])
    tf.compat.v1.reset_default_graph()
print('\nLoss: {}'.format(loss))
print(str(args_dict))
return loss
from hyperopt import fmin, tpe, Trials, hp
import hyperopt.pyll.stochastic
import random
def verify_dir_exists(dirname):
if os.path.isdir(os.path.dirname(dirname)) == False:
os.makedirs(os.path.dirname(dirname))
def make_dataset():
data1 = Dataset(abs_path+'/CC_Table/CC_Table.tab', mol_blocks_dir=abs_path+'/Mol_Blocks.dir')
data1.make_graph_dataset(Desc=0, A_type='OnlyCovalentBond', hbond=0, pipi_stack=0, contact=0, make_dataframe=True)
return data1
abs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
fold_10 = eval(open(abs_path+'/Fold_10.dir').read())
data = make_dataset()
Samples = fold_10['fold-0']['train']+fold_10['fold-0']['valid']
# data spliting
random.shuffle(Samples)
num_sample = len(Samples)
train_num = int(0.9 * num_sample)
train_samples = Samples[:train_num]
valid_samples = Samples[train_num:]
train_data, valid_data = data.split(train_samples=train_samples, valid_samples=valid_samples)
args_dict = {
'batch_size':hp.choice('batch_size', (128,)),
'graphcnn_layer_1_size':hp.choice('graphcnn_layer_1_size', (16,32,64,128,256)),
'graphcnn_layer_2_size':hp.choice('graphcnn_layer_2_size', (16,32,64,128,256,None)),
'graphcnn_layer_3_size':hp.choice('graphcnn_layer_3_size', (16,32,64,128,256,None)),
'graphcnn_act_fun':hp.choice('graphcnn_act_fun', (tf.nn.relu, )),
'graph_pool_1_size':hp.choice('graph_pool_1_size', (8,16,32,None)),
'graph_pool_2_size':hp.choice('graph_pool_2_size', (8,16,32,None)),
'graph_pool_3_size':hp.choice('graph_pool_3_size', (8,16,32)),
'graph_pool_act_fun':hp.choice('graph_pool_act_fun', (tf.nn.relu, )),
'dense_layer_1_size':hp.choice('dense_layer_1_size', (64,128,256,512)),
'dense_layer_2_size':hp.choice('dense_layer_2_size', (64,128,256,512,None)),
'dense_layer_3_size':hp.choice('dense_layer_3_size', (64,128,256,512,None)),
'dense_act_func':hp.choice('dense_act_func', (tf.nn.relu, )),
'dense_dropout':hp.uniform('dense_dropout', 0.0, 0.75)
}
trials = Trials()
best = fmin(
fn=black_box_function,
space=args_dict,
algo=tpe.suggest,
max_evals=100,
trials=trials,
trials_save_file='trials_save_file-graphcnn')
print('\nbest:')
print(best)
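# Note (sketch): because fmin is given trials_save_file, re-running this
# script with the same file resumes the search from the pickled trials
# instead of starting over.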
|
{"hexsha": "f75ddd83bbbdb79643ecaca64767c649ff74c687", "size": 9283, "ext": "py", "lang": "Python", "max_stars_repo_path": "BayesOpt/BayesOpt-GraphCNN.py", "max_stars_repo_name": "Saoge123/ccgnet", "max_stars_repo_head_hexsha": "9359c642bd1faa4c15cae829615385761ebd8d92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-09T08:07:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T13:53:46.000Z", "max_issues_repo_path": "BayesOpt/BayesOpt-GraphCNN.py", "max_issues_repo_name": "Saoge123/ccgnet", "max_issues_repo_head_hexsha": "9359c642bd1faa4c15cae829615385761ebd8d92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-05T14:18:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-22T02:58:53.000Z", "max_forks_repo_path": "BayesOpt/BayesOpt-GraphCNN.py", "max_forks_repo_name": "Saoge123/ccgnet", "max_forks_repo_head_hexsha": "9359c642bd1faa4c15cae829615385761ebd8d92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-16T14:34:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-21T06:49:32.000Z", "avg_line_length": 45.0631067961, "max_line_length": 150, "alphanum_fraction": 0.6149951524, "include": true, "reason": "import numpy", "num_tokens": 2288}
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
SUBROUTINE VERSION(QAINFO)
include 'params.blk'
CHARACTER*(MXQARC) QAINFO(6)
QAINFO(1) = 'blot '
QAINFO(2) = '2021/03/31 '
QAINFO(3) = ' 3.141 '
QAINFO(4) = ' '
RETURN
END
|
{"hexsha": "727c9050c20d1afbf392b603379f7e057a9f0c29", "size": 591, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/applications/blot/bl_version.f", "max_stars_repo_name": "jschueller/seacas", "max_stars_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_stars_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2016-02-04T18:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:01:49.000Z", "max_issues_repo_path": "packages/seacas/applications/blot/bl_version.f", "max_issues_repo_name": "jschueller/seacas", "max_issues_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_issues_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2015-11-20T01:57:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:12:04.000Z", "max_forks_repo_path": "packages/seacas/applications/blot/bl_version.f", "max_forks_repo_name": "jschueller/seacas", "max_forks_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_forks_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-01-13T22:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:25:05.000Z", "avg_line_length": 29.55, "max_line_length": 72, "alphanum_fraction": 0.5363790186, "num_tokens": 155}
|
from skimage import measure
import numpy as np
np.random.seed(123)
try:
from MulticoreTSNE import MulticoreTSNE as TSNE
except ImportError:
from sklearn.manifold import TSNE
from tqdm import tqdm
from phathom.preprocess.filtering import gaussian_blur
try:
from mayavi import mlab
except ImportError:
mlab = None
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster.bicluster import SpectralCoclustering
import multiprocessing
from scipy.spatial import distance
from sklearn import preprocessing
import pandas as pd
def smooth_segmentation(seg, sigma=1, scale_factor=10):
binary = (seg > 0)
smooth = scale_factor * gaussian_blur(binary, sigma)
return smooth.astype(np.float32)
def marching_cubes(seg, level, spacing, step_size):
return measure.marching_cubes_lewiner(seg, level=level, spacing=spacing, step_size=step_size, allow_degenerate=False)
def plot_mesh(verts, faces, color=(1, 0, 0), figure=None):
if figure is not None:
mlab.figure(figure)
return mlab.triangular_mesh([vert[0] for vert in verts],
[vert[1] for vert in verts],
[vert[2] for vert in verts],
faces,
color=color)
def randomly_sample(n, *items, return_idx=False):
idx = np.arange(len(items[0]))
np.random.shuffle(idx)
idx = idx[:n]
if return_idx:
return tuple(item[idx] for item in items), idx
else:
return tuple(item[idx] for item in items)
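# Usage sketch: draw the same random subset from several parallel arrays:
#
#   pts, lbl = np.arange(10).reshape(5, 2), np.arange(5)
#   (pts_s, lbl_s), idx = randomly_sample(3, pts, lbl, return_idx=True)
#   pts_s.shape, lbl_s.shape, idx.shape   # (3, 2), (3,), (3,)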
def voxels_to_micron(data_voxels, voxel_size):
return data_voxels * np.asarray(voxel_size)
def make_bins(start, stop, bins):
bin_edges = np.linspace(start, stop, bins + 1)
bin_width = bin_edges[1] - bin_edges[0]
return bin_edges, bin_width
def cross_products(vectors, ref=np.array([1, 0, 0])):
return np.cross(vectors, ref)
def dot_products(vectors, ref=np.array([1, 0, 0])):
return np.dot(vectors, ref)
centers_um_global = None
sox2_labels_global = None
tbr1_labels_global = None
def compute_profile(vert, vi, ci, length, bins, radius):
global centers_um_global
global sox2_labels_global
global tbr1_labels_global
pts = centers_um_global
sox2_labels = sox2_labels_global
tbr1_labels = tbr1_labels_global
# Translate points to origin
pts_translated = pts - vert
# Rotate points to align the normal with the z-axis
v_cross = np.array([[0, -vi[2], vi[1]],
[vi[2], 0, -vi[0]],
[-vi[1], vi[0], 0]])
rotation_matrix = np.eye(3) + v_cross + np.matmul(v_cross, v_cross) / (1 + ci)
pts_translated_rotated = rotation_matrix.dot(pts_translated.T).T
# Bin count the cells
bin_edges, bin_height = make_bins(0, length, bins)
    sox2_count = np.zeros(bins, int)      # np.int was removed in NumPy 1.20+
    tbr1_count = np.zeros(bins, int)
    negative_count = np.zeros(bins, int)
for j, bin_start in enumerate(bin_edges[:-1]):
bin_stop = bin_start + bin_height
x, y, z = pts_translated_rotated[:, 2], pts_translated_rotated[:, 1], pts_translated_rotated[:, 0]
idx = np.where(np.logical_and(x ** 2 + y ** 2 <= radius ** 2, np.logical_and(z >= bin_start, z <= bin_stop)))[0]
sox2_lbls = sox2_labels[idx]
tbr1_lbls = tbr1_labels[idx]
negative_lbls = np.where(np.logical_and(sox2_lbls == 0, tbr1_lbls == 0))[0]
sox2_count[j] = sox2_lbls.sum()
tbr1_count[j] = tbr1_lbls.sum()
negative_count[j] = len(negative_lbls)
return sox2_count, tbr1_count, negative_count
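# A self-contained check of the rotation used above: with v = n x ref and
# c = n . ref, the matrix R = I + [v]_x + [v]_x^2 / (1 + c) rotates the unit
# normal n onto ref (the formula is singular when c == -1, i.e. when the
# normal is anti-parallel to the reference axis):
#
#   ref = np.array([1., 0., 0.])
#   n = np.array([1., 2., 2.]) / 3.                 # arbitrary unit normal
#   v, c = np.cross(n, ref), np.dot(n, ref)
#   v_x = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
#   R = np.eye(3) + v_x + v_x @ v_x / (1 + c)
#   R @ n                                           # ~ array([1., 0., 0.])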
def _compute_profile(inputs):
return compute_profile(*inputs)
def compute_profiles(verts, normals, length, bins, radius, centers_um, sox2_labels, tbr1_labels):
global centers_um_global
global sox2_labels_global
global tbr1_labels_global
centers_um_global = centers_um
sox2_labels_global = sox2_labels
tbr1_labels_global = tbr1_labels
v = cross_products(normals)
c = dot_products(normals)
# Get cell density profiles for each cell-type
args_list = []
for i, (vi, ci, vert) in tqdm(enumerate(zip(v, c, verts)), total=len(normals)):
args_list.append((vert, vi, ci, length, bins, radius))
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
results = list(tqdm(pool.imap(_compute_profile, args_list), total=len(args_list)))
return np.asarray(results)
def counts_to_features(counts):
    features = counts.reshape((len(counts), -1))  # Flattened profiles
    return preprocessing.scale(features)  # Normalize each feature (cell bin) to zero mean, unit variance
def hierarchical_clustering(features, n_clusters, linkage):
labels = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage).fit_predict(features)
return labels
def euclidean_distance_matrix(counts, nb_vectors):
dist = np.zeros((nb_vectors, nb_vectors, counts.shape[1]))
for c in range(counts.shape[1]):
counts_channel = counts[:, c]
Y = distance.squareform(distance.pdist(counts_channel, metric='correlation'))
Y[np.isnan(Y)] = 1
dist[..., c] = Y
    D = dist.mean(axis=-1)  # average the per-channel (correlation) distances
    return D
def spectral_coclustering(n_clusters, D):
scc = SpectralCoclustering(n_clusters=n_clusters).fit(D)
# fit_data = D[np.argsort(scc.row_labels_)]
# fit_data = fit_data[:, np.argsort(scc.column_labels_)]
#
# plt.matshow(D)
# plt.matshow(fit_data)
labels = scc.row_labels_
return labels
def labels_to_cluster_idx(labels):
n_clusters = len(np.unique(labels))
return [np.where(labels == i)[0] for i in range(n_clusters)]
def separate_by_labels(labels, *items):
cluster_idx = labels_to_cluster_idx(labels)
return cluster_idx, tuple([item[idx] for idx in cluster_idx] for item in items)
def colormap_to_colors(n, name='Set2'):
cmap = cm.get_cmap(name)
colors = [tuple(list(cmap(i))[:3]) for i in range(n)]
return colors
def plot_normals(cluster_verts, cluster_normals, colors, opacity):
for i, (v, n, color) in enumerate(zip(cluster_verts, cluster_normals, colors)):
mlab.quiver3d(v[:, 0], v[:, 1], v[:, 2], n[:, 0], n[:, 1], n[:, 2],
color=color, opacity=opacity)
def plot_nuclei(centers_um, nb_nuclei, sox2_labels, tbr1_labels, scale_factor=1, figure=None):
if figure is not None:
mlab.figure(figure)
centers_sample, sox2_labels_sample, tbr1_labels_sample = randomly_sample(nb_nuclei,
centers_um,
sox2_labels,
tbr1_labels)
negative_idx = np.where(np.logical_and(sox2_labels_sample == 0, tbr1_labels_sample == 0))[0]
sox2_idx = np.where(np.logical_and(sox2_labels_sample > 0, tbr1_labels_sample == 0))[0]
tbr1_idx = np.where(np.logical_and(sox2_labels_sample == 0, tbr1_labels_sample > 0))[0]
negative = centers_sample[negative_idx]
sox2 = centers_sample[sox2_idx]
tbr1 = centers_sample[tbr1_idx]
# Plot nuclei
mlab.points3d(negative[:, 0], negative[:, 1], negative[:, 2], scale_factor=scale_factor, color=(0, 0, 1))
mlab.points3d(sox2[:, 0], sox2[:, 1], sox2[:, 2], scale_factor=scale_factor, color=(1, 0, 0))
mlab.points3d(tbr1[:, 0], tbr1[:, 1], tbr1[:, 2], scale_factor=scale_factor, color=(0, 1, 0))
def show3d(stop=False):
mlab.show(stop=stop)
def plot_clustermap(features, method):
g = sns.clustermap(features, col_cluster=False, method=method)
plt.show()
return g
def plot_tsne(features, labels, colors):
embedding = TSNE(n_jobs=-1).fit_transform(features)
cluster_idx, (cluster_tsne,) = separate_by_labels(labels, embedding)
for i, (tsne, color) in enumerate(zip(cluster_tsne, colors)):
plt.plot(tsne[:, 0], tsne[:, 1], 'o', c=color, label=f"Cluster {i}")
plt.legend()
sns.despine()
plt.show()
def cluster_sizes(labels):
cluster_idx = labels_to_cluster_idx(labels)
return [len(idx) for idx in cluster_idx]
def plot_cluster_profiles(counts, labels):
n = counts.shape[-1]
x0 = np.arange(n)
x = []
sox2 = []
tbr1 = []
dn = []
lbls = []
for row, lbl in zip(counts, labels):
x += list(x0)
sox2 += list(row[0])
tbr1 += list(row[1])
dn += list(row[2])
lbls += n * [lbl]
x = 3 * x
lbls = 3 * lbls
y = sox2 + tbr1 + dn
cell_type = len(sox2) * ['sox2'] + len(tbr1) * ['tbr1'] + len(dn) * ['dn']
df = pd.DataFrame({'x': x, 'y': y, 'cell_type': cell_type, 'labels': lbls})
sns.relplot(x='x', y='y', hue='cell_type', col='labels',
kind='line', palette=['r', 'g', 'b'], data=df)
plt.show()
|
{"hexsha": "e887491dd8cb3e8fec2bfd8cef8828cb6e7a2fc2", "size": 8891, "ext": "py", "lang": "Python", "max_stars_repo_path": "phathom/phenotype/mesh.py", "max_stars_repo_name": "chunglabmit/phathom", "max_stars_repo_head_hexsha": "304db7a95e898e9b03d6b2640172752d21a7e3ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-18T11:54:29.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-18T11:54:29.000Z", "max_issues_repo_path": "phathom/phenotype/mesh.py", "max_issues_repo_name": "chunglabmit/phathom", "max_issues_repo_head_hexsha": "304db7a95e898e9b03d6b2640172752d21a7e3ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-04-05T20:53:52.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-01T16:37:39.000Z", "max_forks_repo_path": "phathom/phenotype/mesh.py", "max_forks_repo_name": "chunglabmit/phathom", "max_forks_repo_head_hexsha": "304db7a95e898e9b03d6b2640172752d21a7e3ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8081180812, "max_line_length": 121, "alphanum_fraction": 0.6488583961, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2420}
|
program problem_4b
!! In your batch file, how many passports are valid?
use aoc_utilities
use iso_fortran_env
implicit none
integer,parameter :: chunk_size = 256
integer :: iunit, istat, n_lines, record_num, i, j, n_valid, ival, n, c
character(len=:),allocatable :: line, key, val
logical :: status_ok
type(string),dimension(:),allocatable :: key_vals, key_val
type :: record
logical :: byr = .false. ! (Birth Year)
logical :: iyr = .false. ! (Issue Year)
logical :: eyr = .false. ! (Expiration Year)
logical :: hgt = .false. ! (Height)
logical :: hcl = .false. ! (Hair Color)
logical :: ecl = .false. ! (Eye Color)
logical :: pid = .false. ! (Passport ID)
logical :: cid = .false. ! (Country ID)
end type record
type(record), dimension(:),allocatable :: records
type(record) :: tmp
open(newunit=iunit, file='input.txt', iostat=istat)
if (istat /= 0) error stop ' error reading file'
n_lines = number_of_lines_in_file(iunit)
record_num = 1
records = [tmp]
do j = 1, n_lines
call read_line_from_file(iunit,chunk_size,line,status_ok)
if (line == '') then
record_num = record_num + 1
records = [records, tmp]
else
call split(line,' ',chunk_size,key_vals)
do i = 1, size(key_vals)
call split(key_vals(i)%str,':',chunk_size,key_val)
key = key_val(1)%str
val = key_val(2)%str
n = len(val)
select case (key)
case('byr') ! four digits; at least 1920 and at most 2002.
if (len(val)==4 .and. verify(val,'0123456789')==0) then
read(val, '(I4)') ival
records(record_num)%byr = ival >= 1920 .and. ival <= 2002
end if
case('iyr') ! four digits; at least 2010 and at most 2020.
if (len(val)==4 .and. verify(val,'0123456789')==0) then
read(val, '(I4)') ival
records(record_num)%iyr = ival >= 2010 .and. ival <= 2020
end if
case('eyr') ! four digits; at least 2020 and at most 2030.
if (len(val)==4 .and. verify(val,'0123456789')==0) then
read(val, '(I4)') ival
records(record_num)%eyr = ival >= 2020 .and. ival <= 2030
end if
case('hgt') ! a number followed by either cm or in:
! If cm, the number must be at least 150 and at most 193.
! If in, the number must be at least 59 and at most 76.
if (n>2) then
if (verify(val(1:n-2),'0123456789')==0) then
read(val(1:n-2),*) ival
if (val(n-1:n)=='cm' ) then
records(record_num)%hgt = ival>=150 .and. ival<=193
elseif (val(n-1:n)=='in') then
records(record_num)%hgt = ival>=59 .and. ival<=76
end if
end if
end if
                    case('hcl') ! a # followed by exactly six characters 0-9 or a-f.
                        if (n==7) then
                            if (val(1:1)=='#') then
                                records(record_num)%hcl = .true.
                                do c = 2, n
                                    if (.not. ((val(c:c)>='0' .and. val(c:c)<='9') .or. &
                                               (val(c:c)>='a' .and. val(c:c)<='f'))) then
                                        records(record_num)%hcl = .false.
                                        exit
                                    end if
                                end do
                            end if
                        end if
case('ecl') ! exactly one of: amb blu brn gry grn hzl oth.
records(record_num)%ecl = any(val==['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'])
case('pid') ! a nine-digit number, including leading zeroes.
records(record_num)%pid = n==9 .and. verify(val,'0123456789')==0
case('cid') ! ignored, missing or not.
records(record_num)%cid = .true.
case default
error stop 'invalid key'
end select
end do
end if
end do
write(*,*) 'number of records: ', record_num
n_valid = 0
do i = 1, record_num
if (all([ records(i)%byr, &
records(i)%iyr, &
records(i)%eyr, &
records(i)%hgt, &
records(i)%hcl, &
records(i)%ecl, &
records(i)%pid ])) then
n_valid = n_valid + 1
end if
end do
write(*,*) 'n_valid: ', n_valid
end program problem_4b
|
{"hexsha": "b169d14676a6a1500854ec5a93ef5a6e8166d2fb", "size": 4684, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/day4/problem_4b.f90", "max_stars_repo_name": "jacobwilliams/AoC-2020", "max_stars_repo_head_hexsha": "2adf673a0ac62710fc5461576feb95bf5fae4cf2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/day4/problem_4b.f90", "max_issues_repo_name": "jacobwilliams/AoC-2020", "max_issues_repo_head_hexsha": "2adf673a0ac62710fc5461576feb95bf5fae4cf2", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/day4/problem_4b.f90", "max_forks_repo_name": "jacobwilliams/AoC-2020", "max_forks_repo_head_hexsha": "2adf673a0ac62710fc5461576feb95bf5fae4cf2", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7741935484, "max_line_length": 101, "alphanum_fraction": 0.4861229718, "num_tokens": 1275}
|
import numpy.random as rnd
from scipy import stats
import numpy as np
def AWGN_IS(x, snr, seed=None):
    """Add AWGN with importance sampling: the bit positions listed in
    bias_bits are drawn from a mean-shifted (biased) Gaussian, and the
    per-trial likelihood ratios are returned as importance weights."""
    rng = rnd.default_rng(seed)
    noise_sigma = 10 ** (-snr / 20)
    n, n_trials = x.shape
    mu, sigma = 0, noise_sigma
    mu_biased, sigma_biased = 0.5, noise_sigma
    # sample noise from the original pdf
    noise = rng.normal(mu, sigma, x.shape)
    # re-sample the biased bit positions from the shifted pdf
    bias_bits = np.array([0, 5, 10, 15, 20, 25])
    for i in bias_bits:
        for j in range(n_trials):
            noise[i, j] = rng.normal(mu_biased, sigma_biased)
    # importance weight of each trial: product of likelihood ratios over
    # the biased positions
    weights = np.ones(n_trials)
    for j in range(n_trials):
        weight_temp = 1.0
        for i in bias_bits:
            lr = stats.norm.pdf(noise[i, j], mu, sigma) / stats.norm.pdf(noise[i, j], mu_biased, sigma_biased)
            weight_temp = weight_temp * lr
        weights[j] = weight_temp
    y = x + noise
    return y, weights
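# Usage sketch (assuming BPSK-like inputs of shape (n_bits, n_trials), with
# n_bits large enough to cover every index in bias_bits):
#
#   x = np.ones((32, 100))                  # 32 bits x 100 trials
#   y, w = AWGN_IS(x, snr=3.0, seed=42)     # y: (32, 100), w: (100,)
#   # statistics estimated under the biased noise are corrected by w, e.g. a
#   # weighted estimate of P(any biased bit falls below 0.5), where 0.5 is a
#   # hypothetical hard-decision threshold used purely for illustration:
#   event = (y[[0, 5, 10, 15, 20, 25]] < 0.5).any(axis=0)
#   p_hat = np.mean(w * event)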
|
{"hexsha": "513cf8aa3149a3cff19230c5a9a932a61fb36f02", "size": 949, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyldpc/channel.py", "max_stars_repo_name": "LingruiZhu/pyldpc-master", "max_stars_repo_head_hexsha": "b85dc1121a821e48c5e18168dd68ca5a21cf5a22", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-15T14:51:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-15T14:51:24.000Z", "max_issues_repo_path": "pyldpc/channel.py", "max_issues_repo_name": "LingruiZhu/pyldpc-master", "max_issues_repo_head_hexsha": "b85dc1121a821e48c5e18168dd68ca5a21cf5a22", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyldpc/channel.py", "max_forks_repo_name": "LingruiZhu/pyldpc-master", "max_forks_repo_head_hexsha": "b85dc1121a821e48c5e18168dd68ca5a21cf5a22", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-10T08:38:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-10T08:38:03.000Z", "avg_line_length": 28.7575757576, "max_line_length": 110, "alphanum_fraction": 0.6090621707, "include": true, "reason": "import numpy,from scipy", "num_tokens": 283}
|
"""
Class for serving and recording post-processed live data
The functions that perform the tasks must be defined outside the class,
most likely because multiprocessing.Process targets must be picklable,
module-level callables.
The general plan here is this::
-------------------------------- reader hands packets to 16 unpackers
/ | | |
/ | | | 16 unpacker_queues
/ | | |
/ | | |
/ | | |
/ | | |
unpacker unpacker unpacker unpacker ... (12 more)
\ | | /
\ | | /
\ | | /
\ | | /
\ | | / 4 aggregator_queues
\ | | /
\ | | /
\ \ / /
\ \ / /
\ \ / /
\ \ / /
aggregator .... (3 more)
/ \
ordered_queue / \ monitor_queue
/ \
data_server averager
| |
HDF5 file pickle file
Then the main thread collects the results from the four aggregators and writes
them to disk.
"""
import calendar
import cPickle
import glob
import h5py
import logging
import numpy
import os
import signal
import socket
import time
from multiprocessing import Process, Queue
from struct import unpack_from
from support import sync_second
logger = logging.getLogger(__name__)
pkt_size = 1026*64/8
pkt_fmt = "!8cHHI"+1024*4*"H"
num_workers = 16
num_aggregators = 4
max_count = 5000
if socket.gethostname() == 'gpu1':
IP = '10.0.0.12'
else:
IP = '10.0.0.2'
def unscramble_packet(input_queue, output_queue):
"""
gets unscrambled packet
Packet Structure::
__________________________________________________________________
| 64-bit Frames |
|________________________________________________________________|
| Frame | uint(3) | uint(2) | uint(1)| uint(0) |
|_______|___________________________|_________|__________________|
| 0 | user defined header |
| 1 | (pol << 63) + pkt_cnt_sec | sec_cnt | raw_pkt_cnt |
|----------------------------------------------------------------|
| 2 | F512 | F0 | P512 | P0 |
| 3 | F513 | F1 | P513 | P1 |
| ... | ... | ... | ... | ... |
| 512 | F1022 | F510 | P1022 | P510 |
| 513 | F1023 | F511 | P1023 | P511 |
|----------------------------------------------------------------|
| 514 | F512 | F0 | P512 | P0 |
| 515 | F513 | F1 | P513 | P1 |
| ... | ... | ... | ... | ... |
| 1024 | F1022 | F510 | P1022 | P510 |
| 1025 | F1023 | F511 | P1023 | P511 |
|----------------------------------------------------------------|
where P means 'power' and F means 'fourth moment'. Note that the columns
are in reversed order: 3,2,1,0.
The unpacking is into::
- 8 chars
- 2 unsigned shorts and 1 unsigned int
- 1024*4 unsigned shorts
"""
def unscramble(data):
"""
unscrambles a packet
"""
D = numpy.array(data, dtype=numpy.uint16).reshape((1024,4))
power = {}
power['I'] = numpy.append(D[:512,0],D[:512,1])
power['Q'] = numpy.append(D[512:,0],D[512:,1])
kurt = {}
kurt['I'] = numpy.append(D[:512,2],D[:512,3]).astype(numpy.float32)/4096.
kurt['Q'] = numpy.append(D[512:,2],D[512:,3]).astype(numpy.float32)/4096.
return power, kurt
while True:
one_second = {}
one_second['hdr'] = []
one_second['pkt cnt sec'] = []
one_second['sec cnt'] = []
one_second['raw pkt cnt'] = []
one_second['pwr-I'] = []
one_second['krt-I'] = []
one_second['pwr-Q'] = []
one_second['krt-Q'] = []
count = max_count
while count:
#try:
pkt_buf = input_queue.get()
if count == max_count:
one_second['time'] = time.time()
result = unpack_from(pkt_fmt, pkt_buf)
one_second['hdr'].append(result[:8])
one_second['pkt cnt sec'].append(result[8])
one_second['sec cnt'].append(result[9])
one_second['raw pkt cnt'].append(result[10])
data = result[11:]
power, kurtosis = unscramble(data)
one_second['pwr-I'].append(power['I'])
one_second['krt-I'].append(kurtosis['I'])
one_second['pwr-Q'].append(power['Q'])
one_second['krt-Q'].append(kurtosis['Q'])
count -= 1
#except (KeyboardInterrupt):
# # wait for reader to finish
# pass
output_queue.put(one_second)
#logger.debug("unscramble_packet: unscrambled %d packets from %s at %f",
# max_count, input_queue, one_second['time'])
#logger.debug("unscramble_packet: unscrambling ended at %f", time.time())
def get_packet(socket, unpacker_queues):
"""
gets packets and assigns them to unscramblers
"""
while True:
#try:
for unpacker in range(num_workers):
#logger.debug("get_packet: getting data for worker %d", unpacker)
#logger.debug("get_packet: putting data on %s at %f",
# unpacker_queues[unpacker], time.time())
for count in range(max_count):
data, addr = socket.recvfrom(pkt_size)
unpacker_queues[unpacker].put(data)
#logger.debug("get_packet: finished %d packets at %f",
# max_count, time.time())
#except KeyboardInterrupt:
# # nothing to do; signal_handler takes care of it
# pass
def aggregate_data(inqueue, outqueue):
"""
move data from the input queue to the output queue
"""
working = True
while working:
#try:
data = inqueue.get()
#logger.debug("aggregate_data: got data from %s at %s", inqueue, data['time'])
#logger.debug("aggregate_data: sent data to %s at %s", outqueue, data['time'])
outqueue.put(data)
#except KeyboardInterrupt:
# # nothing to do; signal_handler takes care of it
# working = False
def average_one_second(inqueue, outfile):
"""
collects ordered raw data and writes 1-sec averages to file
"""
def merge_1_sec(one_second):
"""
Average power and kurtosis data for one second
@param one_second - 1 sec worth of data
@type one_second - dict
"""
for key in ['pwr-I', 'krt-I', 'pwr-Q', 'krt-Q']:
array1d = numpy.array(one_second[key]).mean(axis=0)
array2d = array1d.reshape(array1d.shape[0],1)
if key == 'pwr-I':
merged = array2d
else:
merged = numpy.append(merged, array2d, axis=1)
return merged
working = True
while working:
#try:
one_second = inqueue.get()
average = {}
logger.debug("average_one_second: got data from %s at %s",
inqueue, one_second['time'])
array_1_sec = merge_1_sec(one_second)
cPickle.dump(array_1_sec, outfile)
outfile.flush()
logger.debug("average_one_second: sent data to %s at %s",
outfile.name, one_second['time'])
#except KeyboardInterrupt:
# # nothing to do; signal_handler takes care of it
# working = False
class KurtosisDataServer(object):
"""
capture kurtosis packets from the 10Gbe port, unpack them and write to file
"""
def __init__(self):
"""
initialize the server
"""
self.logger = logging.getLogger(logger.name+".KurtosisDataServer")
self.socket = self._open_socket() # 10Gbe port
self.ordered_queue = Queue() # final queue for ordered, unscrambled packets
self.monitor_queue = Queue() # queue for averaging data in 1 sec blocks
UTtime = self._open_datafile()
self._open_monitor_file(UTtime)
self._create_workers_and_queues()
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
self._start_workers()
self.run()
self._join_workers()
def _open_socket(self, host=IP, port=60000):
"""
opens socket to ROACH 10Gbe
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host,port))
self.logger.info("_open_socket: socket bound")
return s
def _create_workers_and_queues(self):
"""
creates the processes to be run concurrently
"""
self.unpacker_queue = {} # one unpacker_queue for each worker
self.unpacker = {}
self.aggregator_queue = {}
self.aggregator = {}
for count in range(num_workers):
self.unpacker_queue[count] = Queue()
if count % num_aggregators == 0:
# define the aggregator and aggregator queue for this unpacker
aggregatorID = count/num_aggregators
self.aggregator_queue[aggregatorID] = Queue()
self.aggregator[aggregatorID] = Process(target=aggregate_data,
name="aggregator-"+str(aggregatorID),
args=(self.aggregator_queue[aggregatorID],
self.ordered_queue))
self.logger.debug(
"_create_workers_and_queues: aggregator %d takes data from %s",
aggregatorID, self.aggregator_queue[aggregatorID])
self.logger.debug(
"_create_workers_and_queues: aggregator %d puts data on %s",
aggregatorID, self.ordered_queue)
self.unpacker[count] = Process(target=unscramble_packet,
name="unpacker-"+str(count),
args=(self.unpacker_queue[count],
self.aggregator_queue[aggregatorID]))
self.logger.debug(
"_create_workers_and_queues: unpacker %d takes data from %s",
count, self.unpacker_queue[count])
self.logger.debug(
"_create_workers_and_queues: unpacker %d puts data on %s",
count, self.aggregator_queue[aggregatorID])
self.averager = Process(target=average_one_second,
name="averager",
args=(self.monitor_queue, self.mfile))
self.reader = Process(target=get_packet,
name="reader",
args=(self.socket, self.unpacker_queue))
    def _open_datafile(self):
        """
        opens an HDF5 file for unpacked data; returns the UNIX time at opening
        """
        UTtime = time.time()
        UTtuple = time.gmtime(UTtime)
        fname = time.strftime("kurt-%Y-%j-%H%M%S.hdf5", UTtuple)
        path = "/data/HDF5/dss14/" + str(UTtuple.tm_year) + "/" \
               + str(UTtuple.tm_yday) + "/"
        self.logger.debug("_open_datafile: %s", path+fname)
        try:
            self.file = h5py.File(path+fname)
        except IOError:
            os.makedirs(path)
            self.file = h5py.File(path+fname)
        return UTtime
def _open_monitor_file(self, UTtime):
"""
opens file for 1-sec monitor data
"""
UTtuple = time.gmtime(UTtime)
fname = time.strftime("mon-%Y-%j-%H%M%S.pkl", UTtuple)
path = "/data/HDF5/dss14/" + str(UTtuple.tm_year) + "/" \
+ str(UTtuple.tm_yday) + "/"
self.logger.debug("_open_datafile: %s", path+fname)
        try:
            self.mfile = open(path+fname, "wb+")
        except IOError:
            os.makedirs(path)
            self.mfile = open(path+fname, "wb+")
return UTtime
def _start_workers(self):
"""
Start the processes in the right order
First starts the aggregators which combine packets for the final queue.
Next start the unpackers. Finally start the reader.
"""
for count in range(num_aggregators):
self.aggregator[count].start()
self.logger.debug("_start_workers: started aggregator %d", count)
for count in range(num_workers):
self.unpacker[count].start()
self.logger.debug("_start_workers: started unpacker %d", count)
self.averager.start()
sync_second()
self.reader.start() # get all the others going before starting the reader
def _join_workers(self):
"""
block the main task until all the child tasks have finished
"""
        for count in range(num_aggregators):
            self.aggregator[count].join()
            self.logger.debug("_join_workers: joined aggregator %d", count)
        for count in range(num_workers):
            self.unpacker[count].join()
            self.logger.debug("_join_workers: joined unpacker %d", count)
self.averager.join()
self.reader.join()
def signal_handler(self, signl, frame):
"""
This does not end the thread
"""
if signl == signal.SIGINT:
self.logger.debug("signal_handler: Ctrl-C received")
elif signl == signal.SIGHUP:
self.logger.debug("signal_handler: Hangup signal received")
else:
return
self.not_done = False
# stop the reader first
try:
self.reader.terminate()
#self.reader.join()
self.logger.warning("signal_handler: reader terminated")
except AttributeError:
self.logger.debug("signal_handler: no reader to terminate")
pass
time.sleep(16) # give all the unpackers time to finish.
# stop the unpackers
for count in range(num_workers):
try:
self.unpacker[count].terminate()
#self.unpacker[count].join()
self.logger.warning("signal_handler: unpacker %d terminated", count)
except AttributeError:
self.logger.error("signal_handler: no %s to terminate",
self.unpacker[count])
pass
time.sleep(4) # give the aggregators time to empty their queues.
# stop the aggregators
for count in range(num_aggregators):
try:
self.aggregator[count].terminate()
#self.aggregator[count].join()
self.logger.warning("signal_handler: aggregator %d terminated",
count)
except AttributeError, details:
self.logger.error(
"signal_handler: AttributeError: failed to terminate %s\n%s",
self.aggregator[count], details)
pass
time.sleep(4) # give time to finish writing to files
try:
self.socket.close()
except Exception, details:
self.logger.debug("signal_handler: cannot close socket: %s", details)
try:
self.file.close()
except ValueError:
# probably already closed
self.logger.warning("signal_handler: file close error")
# stop the averager
try:
self.averager.terminate()
self.averager.join()
self.logger.warning("signal_handler: averager %d terminated", count)
except ValueError, details:
self.logger.warning(
"signal_handler: VaueError: failed to terminate %s\n%s",
self.averager, details)
pass
except AttributeError, details:
self.logger.warning(
"signal_handler: AttributeError: failed to terminate %s\n%s",
self.averager, details)
pass
def run(self):
"""
"""
self.not_done = True
grpnum = 0
while self.not_done:
try:
one_second = self.ordered_queue.get()
timestruc = time.gmtime(one_second['time'])
if timestruc.tm_min == 0 and timestruc.tm_sec == 0:
self.logger.debug("run: start of new hour at %s",
time.asctime(timestruc))
# close the main data file
self.file.close()
self.logger.debug("run: %s closed", self.file.name)
# stop the averager
self.averager.terminate()
self.logger.debug("run: averager terminated")
self.averager.join(1)
# close the monitor file
self.logger.debug("run: averager finished")
self.mfile.close()
self.logger.debug("run: %s closed", self.mfile.name)
UTtime = self._open_datafile()
self.logger.debug("run: %s opened", self.file.name)
self._open_monitor_file(UTtime)
self.logger.debug("run: %s opened", self.mfile.name)
self.averager = Process(target=average_one_second,
args=(self.monitor_queue, self.mfile))
self.averager.start()
self.logger.debug("run: averager started")
self.monitor_queue.put(one_second)
grpname = "one_second %5d" % grpnum
grp = self.file.create_group(grpname)
for d in one_second.keys():
ds = grp.create_dataset(d, data=numpy.array(one_second[d]))
self.file.flush()
self.logger.debug("run: got %s" % grpnum)
grpnum += 1
except KeyboardInterrupt:
self.not_done = False
#self.reader.join()
#for count in range(num_workers):
# self.unpacker[count].join()
#for count in range(num_aggregators):
# self.aggregator[count].join()
#self.averager.join()
class ScansProcessor(object):
"""
"""
def __init__(self):
"""
"""
pass
    def record_scans(self, test=True, offset=0, switch_override=None):
"""
Record the scans defined in a scans file
The method reads the .scans file parsing each line into a dict with keys::
scan - scan number
start - start UNIX time
stop - end UNIX time
source - source name
exposure - integration time in seconds
It gets the observation parameters and then sets the IF switch accordingly.
It then starts a scan if the current time is equal to or greater than the
start time and less than the stop time, computing the number of 1-sec
records to request from the server.
        @param test : only print diagnostic info for each scan (default: True)
        @type test : bool
@param offset : number of seconds before times in the scans file
@type offset : int
@param switch_override : manually set designated switch state
@type switch_override : dict
"""
if switch_override:
self.switch_override = switch_override
def get_obs_pars():
"""
"""
session_fmt = projects_dir + \
"DSAO/Activities/" + self.activity + '/dss%2d/%4d/%03d/'
session_dir = session_fmt % (self.dss, self.year, self.DOY)
files = glob.glob(session_dir+"*.scans")
# there are two but either will do
scans_file = files[0]
f = open(scans_file, 'r')
lines = f.readlines()
f.close()
rxID = []
bands = []
pols = []
for f in files:
# this parses the scans file name for IF channel info
IF = f.split('.')[0].split('_')[-1]
receiverID = IF[:-3]+str(self.dss)
rxID.append(receiverID)
bands.append(receiverID[:-2])
pol = IF[-3:]
pols.append(pol)
return rxID, bands, pols, lines
def get_scans(lines):
"""
"""
scans = {}
scans['scan'] = []
scans['start'] = []
scans['stop'] = []
scans['source'] = []
scans['exposure'] = []
for line in lines:
parts = line.strip().split()
scans['scan'].append(int(parts[0]))
scans['source'].append(parts[-1])
start = calendar.timegm(time.strptime(str(2018)+"/"+ parts[1],
"%Y/%j/%H:%M:%S"))
scans['start'].append(start)
stop = calendar.timegm(time.strptime(str(2018)+"/"+ parts[2],
"%Y/%j/%H:%M:%S"))
scans['stop'].append(stop)
scans['exposure'].append(stop-start)
return scans
rxID, bands, pols, lines = get_obs_pars()
scans = get_scans(lines)
for idx in range(len(scans['scan'])):
            # do 1 sec scans for integration
if test:
print scans['scan'][idx], scans['start'][idx], scans['stop'][idx], \
scans['source'][idx], scans['exposure'][idx]
else:
if offset:
scans['start'][idx] -= offset
scans['stop'][idx] -= offset
                if calendar.timegm(time.gmtime()) > scans['stop'][idx]:
                    # current time is after scan stop time; skip this scan
                    self.logger.info("record_scans: skipping %s", scans['scan'][idx])
                    continue
while calendar.timegm(time.gmtime()) < scans['start'][idx]:
# wait
time.sleep(0.001)
self.logger.info("record_scans: sleep ended with scan %s",
scans['scan'][idx])
if calendar.timegm(time.gmtime()) >= scans['start'][idx]:
# do scan for exposure
self.spectrometer.hardware.start(scans['exposure'][idx])
self.logger.debug("record_scans: recording scan %s for %d sec",
scans['scan'][idx], scans['exposure'][idx])
if __name__ == "__main__":
logging.basicConfig()
mylogger = logging.getLogger()
mylogger.setLevel(logging.DEBUG)
server = KurtosisDataServer()
|
{"hexsha": "507537a5ef25b8aaa30e86b68045915254ab0155", "size": 21380, "ext": "py", "lang": "Python", "max_stars_repo_path": "BackEnds/data_server-daemon.py", "max_stars_repo_name": "SDRAST/MonitorControl", "max_stars_repo_head_hexsha": "3aaa0b93be3e6d5c2ad8f8e3423cf51fed6dcd8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BackEnds/data_server-daemon.py", "max_issues_repo_name": "SDRAST/MonitorControl", "max_issues_repo_head_hexsha": "3aaa0b93be3e6d5c2ad8f8e3423cf51fed6dcd8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BackEnds/data_server-daemon.py", "max_forks_repo_name": "SDRAST/MonitorControl", "max_forks_repo_head_hexsha": "3aaa0b93be3e6d5c2ad8f8e3423cf51fed6dcd8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1759729272, "max_line_length": 84, "alphanum_fraction": 0.5624883068, "include": true, "reason": "import numpy", "num_tokens": 5159}
|
import gpflow
import tensorflow as tf
tf.config.run_functions_eagerly(True)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
import os
import operator
plt.style.use("ggplot")
warnings.filterwarnings('ignore')
np.random.seed(0)
def pred_x(model, patient_idx, X, Y,
cluster_assignments,
X_test=None, Y_test=None,
model_name='', n_test_points=20, feature='', seed=1):
    '''Predict at test points, compute train/test MSE, and plot for GPR/HGP/MOE models'''
color = 'blue'
xx = np.linspace(0, np.max(X[patient_idx]), 100)[:, None]
## predict mean and variance of latent GP at test points
if model_name == 'GPR':
mean_x, var_x = model.predict_y(xx, full_cov=False, full_output_cov=False)
else:
mean_x, var_x = model.predict_y(xx, patient_idx)
## generate samples from posterior
if X_test is not None:
xnew = np.array(X_test[patient_idx])[:, None]
xnew = xnew.astype(np.float64)
else:
xnew = np.linspace(np.max(X[patient_idx]),
np.max(X[patient_idx]) + n_test_points, n_test_points)[:, None]
if model_name == 'GPR':
pred_mean, pred_var = model.predict_y(xnew, full_cov=False, full_output_cov=False)
else:
pred_mean, pred_var = model.predict_y(xnew, patient_idx)
## Compute MSE
x_train = np.array(X[patient_idx])[:, None]
x_train = x_train.astype(np.float64)
if model_name == 'GPR':
train_pred_mean, train_pred_var = model.predict_y(x_train, full_cov=False, full_output_cov=False)
else:
train_pred_mean, train_pred_var = model.predict_y(x_train, patient_idx)
y_train = Y[patient_idx]
train_error = np.square(np.subtract(y_train, train_pred_mean.numpy().flatten())).mean()
test_error = 0
if X_test is not None:
x_test = np.array(X_test[patient_idx])[:, None]
x_test = x_test.astype(np.float64)
if model_name == 'GPR':
test_pred_mean, test_pred_var = model.predict_y(x_test, full_cov=False, full_output_cov=False)
else:
test_pred_mean, test_pred_var = model.predict_y(x_test, patient_idx)
y_test = Y_test[patient_idx]
test_error = np.square(np.subtract(y_test, test_pred_mean.numpy().flatten())).mean()
## plot
fig = plt.figure(figsize=(12, 6))
plt.errorbar(X[patient_idx], Y[patient_idx],
yerr=0.1, color='black',
capsize=3, elinewidth=1, fmt='o',
label='noisy observations')
plt.plot(xx, mean_x, color=color, lw=2, label='mean function')
plt.fill_between(
xx[:, 0],
mean_x.numpy().flatten() - 1.96 * np.sqrt(np.abs(var_x.numpy().flatten())),
mean_x.numpy().flatten() + 1.96 * np.sqrt(np.abs(var_x.numpy().flatten())),
color=color,
alpha=0.2,
label='fitted variance'
)
plt.plot(xnew, pred_mean, "d", color='blue', label='predicted values')
if Y_test is not None:
y_test = Y_test[patient_idx]
plt.plot(X_test[patient_idx], y_test, 'o',
color='red', label='test points')
if model_name == 'MOE':
str_title = model_name + '\n' + \
'Patient %d ' %patient_idx + feature + \
" Group " + ', '.join(str(e) for e in cluster_assignments[patient_idx])
else:
str_title = model_name + '\n' + \
'Patient %d ' %patient_idx + feature + \
" Group %d" %cluster_assignments[patient_idx]
plt.title(str_title + "\n Train MSE %.5f | Test MSE %.5f" %(train_error, test_error))
    plt.xlabel('time since admission (scaled)')
    plt.ylabel(feature + ' measurement')
handles, labels = plt.gca().get_legend_handles_labels()
hl = sorted(zip(handles, labels),
key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
plt.legend(handles2, labels2)
plt.show()
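# Usage sketch (assuming gpflow 2.x and dict-like per-patient containers, as
# the indexing above implies; toy single-patient data):
#
#   X = {0: np.linspace(0, 10, 25)}
#   Y = {0: np.sin(X[0]) + 0.1 * np.random.randn(25)}
#   model = gpflow.models.GPR(data=(X[0][:, None], Y[0][:, None]),
#                             kernel=gpflow.kernels.SquaredExponential())
#   gpflow.optimizers.Scipy().minimize(model.training_loss,
#                                      model.trainable_variables)
#   pred_x(model, patient_idx=0, X=X, Y=Y, cluster_assignments={0: 0},
#          model_name='GPR', feature='toy')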
|
{"hexsha": "7fc2a6315c9140012ed3de402683890c0075e393", "size": 4002, "ext": "py", "lang": "Python", "max_stars_repo_path": "hgpmoe/plot.py", "max_stars_repo_name": "bee-hive/HGP-MOE", "max_stars_repo_head_hexsha": "b1f753b3e82f8cedcbb3d29381ae0eb6a4755bc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T11:17:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T11:17:02.000Z", "max_issues_repo_path": "hgpmoe/plot.py", "max_issues_repo_name": "bee-hive/HGP-MOE", "max_issues_repo_head_hexsha": "b1f753b3e82f8cedcbb3d29381ae0eb6a4755bc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hgpmoe/plot.py", "max_forks_repo_name": "bee-hive/HGP-MOE", "max_forks_repo_head_hexsha": "b1f753b3e82f8cedcbb3d29381ae0eb6a4755bc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3818181818, "max_line_length": 106, "alphanum_fraction": 0.6206896552, "include": true, "reason": "import numpy", "num_tokens": 1013}
|
from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
def test_polyfit_build(self,level=rlevel):
"""Ticket #628"""
ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
9.95368241e+00, -3.14526520e+02]
x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
170, 171, 172, 173, 174, 175, 176]
y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
tested = np.polyfit(x, y, 4)
assert_array_almost_equal(ref, tested)
if __name__ == "__main__":
run_module_suite()
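# For reference, the behaviour the ticket exercises in miniature: np.polyfit
# returns coefficients highest degree first, and np.polyval evaluates them:
#
#   c = np.polyfit([0., 1., 2.], [1., 6., 17.], 2)   # fits y = 3x^2 + 2x + 1
#   np.allclose(c, [3., 2., 1.])                     # True
#   np.polyval(c, 5.0)                               # ~ 86.0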
|
{"hexsha": "189b2e48131a8b33c2d231c9030808384c3eff24", "size": 1408, "ext": "py", "lang": "Python", "max_stars_repo_path": "GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/lib/tests/test_regression.py", "max_stars_repo_name": "miguelsousa/robothon", "max_stars_repo_head_hexsha": "f2ac88884e04a6e77f79c91e1709ab8c84f46043", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-02-23T15:14:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T16:14:19.000Z", "max_issues_repo_path": "GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/lib/tests/test_regression.py", "max_issues_repo_name": "miguelsousa/robothon", "max_issues_repo_head_hexsha": "f2ac88884e04a6e77f79c91e1709ab8c84f46043", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/lib/tests/test_regression.py", "max_forks_repo_name": "miguelsousa/robothon", "max_forks_repo_head_hexsha": "f2ac88884e04a6e77f79c91e1709ab8c84f46043", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-06-21T19:53:21.000Z", "max_forks_repo_forks_event_max_datetime": "2017-06-21T19:53:21.000Z", "avg_line_length": 45.4193548387, "max_line_length": 75, "alphanum_fraction": 0.4779829545, "include": true, "reason": "import numpy,from numpy", "num_tokens": 794}
|
#include <boost/spirit/home/support/utree/utree_traits_fwd.hpp>
|
{"hexsha": "826eb38d64fe9441912ebaf540d8b87697a1fd27", "size": 64, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_spirit_home_support_utree_utree_traits_fwd.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_spirit_home_support_utree_utree_traits_fwd.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_spirit_home_support_utree_utree_traits_fwd.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 32.0, "max_line_length": 63, "alphanum_fraction": 0.828125, "num_tokens": 17}
|
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
import numpy as np
import custom_modules.custom_modules as modules
def compute_group_lasso_mask(inputTensor: torch.Tensor, clusterSize: int, threshold: float) -> torch.Tensor:
mask = torch.zeros_like(inputTensor, dtype=torch.float)
input_dims = inputTensor.size()
numChannels = input_dims[1]
N = input_dims[0]
# Make channel the least significant dimension
# mask_flatten shares the same underlying memory as mask
mask_flatten = mask.view(N, numChannels, -1)
# Generate the boolean tensor
zeros = torch.zeros_like(inputTensor, dtype=torch.float)
booleans = torch.isclose(inputTensor, zeros, atol=threshold)
booleans_flatten = booleans.view(N, numChannels, -1)
for c in range(0, numChannels, clusterSize):
cEnd = min(c + clusterSize, numChannels)
source = torch.cumprod(booleans_flatten[:, c:cEnd, :], dim=1)[:, -1, :].unsqueeze(1)
reference = torch.zeros_like(source)
mask_flatten[:, c:cEnd, :] = torch.isclose(source, reference)
return mask
class channelClusterGroupLassoPruningMethod(prune.BasePruningMethod):
"""
Prune according to the specified cluster size along the channel dimension
Caution: this is an unstructured pruning method, but do not use prune.global_unstructured
on it.
    Reason: prune.global_unstructured lumps all the tensors into one flattened tensor
"""
PRUNING_TYPE = "unstructured"
def __init__(self, clusterSize, threshold):
"""
clusterSize: integer. The number of consecutive elements considered for pruning at once
threshold: float. How close to zero should be counted as zero.
"""
super(channelClusterGroupLassoPruningMethod, self).__init__()
self.threshold = threshold
self.clusterSize = clusterSize
def compute_mask(self, t, default_mask):
"""
t: input tensor
default_mask: not used
"""
mask = compute_group_lasso_mask(inputTensor=t, clusterSize=self.clusterSize, threshold=self.threshold)
return mask
@classmethod
def apply(cls, module, name, clusterSize, threshold):
return super(channelClusterGroupLassoPruningMethod, cls).apply(module, name
, clusterSize=clusterSize, threshold=threshold)
###Helper functions for applying pruning####
def applyClusterPruning(module, name, clusterSize, threshold):
"""Prunes tensor corresponding to parameter called `name` in `module`
by removing every other entry in the tensors.
Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called `name+'_mask'` corresponding to the
binary mask applied to the parameter `name` by the pruning method.
The parameter `name` is replaced by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
`name+'_orig'`.
Args:
module (nn.Module): module containing the tensor to prune
name (string): parameter name within `module` on which pruning
will act.
clusterSize (int):
threshold (float):
Returns:
module (nn.Module): modified (i.e. pruned) version of the input
module
Examples:
>>> m = nn.Linear(3, 4)
>>> applyClusterPruning(m, name='bias', clusterSize=3, threshold=1e-3)
"""
channelClusterGroupLassoPruningMethod.apply(module, name, clusterSize, threshold)
return module
###Prune a network#####
def pruneNetwork(net, clusterSize, threshold, prefix=''):
"""
Applies cluster pruning to the 2D-convolution, linear, and 2D-transposed convolution layers
in a network
:param net: torch.nn. The network model to be pruned.
:param clusterSize: int. The cluster granularity size
:param threshold: float. Values with magnitudes lower than this will be considered as 0
:param prefix: str. Prefix attached to the module name when printing out which modules are pruned.
:return: None
"""
for name, module in net.named_children():
if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
print("Pruning " + prefix + "." + name + " weight")
applyClusterPruning(module, "weight", clusterSize, threshold)
else:
pruneNetwork(module, clusterSize, threshold, prefix + "." + name)
###Remove pruning masks from a network###
def unPruneNetwork(net):
"""
Remove the pruning hooks and masks of weights from a pruned network
:param net: The network to be pruned
:return: None
"""
for name, module in net.named_modules():
for _, hook in module._forward_pre_hooks.items():
if isinstance(hook, prune.BasePruningMethod):
prune.remove(module, "weight")
continue
###Regularization contribution calculation#####
# TODO: Make this run faster
def calculateChannelGroupLasso(input: torch.Tensor, clusterSize=2) -> torch.Tensor:
"""
Compute the group lasso according to the block size along channels
input: torch.Tensor. The input tensor
clusterSize: scalar. Lasso group size
return: scalar. The group lasso size.
"""
accumulate = torch.tensor(0, dtype=torch.float32)
    if input.dim() <= 1:
        raise ValueError("Input tensor dimensions must be at least 2")
numChannels = input.shape[1]
numChunks = (numChannels - 1) // clusterSize + 1
eps = 1e-16
squared = torch.pow(input, 2.0)
# TODO: The more chunks there are, the slower this gets.... Fix this!
# Each chunk is a view of the original tensor, so there is no copy overhead
if clusterSize > 1:
for chunk in list(torch.chunk(input=squared, chunks=numChunks, dim=1)):
square_summed = torch.sum(chunk, 1, keepdim=False)
sqrt = square_summed.add_(torch.tensor(eps)).pow_(0.5)
accumulate.add_(torch.sum(sqrt))
elif clusterSize == 1:
sqrt = squared.add_(torch.tensor(eps)).pow_(0.5)
accumulate.add_(torch.sum(sqrt))
return accumulate
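# Usage sketch: the returned penalty is a scalar tensor; scaled by a
# regularisation strength it would be added to the task loss during training
# (the 1e-4 strength and task_loss below are hypothetical placeholders):
#
#   conv = nn.Conv2d(8, 16, 3)
#   penalty = calculateChannelGroupLasso(conv.weight, clusterSize=2)
#   loss = task_loss + 1e-4 * penalty   # task_loss: the usual criterion output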
def compute_balanced_pruning_mask(
weightTensor: torch.Tensor,
clusterSize: int,
pruneRangeInCluster: int,
sparsity: float) -> torch.Tensor:
"""
Calculates the mask for balanced pruning.
Consecutive weights within the same pruning range can be pruned as clusters
If the input channel size is not a multiple of prune_range * cluster_size
then zeros are padded after each input channel during the calculation of masks
:param weightTensor: The weight tensor to be pruned
:param clusterSize: Number of consecutive weights within the same pruning range regarded as one unit
:param pruneRangeInCluster: Pruning range counted in clusters
:param sparsity: The target sparsity. Must be greater than 0.0 and smaller than 1.0
    :return: a float mask with the same shape as weightTensor (1 = keep, 0 = prune)
"""
if sparsity >= 1.0:
# Keep the sparsity strictly less than 1.0,
# Otherwise, all the weights might become zero
sparsity = 0.9999
if sparsity >= 1.0 / pruneRangeInCluster:
inputDims = weightTensor.size()
numInputChannels = inputDims[1]
# Number of filters. Each filter is of dimensions CHW
N = inputDims[0]
# Prune range in terms of individual weights
pruneRangeInWeights = pruneRangeInCluster * clusterSize
# Lower the weight matrix into a 3D tensor of dimensions N x C x (HW)
weightTensorFlatten = weightTensor.view(N, numInputChannels, -1)
        # Need to permute the flattened weight tensor to get (NHW) x C layout
        # GOTCHA: Need to call contiguous, otherwise view will not work
        # See https://discuss.pytorch.org/t/call-contiguous-after-every-permute-call/13190
weightTensorNHWxC = (weightTensorFlatten.permute(0, 2, 1).contiguous()).view(-1, numInputChannels)
numWeightRows = weightTensorNHWxC.size()[0]
# Allocate the mask tensor in NHWC layout
maskNHWxC = torch.zeros_like(weightTensorNHWxC)
# consider each matrix row on a chunk-by-chunk basis
for row in range(0, numWeightRows):
# Extract each row from the flattened weight matrix
# and pad it with zeros to a multiple of clusterSize * pruneRange
paddedLen = (1 + (numInputChannels - 1) // pruneRangeInWeights) * pruneRangeInWeights
weightRowWithPadding = torch.zeros(paddedLen, dtype=torch.float)
weightRowWithPadding[0:numInputChannels] = torch.squeeze(weightTensorNHWxC[row, :])
# Split each row into chunks. Each chunk is a prune range
weightRowWithPaddingPartitioned = weightRowWithPadding.view(-1, pruneRangeInCluster, clusterSize)
# Calculate the norm of each chunk.
# Dim 0: Across ranges
# Dim 1: Across clusters within the same range
# Dim 2: Across values within the same cluster
            # TODO: Currently using the L1 norm. Change the norm order if necessary
norms = torch.norm(weightRowWithPaddingPartitioned, p=float(1.0), dim=2, keepdim=True).detach().numpy()
threshold = np.quantile(norms, q=sparsity, axis=1, interpolation='lower', keepdims=True)
# print ("Row: {}, threshold: {}".format(row, threshold))
# Generate the padded mask and flatten it
rowMaskPadded = torch.from_numpy(np.greater(norms, threshold).astype(float)).flatten().repeat_interleave(clusterSize)
maskNHWxC[row, :] = rowMaskPadded[0:numInputChannels]
mask = (maskNHWxC.view(N, -1, numInputChannels).permute(0, 2,1).contiguous()).view_as(weightTensor)
else:
        # Special case: the target sparsity is below one cluster per prune
        # range, so nothing is pruned
mask = torch.ones_like(weightTensor)
# print('Mask: {}'.format(mask))
return mask
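# A small worked example: one filter, 8 input channels, 1x1 kernels, clusters
# of 2 weights, one pruning range of 4 clusters, 50% target sparsity. The two
# clusters with the smallest L1 norms are zeroed (this assumes a NumPy version
# that still accepts the interpolation= keyword used in np.quantile above):
#
#   >>> w = torch.arange(8, dtype=torch.float).reshape(1, 8, 1, 1)
#   >>> compute_balanced_pruning_mask(w, clusterSize=2,
#   ...                               pruneRangeInCluster=4,
#   ...                               sparsity=0.5).flatten()
#   tensor([0., 0., 0., 0., 1., 1., 1., 1.])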
class balancedPruningMethod(prune.BasePruningMethod):
"""
Perform balanced pruning according to the specified cluster size along the channel dimension
Caution: this is an unstructured pruning method, but do not use prune.global_unstructured
on it.
Reason: prune.global_unstructured lumps all the tensors in flattened tensor
"""
PRUNING_TYPE = "unstructured"
def __init__(self, clusterSize: int, pruneRangeInCluster: int, sparsity: float):
"""
clusterSize: integer. The number of consecutive elements considered for pruning at once
sparsity: float. Target sparsity
"""
super(balancedPruningMethod, self).__init__()
self.pruneRangeInCluster = pruneRangeInCluster
self.clusterSize = clusterSize
self.sparsity = sparsity
def compute_mask(self, t, default_mask):
"""
t: input tensor
default_mask: not used
"""
mask = compute_balanced_pruning_mask(
weightTensor=t,
clusterSize=self.clusterSize,
pruneRangeInCluster=self.pruneRangeInCluster,
sparsity=self.sparsity)
return mask
@classmethod
def apply(cls, module, name, clusterSize, pruneRangeInCluster, sparsity):
return super(balancedPruningMethod, cls).apply(module, name,
clusterSize=clusterSize,
pruneRangeInCluster=pruneRangeInCluster,
sparsity=sparsity)
###Helper functions for applying balanced pruning####
def applyBalancedPruning(module, name, clusterSize: int, pruneRangeInCluster: int, sparsity: float):
"""Prunes tensor corresponding to parameter called `name` in `module`
by removing every other entry in the tensors.
Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called `name+'_mask'` corresponding to the
binary mask applied to the parameter `name` by the pruning method.
The parameter `name` is replaced by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
`name+'_orig'`.
Args:
module (nn.Module): module containing the tensor to prune
name (string): parameter name within `module` on which pruning
will act.
clusterSize (int): Number of consecutive weights to be seen as one unit
pruneRangeInCluster (int): Size of balanced pruning window in terms of cluster
sparsity (float): Target sparsity level
Side-effects:
- Adds pruning attributes to the module
- Adds a registered buffer called weight_target_sparsity to the module
Returns:
module (nn.Module): modified (i.e. pruned) version of the input
module
Examples:
>>> m = nn.Linear(3, 4)
>>> applyBalancedPruning(m, name='weight', clusterSize=3, sparsity=0.25, pruneRangeInCluster=4)
"""
balancedPruningMethod.apply(module, name, clusterSize, pruneRangeInCluster, sparsity)
module.register_buffer('weight_target_sparsity', torch.tensor(float(sparsity)))
return module
def savePruneMask(net) -> None:
"""
Saves all the masks as module attributes.
    Useful for temporarily saving the masks before quantization
Side-effect: add attributes "[tensor_name]_mask" to the module
:param net:
:return:
"""
for name, module in net.named_modules():
for _, hook in module._forward_pre_hooks.items():
if isinstance(hook, prune.BasePruningMethod):
setattr(module, hook._tensor_name+'_prev_prune_mask', module.weight_mask.detach().clone())
continue
def restoreWeightPruneMask(net) -> None:
"""
Reapply weight masks from saved values
    Side-effects:
        - Reinstates the saved weight masks as a pruning mask
        - Deletes the saved weight mask from the module
:param net:
:return:
"""
for name, module in net.named_modules():
if hasattr(module, 'weight_prev_prune_mask'):
prune.CustomFromMask.apply(module, 'weight', module.weight_prev_prune_mask)
del module.weight_prev_prune_mask
continue
|
{"hexsha": "1a2516513a2f299090c0e07fa4a3c53696b23d6c", "size": 14243, "ext": "py", "lang": "Python", "max_stars_repo_path": "develop/pruning/pruning.py", "max_stars_repo_name": "mustard-seed/SparseNN_training", "max_stars_repo_head_hexsha": "267a1fb5bed650e66ad5cf3d98069891bb307aec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "develop/pruning/pruning.py", "max_issues_repo_name": "mustard-seed/SparseNN_training", "max_issues_repo_head_hexsha": "267a1fb5bed650e66ad5cf3d98069891bb307aec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "develop/pruning/pruning.py", "max_forks_repo_name": "mustard-seed/SparseNN_training", "max_forks_repo_head_hexsha": "267a1fb5bed650e66ad5cf3d98069891bb307aec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1606060606, "max_line_length": 129, "alphanum_fraction": 0.6713473285, "include": true, "reason": "import numpy", "num_tokens": 3259}
|
#!/usr/bin/env python
# coding: utf-8
# # MLFlow Pre-packaged Model Server AB Test Deployment
# In this example we will build two models with MLFlow and deploy them as an A/B test deployment. This is powerful because it allows you to deploy a new model next to the old one while distributing a percentage of the traffic between them. These deployment strategies are quite simple to set up with Seldon, and can be extended to shadow deployments, multi-armed bandits, etc.
# ## Tutorial Overview
#
# This tutorial breaks down into the following sections:
#
# 1. Train the MLFlow elastic net wine example
#
# 2. Deploy your trained model leveraging our pre-packaged MLFlow model server
#
# 3. Test the deployed MLFlow model by sending requests
#
# 4. Deploy your second model as an A/B test
#
# 5. Visualise and monitor the performance of your models using Seldon Analytics
#
# It will follow closely our talk at the [Spark + AI Summit 2019 on Seldon and MLflow](https://www.youtube.com/watch?v=D6eSfd9w9eA).
# ## Dependencies
#
# For this example to work you must be running Seldon 0.3.2 or above - you can follow our [getting started guide for this](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html).
#
# Regarding other dependencies, make sure you have installed:
#
# * Helm v3.0.0+
# * kubectl v1.14+
# * Python 3.6+
# * MLFlow 1.1.0
# * [pygmentize](https://pygments.org/docs/cmdline/)
# * [tree](http://mama.indstate.edu/users/ice/tree/)
#
# We will also take this chance to load the Python dependencies we will use through the tutorial:
# In[ ]:
import numpy as np
import pandas as pd
from seldon_core.seldon_client import SeldonClient
# Let's get started! 🚀🔥
# ## 1. Train the first MLFlow Elastic Net Wine example
#
# For our example, we will use the elastic net wine example from [MLflow's tutorial](https://github.com/mlflow/mlflow/tree/master/examples/sklearn_elasticnet_wine).
# ### MLproject
#
# As any other MLflow project, it is defined by its `MLproject` file:
# In[ ]:
get_ipython().system('pygmentize -l yaml MLproject')
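# For reference, a minimal `MLproject` for this example looks roughly like the
# sketch below (the file shipped with the MLflow example may differ slightly):
#
# ``` yaml
# name: tutorial
# conda_env: conda.yaml
# entry_points:
#   main:
#     parameters:
#       alpha: float
#       l1_ratio: {type: float, default: 0.1}
#     command: "python train.py {alpha} {l1_ratio}"
# ```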
# We can see that this project uses Conda for the environment and that it's defined in the `conda.yaml` file:
# In[ ]:
get_ipython().system('pygmentize conda.yaml')
# Lastly, we can also see that the training will be performed by the `train.py` file, which receives two parameters `alpha` and `l1_ratio`:
# In[ ]:
get_ipython().system('pygmentize train.py')
# ### Dataset
#
# We will use the wine quality dataset.
# Let's load it to see what's inside:
# In[ ]:
data = pd.read_csv("wine-quality.csv")
data.head()
# ### Training
#
# We've set up our MLflow project and our dataset is ready, so we are now good to start training.
# MLflow allows us to train our model with the following command:
#
# ``` bash
# $ mlflow run . -P alpha=... -P l1_ratio=...
# ```
#
# On each run, `mlflow` will set up the Conda environment defined by the `conda.yaml` file and will run the training commands defined in the `MLproject` file.
# In[ ]:
get_ipython().system('mlflow run . -P alpha=0.5 -P l1_ratio=0.5')
# Each of these commands will create a new run which can be visualised through the MLFlow dashboard as per the screenshot below.
#
# 
#
# Each of these models can actually be found in the `mlruns` folder:
# In[ ]:
get_ipython().system('tree -L 1 mlruns/0')
# ### MLmodel
#
# Inside each of these folders, MLflow stores the parameters we used to train our model, any metric we logged during training, and a snapshot of our model.
# If we look into one of them, we can see the following structure:
# In[ ]:
get_ipython().system('tree mlruns/0/$(ls mlruns/0 | head -1)')
# In particular, we are interested in the `MLmodel` file stored under `artifacts/model`:
# In[ ]:
get_ipython().system('pygmentize -l yaml mlruns/0/$(ls mlruns/0 | head -1)/artifacts/model/MLmodel')
# This file stores the details of how the model was stored.
# With this information (plus the other files in the folder), we are able to load the model back.
# Seldon's MLflow server will use this information to serve this model.
#
# Now we should upload our newly trained model to a public Google Cloud Storage or S3 bucket.
# We have already done this to make it simpler; you will be able to find the model at `gs://seldon-models/mlflow/model-a`.
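# As a sketch, the upload could look like the command below (assuming the
# `gsutil` CLI is installed, `<run-id>` is one of the run folders under
# `mlruns/0`, and `your-bucket` is a bucket you own):
#
# ``` bash
# $ gsutil cp -r mlruns/0/<run-id>/artifacts/model gs://your-bucket/mlflow/model-a
# ```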
# ## 2. Deploy your model using the Pre-packaged Model Server for MLFlow
#
# Now we can deploy our trained MLFlow model.
#
# For this we have to create a Seldon deployment definition for the model server, which we will break down further below.
#
# We will be using the model we uploaded to our Google bucket (gs://seldon-models/mlflow/elasticnet_wine_1.8.0), but you can use your own model if you uploaded it to a public bucket.
# ### Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).
# In[ ]:
get_ipython().system('pygmentize mlflow-model-server-seldon-config.yaml')
# Once we have written our configuration file, we are able to deploy it to our cluster by running the following command:
# In[ ]:
get_ipython().system('kubectl apply -f mlflow-model-server-seldon-config.yaml')
# Once it's created we just wait until it's deployed.
#
# It will basically download the image for the pre-packaged MLFlow model server, and initialise it with the model we specified above.
#
# You can check the status of the deployment with the following command:
# In[ ]:
get_ipython().system('kubectl rollout status deployment.apps/mlflow-deployment-mlflow-deployment-dag-0-wines-classifier')
# Once it's deployed, we should see a "successfully rolled out" message above. We can now test it!
# ## 3. Test the deployed MLFlow model by sending requests
# Now that our model is deployed in Kubernetes, we are able to send requests to it.
# We will first need the URL that is currently available through Ambassador.
#
# If you are running this locally, you should be able to reach it through localhost; in this case we can use port 80.
# In[ ]:
get_ipython().system('kubectl get svc | grep ambassador')
# Now we will select the first datapoint in our dataset to send to the model.
# In[ ]:
x_0 = data.drop(["quality"], axis=1).values[:1]
print(list(x_0[0]))
# We can try sending a request first using curl:
# In[ ]:
get_ipython().system('curl -X POST -H \'Content-Type: application/json\' -d \'{"data": {"names": [], "ndarray": [[7.0, 0.27, 0.36, 20.7, 0.045, 45.0, 170.0, 1.001, 3.0, 0.45, 8.8]]}}\' http://localhost:8003/seldon/seldon/mlflow-deployment/api/v0.1/predictions')
# We can also send the request using our Python client:
# In[ ]:
import numpy as np
from seldon_core.seldon_client import SeldonClient
HOST = "localhost" # Add the URL you found above
port = "80" # Make sure you use the port above
batch = x_0
payload_type = "ndarray"
sc = SeldonClient(
gateway="ambassador", namespace="seldon", gateway_endpoint=HOST + ":" + port
)
client_prediction = sc.predict(
data=batch, deployment_name="mlflow-deployment", names=[], payload_type=payload_type
)
print(client_prediction.response)
# ## 4. Deploy your second model as an A/B test
#
# Now that we have a model in production, it's possible to deploy a second model as an A/B test.
# Our model will also be an Elastic Net model but using a different set of parameters.
# We can easily train it by leveraging MLflow:
# In[ ]:
get_ipython().system('mlflow run . -P alpha=0.75 -P l1_ratio=0.2')
# As we did before, we will now need to upload our model to a cloud bucket.
# To speed things up, we have already done so, and the second model is now accessible at `gs://seldon-models/mlflow/model-b`.
# ### A/B test
#
# We will deploy our second model as an A/B test.
# In particular, we will redirect 20% of the traffic to the new model.
#
# This can be done by simply adding a `traffic` attribute on our `SeldonDeployment` spec:
# In[ ]:
get_ipython().system('pygmentize ab-test-mlflow-model-server-seldon-config.yaml')
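# As a rough sketch (the names, URIs, and traffic values below are
# illustrative, not necessarily the exact contents of the file above), the
# relevant part of the spec looks like:
#
# ``` yaml
# predictors:
#   - name: a
#     traffic: 80
#     graph:
#       name: wines-classifier
#       implementation: MLFLOW_SERVER
#       modelUri: gs://seldon-models/mlflow/model-a
#   - name: b
#     traffic: 20
#     graph:
#       name: wines-classifier
#       implementation: MLFLOW_SERVER
#       modelUri: gs://seldon-models/mlflow/model-b
# ```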
# As with the model above, we only need to run the following to deploy it:
# In[ ]:
get_ipython().system('kubectl apply -f ab-test-mlflow-model-server-seldon-config.yaml')
# We can check that the models have been deployed and are running with the following command.
#
# We should now see both the "a-" and the "b-" model pods.
# In[ ]:
get_ipython().system('kubectl get pods')
# ## 5. Visualise and monitor the performance of your models using Seldon Analytics
#
# This section is optional, but by following the instructions you will be able to visualise the performance of both models as per the chart below.
#
# In order for this example to work you need to install and run the [Grafana Analytics package for Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/analytics/analytics.html#helm-analytics-chart).
#
# For this we can access the URL with the command below; it will request a username and password, which by default are set to the following:
# * Username: admin
# * Password: password
#
# You can access the Grafana dashboard through the port provided below:
# In[ ]:
get_ipython().system("kubectl get svc grafana-prom -o jsonpath='{.spec.ports[0].nodePort}'")
# Now that we have both models running in our Kubernetes cluster, we can analyse their performance using Seldon Core's integration with Prometheus and Grafana.
# To do so, we will iterate over the training set (which can be found in `wine-quality.csv`), making a request and sending the feedback of the prediction.
#
# Since the `/feedback` endpoint requires a `reward` signal (i.e. the higher the better), we will simulate one as:
#
# $$
# R(x_{n}) =
# \begin{cases}
#     \frac{1}{(y_{n} - f(x_{n}))^{2}} & \text{if } y_{n} \neq f(x_{n}) \\
#     500 & \text{if } y_{n} = f(x_{n})
# \end{cases}
# $$
#
# where $R(x_{n})$ is the reward for input point $x_{n}$, $f(x_{n})$ is the prediction of our trained model, and $y_{n}$ is the actual value.
# In[ ]:
sc = SeldonClient(
gateway="ambassador", namespace="seldon", deployment_name="wines-classifier"
)
def _get_reward(y, y_pred):
    # Reward grows as the squared error shrinks; use a large fixed reward
    # for an exact match to avoid dividing by zero
    if np.all(y == y_pred):
        return 500.0
    return float(1 / np.square(y - y_pred))
def _test_row(row):
input_features = row[:-1]
feature_names = input_features.index.to_list()
X = input_features.values.reshape(1, -1)
y = row[-1].reshape(1, -1)
# Note that we are re-using the SeldonClient defined previously
r = sc.predict(deployment_name="mlflow-deployment", data=X, names=feature_names)
y_pred = r.response["data"]["tensor"]["values"]
reward = _get_reward(y, y_pred)
sc.feedback(
deployment_name="mlflow-deployment",
prediction_request=r.request,
prediction_response=r.response,
reward=reward,
)
    return reward
data.apply(_test_row, axis=1)
# You should now be able to see Seldon's pre-built Grafana dashboard.
# 
# At the bottom of the dashboard you can see the following charts:
#
# - On the left: the requests per second, which shows the traffic split we specified.
# - In the center: the reward, where you can see how model `a` outperforms model `b` by a large margin.
# - On the right: the latency for each of them.
#
# You are able to add your own custom metrics, and try out other more complex deployments by following further guides at https://docs.seldon.io/projects/seldon-core/en/latest/examples/mlflow_server_ab_test_ambassador.html
// Warning! This file is autogenerated.
#include <boost/text/collation_table.hpp>
#include <boost/text/collate.hpp>
#include <boost/text/data/all.hpp>
#ifndef LIMIT_TESTING_FOR_CI
#include <boost/text/save_load_table.hpp>
#include <boost/filesystem.hpp>
#endif
#include <gtest/gtest.h>
using namespace boost::text;
auto const error = [](string const & s) { std::cout << s; };
auto const warning = [](string const & s) {};
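// Builds the tailored collation table; outside of CI-limited builds the
// table is saved to disk on first use and reloaded on subsequent runs.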
collation_table make_save_load_table()
{
#ifdef LIMIT_TESTING_FOR_CI
string const table_str(data::my::standard_collation_tailoring());
return tailored_collation_table(
table_str,
"my::standard_collation_tailoring()", error, warning);
#else
if (!exists(boost::filesystem::path("my_standard.table"))) {
string const table_str(data::my::standard_collation_tailoring());
collation_table table = tailored_collation_table(
table_str,
"my::standard_collation_tailoring()", error, warning);
save_table(table, "my_standard.table.1");
boost::filesystem::rename("my_standard.table.1", "my_standard.table");
}
return load_table("my_standard.table");
#endif
}
collation_table const & table()
{
static collation_table retval = make_save_load_table();
return retval;
}
TEST(tailoring, my_standard_000_000)
{
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1012, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1012, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1012, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1012, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1012, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1012, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1012, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1012, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1012, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1012, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1012, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1012, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1012, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1012, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1012, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1012, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1012, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1012, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1012, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1012, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1013, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1013, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1013, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1013, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1013, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1013, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1013, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1013, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1013, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1013, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1013, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1013, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1013, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1013, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1013, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1013, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1013, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1013, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1013, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1013, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1014, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1014, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1014, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1014, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1014, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1014, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1014, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1014, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1014, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1014, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1014, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1014, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
}
TEST(tailoring, my_standard_001_001)
{
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1014, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1014, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1014, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1014, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1014, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1014, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1014, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1014, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1015, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1015, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1015, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1015, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1015, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1015, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1015, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1015, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1015, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1015, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1015, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1015, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1015, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1015, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1015, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1015, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1015, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1015, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1015, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1015, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1016, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1016, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1016, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1016, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1016, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1016, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1016, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1016, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1016, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1016, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1016, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1016, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1016, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1016, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1016, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1016, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1016, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1016, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1016, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1016, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1017, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1017, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1017, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1017, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
}
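// The cases above and below all instantiate one pattern: res must sort
// before rel at tertiary strength and tie with it at secondary strength.
// A helper such as the following sketch could express that pattern once.
// It is a suggestion only, not part of the generated suite, and assumes
// just the collate(), table(), and collation_strength names already used
// in these tests.
namespace {
    template<typename CPRange>
    void expect_tertiary_less_secondary_equal(
        CPRange const & res, CPRange const & rel)
    {
        // The tailored difference is visible at tertiary strength.
        EXPECT_EQ(
            collate(
                res.begin(), res.end(),
                rel.begin(), rel.end(),
                table(), collation_strength::tertiary),
            -1);
        // The same pair ties once tertiary weights are ignored.
        EXPECT_EQ(
            collate(
                res.begin(), res.end(),
                rel.begin(), rel.end(),
                table(), collation_strength::secondary),
            0);
    }
}
// Usage would be e.g. expect_tertiary_less_secondary_equal(res, rel); and
// expect_tertiary_less_secondary_equal(res_view, rel_view); after the
// vectors and views are defined in each block.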
TEST(tailoring, my_standard_001_002)
{
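    // Same shape as the preceding cases: a Myanmar independent vowel
    // (U+1023, U+1025, U+1027, U+1029) is checked against its tailored
    // equivalent built from U+1021 plus a vowel sign, here with trailing
    // consonants U+1017..U+1019 and finals U+1039/U+103A.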
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1017, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1017, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1017, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1017, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1017, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1017, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1017, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1017, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1017, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1017, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1017, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1017, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1017, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1017, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1017, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1017, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1018, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1018, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1018, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1018, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1018, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1018, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1018, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1018, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1018, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1018, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1018, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1018, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1018, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1018, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1018, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1018, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1018, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1018, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1018, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1018, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1019, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x1019, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x1019, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x1019, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1019, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x1019, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x1019, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x1019, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1019, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x1019, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x1019, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x1019, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1019, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1019, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1019, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1019, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
}
TEST(tailoring, my_standard_001_003)
{
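    // Same independent-vowel checks, continued over trailing consonants
    // U+1019..U+101C.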
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1019, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1019, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x1019, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x1019, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101a, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x101a, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101a, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x101a, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101a, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x101a, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101a, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x101a, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101a, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x101a, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101a, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x101a, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101a, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x101a, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101a, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x101a, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101a, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101a, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101a, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101a, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101b, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x101b, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101b, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x101b, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101b, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x101b, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101b, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x101b, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101b, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x101b, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101b, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x101b, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101b, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x101b, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101b, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x101b, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101b, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101b, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101b, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101b, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101c, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x101c, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101c, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x101c, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101c, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x101c, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101c, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x101c, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
}
TEST(tailoring, my_standard_001_004)
{
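    // Continuation of the same checks, here with trailing consonants
    // U+101C and U+101E.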
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101c, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x101c, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101c, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x101c, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101c, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x101c, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101c, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x101c, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101c, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101c, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101c, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101c, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101e, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x101e, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101e, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x101e, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101e, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x101e, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101e, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x101e, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101e, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x101e, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101e, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x101e, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101e, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x101e, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101e, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x101e, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101e, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101e, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101e, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101e, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101f, 0x1039};
auto const rel = std::vector<uint32_t>{0x1023, 0x101f, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101f, 0x103a};
auto const rel = std::vector<uint32_t>{0x1023, 0x101f, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101f, 0x1039};
auto const rel = std::vector<uint32_t>{0x1025, 0x101f, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101f, 0x103a};
auto const rel = std::vector<uint32_t>{0x1025, 0x101f, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101f, 0x1039};
auto const rel = std::vector<uint32_t>{0x1027, 0x101f, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x101f, 0x103a};
auto const rel = std::vector<uint32_t>{0x1027, 0x101f, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101f, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x101f, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101f, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x101f, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101f, 0x1039};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101f, 0x1039};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x1031, 0x102c, 0x101f, 0x103a};
auto const rel = std::vector<uint32_t>{0x1029, 0x1031, 0x102c, 0x101f, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
}
TEST(tailoring, my_standard_001_005)
{
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1031, 0x102c, 0x1000, 0x103a, 0x1000, 0x103b};
auto const rel = std::vector<uint32_t>{0x1031, 0x102c, 0x1000, 0x103a, 0x103b};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1014, 0x103a, 0x1014, 0x102f, 0x1015, 0x103a};
auto const rel = std::vector<uint32_t>{0x1014, 0x103a, 0x102f, 0x1015, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1031, 0x101e, 0x1039, 0x101e};
auto const rel = std::vector<uint32_t>{0x1031, 0x103f};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x102d, 0x101e, 0x1039, 0x101e};
auto const rel = std::vector<uint32_t>{0x102d, 0x103f};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x102f, 0x101e, 0x1039, 0x101e};
auto const rel = std::vector<uint32_t>{0x102f, 0x103f};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d, 0x101e, 0x1039, 0x101e};
auto const rel = std::vector<uint32_t>{0x1023, 0x103f};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102f, 0x101e, 0x1039, 0x101e};
auto const rel = std::vector<uint32_t>{0x1025, 0x103f};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::tertiary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::tertiary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x101e, 0x1039, 0x101e};
auto const rel = std::vector<uint32_t>(1, 0x103f);
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1014, 0x103e, 0x102d, 0x102f, 0x1000, 0x103a};
auto const rel = std::vector<uint32_t>(1, 0x104c);
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::primary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::primary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x101b, 0x103d, 0x1031, 0x1037};
auto const rel = std::vector<uint32_t>(1, 0x104d);
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::primary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::primary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x101c, 0x100a, 0x103a, 0x1038, 0x1000, 0x1031, 0x102c, 0x1004, 0x103a, 0x1038};
auto const rel = std::vector<uint32_t>{0x104e, 0x1004, 0x103a, 0x1038};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::primary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::primary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1021, 0x102d};
auto const rel = std::vector<uint32_t>(1, 0x104f);
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::secondary),
-1);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::secondary),
-1);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::primary),
0);
        // equal to preceding cps at next-lower strength
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::primary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x101c, 0x1000, 0x103a, 0x101a, 0x102c};
auto const rel = std::vector<uint32_t>{0x101c, 0x1000, 0x103a, 0x103b, 0x102c};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x101e, 0x1019, 0x102e};
auto const rel = std::vector<uint32_t>{0x101e, 0x1039, 0x1019, 0x102e};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x1011, 0x1019, 0x1004, 0x103a, 0x1038};
auto const rel = std::vector<uint32_t>{0x1011, 0x1039, 0x1019, 0x1004, 0x103a, 0x1038};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
{
        // greater than (or equal to, for =) preceding cps
auto const res = std::vector<uint32_t>{0x101c, 0x1000, 0x103a, 0x1018, 0x1000, 0x103a};
auto const rel = std::vector<uint32_t>{0x101c, 0x1039, 0x1018, 0x1000, 0x103a};
string const res_str = to_string(res);
string const rel_str = to_string(rel);
auto const res_view = as_utf32(res);
auto const rel_view = as_utf32(rel);
EXPECT_EQ(collate(
res.begin(), res.end(),
rel.begin(), rel.end(),
table(), collation_strength::quaternary),
0);
EXPECT_EQ(collate(
res_view.begin(), res_view.end(),
rel_view.begin(), rel_view.end(),
table(), collation_strength::quaternary),
0);
}
}
|
{"hexsha": "e16b40e6b9fe0ae2292d2021dee8f5cc72b0cc33", "size": 161782, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/tailoring_rule_test_my_standard_001.cpp", "max_stars_repo_name": "eightysquirrels/text", "max_stars_repo_head_hexsha": "d935545648777786dc196a75346cde8906da846a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/tailoring_rule_test_my_standard_001.cpp", "max_issues_repo_name": "eightysquirrels/text", "max_issues_repo_head_hexsha": "d935545648777786dc196a75346cde8906da846a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/tailoring_rule_test_my_standard_001.cpp", "max_forks_repo_name": "eightysquirrels/text", "max_forks_repo_head_hexsha": "d935545648777786dc196a75346cde8906da846a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1685669573, "max_line_length": 123, "alphanum_fraction": 0.6058708633, "num_tokens": 43750}
|
import copy
import vnmrjpy as vj
import numpy as np
class Lmafit():
"""Low-rank matrix fitting algorithm
Fills missing matrix elements by low rank approximation
    ref.: Wen, Yin, Zhang: "Solving a low-rank factorization model for
    matrix completion by a nonlinear successive over-relaxation
    algorithm", Mathematical Programming Computation (2012)
"""
def __init__(self,init_data,\
known_data='NOT GIVEN',\
tol=None,\
k=None,\
rank_strategy=None,\
verbose=False,\
realtimeplot=False):
"""Initialize Lmafit, get defaults from vnmrjpy config
Args:
init_data (np.ndarray) : matrix to complete. unkown elements can
be approximated beforehand
known_data (np.ndarray) : matrix of the same shape as init_data
with only the known elements, rest are zero
tol (float) : tolerance of fitting
rank_strategy (str) : increase or decrease rank
            verbose (bool) : print fitting info at each iteration
            realtimeplot (bool) : plot the matrix as it is being completed
"""
conf = vj.config
if tol == None:
tol = conf['lmafit_tol']
if rank_strategy == None:
rank_strategy = conf['lmafit_rank_strategy']
if k == None:
k = conf['lmafit_start_rank']
        if (type(known_data) == str and known_data == 'NOT GIVEN'):
            # init_data doubles as the known data, with zeros marking the
            # unknown elements; if less than a tenth of the elements are
            # zero, the input is probably not masked at all
            if init_data[init_data == 0].size < init_data.size / 10:
                raise(Exception('Known data not given'))
            known_data = copy.deepcopy(init_data)
self.verbose = verbose
(m,n) = init_data.shape
        # binary mask: 1 at the known elements, 0 at the ones to be filled
        datamask = copy.deepcopy(known_data)
        datamask[datamask != 0+0*1j] = 1
datanrm = np.linalg.norm(init_data,'fro')
        # init the low-rank factors: Z ~ X.dot(Y) with X (m,k) and Y (k,n)
        Z = init_data
        X = np.zeros((m,k),dtype='complex64')
        Y = np.eye(k,n,dtype='complex64')
Res = np.multiply(init_data,datamask) - known_data
res = datanrm
reschg_tol = 0.5*tol
# parameters for alf
alf = 0
increment = 0.5
#rank estimation parameters
itr_rank = 0
minitr_reduce_rank = 5
maxitr_reduce_rank = 50
tau_limit = 10
rank_incr = 3
rank_max = 50
self.realtimeplot = realtimeplot
if realtimeplot == True:
self.rtplot = vj.util.RealTimeImshow(np.absolute(init_data))
self.initpars = (init_data,known_data,\
m,n,k,tol,rank_strategy,datanrm,\
Z,X,Y,Res,res,reschg_tol,alf,increment,itr_rank,\
minitr_reduce_rank,maxitr_reduce_rank,tau_limit,\
datamask,rank_incr,rank_max)
    def solve(self,max_iter=100):
        """Main iteration
        Completed matrix: Z = X*Y
        Returns :
            X (np.ndarray)
            Y (np.ndarray)
            out_list : [objective values per iteration, residual ratios,
                        last iteration index, relative residual, residual
                        change]
        """
        def rank_check(R,reschg,tol):
            # simplified rank strategy: the full LMaFit heuristic also
            # inspects the ratios of diag(R) from the QR factorization;
            # here only the residual change is used
            if reschg < 10*tol:
                ind_string = 'increase'
            else:
                ind_string = 'stay'
            return ind_string
        def increase_rank(X,Y,Z,rank_incr,rank_max):
            # grow the factor rank by rank_incr, capped at rank_max; the
            # old factors are copied into the enlarged ones
            k = X.shape[1]
            k_new = min(k+rank_incr,rank_max)
            m = X.shape[0]
            n = Y.shape[1]
            X_new = np.zeros((m,k_new),dtype='complex64')
            Y_new = np.eye(k_new,n,dtype='complex64')
            X_new[:,:k] = X
            Y_new[:k,:] = Y
            Z_new = X.dot(Y)
            return X_new, Y_new, Z_new
# -------------------INIT------------------------
(data,known_data,m,n,k,tol,rank_strategy,datanrm,\
Z,X,Y,Res,res,reschg_tol,alf,increment,itr_rank,\
minitr_reduce_rank,maxitr_reduce_rank,tau_limit,\
datamask, rank_incr,rank_max) = self.initpars
# --------------MAIN ITERATION--------------------
objv = np.zeros(max_iter)
RR = np.ones(max_iter)
for iter_ in range(max_iter):
itr_rank += 1
X0 = copy.deepcopy(X)
Y0 = copy.deepcopy(Y)
Res0 = copy.deepcopy(Res)
res0 = copy.deepcopy(res)
Z0 = copy.deepcopy(Z)
            # alternating update: X = Z*Y^H, orthonormalize X by QR,
            # then Y = X^H*Z and recompose Z = X*Y
            X = Z.dot(Y.conj().T)
            X, R = np.linalg.qr(X)
            Y = X.conj().T.dot(Z)
            Z = X.dot(Y)
            # residual on the known elements only
            Res = np.multiply(known_data-Z,datamask)
            res = np.linalg.norm(Res,'fro')
relres = res / datanrm
ratio = res / res0
reschg = np.abs(1-ratio)
RR[iter_] = ratio
# adjust alf
if self.verbose == True:
print('ratio : {}; rank : {}; reschg : {}, alf : {}'\
.format(ratio,X.shape[1],reschg, alf))
            # residual grew: roll back to the previous iterate and reset alf
            if ratio >= 1.0:
increment = np.max([0.1*alf,0.1*increment])
X = copy.deepcopy(X0)
Y = copy.deepcopy(Y0)
Res = copy.deepcopy(Res0)
res = copy.deepcopy(res0)
relres = res / datanrm
alf = 0
Z = copy.deepcopy(Z0)
elif ratio > 0.7:
increment = max(increment,0.25*alf)
alf = alf + increment
objv[iter_] = relres
# check stopping
if ((reschg < reschg_tol) and ((itr_rank > minitr_reduce_rank) \
or (relres < tol))):
if self.verbose == True:
print('Stopping crit achieved')
break
# rank adjustment
rankadjust = rank_check(R,reschg,tol)
if rankadjust == 'increase':
X,Y,Z = increase_rank(X,Y,Z,rank_incr,rank_max)
            # over-relaxation: restore the known entries, nudged by alf*Res
            Zknown = known_data + alf*Res
            Z = Z - np.multiply(Z,datamask) + Zknown
if self.realtimeplot == True:
self.rtplot.update_data(np.absolute(Z))
obj = objv[:iter_]
return X, Y, [obj, RR, iter_, relres, reschg]
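# Example usage (an illustrative sketch, not part of the original module;
# the sizes and parameter values below are made up):
#
#   import numpy as np
#   data = np.random.rand(64,64) + 1j*np.random.rand(64,64)
#   data[np.random.rand(64,64) < 0.5] = 0   # zeros mark the unknown entries
#   fit = Lmafit(data, known_data=data, k=5, tol=1e-4)
#   X, Y, info = fit.solve(max_iter=100)
#   completed = X.dot(Y)                    # the low-rank completion Z = X*Y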
|
{"hexsha": "8fb63a9fda5d3cd0d08613403f4f13a427ec181d", "size": 6409, "ext": "py", "lang": "Python", "max_stars_repo_path": "vnmrjpy/aloha/lmafit.py", "max_stars_repo_name": "hlatkydavid/vnmrjpy", "max_stars_repo_head_hexsha": "48707a1000dc87e646e37c8bd686e695bd31a61e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vnmrjpy/aloha/lmafit.py", "max_issues_repo_name": "hlatkydavid/vnmrjpy", "max_issues_repo_head_hexsha": "48707a1000dc87e646e37c8bd686e695bd31a61e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vnmrjpy/aloha/lmafit.py", "max_forks_repo_name": "hlatkydavid/vnmrjpy", "max_forks_repo_head_hexsha": "48707a1000dc87e646e37c8bd686e695bd31a61e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3802083333, "max_line_length": 77, "alphanum_fraction": 0.5007021376, "include": true, "reason": "import numpy", "num_tokens": 1566}
|
# Low Level Functions
import types
import random
import math
import time
euler = 2.718281828
##############################################
# 1. Matrix Initializations ---
##############################################
def size(matrix):
return len(matrix),len(matrix[0])
def zeros(m,n):
# Create zero matrix
return [[0.0 for row in range(n)] for col in range(m)]
def T(matrix):
# Transpose of a Matrix
return [list(tup) for tup in zip(*matrix)]
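# A quick check of the transpose, for illustration (not in the original):
#   T([[1, 2], [3, 4]])  ->  [[1, 3], [2, 4]]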
def TList(ListIn):
# Transpose of a list
return [[ListIn[i]] for i in range(len(ListIn))]
def zerosSWCol(D,N,SWCol):
# Create zero sparse matrix
# The rows correspond to features
matrix = D*[None]
for d in range(D):
if SWCol[d]==1: matrix[d] = N*[0]
return matrix
def complexzeros(m,n):
# Create zero matrix
return [[0.0+0.0j for row in range(n)] for col in range(m)]
def zeros3D(d,m,n):
# Create zero matrix 3D
return ([[[0.0 for row in range(n)] for col in range(m)]
for dim in range(d)])
def ones(m,n):
# Create ones matrix
return [[1.0 for row in range(n)] for col in range(m)]
def eye(n):
# Create eye matrix
new_matrix = [[0 for row in range(n)] for col in range(n)]
for i in range(n):
new_matrix[i][i]=1.0
return new_matrix
def randuni(m,n):
# Create random matrix from uniform distribution at [0,1]
return [[random.random() for row in range(n)] for col in range(m)]
def randuniVec(n,s):
# Create random matrix from uniform distribution at [0,1]
return [s*random.random() for i in range(n)]
def rand(m,n,s):
#s = s/2.0
# Create random matrix
# normal distribution 0 mean 1 var
return [[random.gauss(0,s) for row in range(n)] for col in range(m)]
def rotrand(mat,RotMat):
    # Rotate a Gaussian random matrix mat with the rotation matrix RotMat
N = len(mat)
new_mat = zeros(N,2)
for i in range(N):
new_mat[i] = Mat2Vec(multMat([mat[i]],RotMat))
return new_mat
def show(matrix):
# Print out matrix
for col in matrix: print (col)
def drange(a,b,c):
# Decimal range a,a+c,a+2c,...,b
L = int(round((b-a)/c + 1))
Out = L*[0]
Out[0] = a
for i in range(L):
if i==0:
continue
Out[i] = Out[i-1] + c
return Out
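# Illustration (not in the original): an inclusive decimal range
#   drange(0, 1, 0.25)  ->  [0, 0.25, 0.5, 0.75, 1.0]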
def Factorial(n):
# Factorial of an integer < 30
n = int(n)
Out = 1
for i in range(n): Out = Out*(i+1)
return Out
def realVec(vec):
    # return the real part of a list (vector), without mutating the input
    N = len(vec)
    Out = list(vec)
    for i in range(N):
        if type(Out[i])==complex:
            Out[i] = Out[i].real
    return Out
def cmp(a, b):
return (a > b) - (a < b)
def sign(number):
# The sign of a number
return cmp(number, 0)
def sortOrd(list):
# return the ordered index of a list
#Indx = range(len(list))
#Indx.sort(lambda x,y: cmp(list[x],list[y]))
Indx = sorted(range(len(list)), key=lambda k: list[k])
return Indx
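# Illustration (not in the original): indices that would sort the list
#   sortOrd([3, 1, 2])  ->  [1, 2, 0]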
def signVec(vec):
return list(map(sign,vec))
def tanh(x):
return (euler**(2.0*x)-1.0)/(euler**(2.0*x)+1.0)
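# The expression above is the rational form tanh(x) = (e^(2x)-1)/(e^(2x)+1);
# it agrees with math.tanh up to the precision of the euler constant defined
# at the top of this file (note added for illustration).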
###############################################
# 2. Matrix Algebraic functions ---
#----------------------------------------------
# ADDITIONSs
# ADDITION: Matrix with Matrix
def add(matrix1,matrix2):
N,D = size(matrix1)
return [[matrix1[i][j] + matrix2[i][j] for j in range(D)]
for i in range(N)]
#===========================================
# Add: vec1 + vec2
#===========================================
def addVec(vec1,vec2):
return list(map(lambda x,y:x+y,vec1,vec2))
#===========================================
# Add: Fl + vec
#===========================================
def addFlVec(NumFl,vec):
N = len(vec)
NumFl = float(NumFl)
new_vec = N*[0]
for i in range(N):
new_vec[i]= vec[i] + NumFl
return new_vec
#===========================================
# Add: Int + vec
#===========================================
def addIntVec(NumInt,vec):
N = len(vec)
new_vec = N*[0]
for i in range(N):
new_vec[i]= vec[i] + NumInt
return new_vec
#================================================
# SUM: all values of a matrix
#================================================
def SumVal(matrix):
SUM = 0
for d in range(len(matrix)):
SUM += sum(matrix[d])
return SUM
#================================================
# Sum of matrix vertically 1 or Horizontally 2
#================================================
def sumMat(matrix,Dim):
if Dim==2:
matrix = T(matrix)
N = len(matrix)
M = zeros(N,1)
for i in range(N):
M[i][0] += sum(matrix[i])
if Dim==2: M = T(M)
return M
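# Illustration (not in the original): Dim=1 sums along each row, Dim=2 along
# each column
#   sumMat([[1, 2], [3, 4]], 1)  ->  [[3.0], [7.0]]
#   sumMat([[1, 2], [3, 4]], 2)  ->  [[4.0, 6.0]]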
#===========================================
# Mean of matrix vertically 1 or Horizontally 2
#===========================================
def mean(matrix,Dim):
M = sumMat(matrix,Dim)
if Dim==1:
DivFactor = len(matrix[0])
elif Dim==2:
DivFactor = len(matrix)
return multFlMat(1.0/DivFactor,M)
#======================================================
# Sum of complex matrix vertically 1 or Horizontally 2
#=====================================================
def complexsum(matrix,Dim):
if Dim==2:
matrix = T(matrix)
N = len(matrix)
M = complexzeros(N,1)
for i in range(N):
M[i][0] += sum(matrix[i])
if Dim==2: M = T(M)
return M
#===========================================
# Matrix subtraction
#===========================================
def subMat(matrix1,matrix2):
N,D = size(matrix1)
new_matrix = zeros(N,D)
for i in range(N):
new_matrix[i] = subVec(matrix1[i],matrix2[i])
return new_matrix
#===========================================
# Vector subtraction
#===========================================
def subVec(vec1,vec2):
return list(map(lambda x,y:x-y,vec1,vec2))
#===========================================
# Subtract: vec - Fl
#===========================================
def subVecFl(vec,NumFl):
N = len(vec)
NumFl = float(NumFl)
new_vec = N*[0]
for i in range(N):
new_vec[i]= vec[i] - NumFl
return new_vec
#===========================================
# Subtract: Matrix - Fl
#===========================================
def subMatFl(matrix,NumFl):
N,D = size(matrix)
NumFl = float(NumFl)
new_matrix = zeros(N,D)
for i in range(N):
new_matrix[i]= subVecFl(matrix[i],NumFl)
return new_matrix
#===========================================
# Subtract: Fl - vec
#===========================================
def subFlVec(NumFl,vec):
N,NumFl = len(vec),float(NumFl)
new_vec = N*[0]
for i in range(N): new_vec[i]= NumFl - vec[i]
return new_vec
#===========================================
# Multiply Float to matrix
#===========================================
def multFlMat(FlNum,matrix):
if type(matrix)==int or type(matrix)==float:
return FlNum*matrix
else:
R,C = size(matrix)
return [[FlNum*matrix[i][j] for j in range(C)] for i in range(R)]
#===========================================
# Multiply Int to vec
#===========================================
def multIntVec(Num,vec):
return [vec[i]*Num for i in range(len(vec))]
#===========================================
# Multiply Float to vec
#===========================================
def multFlVec(FlNum,vec):
if type(vec)==int or type(vec)==float:
# Case vec is a value
return FlNum*vec
else:
return [vec[i]*FlNum for i in range(len(vec))]
#===========================================
# Multiply Complex to vec
#===========================================
def multComplexVec(CNum,vec):
    # Multiply a complex number with a vector
    if type(vec)==int or type(vec)==float:
        # Case vec is a value
        return CNum*vec
    N = len(vec)
    Out = N*[0+0j]
    for i in range(N):
        Out[i] = CNum*vec[i]
    return Out
#===========================================
# Matrix multiplication
#===========================================
def multMat(matrix1,matrix2):
# Matrix multiplication
if len(matrix1[0]) != len(matrix2):
# Check matrix dimensions
print ('Matrices must be m*n and n*p to multiply!')
else:
# Multiply if correct dimensions
new_matrix = zeros(len(matrix1),len(matrix2[0]))
for i in range(len(matrix1)):
for j in range(len(matrix2[0])):
for k in range(len(matrix2)):
new_matrix[i][j] += matrix1[i][k]*matrix2[k][j]
return new_matrix
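# Illustration (not in the original):
#   multMat([[1, 2], [3, 4]], [[5, 6], [7, 8]])  ->  [[19.0, 22.0], [43.0, 50.0]]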
#===========================================
# Element wise multiplication of matrices
#===========================================
def multMatElw(mat1,mat2):
    N,D = size(mat1)
    # loop order matters: the outer comprehension runs over rows so the
    # result keeps the NxD orientation of the inputs
    return [[mat1[i][j]*mat2[i][j] for j in range(D)] for i in range(N)]
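# Illustration (not in the original):
#   multMatElw([[1, 2], [3, 4]], [[5, 6], [7, 8]])  ->  [[5, 12], [21, 32]]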
#===========================================
# Element wise multiplication of vectors
#===========================================
def multVecElw(vec1,vec2):
N = len(vec1)
new_vec = N*[0]
for i in range(N):
new_vec[i]=vec1[i]*vec2[i]
return new_vec
#===========================================
# Power Float to matrix
#===========================================
def powFlMat(FlNum,matrix):
if type(matrix)==int or type(matrix)==float:
Out = pow(FlNum,matrix)
return Out
R,C = size(matrix)
new_matrix = zeros(R,C)
for i in range(R):
for j in range(C):
new_matrix[i][j] = FlNum**matrix[i][j]
return new_matrix
#===========================================
# Power matrix to float
#===========================================
def powMatFl(matrix,FlNum):
if type(matrix)==int or type(matrix)==float:
        return pow(matrix,FlNum)
R = len(matrix)
C = len(matrix[0])
new_matrix = zeros(R,C)
for i in range(R):
for j in range(C):
new_matrix[i][j] = matrix[i][j]**FlNum
return new_matrix
#===========================================
# Power vector to float
#===========================================
def powVecFl(vec,FlNum):
if type(vec)==int or type(vec)==float:
return vec**FlNum
else:
return [vec[i]**FlNum for i in range(len(vec))]
#===========================================
# Power float to vector
#===========================================
def powFlVec(FlNum,vec):
if type(vec)==int or type(vec)==float:
return FlNum**vec
else:
return [FlNum**vec[i] for i in range(len(vec))]
#=============================================
# Element wise power for matrices
#=============================================
def powElwMat(matrix,Number):
n,d = size(matrix)
powMat = zeros(n,d)
for i in range(n):
for j in range(d):
powMat[i][j] = matrix[i][j]**Number
return powMat
#===========================================
# Exp of matrix
#===========================================
def exp(matrix):
return [expVec(matrix[i]) for i in range(len(matrix))]
#===========================================
# Exp of Complex matrix
#===========================================
def expComplexMat(matrix):
R,C = size(matrix)
new_matrix = complexzeros(R,C)
for i in range(R):
for j in range(C):
new_matrix[i][j] = euler**matrix[i][j]
return new_matrix
#===========================================
# Exp of vec (float)
#===========================================
def expVec(vec):
return list(map(math.exp,vec))
#===========================================
# Logarithms
#===========================================
# Log10 of a vec
def log10Vec(vec):
return list(map(math.log10,vec))
# LogNormal of a vec
def logNVec(vec):
return list(map(math.log,vec))
# Log10 of a Matrix
def log10Mat(mat):
return [log10Vec(mat[i]) for i in range(len(mat))]
# LogN of a Matrix
def logNMat(mat):
return [logNVec(mat[i]) for i in range(len(mat))]
#===========================================
# Divide Float to matrix
#===========================================
def divFlMat(FlNum,matrix):
N,D = size(matrix)
new_matrix = zeros(N,D)
for i in range(N):
for j in range(D):
new_matrix[i][j] = FlNum/matrix[i][j]
return new_matrix
#===========================================
# Divide Float to vector
#===========================================
def divFlVec(FlNum,vec):
return [FlNum/vec[i] for i in range(len(vec))]
#===========================================
# Element wise division of matrices
#===========================================
def divElw(matrix1,matrix2):
N,D =size(matrix1)
return [[matrix1[i][j] / matrix2[i][j] for j in range(D)]
for i in range(N)]
#===========================================
# Element wise division of vectors
#===========================================
def divElwVec(vec1,vec2):
    return list(map(lambda x,y: x/y, vec1, vec2))
#===========================================
# Element wise absolute value for vectors
#===========================================
def absElwVec(vector):
return list(map(abs,vector))
#=============================================
# Element wise absolute value for matrices
#=============================================
def absElwMat(matrix):
return [absElwVec(matrix[i]) for i in range(len(matrix))]
#=============================================
# Product of a vector elements
#=============================================
def prod(vector):
p = 1
for i in vector:
p *= i
return p
#=============================================
# 3. SORTING: MAX and SORT ---
#=============================================
#=============================================
# Estimate Max and Arg Max of Mat Rows
#=============================================
def maxRow(matrix):
    N,M = size(matrix)
    ArgMaxVec = N*[0]
    MaxVec = list(map(max,matrix))
    for i in range(N):
        ArgMaxVec[i] = matrix[i].index(MaxVec[i])
    return MaxVec,ArgMaxVec
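# Illustration (not in the original): row maxima and their column indices
#   maxRow([[1, 5, 2], [9, 0, 3]])  ->  ([5, 9], [1, 0])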
#=============================================
# Estimate Max and Arg Max of a vec
#=============================================
def maxVec(vec):
MaxVal = max(vec)
return MaxVal, vec.index(MaxVal)
#=============================================
# Estimate Min and Arg Min of Vec list
#=============================================
def minVec(vec):
MinVal = min(vec)
return MinVal, vec.index(MinVal)
#=============================================
# Median of matrix vertically 1 or Horizontally 2
#=============================================
def median(matrix,Dim):
    N,D = size(matrix)
    if Dim==1:
        M = zeros(N,1)
        for i in range(N):
            A = sorted(Mat2Vec(getRow(matrix,i)))
            M[i][0] = A[int(D/2)]
    elif Dim==2:
        M = zeros(1,D)
        Matrix2 = T(matrix)
        for i in range(D):
            A = sorted(Mat2Vec(getRow(Matrix2,i)))
            M[0][i] = A[int(N/2)]
    return M
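# Illustration (not in the original): for rows of even length this picks the
# upper of the two middle elements rather than averaging them
#   median([[3, 1, 2]], 1)  ->  [[2]]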
#======================================
# Differentiate Lines of a matrix
#======================================
def diffMat(matrix):
N,K = size(matrix)
new_mat = zeros(N,K)
for i in range(N-1):
new_mat[i] = subVec(matrix[i+1],matrix[i])
new_mat[N-1] = new_mat[N-2]
return new_mat
#----------------------------------------------------
# Replications, gets, and rotations
#----------------------------------------------------
#=============================================
# Repeat a vertical vec DTimes to Horizontal
#=============================================
def repvecCOL(vec,DTimes):
# vec is a Nx1 vector
N = len(vec)
M = zeros(N,DTimes)
for i in range(N):
for j in range(DTimes):
M[i][j] = vec[i][0]
return M
#=============================================
# Repeat a horizontal vec NTimes to vertical
#=============================================
def repvecROW(vec,NTimes):
# vec is a 1xD vector
D = len(vec[0])
M = zeros(NTimes,D)
for i in range(NTimes):
for j in range(D):
M[i][j] = vec[0][j]
return M
#=============================================
# Get Column of a matrix
#=============================================
def getCol(matrix,j):
N = len(matrix)
ColVec = zeros(N,1)
for i in range(N): ColVec[i][0] = matrix[i][j]
return ColVec
#=============================================
# Get Row of a matrix
#=============================================
def getRow(matrix,i):
D = len(matrix[0])
RowVec = zeros(1,D)
for j in range(D):
RowVec[0][j] = matrix[i][j]
return RowVec
#=============================================
# Get Diag of a matrix nxn
#=============================================
def getDiag(matrix):
n = len(matrix)
DiagVec = zeros(1,n)
for j in range(n):
DiagVec[0][j] = matrix[j][j]
return DiagVec
#=============================================
# Transform matrix to vector
#=============================================
def Mat2Vec(matrix):
N,D = size(matrix)
if D == 1:
M = [0 for i in range(N)]
for i in range(N):
M[i] = matrix[i][0]
else:
M = [0 for i in range(D)]
for i in range(D):
M[i] = matrix[0][i]
return M
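# Illustration (not in the original): flattens an Nx1 or 1xD matrix to a list
#   Mat2Vec([[1], [2], [3]])  ->  [1, 2, 3]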
#=========================================================
# Handle Inconsistencies of EM
# written for Python 2.5
#=========================================================
def Incs(ProbMixtInit,IndexMixt):
    eps = 2.2 * 10e-10
    Inf = 10 * 10e+10
    ZeroProbMixt = (FindMatLNum(ProbMixtInit,eps)+ \
                    FindMatGNum(ProbMixtInit,Inf))
    for IndPat in ZeroProbMixt:
        ProbMixtInit[IndPat][IndexMixt] = eps
    return ProbMixtInit
# REM: math.isnan and math.isinf are only available from Python 2.6 on;
# the MATLAB original also caught NaN probabilities:
#ZeroProbMixt= [find( ProbMixtInit(1:NPatterns) < eps)' ...
#               find( isnan(ProbMixtInit(1:NPatterns)))' ...
#               find( ProbMixtInit(1:NPatterns) == Inf)'];
#===== Normalize each row of an NxM matrix to sum to 1 (probabilities) =======
def NormProb(matrix):
SumCols = list(map(sum,matrix))
return [[matrix[i][m]/SumCols[i]
for m in range(len(matrix[0]))] for i in range(len(matrix))]
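# Example (illustrative, not in the original source):
#   NormProb([[1.0, 1.0], [1.0, 3.0]])  ->  [[0.5, 0.5], [0.25, 0.75]]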
#==== Estimate likelihood of a model =========
def EstLikelyhood(ProbMixtNorm,ProbMixtInit):
N,M = size(ProbMixtNorm)
LTotal = 0
log = math.log
for m in range(M):
for i in range(N):
if ProbMixtInit[i][m]!=0:
LTotal += ProbMixtNorm[i][m]*log(ProbMixtInit[i][m])
return LTotal
#####################################################
# Comparisons and Finds
#####################################################
#===========================================
# Compare Mat1 >= Mat2
#===========================================
def matCompGE(matrix1,matrix2):
N,D = size(matrix1)
Out = zeros(N,D)
for i in range(N):
for j in range(D):
if matrix1[i][j]>=matrix2[i][j]:
Out[i][j] = 1
return Out
#===========================================
# Compare Mat1 < Mat2
#===========================================
def matCompL(matrix1,matrix2):
N,D = size(matrix1)
Out = zeros(N,D)
for i in range(N):
for j in range(D):
if matrix1[i][j]<matrix2[i][j]:
Out[i][j] = 1
return Out
#===========================================
# Compare vector > number
#===========================================
def CompVecGNum(vector,number):
n = len(vector)
Out = n*[0]
for i in range(n):
if vector[i]>number:
Out[i] = 1
return Out
#===========================================
# Compare vector <= number
#===========================================
def CompVecLENum(vector,number):
n = len(vector)
Out = n*[0]
    for i in range(n):
        if vector[i]<=number:
            Out[i] = 1
return Out
#===========================================
# Compare vector < number
#===========================================
def CompVecLNum(vector,number):
n = len(vector)
Out = n*[0]
for i in range(n):
if vector[i]<number:
Out[i] = 1
return Out
#===========================================
# Compare vector == number
#===========================================
def CompVecENum(vector,number):
n = len(vector)
Out = n*[0]
    OutIndex = findSTR(number,vector)
for i in range(len(OutIndex)):
Out[OutIndex[i]] = 1
return Out
#===========================================
# Find indices where Mat(Nx1) < number
#===========================================
def FindMatLNum(matrix,number):
N = len(matrix)
Out = []
for i in range(N):
if matrix[i][0]<number:
Out.append(i)
return Out
#===========================================
# Find indices where Mat(Nx1) > number
#===========================================
def FindMatGNum(matrix,number):
N = len(matrix)
Out = []
for i in range(N):
if matrix[i][0]>number:
Out.append(i)
return Out
#===========================================
# Find index vector > Num
#===========================================
def FindG(vector,Num):
n = len(vector)
Out = []
for i in range(n):
if vector[i] > Num:
Out.append(i)
return Out
#===========================================
# Find index Nx1 column vector > Num
#===========================================
def FindGb(vector,Num):
n = len(vector)
Out = []
for i in range(n):
if vector[i][0] > Num:
Out.append(i)
return Out
#===========================================
# Find index vector < Num
#===========================================
def FindL(vector,Num):
n = len(vector)
Out = []
for i in range(n):
if vector[i]<Num:
Out.append(i)
return Out
#===========================================
# Find index Nx1 column vector < Num
#===========================================
def FindLb(vector,Num):
n = len(vector)
Out = []
for i in range(n):
if vector[i][0]<Num:
Out.append(i)
return Out
#===========================================
# Find index vector <= Num
#===========================================
def FindLE(vector,Num):
n = len(vector)
Out = []
for i in range(n):
if vector[i]<=Num:
Out.append(i)
return Out
#===========================================
# Find index vector == num
#===========================================
def FindE(vector,Num):
return findSTR(Num,vector)
#===========================================
# Find index L <= vector <= H
#===========================================
def FindIn(vector,L,H):
n = len(vector)
Out = []
for i in range(n):
if vector[i]>=L and vector[i]<=H:
Out.append(i)
return Out
#===========================================
# Find index vector \notin [L,H]
#===========================================
def FindOut(vector,L,H):
n = len(vector)
Out = []
for i in range(n):
        if vector[i]<L or vector[i]>H:
Out.append(i)
return Out
#======================================================
# Selections and Assignments
#------------------------------------------------------
#======================================
# Normalize Tab to max value
#======================================
def NormTab(matrix):
N,D = size(matrix)
Output = zeros(N,D)
for i in range(N):
MaxValLine = max(matrix[i])
for j in range(D):
Output[i][j] = matrix[i][j]/MaxValLine
return Output
#======================================
# 3D Matrix Assignment
#======================================
def AssMat3(matrixTemp,matrixIn,DDim):
# matrix Out DDim x N x D
# matrix In N x D
# Assign matIn at matOut at DDim
N,D = size(matrixIn)
matrixOut = matrixTemp
for i in range(N):
for j in range(D):
matrixOut[DDim][i][j] = matrixIn[i][j]
return matrixOut
#======================================
# 2D Matrix Assignment byCol
#======================================
def AssMatCol(matrixTemp,matrixIn,d):
# matrix Out N x D
# matrix In N x 1
# Assign matIn at matTemp at d Col < D
# and return matTemp
matrixOut = matrixTemp
for i in range(len(matrixIn)):
matrixOut[i][d] = matrixIn[i][0]
return matrixOut
#======================================
# 2D Matrix Assignment byRow
#======================================
def AssMatRow(matrixTemp,VecIn,RowsIndex):
# matrixTemp N x D
# VecIn 1 x D
# RowsIndex 1 x K
# Assign VecIn at matTemp[kI] where kI = RowsIndex[k] k=1,..,K
# and return matTemp
matrixOut = matrixTemp
K = len(RowsIndex)
    for k in range(K):
        matrixOut[RowsIndex[k]] = list(VecIn)   # copy, so assigned rows do not alias one list
return matrixOut
#----------------------------
def SelVecAt(vector,Index):
# Select elements of vector by indexing
return [vector[Index[i]] for i in range(len(Index))]
#----------------------------
#def SelSTRAt(STR,Index):
# # Select elements of vector by indexing
# Out = ''
# for i in range(len(Index)):
# Out.append(STR[Index[i]])
# return Out
#
#
#
#print SelSTRAt('abcdefghijk',[1,2])
#==========================================
# Select rows of matrix by indexing
#==========================================
def SelMatRowsAt(matrix,Index):
N = len(Index)
D = len(matrix[0])
Out = zeros(N,D)
for i in range(N):
Out[i] = matrix[Index[i]]
return Out
#==========================================
# Select Cols of matrix by indexing
#==========================================
def SelMatColsAt(matrix,Index):
N,D = len(matrix),len(Index)
Out = zeros(N,D)
for j in range(D):
for i in range(N):
Out[i][j] = matrix[i][Index[j]]
return Out
#==========================================
# Select a (orthogonal) part of a matrix
#==========================================
def SelPart(A,SRows,SCols):
# A matrix
# SRows [0,2] to select first and third row
# SCols [0,2] to select first and third col
return [[A[i][j] for j in SCols] for i in SRows]
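# Example (illustrative, not in the original source): corner elements of a 3x3 matrix,
#   SelPart([[1,2,3],[4,5,6],[7,8,9]], [0,2], [0,2])  ->  [[1,3],[7,9]]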
######################################################
# REM: Adapt Advanced Statistics
# imported from stats.py
######################################################
#=====================================================
# Continued fraction form of the incomplete
# Beta function
# (Adapted from: Numerical Recipes in C.)
#-----------------------------------------------------
def betacf(a,b,x):
ITMAX = 200
EPS = 3.0e-7
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
if (abs(az-aold)<(EPS*abs(az))):
return az
print ('a or b too big, or ITMAX too small in Betacf.')
#=========================================================
# incomplete beta function:
#
# I_x(a,b) = 1/Beta(a,b)* int_0^x t^(a-1)(1-t)^(b-1) dt
#
# a,b>0
# Beta(a,b) = Gamma(a)*Gamma(b)/Gamma(a+b) using the
# betacf function. (See Numerical Recipes in C.)
#=========================================================
def BetaInc(x,a,b):
if (x<0.0 or x>1.0):
        raise ValueError('Bad x in BetaInc')
if (x==0.0 or x==1.0):
bt = 0.0
else:
bt = math.exp(GammaLn(a+b)-GammaLn(a)-GammaLn(b)+a*math.log(x)+b*\
math.log(1.0-x))
if (x<(a+1.0)/(a+b+2.0)):
return bt*betacf(a,b,x)/float(a)
else:
return 1.0-bt*betacf(b,a,1.0-x)/float(b)
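# Sanity check (illustrative, not in the original source): by symmetry
# I_0.5(a,a) = 0.5 for any a > 0, so BetaInc(0.5, 2.0, 2.0) should return
# 0.5 up to the EPS tolerance of betacf.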
#=========================================================
# Natural logarithm of Gamma function of xx.
# Gamma(z) = int_0^{\infty} t^(z-1)exp(-t) dt
# (From: Numerical Recipes in C.)
#=========================================================
def GammaLn(xx):
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x += 1.0
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser)
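# Sanity check (illustrative, not in the original source): Gamma(5) = 4! = 24,
# so GammaLn(5.0) should be close to math.log(24) ~ 3.1781.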
#===========================================
# Chi2 random numbers with D degFree
#===========================================
def Chi2Rnd(D):
Out = 0
for d in range(D):
Out += random.gauss(0,1)**2
return Out
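# Example (illustrative, not in the original source): a chi-squared variate
# with D degrees of freedom has mean D, so
#   sum(Chi2Rnd(3) for _ in range(10000))/10000.0
# should be close to 3.0.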
#===========================================
# Estimate Covariance Matrix
#===========================================
def cov(Patterns):
N,D = size(Patterns)
CovMat = zeros(D,D)
MeanVec = mean(Patterns,2)
AvPat = subMat(Patterns,repvecROW(MeanVec,N))
for d1 in range(D):
for d2 in range(d1,D):
CovMat[d1][d2] = SumVal(multMatElw(getCol(AvPat,d1),\
getCol(AvPat,d2)))
CovMat[d2][d1] = CovMat[d1][d2]
CovMat = multFlMat(1.0/(N-1),CovMat)
return CovMat
#print show(cov(T([[1,1,1,1],[5,6,7,8],[9,10,11,12]])))
#=================================================
# Estimate Multiple Correlation Coefficient MCC
# N-1 First Cols to Last Col
#=================================================
def MCC(Patterns):
import numpy
N,D = size(Patterns)
SigmaMat = cov(Patterns)
Sm = SelPart(SigmaMat,range(D-1),range(D-1))
sMp1vector= SigmaMat[D-1][0:D-1]
sMp1Mp1 = SigmaMat[D-1][D-1]
InvSm = numpy.linalg.inv(Sm)
ResA = multMat([sMp1vector],InvSm)
ResB = multMat(ResA,T([sMp1vector]))
MultCC = ( ResB[0][0] /sMp1Mp1)**0.5
return MultCC
#=================================================
# Estimate Pearson Concordance Coefficient
#=================================================
def PearsonConco(x,y):
CovMat = cov(T([x,y]))
N = len(x)
vx,vy,vxy = CovMat[0][0],CovMat[1][1],CovMat[0][1]
mx,my = mean([x],1)[0][0],mean([y],1)[0][0]
r = vxy/ (vx*vy)**0.5
    zr = 0.5 * math.log((1.0+r)/(1.0-r))           # Fisher z-transform of r
    fc = 1.96*(1.0/(N-3.0)**0.5)                   # 95% confidence half-width
    zeta_l,zeta_u = zr - fc, zr + fc
    r_l,r_u = math.tanh(zeta_l),math.tanh(zeta_u)  # back-transform to the r scale
#t = stats.t.ppf(0.95,N-2)
#PearsonDownLim= t/np.sqrt(N-2+pow(t,2))
Conco = 2.0*vxy/(vx+vy+(mx-my)**2.0)
return r,r_l,r_u,Conco
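# Example usage (illustrative, not in the original source; note that the
# Fisher transform diverges for |r| == 1, so avoid perfectly collinear data):
#   r, r_l, r_u, conco = PearsonConco([1.0,2.0,3.0,4.0], [1.1,1.9,3.2,3.8])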
#===========================================
# Estimate variance of a vec
#===========================================
def var(vec):
N = len(vec)
MeanVal = sum(vec)/float(N)
AvVec = subVecFl(vec,MeanVal)
S2 = 0
for i in range(N):
S2 += AvVec[i]**2.0
return 1.0/(float(N)-1)*S2
#===========================================
# Estimate variance of a Matrix in dimension d
#===========================================
def varMat(Mat,d):
N,K = size(Mat)
if d == 2:
VecVarOut = N*[0]
for i in range(N):
VecVarOut[i] = var(Mat[i])
elif d==1:
VecVarOut = K*[0]
for j in range(K):
VecVarOut[j] = var(Mat2Vec(T(getCol(Mat,j))))
return VecVarOut
#===========================================
# Estimate (x-m)Sigma^(-1)(x-m)T
#===========================================
def CleverMult(Patterns,Center,SigmaInv,N,D,G):
if D==1:
s = SigmaInv[0][0]
m = Center[0]
return [[s*(Patterns[n][0]-m)**2] for n in range(N)]
if D==2:
for n in range(N):
A,B = Patterns[n][0]-Center[0],Patterns[n][1]-Center[1]
            G[n][0] = A**2*SigmaInv[0][0] + B**2*SigmaInv[1][1] + \
                      2*A*B*SigmaInv[0][1]   # symmetric SigmaInv: the off-diagonal term appears twice
return G
else:
DistPatIndexMixt = D*[0.0]
Buff = D*[0.0]
for n in range(N):
for d in range(D):
DistPatIndexMixt[d] = Patterns[n][d] - Center[d]
for d in range(D):
Buff[d] = 0
for k in range(D):
Buff[d] += DistPatIndexMixt[k] *SigmaInv[k][d]
Buff[d] = Buff[d]*DistPatIndexMixt[d]
G[n][0] = sum(Buff)
return G
#=================================================
# The error function: erf
# Abramowitz & Stegun, "Handbook of Mathematical Functions", eq. 7.1.26
#==================================================
def erf(x):
# save the sign of x
xSign = sign(x)
x = abs(x)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
return xSign*y
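# Sanity check (illustrative, not in the original source): erf(1.0) ~ 0.8427
# and erf(-1.0) ~ -0.8427; the A&S 7.1.26 approximation is accurate to about
# 1.5e-7 over the whole real line.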
#===================================
# FIR filter of Signal
#===================================
def FIRfilter(B,vec):
    N = len(vec)
    LenF = len(B)
    new_vec = N*[0]
    for i in range(N):
        for j in range(LenF):
            if i-j >= 0:   # zero initial conditions: negative indices must not wrap around
                new_vec[i] += B[j]*vec[i-j]
    return new_vec
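# Example usage (illustrative, not in the original source): a 3-tap moving
# average with zero initial conditions,
#   FIRfilter([1/3.0, 1/3.0, 1/3.0], [3.0, 3.0, 3.0, 6.0])  ->  [1.0, 2.0, 3.0, 4.0]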
#===================================
# ShiftMinTo0 Column
#===================================
def ShiftMinTo0(Matrix,Col):
ColData = Mat2Vec(getCol(Matrix,Col))
MinVal = -1*min(ColData)
ColData = addFlVec(MinVal,ColData)
Matrix = AssMatCol(Matrix,T([ColData]),Col)
return Matrix
#---- ShiftMinTo0 Row ----
def ShiftMinTo0Row(Matrix,iRow):
Matrix[iRow] = addFlVec(-1*min(Matrix[iRow]),Matrix[iRow])
return Matrix
#----------------------------------------
#========================================
# STRING OPERATIONS
# Find all indices at which value occurs in the sequence L (works for str and list)
def findSTR(value, L):
    i = -1
    Out = []
    while 1:
        try:
            f = L.index(value, i+1)
            Out.append( f )
            i = Out[-1]
        except ValueError:
            return Out
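# Example usage (illustrative, not in the original source):
#   findSTR('a', 'banana')    ->  [1, 3, 5]
#   findSTR(2, [2, 7, 2, 9])  ->  [0, 2]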
# Set operations
#========================================
# Intersection of two lists
#========================================
def intersect(A,B):
return list(set(A) & set(B))
#========================================
# Unique of list of (Integ or STR)
#========================================
def Unique(A):
Out = list(set(A))
Out.sort()
return Out
######################################################
# Performance Times
#=====================================================
def profile_mult(matrix1,matrix2):
# A more detailed timing with process information
# Arguments must be strings for this function
# eg. profile_mult('a','b')
import cProfile
cProfile.run('matrix.mult(' + matrix1 + ',' + matrix2 + ')')
######################################################
# Image to eps
#=====================================================
#def Im2eps(Filename,outfile):
# import Image
# im = Image.open(Filename)
# im.save(outfile, "EPS")
#
#FotoName = "NeuroTool3"
#Im2eps(FotoName+".bmp",FotoName+".eps")
|
{"hexsha": "7219c48835a870c36554a78c0ec53856e9ab47fd", "size": 38005, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/matrix.py", "max_stars_repo_name": "MKLab-ITI/DanceAnno", "max_stars_repo_head_hexsha": "fad9985bf0843c3b95895df946c3caeee4e42210", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-01-31T23:34:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-05T16:14:41.000Z", "max_issues_repo_path": "Utils/matrix.py", "max_issues_repo_name": "MKLab-ITI/DanceAnno", "max_issues_repo_head_hexsha": "fad9985bf0843c3b95895df946c3caeee4e42210", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Utils/matrix.py", "max_forks_repo_name": "MKLab-ITI/DanceAnno", "max_forks_repo_head_hexsha": "fad9985bf0843c3b95895df946c3caeee4e42210", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-03-17T04:01:45.000Z", "max_forks_repo_forks_event_max_datetime": "2016-03-17T04:01:45.000Z", "avg_line_length": 25.8713410483, "max_line_length": 74, "alphanum_fraction": 0.4039205368, "include": true, "reason": "import numpy", "num_tokens": 9326}
|
from __future__ import division, print_function, absolute_import
import pytest
import numpy as np
from scipy.spatial.transform import Rotation
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
from scipy.constants import golden as phi
from scipy.spatial import cKDTree
TOL = 1E-12
NS = range(1, 13)
NAMES = ["I", "O", "T"] + ["C%d" % n for n in NS] + ["D%d" % n for n in NS]
SIZES = [60, 24, 12] + list(NS) + [2 * n for n in NS]
def _calculate_rmsd(P, Q):
    """Calculates the square root of the sum of squared distances between the
    points of P and Q, minimized over all possible matchings (despite the
    name, no mean is taken). It is zero if P and Q are identical up to a
    permutation of the points and non-zero otherwise.
    """
distance_matrix = cdist(P, Q, metric='sqeuclidean')
matching = linear_sum_assignment(distance_matrix)
return np.sqrt(distance_matrix[matching].sum())
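# Illustrative note (not part of the original test file): the assignment step
# makes the measure permutation-invariant, e.g. for P = [[0, 0], [1, 1]] and
# Q = P reversed, _calculate_rmsd(P, Q) == 0.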
def _generate_pyramid(n, axis):
thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
P = np.vstack([np.zeros(n), np.cos(thetas), np.sin(thetas)]).T
P = np.concatenate((P, [[1, 0, 0]]))
return np.roll(P, axis, axis=1)
def _generate_prism(n, axis):
thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
bottom = np.vstack([-np.ones(n), np.cos(thetas), np.sin(thetas)]).T
top = np.vstack([+np.ones(n), np.cos(thetas), np.sin(thetas)]).T
P = np.concatenate((bottom, top))
return np.roll(P, axis, axis=1)
def _generate_icosahedron():
x = np.array([[0, -1, -phi],
[0, -1, +phi],
[0, +1, -phi],
[0, +1, +phi]])
return np.concatenate([np.roll(x, i, axis=1) for i in range(3)])
def _generate_octahedron():
return np.array([[-1, 0, 0], [+1, 0, 0], [0, -1, 0],
[0, +1, 0], [0, 0, -1], [0, 0, +1]])
def _generate_tetrahedron():
return np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]])
@pytest.mark.parametrize("name", [-1, None, True, np.array(['C3'])])
def test_group_type(name):
with pytest.raises(ValueError,
match="must be a string"):
Rotation.create_group(name)
@pytest.mark.parametrize("name", ["Q", " ", "CA", "C ", "DA", "D ", "I2", ""])
def test_group_name(name):
with pytest.raises(ValueError,
match="must be one of 'I', 'O', 'T', 'Dn', 'Cn'"):
Rotation.create_group(name)
@pytest.mark.parametrize("name", ["C0", "D0"])
def test_group_order_positive(name):
with pytest.raises(ValueError,
match="Group order must be positive"):
Rotation.create_group(name)
@pytest.mark.parametrize("axis", ['A', 'b', 0, 1, 2, 4, False, None])
def test_axis_valid(axis):
with pytest.raises(ValueError,
match="`axis` must be one of"):
Rotation.create_group("C1", axis)
def test_icosahedral():
"""The icosahedral group fixes the rotations of an icosahedron. Here we
test that the icosahedron is invariant after application of the elements
of the rotation group."""
P = _generate_icosahedron()
for g in Rotation.create_group("I"):
        g = Rotation.from_quat(g.as_quat())  # round-trip through quaternions
assert _calculate_rmsd(P, g.apply(P)) < TOL
def test_octahedral():
"""Test that the octahedral group correctly fixes the rotations of an
octahedron."""
P = _generate_octahedron()
for g in Rotation.create_group("O"):
assert _calculate_rmsd(P, g.apply(P)) < TOL
def test_tetrahedral():
"""Test that the tetrahedral group correctly fixes the rotations of a
tetrahedron."""
P = _generate_tetrahedron()
for g in Rotation.create_group("T"):
assert _calculate_rmsd(P, g.apply(P)) < TOL
@pytest.mark.parametrize("n", NS)
@pytest.mark.parametrize("axis", 'XYZ')
def test_dicyclic(n, axis):
"""Test that the dicyclic group correctly fixes the rotations of a
prism."""
P = _generate_prism(n, axis='XYZ'.index(axis))
for g in Rotation.create_group("D%d" % n, axis=axis):
assert _calculate_rmsd(P, g.apply(P)) < TOL
@pytest.mark.parametrize("n", NS)
@pytest.mark.parametrize("axis", 'XYZ')
def test_cyclic(n, axis):
"""Test that the cyclic group correctly fixes the rotations of a
pyramid."""
P = _generate_pyramid(n, axis='XYZ'.index(axis))
for g in Rotation.create_group("C%d" % n, axis=axis):
assert _calculate_rmsd(P, g.apply(P)) < TOL
@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
def test_group_sizes(name, size):
assert len(Rotation.create_group(name)) == size
@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
def test_group_no_duplicates(name, size):
g = Rotation.create_group(name)
kdtree = cKDTree(g.as_quat())
assert len(kdtree.query_pairs(1E-3)) == 0
@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
def test_group_symmetry(name, size):
g = Rotation.create_group(name)
q = np.concatenate((-g.as_quat(), g.as_quat()))
distance = np.sort(cdist(q, q))
deltas = np.max(distance, axis=0) - np.min(distance, axis=0)
assert (deltas < TOL).all()
|
{"hexsha": "5407bcc643f76cac64d7489d046c3a3ecaace146", "size": 5060, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/spatial/transform/tests/test_rotation_groups.py", "max_stars_repo_name": "TNonet/scipy", "max_stars_repo_head_hexsha": "84d0b611f08187a2259a86a4ad5ed7295632c570", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scipy/spatial/transform/tests/test_rotation_groups.py", "max_issues_repo_name": "TNonet/scipy", "max_issues_repo_head_hexsha": "84d0b611f08187a2259a86a4ad5ed7295632c570", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scipy/spatial/transform/tests/test_rotation_groups.py", "max_forks_repo_name": "TNonet/scipy", "max_forks_repo_head_hexsha": "84d0b611f08187a2259a86a4ad5ed7295632c570", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8571428571, "max_line_length": 78, "alphanum_fraction": 0.6333992095, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1452}
|
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import category_theory.category
import pseudoelements
import tactic.combinators
import tactic.chase_tactic
open category_theory
open category_theory.abelian
open category_theory.abelian.pseudoelements
open tactic
namespace tactic.chase
section lemmas
universes v u
variables {C : Type u} [𝒞 : category.{v} C] [abelian.{v} C]
include 𝒞
local attribute [instance] object_to_sort
local attribute [instance] hom_to_fun
lemma pseudo_congr {X Y : C} {f g : X ⟶ Y} (h : f = g) (x : X) : f x = g x :=
by rw h
end lemmas
meta def try_apply_comm_lemma_at_aux (l : commutativity_lemma) :
ℕ → diagram_term → option (diagram_term)
| 0 ⟨ms, elem⟩ :=
match list.is_prefix_of l.lhs ms with
| ff := none
| tt := some ⟨list.append l.rhs (list.drop l.lhs.length ms), elem⟩
end
| (n + 1) ⟨[], e⟩ := none
| (n + 1) ⟨t::ts, e⟩ :=
match try_apply_comm_lemma_at_aux n ⟨ts, e⟩ with
| none := none
| some ⟨nt, ne⟩ := some ⟨t::nt, ne⟩
end
meta def try_apply_element_lemma_at_aux (l : element_lemma) :
ℕ → diagram_term → option (diagram_term)
| 0 ⟨ms, elem⟩ := if l.lhs = ⟨ms, elem⟩ then some l.rhs else none
| (n + 1) ⟨[], e⟩ := none
| (n + 1) ⟨t :: ts, e⟩ :=
match try_apply_element_lemma_at_aux n ⟨ts, e⟩ with
| none := none
| some ⟨nt, ne⟩ := some ⟨t::nt, ne⟩
end
meta inductive lemma_app
| comm : commutativity_lemma → ℕ → diagram_term → lemma_app
| elem : element_lemma → ℕ → diagram_term → lemma_app
meta instance format_lemma_app : has_to_format lemma_app :=
{ to_format := λ a,
match a with
| lemma_app.comm a b c := format!"comm: lemma ({a}) at {b} gives {c}"
| lemma_app.elem a b c := format!"elem: lemma ({a}) at {b} gives {c}"
end }
meta def next_term : lemma_app → diagram_term
| (lemma_app.comm _ _ t) := t
| (lemma_app.elem _ _ t) := t
meta def apply_comm_lemma_at_aux : ℕ → diagram_term → tactic (option expr)
| 0 t := some <$> (mk_eq_refl $ as_expr t)
| 1 ⟨t::[], e⟩ := some <$> (mk_eq_refl $ as_expr ⟨[t], e⟩)
| (n + 1) ⟨[], _⟩ := return none
| (n + 1) ⟨t::[], e⟩ := return none
| (n + 1) ⟨t::(u::ts), e⟩ :=
do
some x ← i_to_expr ``(%%(u.ex) ≫ %%(t.ex)) >>= as_morphism,
lhs ← mk_app `category_theory.abelian.pseudoelements.comp_apply [u.ex, t.ex, as_expr ⟨ts, e⟩] >>= mk_eq_symm,
some rhs ← apply_comm_lemma_at_aux n ⟨x::ts, e⟩,
some <$> mk_eq_trans lhs rhs
meta def apply_comm_lemma_at (l : commutativity_lemma) :
ℕ → diagram_term → diagram_term → tactic (option expr)
| 0 ⟨ms, elem⟩ goal :=
do
some one ← apply_comm_lemma_at_aux (l.lhs.length - 1) ⟨ms, elem⟩,
let inner := as_expr ⟨list.drop (l.lhs.length) ms, elem⟩,
two ← mk_app `tactic.chase.pseudo_congr [l.ex, inner],
some three ← apply_comm_lemma_at_aux (l.rhs.length - 1) goal,
three' ← mk_eq_symm three,
onetwo ← mk_eq_trans one two,
some <$> mk_eq_trans onetwo three'
| (n + 1) ⟨[], e⟩ goal := none
| (n + 1) fr ⟨[], e⟩ := none
| (n + 1) ⟨t::ts, e⟩ ⟨u::us, f⟩ :=
do
some inner ← apply_comm_lemma_at n ⟨ts, e⟩ ⟨us, f⟩,
some <$> mk_app `congr_arg [t.app, inner]
meta def apply_elem_lemma_at (l : element_lemma) :
ℕ → diagram_term → tactic (option expr)
| 0 ⟨ms, elem⟩ := return $ some l.ex
| (n + 1) ⟨[], _⟩ := none
| (n + 1) ⟨t::ts, e⟩ :=
do
some inner ← apply_elem_lemma_at n ⟨ts, e⟩,
some <$> mk_app `congr_arg [t.app, inner]
meta def build_proof (t : diagram_term) : lemma_app → tactic expr
| (lemma_app.comm x y z) :=
do
some u ← apply_comm_lemma_at x y t z,
return u
| (lemma_app.elem x y z) :=
do
some u ← apply_elem_lemma_at x y t,
return u
meta def try_apply_comm_lemma_at (l : commutativity_lemma) (n : ℕ) (t : diagram_term) :
option (lemma_app) :=
match try_apply_comm_lemma_at_aux l n t with
| none := none
| some t := lemma_app.comm l n t
end
meta def try_apply_elem_lemma_at (l : element_lemma) (n : ℕ) (t : diagram_term) :
option lemma_app :=
match try_apply_element_lemma_at_aux l n t with
| none := none
| some t := lemma_app.elem l n t
end
meta def iota : ℕ → list ℕ
| 0 := [0]
| (n + 1) := (n + 1) :: iota n
meta def try_apply_comm_lemma (l : commutativity_lemma) (t : diagram_term) : list lemma_app :=
list.filter_map (λ n, try_apply_comm_lemma_at l n t) $ iota t.ms.length
meta def try_apply_elem_lemma (l : element_lemma) (t : diagram_term) : list lemma_app :=
list.filter_map (λ n, try_apply_elem_lemma_at l n t) $ iota t.ms.length
meta def try_all_comm (t : diagram_term) : chase_tactic (list lemma_app) :=
do
l ← get,
return $ list.join $ list.map (λ l, try_apply_comm_lemma l t) l.comm_lemmas
meta def try_all_elem (t : diagram_term) : chase_tactic (list lemma_app) :=
do
l ← get,
return $ list.join $ list.map (λ l, try_apply_elem_lemma l t) l.elem_lemmas
meta mutual def show_via_zero, find_proof_dfs
with show_via_zero : diagram_term → diagram_term → chase_tactic (option expr)
| cur goal := do
l ← diagram_term.to_zero cur,
match l with
| none := return none
| some e := do
zer ← goal.zero,
r ← find_proof_dfs goal zer [],
match r with
| none := return none
| some f := (mk_eq_symm f) >>= (λ g, some <$> mk_eq_trans e g)
end
end
with find_proof_dfs :
diagram_term → diagram_term → list diagram_term → chase_tactic (option expr)
| cur goal seen := if cur = goal then some <$> mk_eq_refl (as_expr cur) else
do
via_zero ← show_via_zero cur goal,
match via_zero with
| some e := return $ some e
| none := do
cands_comm ← try_all_comm cur,
cands_elem ← try_all_elem cur,
let cands := list.append cands_comm cands_elem,
list.mfoldl (λ r s,
match r with
| some q := return $ some q
| none :=
ite (list.any seen (λ e, to_bool $ e = (next_term s))) (return none) $
do
--trace format!"trying {s}...",
l ← find_proof_dfs (next_term s) goal (cur::seen),
match l with
| none := none
| some q := do
f ← build_proof cur s,
t ← mk_eq_trans f q,
return $ some t
end
end) none cands
end
meta def find_direct_proof (cur goal : diagram_term) : chase_tactic (option expr) :=
find_proof_dfs cur goal []
meta def find_proof : diagram_term → diagram_term → chase_tactic (option expr)
| ⟨t, e⟩ ⟨t', e'⟩ := do
mm ← diagram_term.type ⟨t, e⟩ >>= mono_with_domain,
match mm with
| none := find_direct_proof ⟨t, e⟩ ⟨t', e'⟩
| some m := do
ii ← find_proof ⟨m::t, e⟩ ⟨m::t', e'⟩,
match ii with
| none := none
| some i := some <$> mk_app `category_theory.abelian.pseudoelements.pseudo_injective_of_mono [i]
end
end
meta def commutativity : chase_tactic unit :=
do
(_, l, r) ← target_lhs_rhs,
some lhs ← as_diagram_term l,
some rhs ← as_diagram_term r,
some p ← find_proof lhs rhs,
tactic.exact p
end tactic.chase
namespace tactic.interactive
open interactive (parse)
open lean.parser (tk pexpr)
meta def commutativity (loc : parse ((tk "at" *> some <$> pexpr) <|> return none)) : tactic unit :=
do
l ← match loc with
| none := return none
| some m := some <$> to_expr m
end,
chase.run_chase_tactic l tactic.chase.commutativity
end tactic.interactive
|
{"author": "TwoFX", "repo": "lean-homological-algebra", "sha": "e3a8e4ecaf49bec6c7b38b34c0b8f9749e941aa8", "save_path": "github-repos/lean/TwoFX-lean-homological-algebra", "path": "github-repos/lean/TwoFX-lean-homological-algebra/lean-homological-algebra-e3a8e4ecaf49bec6c7b38b34c0b8f9749e941aa8/src/tactic/commutativity.lean"}
|
# Copyright (c) 2020 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import random
from dataclasses import dataclass
from typing import Dict, Set, List, Optional
import bmesh
import bpy
import bpy_types
import numpy as np
from . import mdl, blendmat
BLENDMATS: Dict[str, blendmat.BlendMat] = {}
@dataclass
class BlendMdl:
am: "AliasMdl"
obj: bpy_types.Object
sub_objs: List[bpy_types.Object]
sample_as_light_mats: Set[blendmat.BlendMat]
_initial_pose_num: int
_group_frame_times: Optional[List[float]]
_shape_keys: List[List[bpy.types.ShapeKey]]
_no_anim: bool
_current_pose_num: Optional[int] = None
_last_time: Optional[float] = None
def _update_pose(self, time: float, pose_num: int, fps: float):
if self._current_pose_num is None or self._current_pose_num != pose_num:
for sub_obj, shape_keys in zip(self.sub_objs, self._shape_keys):
blender_frame = int(round(fps * time))
if self._current_pose_num is not None:
shape_keys[self._current_pose_num].value = 0
shape_keys[self._current_pose_num].keyframe_insert('value', frame=blender_frame)
last_blender_frame = int(round(fps * self._last_time))
shape_keys[pose_num].value = 0
shape_keys[pose_num].keyframe_insert('value', frame=last_blender_frame)
shape_keys[pose_num].value = 1
shape_keys[pose_num].keyframe_insert('value', frame=blender_frame)
self._current_pose_num = pose_num
self._last_time = time
def add_pose_keyframe(self, pose_num: int, time: float, fps: float):
if self._no_anim:
pass
elif self._group_frame_times is not None:
if pose_num != self._initial_pose_num:
raise Exception("Changing pose of a model whose initial pose is a group frame "
"is unsupported")
else:
self._update_pose(time, pose_num, fps)
def set_invisible_to_camera(self):
for sub_obj in self.sub_objs:
sub_obj.cycles_visibility.camera = False
def done(self, final_time: float, fps: float):
if self._no_anim:
return
if self._group_frame_times is not None:
loop_time = -random.random() * self._group_frame_times[-1]
while loop_time < final_time:
for pose_num, offset in enumerate([0] + self._group_frame_times[:-1]):
self._update_pose(loop_time + offset, pose_num, fps)
loop_time += self._group_frame_times[-1]
for sub_obj in self.sub_objs:
for c in sub_obj.data.shape_keys.animation_data.action.fcurves:
for kfp in c.keyframe_points:
kfp.interpolation = 'LINEAR'
def _set_uvs(mesh, am, tri_set):
mesh.uv_layers.new()
bm = bmesh.new()
bm.from_mesh(mesh)
uv_layer = bm.loops.layers.uv[0]
for bm_face, tri_idx in zip(bm.faces, tri_set):
tcs = am.get_tri_tcs(tri_idx)
for bm_loop, (s, t) in zip(bm_face.loops, tcs):
bm_loop[uv_layer].uv = s / am.header['skin_width'], t / am.header['skin_height']
bm.to_mesh(mesh)
def _simplify_pydata(verts, tris):
vert_map = []
new_tris = []
for tri in tris:
new_tri = []
for vert_idx in tri:
if vert_idx not in vert_map:
vert_map.append(vert_idx)
new_tri.append(vert_map.index(vert_idx))
new_tris.append(new_tri)
return ([verts[old_vert_idx] for old_vert_idx in vert_map], [], new_tris), vert_map
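# Illustrative example (not in the original source): with tris == [[5, 9, 7]],
# the function returns ([verts[5], verts[9], verts[7]], [], [[0, 1, 2]]) and
# vert_map == [5, 9, 7], i.e. only referenced vertices survive, re-indexed.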
def _get_tri_set_fullbright_frac(am, tri_set, skin_idx):
skin_area = 0
fullbright_area = 0
for tri_idx in tri_set:
mask, skin = am.get_tri_skin(tri_idx, skin_idx)
skin_area += np.sum(mask)
fullbright_area += np.sum(mask * (skin >= 224))
return fullbright_area / skin_area
def _create_shape_key(obj, simple_frame, vert_map):
shape_key = obj.shape_key_add(name=simple_frame.name)
for old_vert_idx, shape_key_vert in zip(vert_map, shape_key.data):
shape_key_vert.co = simple_frame.frame_verts[old_vert_idx]
return shape_key
def add_model(am, pal, mdl_name, obj_name, skin_num, mdl_cfg, initial_pose_num, do_materials):
pal = np.concatenate([pal, np.ones(256)[:, None]], axis=1)
# If the initial pose is a group frame, just load frames from that group.
if am.frames[initial_pose_num].frame_type == mdl.FrameType.GROUP:
group_frame = am.frames[initial_pose_num]
timescale = mdl_cfg.get('timescale', 1)
group_times = [t / timescale for t in group_frame.times]
else:
group_frame = None
group_times = None
for frame in am.frames:
if frame.frame_type != mdl.FrameType.SINGLE:
raise Exception(f"Frame type {frame.frame_type} not supported for non-static models")
# Set up things specific to each tri-set
sample_as_light_mats: Set[blendmat.BlendMat] = set()
obj = bpy.data.objects.new(obj_name, None)
sub_objs = []
shape_keys = []
bpy.context.scene.collection.objects.link(obj)
for tri_set_idx, tri_set in enumerate(am.disjoint_tri_sets):
# Create the mesh and object
subobj_name = f"{obj_name}_triset{tri_set_idx}"
mesh = bpy.data.meshes.new(subobj_name)
if am.frames[0].frame_type == mdl.FrameType.SINGLE:
initial_verts = am.frames[0].frame.frame_verts
else:
initial_verts = am.frames[initial_pose_num].frames[0].frame_verts
pydata, vert_map = _simplify_pydata([list(v) for v in initial_verts],
[list(am.tris[t]) for t in tri_set])
mesh.from_pydata(*pydata)
subobj = bpy.data.objects.new(subobj_name, mesh)
subobj.parent = obj
sub_objs.append(subobj)
bpy.context.scene.collection.objects.link(subobj)
# Create shape keys, used for animation.
if group_frame is None:
shape_keys.append([
_create_shape_key(subobj, frame.frame, vert_map) for frame in am.frames
])
else:
shape_keys.append([
_create_shape_key(subobj, simple_frame, vert_map)
for simple_frame in group_frame.frames
])
if do_materials:
# Set up material
sample_as_light = mdl_cfg['sample_as_light']
mat_name = f"{mdl_name}_skin{skin_num}"
if sample_as_light:
mat_name = f"{mat_name}_{obj_name}_triset{tri_set_idx}_fullbright"
if mat_name not in BLENDMATS:
array_im, fullbright_array_im, _ = blendmat.array_ims_from_indices(
pal,
am.skins[skin_num],
force_fullbright=mdl_cfg['force_fullbright']
)
im = blendmat.im_from_array(mat_name, array_im)
if fullbright_array_im is not None:
fullbright_im = blendmat.im_from_array(f"{mat_name}_fullbright", fullbright_array_im)
strength = mdl_cfg['strength']
cam_strength = mdl_cfg.get('cam_strength', strength)
bm = blendmat.setup_fullbright_material(
blendmat.BlendMatImages.from_single_pair(im, fullbright_im),
mat_name,
strength,
cam_strength,
warp=False
)
else:
bm = blendmat.setup_diffuse_material(
blendmat.BlendMatImages.from_single_diffuse(im),
mat_name,
warp=False
)
bm.mat.cycles.sample_as_light = sample_as_light
if sample_as_light:
sample_as_light_mats.add(bm)
BLENDMATS[mat_name] = bm
bm = BLENDMATS[mat_name]
# Apply the material
mesh.materials.append(bm.mat)
_set_uvs(mesh, am, tri_set)
return BlendMdl(am, obj, sub_objs, sample_as_light_mats,
initial_pose_num, group_times, shape_keys,
mdl_cfg.get('no_anim', False))
|
{"hexsha": "35cc72f9540d05918002837cba0c2a1fd2bd4d53", "size": 9447, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyquake/blendmdl.py", "max_stars_repo_name": "proteanblank/pyquake", "max_stars_repo_head_hexsha": "26818b92bf648d897975993a3c40a78d7a5c1a9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2020-11-07T23:52:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:43:27.000Z", "max_issues_repo_path": "pyquake/blendmdl.py", "max_issues_repo_name": "proteanblank/pyquake", "max_issues_repo_head_hexsha": "26818b92bf648d897975993a3c40a78d7a5c1a9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyquake/blendmdl.py", "max_forks_repo_name": "proteanblank/pyquake", "max_forks_repo_head_hexsha": "26818b92bf648d897975993a3c40a78d7a5c1a9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-22T00:52:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T05:36:01.000Z", "avg_line_length": 38.7172131148, "max_line_length": 105, "alphanum_fraction": 0.6267598179, "include": true, "reason": "import numpy", "num_tokens": 2165}
|
# coding: utf8
import sys
import os
from sys import argv
sys.path.insert(0, os.getcwd()) # adds current directory to python path
import numpy as np
import matplotlib.pyplot as plt
####################
# Load saved results
####################
folder_name = ""
pathIn = "crocoddyl_eval/test_4/log_eval/"
res = np.load(pathIn + folder_name + "results_wyaw_all_false.npy", allow_pickle=True)
# res1 = np.load(pathIn + folder_name + "results_osqp_wyaw.npy" , allow_pickle=True )
X = np.linspace(1,-1,25)
# Y = np.linspace(-1,1,65)
W = np.linspace(-2.2,2.2,25)
def find_nearest(Vx , Vy):
    # indices of the grid points of X and W closest to (Vx, Vy)
    idx = (np.abs(X - Vx)).argmin()
    idy = (np.abs(W - Vy)).argmin()
    return idx , idy
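# Example (illustrative, not in the original source): find_nearest(1.0, -2.2)
# returns (0, 0), the indices of the first entries of X and W.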
XX , YY = np.meshgrid(X,W)
Z = np.zeros((XX.shape[0] , YY.shape[1]))
Z_osqp = np.zeros((XX.shape[0] , YY.shape[1]))
# plt.figure()
# for elt in res :
# if elt[0] == True :
# plt.plot(elt[1][0] , elt[1][1] , "bs" , markerSize= "13")
# else :
# pass
# plt.xlim([-1,1])
# plt.ylim([-1,1])
plt.figure()
for elt in res :
idx , idy = find_nearest(elt[1][0] , elt[1][5])
Z[idx,idy] = elt[0]
plt.rc('text', usetex=True)
im = plt.imshow(Z ,cmap = plt.cm.binary , extent=(-2.2,2.2,-1,1))
plt.xlabel("Lateral Velocity $\dot{p_y} \hspace{2mm} [m.s^{-1}]$" , fontsize=12)
plt.ylabel("Forward Velocity $\dot{p_x} \hspace{2mm} [m.s^{-1}]$" , fontsize=12)
plt.title("Viable Operating Regions (DDP and foot optimization)" , fontsize=14)
# plt.figure()
# for elt in res1 :
# idx , idy = find_nearest(elt[1][0] , elt[1][5])
# Z_osqp[idx,idy] = elt[0]
# plt.rc('text', usetex=True)
# im = plt.imshow(Z_osqp ,cmap = plt.cm.binary , extent=(-2.2,2.2,-1,1))
# plt.xlabel("Lateral Velocity $\dot{p_y} \hspace{2mm} [m.s^{-1}]$" , fontsize=12)
# plt.ylabel("Forward Velocity $\dot{p_x} \hspace{2mm} [m.s^{-1}]$" , fontsize=12)
# plt.title("Viable Operating Regions OSQP" , fontsize=14)
plt.show()
|
{"hexsha": "4a3cf0ebbf9dcb5b6fa817c7123663bfad625166", "size": 1948, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/quadruped_reactive_walking/crocoddyl_eval/test_4/analyse_simu.py", "max_stars_repo_name": "nim65s/quadruped-reactive-walking", "max_stars_repo_head_hexsha": "1e0f4069fd11af85abf10bfc8f9d66200c672646", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-03T10:59:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T15:05:25.000Z", "max_issues_repo_path": "python/quadruped_reactive_walking/crocoddyl_eval/test_4/analyse_simu.py", "max_issues_repo_name": "nim65s/quadruped-reactive-walking", "max_issues_repo_head_hexsha": "1e0f4069fd11af85abf10bfc8f9d66200c672646", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/quadruped_reactive_walking/crocoddyl_eval/test_4/analyse_simu.py", "max_forks_repo_name": "nim65s/quadruped-reactive-walking", "max_forks_repo_head_hexsha": "1e0f4069fd11af85abf10bfc8f9d66200c672646", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-17T13:34:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T10:58:37.000Z", "avg_line_length": 25.2987012987, "max_line_length": 87, "alphanum_fraction": 0.6093429158, "include": true, "reason": "import numpy", "num_tokens": 669}
|
# targets for phase-in / phase-out of policies
pinft = 0.4*pinf_o # infection prob at meeting
#socialmaxyyt = 2 # max no. people met outside firm young_young
#socialmaxoyt = 1 # max no. people met outside firm old_young
#socialmaxoot = 0.5 # max no. people met outside firm old_old
#phomeofficet = 1
#pshopt = [[0.85,0.5,1],[0.85,0.5,1]]
#workmeett = [4,5,8,2] # max number of people met at work: man / ser / food /public
#shopmeett = [5,14,10] # max number of people met at each shopping instant man / ser / food
# direct change in variable through policy
tauadj = 0
#genhomeoffice = true
ptravel = 0
#bailoutprogram = true # initially no bailouts
#shorttimeprogram = true # initially no short term program
|
{"hexsha": "a4ff8ec76637bfc0292f33425fc69db1686ec9e5", "size": 719, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "policy_only_xi.jl", "max_stars_repo_name": "jasperhepp/ace_covid19", "max_stars_repo_head_hexsha": "d2ba0c066ccfdb2523c03f3fc334e0ef4c102adc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-06-24T10:52:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-28T17:27:55.000Z", "max_issues_repo_path": "policy_only_xi.jl", "max_issues_repo_name": "jasperhepp/ace_covid19", "max_issues_repo_head_hexsha": "d2ba0c066ccfdb2523c03f3fc334e0ef4c102adc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-25T11:02:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-25T11:49:59.000Z", "max_forks_repo_path": "policy_only_xi.jl", "max_forks_repo_name": "jasperhepp/ace_covid19", "max_forks_repo_head_hexsha": "d2ba0c066ccfdb2523c03f3fc334e0ef4c102adc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-07T18:23:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T15:58:52.000Z", "avg_line_length": 27.6538461538, "max_line_length": 92, "alphanum_fraction": 0.7148817803, "num_tokens": 237}
|
module fftpack_precision
! Explicit typing only
implicit none
! Everything is private unless stated otherwise
private
public :: wp, ip
public :: pimach, epmach
!-----------------------------------------------
! Dictionary: precision constants
!-----------------------------------------------
integer, parameter :: ip = selected_int_kind(r=9)
integer, parameter :: sp = selected_real_kind(p=6, r=37)
integer, parameter :: dp = selected_real_kind(p=15, r=307)
integer, parameter :: qp = selected_real_kind(p=33, r=4931)
integer, parameter :: wp = dp
!-----------------------------------------------
contains
pure function pimach() result (return_value)
!-----------------------------------------------
! Dummy arguments
!-----------------------------------------------
real (wp) :: return_value
!-----------------------------------------------
return_value = 3.141592653589793238462643383279502884197169399375105820974_wp
end function pimach
pure function epmach() result (return_value)
!
! Purpose:
!
! Computes an approximate machine epsilon (accuracy), i.e.,
!
! the smallest number x of the kind wp such that 1 + x > 1.
!
!-----------------------------------------------
! Dummy arguments
!-----------------------------------------------
real (wp) :: return_value
!-----------------------------------------------
return_value = epsilon(1.0_wp)
end function epmach
end module fftpack_precision
|
{"hexsha": "00b8720ad320c94ba8e10946260ece6162f14fca", "size": 1647, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/fftpack_precision.f90", "max_stars_repo_name": "jbdv-no/modern_fftpack", "max_stars_repo_head_hexsha": "6909d44988925dcae1ee478c06be31e5605d3974", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-05-06T18:42:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T11:35:56.000Z", "max_issues_repo_path": "src/fftpack_precision.f90", "max_issues_repo_name": "jlokimlin/fftpack6.0", "max_issues_repo_head_hexsha": "6909d44988925dcae1ee478c06be31e5605d3974", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2016-05-07T21:36:00.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-09T16:44:47.000Z", "max_forks_repo_path": "src/fftpack_precision.f90", "max_forks_repo_name": "jlokimlin/fftpack6.0", "max_forks_repo_head_hexsha": "6909d44988925dcae1ee478c06be31e5605d3974", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-05-27T12:03:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T05:49:34.000Z", "avg_line_length": 29.4107142857, "max_line_length": 85, "alphanum_fraction": 0.4456587735, "num_tokens": 329}
|
import datetime
import glob
import imageio
import json
import numpy as np
import os
import psutil
import subprocess
import sys
import time
import models
import tensorflow as tf
import keras.backend as K
from keras.utils import generic_utils
from keras.optimizers import Adam, SGD
# NOTE: VGGFace is used by load_vgg() below but was never imported in the
# original file; assuming the external keras_vggface package provides it.
from keras_vggface.vggface import VGGFace
# Utils
sys.path.append("../utils")
import general_utils
import data_utils
# Per-pixel weight masks for the weighted L1 losses below: a 2D Gaussian bump,
# shifted 30 rows toward the bottom of the 256x256 frame, that up-weights the
# reconstruction error near the middle of the image (presumably the face region).
w = 256
a = np.exp(-np.linspace(-w//2+1, w//2, w)**2/(50/(w/150))**2)
b = np.exp(-np.linspace(-w//2+1, w//2, w)**2/(50/(w/200))**2)
gaussian_overlap = a[:, np.newaxis] * b[np.newaxis, :]
gaussian_overlap = np.vstack((np.zeros((30, w)), gaussian_overlap[:-30]))
gaussian_overlap_tf = tf.convert_to_tensor(gaussian_overlap, dtype=tf.float32)
# Left/right copies of the mask for the double-width identity-image outputs.
gaussian_overlap_left_tf = tf.convert_to_tensor(np.hstack((gaussian_overlap, np.zeros((w, w)))), dtype=tf.float32)
gaussian_overlap_right_tf = tf.convert_to_tensor(np.hstack((np.zeros((w, w)), gaussian_overlap)), dtype=tf.float32)
vgg = None
def load_vgg(model='vgg16', input_shape=(256, 256, 3), pooling='avg'):
global vgg
vgg = VGGFace(model=model, include_top=False, input_shape=input_shape, pooling=pooling)
def vgg_l1_loss(y_true, y_pred):
    vgg_loss = tf.multiply(tf.ones(y_true.shape.as_list()[:-1]), K.mean(K.square(vgg(y_pred) - vgg(y_true)), axis=-1))
l1_loss = K.mean(K.abs(y_pred - y_true), axis=-1)
return vgg_loss + l1_loss
def vgg_l1_weighted_loss(y_true, y_pred):
    vgg_loss = tf.multiply(tf.ones(y_true.shape.as_list()[:-1]), K.mean(K.square(vgg(y_pred) - vgg(y_true)), axis=-1))
l1_loss = K.mean(K.abs(y_pred - y_true), axis=-1)
l1_weighted_loss = l1_loss + tf.multiply(l1_loss, gaussian_overlap_tf)
return vgg_loss + l1_weighted_loss
def l1_weighted_identity_loss(y_true, y_pred):
l1_loss = K.mean(K.abs(y_pred - y_true), axis=-1)
return l1_loss + tf.multiply(l1_loss, gaussian_overlap_left_tf) + tf.multiply(l1_loss, gaussian_overlap_right_tf)
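# NOTE (not in the original file): train() below selects vgg_l1_weighted_identity_loss
# and vgg_l1_identity_loss when use_vgg_loss and use_identity_image are set, but the
# original source never defined them. The two functions below are minimal sketches
# reconstructed by analogy with vgg_l1_weighted_loss and l1_weighted_identity_loss
# above; they are an assumption, not the author's verified implementation.
def vgg_l1_weighted_identity_loss(y_true, y_pred):
    # VGG feature loss plus L1 loss up-weighted on both halves of the double-width output.
    vgg_loss = tf.multiply(tf.ones(y_true.shape.as_list()[:-1]), K.mean(K.square(vgg(y_pred) - vgg(y_true)), axis=-1))
    l1_loss = K.mean(K.abs(y_pred - y_true), axis=-1)
    l1_weighted = l1_loss + tf.multiply(l1_loss, gaussian_overlap_left_tf) + tf.multiply(l1_loss, gaussian_overlap_right_tf)
    return vgg_loss + l1_weighted
def vgg_l1_identity_loss(y_true, y_pred):
    # VGG feature loss plus plain (unweighted) L1 loss.
    vgg_loss = tf.multiply(tf.ones(y_true.shape.as_list()[:-1]), K.mean(K.square(vgg(y_pred) - vgg(y_true)), axis=-1))
    l1_loss = K.mean(K.abs(y_pred - y_true), axis=-1)
    return vgg_loss + l1_loss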
def l1_weighted_loss(y_true, y_pred):
reconstruction_loss = K.mean(K.abs(y_pred - y_true), axis=-1)
return reconstruction_loss + tf.multiply(reconstruction_loss, gaussian_overlap_tf)
def l1_loss(y_true, y_pred):
return K.mean(K.abs(y_pred - y_true), axis=-1)
def check_this_process_memory():
memoryUse = psutil.Process(os.getpid()).memory_info()[0]/2.**30 # memory use in GB
print('memory use: %.4f' % memoryUse, 'GB')
def purge_weights(n, model_name):
gen_weight_files = glob.glob('../../models/%s/gen_weights*' % model_name)
for gen_weight_file in gen_weight_files[:-n]:
os.remove(os.path.realpath(gen_weight_file))
def train(**kwargs):
"""
Train model
Load the whole train data in memory for faster operations
args: **kwargs (dict) keyword arguments that specify the model hyperparameters
"""
# Roll out the parameters
img_dim = kwargs["img_dim"]
patch_size = kwargs["patch_size"]
image_data_format = kwargs["image_data_format"]
generator_type = kwargs["generator_type"]
dset = kwargs["dset"]
use_identity_image = kwargs["use_identity_image"]
batch_size = kwargs["batch_size"]
n_batch_per_epoch = kwargs["n_batch_per_epoch"]
nb_epoch = kwargs["nb_epoch"]
augment_data = kwargs["augment_data"]
model_name = kwargs["model_name"]
save_weights_every_n_epochs = kwargs["save_weights_every_n_epochs"]
visualize_images_every_n_epochs = kwargs["visualize_images_every_n_epochs"]
save_only_last_n_weights = kwargs["save_only_last_n_weights"]
use_mbd = kwargs["use_mbd"]
label_smoothing = kwargs["use_label_smoothing"]
label_flipping_prob = kwargs["label_flipping_prob"]
use_l1_weighted_loss = kwargs["use_l1_weighted_loss"]
use_vgg_loss = kwargs["use_vgg_loss"]
vgg_model = kwargs["vgg_model"]
vgg_pooling = kwargs["vgg_pooling"]
prev_model = kwargs["prev_model"]
change_model_name_to_prev_model = kwargs["change_model_name_to_prev_model"]
discriminator_optimizer = kwargs["discriminator_optimizer"]
n_run_of_gen_for_1_run_of_disc = kwargs["n_run_of_gen_for_1_run_of_disc"]
load_all_data_at_once = kwargs["load_all_data_at_once"]
MAX_FRAMES_PER_GIF = kwargs["MAX_FRAMES_PER_GIF"]
dont_train = kwargs["dont_train"]
# batch_size = args.batch_size
# n_batch_per_epoch = args.n_batch_per_epoch
# nb_epoch = args.nb_epoch
# save_weights_every_n_epochs = args.save_weights_every_n_epochs
# generator_type = args.generator_type
# patch_size = args.patch_size
# label_smoothing = False
# label_flipping_prob = False
# dset = args.dset
# use_mbd = False
if dont_train:
# Get the number of non overlapping patch and the size of input image to the discriminator
nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_data_format)
if use_identity_image:
gen_input_img_dim = [img_dim[0], 2*img_dim[1], img_dim[2]]
else:
gen_input_img_dim = img_dim
generator_model = models.load("generator_unet_%s" % generator_type,
gen_input_img_dim,
nb_patch,
use_mbd,
batch_size,
model_name)
generator_model.compile(loss='mae', optimizer='adam')
return generator_model
# Check and make the dataset
# If .h5 file of dset is not present, try making it
if load_all_data_at_once:
if not os.path.exists("../../data/processed/%s_data.h5" % dset):
print("dset %s_data.h5 not present in '../../data/processed'!" % dset)
if not os.path.exists("../../data/%s/" % dset):
print("dset folder %s not present in '../../data'!\n\nERROR: Dataset .h5 file not made, and dataset not available in '../../data/'.\n\nQuitting." % dset)
return
else:
if not os.path.exists("../../data/%s/train" % dset) or not os.path.exists("../../data/%s/val" % dset) or not os.path.exists("../../data/%s/test" % dset):
print("'train', 'val' or 'test' folders not present in dset folder '../../data/%s'!\n\nERROR: Dataset must contain 'train', 'val' and 'test' folders.\n\nQuitting." % dset)
return
else:
print("Making %s dataset" % dset)
subprocess.call(['python3', '../data/make_dataset.py', '../../data/%s' % dset, '3'])
print("Done!")
else:
if not os.path.exists(dset):
print("dset does not exist! Given:", dset)
return
if not os.path.exists(os.path.join(dset, 'train')):
print("dset does not contain a 'train' dir! Given dset:", dset)
return
if not os.path.exists(os.path.join(dset, 'val')):
print("dset does not contain a 'val' dir! Given dset:", dset)
return
epoch_size = n_batch_per_epoch * batch_size
init_epoch = 0
if prev_model:
print('\n\nLoading prev_model from', prev_model, '...\n\n')
prev_model_latest_gen = sorted(glob.glob(os.path.join('../../models/', prev_model, '*gen*epoch*.h5')))[-1]
print(prev_model_latest_gen)
# Find prev model name, epoch
if change_model_name_to_prev_model:
model_name = prev_model_latest_gen.split('models')[-1].split('/')[1]
init_epoch = int(prev_model_latest_gen.split('epoch')[1][:5]) + 1
# img_dim = X_target_train.shape[-3:]
# img_dim = (256, 256, 3)
# Get the number of non overlapping patch and the size of input image to the discriminator
nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_data_format)
if use_identity_image:
gen_input_img_dim = [img_dim[0], 2*img_dim[1], img_dim[2]]
else:
gen_input_img_dim = img_dim
try:
# Create optimizer
opt_generator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# Load generator model
generator_model = models.load("generator_unet_%s" % generator_type,
gen_input_img_dim,
nb_patch,
use_mbd,
batch_size,
model_name)
if use_vgg_loss:
load_vgg(model=vgg_model, input_shape=gen_input_img_dim, pooling=vgg_pooling)
if use_l1_weighted_loss and use_identity_image:
loss = vgg_l1_weighted_identity_loss
elif use_l1_weighted_loss and not use_identity_image:
loss = vgg_l1_weighted_loss
elif not use_l1_weighted_loss and use_identity_image:
loss = vgg_l1_identity_loss
else:
loss = vgg_l1_loss
else:
if use_l1_weighted_loss and use_identity_image:
loss = l1_weighted_identity_loss
elif use_l1_weighted_loss and not use_identity_image:
loss = l1_weighted_loss
else:
loss = l1_loss
generator_model.compile(loss=loss, optimizer=opt_generator)
# Load prev_model
if prev_model:
generator_model.load_weights(prev_model_latest_gen)
# Load .h5 data all at once
print('\n\nLoading data...\n\n')
check_this_process_memory()
if load_all_data_at_once:
X_target_train, X_sketch_train, X_target_val, X_sketch_val = data_utils.load_data(dset, image_data_format)
check_this_process_memory()
print('X_target_train: %.4f' % (X_target_train.nbytes/2**30), "GB")
print('X_sketch_train: %.4f' % (X_sketch_train.nbytes/2**30), "GB")
print('X_target_val: %.4f' % (X_target_val.nbytes/2**30), "GB")
print('X_sketch_val: %.4f' % (X_sketch_val.nbytes/2**30), "GB")
# To generate training data
X_target_batch_gen_train, X_sketch_batch_gen_train = data_utils.data_generator(X_target_train, X_sketch_train, batch_size, augment_data=augment_data)
X_target_batch_gen_val, X_sketch_batch_gen_val = data_utils.data_generator(X_target_val, X_sketch_val, batch_size, augment_data=False)
# Load data from images through an ImageDataGenerator
else:
if use_identity_image:
X_batch_gen_train = data_utils.data_generator_from_dir(os.path.join(dset, 'train'), target_size=(img_dim[0], 3*img_dim[1]), batch_size=batch_size)
X_batch_gen_val = data_utils.data_generator_from_dir(os.path.join(dset, 'val'), target_size=(img_dim[0], 3*img_dim[1]), batch_size=batch_size)
else:
X_batch_gen_train = data_utils.data_generator_from_dir(os.path.join(dset, 'train'), target_size=(img_dim[0], 2*img_dim[1]), batch_size=batch_size)
X_batch_gen_val = data_utils.data_generator_from_dir(os.path.join(dset, 'val'), target_size=(img_dim[0], 2*img_dim[1]), batch_size=batch_size)
check_this_process_memory()
if dont_train:
raise KeyboardInterrupt
# Setup environment (logging directory etc)
general_utils.setup_logging(**kwargs)
# Losses
gen_losses = []
# Start training
print("\n\nStarting training...\n\n")
# For each epoch
for e in range(nb_epoch):
# Initialize progbar and batch counter
# progbar = generic_utils.Progbar(epoch_size)
batch_counter = 0
gen_loss_epoch = 0
start = time.time()
# For each batch
# for X_target_batch, X_sketch_batch in data_utils.gen_batch(X_target_train, X_sketch_train, batch_size):
for batch in range(n_batch_per_epoch):
# Create a batch to feed the generator model
if load_all_data_at_once:
X_gen_target, X_gen_sketch = next(X_target_batch_gen_train), next(X_sketch_batch_gen_train)
else:
X_gen_target, X_gen_sketch = data_utils.load_data_from_data_generator_from_dir(X_batch_gen_train, img_dim=img_dim, augment_data=augment_data, use_identity_image=use_identity_image)
# Train generator
gen_loss = generator_model.train_on_batch(X_gen_sketch, X_gen_target)
# Add losses
gen_loss_epoch += gen_loss
print("Epoch", str(init_epoch+e+1), "batch", str(batch+1), "G_loss", gen_loss)
# Append loss
gen_losses.append(gen_loss_epoch/n_batch_per_epoch)
# Save images for visualization
if (e + 1) % visualize_images_every_n_epochs == 0:
data_utils.plot_generated_batch(X_gen_target, X_gen_sketch, generator_model, batch_size, image_data_format,
model_name, "training", init_epoch + e + 1, MAX_FRAMES_PER_GIF)
# Get new images for validation
if load_all_data_at_once:
X_target_batch_val, X_sketch_batch_val = next(X_target_batch_gen_val), next(X_sketch_batch_gen_val)
else:
X_target_batch_val, X_sketch_batch_val = data_utils.load_data_from_data_generator_from_dir(X_batch_gen_val, img_dim=img_dim, augment_data=False, use_identity_image=use_identity_image)
# Predict and validate
data_utils.plot_generated_batch(X_target_batch_val, X_sketch_batch_val, generator_model, batch_size, image_data_format,
model_name, "validation", init_epoch + e + 1, MAX_FRAMES_PER_GIF)
# Plot losses
data_utils.plot_gen_losses(gen_losses, model_name, init_epoch)
# Save weights
if (e + 1) % save_weights_every_n_epochs == 0:
# Delete all but the last n weights
purge_weights(save_only_last_n_weights, model_name)
# Save gen weights
gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%05d_genLoss%.04f.h5' % (model_name, init_epoch + e, gen_losses[-1]))
print("Saving", gen_weights_path)
generator_model.save_weights(gen_weights_path, overwrite=True)
check_this_process_memory()
print('[{0:%Y/%m/%d %H:%M:%S}] Epoch {1:d}/{2:d} END, Time taken: {3:.4f} seconds'.format(datetime.datetime.now(), init_epoch + e + 1, init_epoch + nb_epoch, time.time() - start))
print('------------------------------------------------------------------------------------')
except KeyboardInterrupt:
if dont_train:
return generator_model
else:
pass
# SAVE THE MODEL
# Save the model as it is, so that it can be loaded using -
# ```from keras.models import load_model; gen = load_model('generator_latest.h5')```
gen_weights_path = '../../models/%s/generator_latest.h5' % (model_name)
print("Saving", gen_weights_path)
if use_l1_weighted_loss:
generator_model.compile(loss='mae', optimizer=opt_generator)
generator_model.save(gen_weights_path, overwrite=True)
# Save model as json string
generator_model_json_string = generator_model.to_json()
print("Saving", '../../models/%s/generator_latest.txt' % model_name)
with open('../../models/%s/generator_latest.txt' % model_name, 'w') as outfile:
        outfile.write(generator_model_json_string)
# Save model as json
generator_model_json_data = json.loads(generator_model_json_string)
print("Saving", '../../models/%s/generator_latest.json' % model_name)
with open('../../models/%s/generator_latest.json' % model_name, 'w') as outfile:
json.dump(generator_model_json_data, outfile)
print("Done.")
return generator_model
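# Minimal reload sketch, mirroring the save/load comment above; the model
# name below is a hypothetical placeholder:
# from keras.models import load_model
# gen = load_model('../../models/<model_name>/generator_latest.h5')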
|
{"hexsha": "77b18e35a369fa155e8e4ce837ccff7c0eafbf5d", "size": 16066, "ext": "py", "lang": "Python", "max_stars_repo_path": "pix2pix/src/model/train_only_generator.py", "max_stars_repo_name": "voletiv/DeepLearningImplementations", "max_stars_repo_head_hexsha": "22ec85cdc7daa308ff2bec81962ca77e5959a70b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pix2pix/src/model/train_only_generator.py", "max_issues_repo_name": "voletiv/DeepLearningImplementations", "max_issues_repo_head_hexsha": "22ec85cdc7daa308ff2bec81962ca77e5959a70b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pix2pix/src/model/train_only_generator.py", "max_forks_repo_name": "voletiv/DeepLearningImplementations", "max_forks_repo_head_hexsha": "22ec85cdc7daa308ff2bec81962ca77e5959a70b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-03-23T15:52:11.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-23T15:52:11.000Z", "avg_line_length": 44.258953168, "max_line_length": 203, "alphanum_fraction": 0.6365616831, "include": true, "reason": "import numpy", "num_tokens": 3806}
|
#!/usr/bin/env python
import rospy, roslib, sys, cv2, time
import numpy as np
from std_msgs.msg import Int32
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from visual_servoing.srv import *
from std_srvs.srv import Empty as EmptySrv
from gazebo_ros_link_attacher.srv import Attach, AttachRequest, AttachResponse
from cv_bridge import CvBridge, CvBridgeError
from datetime import datetime
modelParent = ["scara"]
linkParent = ["scara_link4"]
modelChild = ["cube_red","cube_green","cube_blue"]
linkChild = ["cube_red","cube_green","cube_blue"]
pickPos = [0.16,-0.2,0.8,0]
placePos = [[0.2,0.2,0.028,0],
[0.2,0.2,0.0485,0],
[0.2,0.2,0.069,0]]
homePos = [0.4,0,0.05,0]
class scaraCam:
def __init__(self):
self.bridge = CvBridge()
self.imgRGB = rospy.Subscriber("/scaraCam/color/image_raw",Image,self.callback_imgRGB)
self.pubProcImg = rospy.Publisher('/scaraCam/color/image_proc', Image, queue_size=10)
self.frame = np.zeros((480,640,3), np.uint8)
self.frameCubes = np.zeros((480,640,3), np.uint8)
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
def callback_imgRGB(self,data):
try:
img = self.bridge.imgmsg_to_cv2(data,"passthrough")
self.frame = img.copy()
        except CvBridgeError as e:
            print(e)
def masking(self,color):
self.frameCubes = self.frame.copy()
frameHSV = cv2.cvtColor(self.frameCubes, cv2.COLOR_BGR2HSV)
if color == "red":
mask = cv2.inRange(frameHSV, (0,50,240), (10,255,255))
mask += cv2.inRange(frameHSV, (170,50,240), (180,255,255))
elif color == "green":
mask = cv2.inRange(frameHSV, (50,50,240), (70,255,255))
else:
mask = cv2.inRange(frameHSV, (110,50,240), (130,255,255))
# mask = cv2.erode(mask,self.kernel,iterations = 1)
# mask = cv2.dilate(mask,self.kernel,iterations = 1)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
_,contours,_ = cv2.findContours(mask, 1, 2)
max_area = 0; c = 0; i=0
while i < len(contours):
area = cv2.contourArea(contours[i])
if max_area < area:
max_area = area
c = i
i += 1
if max_area >= 4000:
rect = cv2.minAreaRect(contours[c])
box = cv2.boxPoints(rect)
box = np.int0(box)
self.frameCubes = cv2.drawContours(self.frameCubes,[box],0,(0,0,0),5)
# cv2.putText(self.frameCubes,str(np.round(rect[0],2)),(20,20), self.font, .5,(0,0,0),2,cv2.LINE_AA)
# cv2.putText(self.frameCubes,str(np.round(rect[2],2)),(20,40), self.font, .5,(0,0,0),2,cv2.LINE_AA)
cv2.circle(self.frameCubes,(320,395), 7, (255,255,255), -1)
cv2.circle(self.frameCubes,(int(round(rect[0][0],0)),int(round(rect[0][1],0))), 5, (0,0,0), -1)
self.pubProcImg.publish(self.bridge.cv2_to_imgmsg(self.frameCubes,"bgr8"))
# cv2.imshow('Scara Cam View',sc.frameCubes)
# cv2.waitKey(1)
        if max_area >= 4000:
            return([rect[0][0],rect[0][1],rect[2],max_area])
        else:
            return([0,0,0,0])
q = [0,0,0,0]
pos = [0,0,0,0]
def callback_JointMeas(data):
global q,pos
q = data.position
pos[0] = 0.2*np.cos(q[0])+0.2*np.cos(q[0]+q[1])
pos[1] = 0.2*np.sin(q[0])+0.2*np.sin(q[0]+q[1])
pos[2] = 0.2-q[2]-0.1-0.01
def RotZ(theta):
return([[np.cos(theta),-np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
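# Illustrative check (values approximate): RotZ(np.pi/2) is [[0,-1],[1,0]],
# so np.dot(RotZ(np.pi/2), [10, 0]) maps the error vector onto the second
# axis, giving roughly [0, 10].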
def SetPoint(pos,clearFlag,waitTime):
    res = IKSrv(pos).q
    J1Srv(res[0])
    J2Srv(res[1])
    if clearFlag == 0: J3Srv(res[2])
    elif clearFlag == 1: J3Srv(-0.5)
    else: J3Srv(res[2]-0.01)
    J4Srv(res[3])
    time.sleep(waitTime)
if __name__ == '__main__':
rospy.init_node('SCARA_CamProcess', anonymous=True)
# Unpausing the gazebo physics
print("Visual Seroving : Waiting for required services...")
rospy.wait_for_service('/gazebo/unpause_physics')
rospy.wait_for_service('/scara/IK')
rospy.wait_for_service('/scara/Joint1_Pos_Ref')
rospy.wait_for_service('/scara/Joint2_Pos_Ref')
rospy.wait_for_service('/scara/Joint3_Pos_Ref')
rospy.wait_for_service('/scara/Joint4_Pos_Ref')
rospy.wait_for_service('/link_attacher_node/attach')
print("Visual Seroving : All services ready...")
unpause = rospy.ServiceProxy('/gazebo/unpause_physics', EmptySrv)
IKSrv = rospy.ServiceProxy('/scara/IK', IK_srv)
J1Srv = rospy.ServiceProxy('/scara/Joint1_Pos_Ref', PC_srv)
J2Srv = rospy.ServiceProxy('/scara/Joint2_Pos_Ref', PC_srv)
J3Srv = rospy.ServiceProxy('/scara/Joint3_Pos_Ref', PC_srv)
J4Srv = rospy.ServiceProxy('/scara/Joint4_Pos_Ref', PC_srv)
AttSrv = rospy.ServiceProxy('/link_attacher_node/attach', Attach)
DetSrv = rospy.ServiceProxy('/link_attacher_node/detach', Attach)
pubTT = rospy.Publisher("/turntable/tta_velocity_controller/command",Float64, queue_size=10)
pubRecord = rospy.Publisher('/camRecorder', Int32, queue_size=1)
rospy.Subscriber("/scara/joint_states", JointState, callback_JointMeas)
unpause() # Unpausing the gazebo physics
time.sleep(1)
pubTT.publish(1)
frameSize = [640,480]
desired = [320,395]
Kp = 0.6
sc = scaraCam()
# pubRecord.publish(1); time.sleep(2)
SetPoint(homePos,0,2)
for i in range(3):
SetPoint(pickPos,1,2) # Go to pick location
flagDown = 0
start = time.time()
        # Track the cube until it is grasped
while not rospy.is_shutdown():
if i == 0:
res = sc.masking("red")
elif i == 1:
res = sc.masking("green")
else:
res = sc.masking("blue")
if res[3] != 0:
error = np.array([res[1]-desired[1],res[0]-desired[0]])
error = np.dot(RotZ(q[0]+q[1]),error)
dPos = -Kp*error*(0.03/120)
newPos = np.array(pos[0:2])+dPos
# print(np.degrees(np.unwrap([np.radians(res[2])+q[0]+q[1]])))
newPos = np.concatenate((newPos,[0.0275,-np.radians(res[2])+q[0]+q[1]]),axis = None)
if np.linalg.norm(dPos)*1000 < 1 or flagDown == 1:
SetPoint(newPos,0,0)
flagDown = 1
if time.time()-start > 5 and np.linalg.norm(dPos)*1000 < 0.5 and res[3] > 7000 and abs(newPos[2]-pos[2])*1000<1:
AttSrv(modelParent[0],linkParent[0],modelChild[i],linkChild[i]); time.sleep(0) # Grasp the object
SetPoint(newPos,2,0.25) # Clear table after grasping
# print("Cube "+str(i+1)+" Grasped")
SetPoint(newPos,1,0.75) # Retract after grasping
break
else:
SetPoint(newPos,1,0)
SetPoint(placePos[i],1,1.50) # Go to place location
# Changing turntable directions
if i==0:
pubTT.publish(-1)
elif i==1:
pubTT.publish(1)
SetPoint(placePos[i],0,0.75) # Move down
res = DetSrv(modelParent[0],linkParent[0],modelChild[i],linkChild[i]); time.sleep(1.0) # Detach the object
SetPoint(placePos[i],1,0.75) # Move up
SetPoint(homePos,0,1)
# time.sleep(2); pubRecord.publish(0)
|
{"hexsha": "1671c579e698b00f2a7edc5ae5c76f149f5d3a3a", "size": 6632, "ext": "py", "lang": "Python", "max_stars_repo_path": "visual_servoing/scripts/3_PickPlace_VS.py", "max_stars_repo_name": "nsabhari/Visual_Servoing", "max_stars_repo_head_hexsha": "36905434a0ad425fd44f0b5997f09e5f5a76af45", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-05T18:19:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-14T23:59:19.000Z", "max_issues_repo_path": "visual_servoing/scripts/3_PickPlace_VS.py", "max_issues_repo_name": "nsabhari/Visual_Servoing", "max_issues_repo_head_hexsha": "36905434a0ad425fd44f0b5997f09e5f5a76af45", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visual_servoing/scripts/3_PickPlace_VS.py", "max_forks_repo_name": "nsabhari/Visual_Servoing", "max_forks_repo_head_hexsha": "36905434a0ad425fd44f0b5997f09e5f5a76af45", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3626943005, "max_line_length": 117, "alphanum_fraction": 0.6774728589, "include": true, "reason": "import numpy", "num_tokens": 2310}
|
from Good_Boids_module.Update_Boids import Boids
import numpy as np
from nose.tools import assert_almost_equal, assert_greater
from nose.tools import assert_less, assert_equal
from numpy.testing import assert_array_equal
import os
import yaml
from Good_Boids_module.tests.record_fixtures import configuration_file
fixtures = yaml.load(open('fixture.yaml'), Loader=yaml.SafeLoader)
configuration_file_data = yaml.load(open(configuration_file), Loader=yaml.SafeLoader)
def test_good_boids_for_regression():
before_positions = list(fixtures["before_positions"])
before_velocities = list(fixtures["before_velocities"])
new_positions = list(Boids(configuration_file).get_raw_positions(before_positions, before_velocities))
after_positions = list(fixtures["after_positions"])
new_velocities = list(Boids(configuration_file).get_raw_velocities(before_positions, before_velocities))
after_velocities = list(fixtures["after_velocities"])
    for i in range(len(new_positions[0])):
assert_almost_equal(new_positions[0][i], after_positions[0][i], delta=0.1)
assert_almost_equal(new_positions[1][i], after_positions[1][i], delta=0.1)
assert_almost_equal(new_velocities[0][i], after_velocities[0][i], delta=15)
assert_almost_equal(new_velocities[1][i], after_velocities[1][i], delta=15)
test_good_boids_for_regression()
def test_good_boids_initialization():
boids_positions = Boids(configuration_file).positions
boids_velocities = Boids(configuration_file).velocities
assert_equal(configuration_file_data['birds_number'], len(boids_positions[0]))
assert_equal(configuration_file_data['birds_number'], Boids(configuration_file).birds_num)
for boid in range(Boids(configuration_file).birds_num):
assert_less(boids_positions[0][boid], configuration_file_data['position_upper_limits'][0])
assert_greater(boids_positions[0][boid], configuration_file_data['position_lower_limits'][0])
assert_less(boids_positions[1][boid], configuration_file_data['position_upper_limits'][1])
assert_greater(boids_positions[1][boid], configuration_file_data['position_lower_limits'][1])
assert_less(boids_velocities[0][boid], configuration_file_data['velocity_upper_limits'][0])
assert_greater(boids_velocities[0][boid], configuration_file_data['velocity_lower_limits'][0])
assert_less(boids_velocities[1][boid], configuration_file_data['velocity_upper_limits'][1])
assert_greater(boids_velocities[1][boid], configuration_file_data['velocity_lower_limits'][1])
test_good_boids_initialization()
|
{"hexsha": "790d7f2469b623df18560268f0e89fc2f0e10bab", "size": 2558, "ext": "py", "lang": "Python", "max_stars_repo_path": "Good_Boids_module/tests/test_the_Good_Boids.py", "max_stars_repo_name": "anest1s/Refactoring_the_Bad_Boids", "max_stars_repo_head_hexsha": "d569de4372d96917ef6aa7f1ca8acdaa09c26e0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Good_Boids_module/tests/test_the_Good_Boids.py", "max_issues_repo_name": "anest1s/Refactoring_the_Bad_Boids", "max_issues_repo_head_hexsha": "d569de4372d96917ef6aa7f1ca8acdaa09c26e0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Good_Boids_module/tests/test_the_Good_Boids.py", "max_forks_repo_name": "anest1s/Refactoring_the_Bad_Boids", "max_forks_repo_head_hexsha": "d569de4372d96917ef6aa7f1ca8acdaa09c26e0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.16, "max_line_length": 108, "alphanum_fraction": 0.7888975762, "include": true, "reason": "import numpy,from numpy", "num_tokens": 568}
|
Require Import Coq.Strings.String.
Require Import Coq.PArith.BinPos.
Require Import ExtLib.Core.RelDec.
Require Import ExtLib.Data.String.
Require Import ExtLib.Data.Nat.
Require Import ExtLib.Data.HList.
Require Import MirrorCore.Lemma.
Require Import MirrorCore.TypesI.
Require Import MirrorCore.Lambda.Expr.
Require Import MirrorCore.RTac.RTac.
Require Import MirrorCore.provers.DefaultProver.
Require MirrorCore.syms.SymEnv.
Require MirrorCore.syms.SymSum.
Require Import MirrorCore.Subst.FMapSubst.
Require Import MirrorCore.Lambda.ExprLift.
Require Import MirrorCore.Lambda.ExprSubst.
Require Import MirrorCore.Lambda.ExprUnify_simul.
Require Import MirrorCore.Lambda.Red.
Require Import MirrorCore.Lambda.AppN.
Require Import MirrorCore.Lambda.RedAll.
Require Import MirrorCore.Lambda.ExprVariables.
Require Import MirrorCharge.ILogicFunc.
Require Import MirrorCharge.OrderedCanceller.
Require Import MirrorCharge.BILNormalize.
Require Import MirrorCharge.SynSepLog.
Require Import MirrorCharge.SepLogFold.
Require Import MirrorCharge.Java.Semantics.
Require Import MirrorCharge.Java.JavaType.
Require Import MirrorCharge.Java.JavaFunc.
Require Import MirrorCharge.ModularFunc.ILogicFunc.
Require Import MirrorCharge.ModularFunc.BILogicFunc.
Require Import MirrorCharge.RTac.ReifyLemma.
Require Import MirrorCharge.RTac.PullConjunct.
Require Import MirrorCore.Reify.Reify.
Require Import MirrorCharge.Java.Reify.
Require Import MirrorCharge.RTac.Subst.
Require Import Java.Language.Lang.
Require Import Java.Language.Program.
Require Import MirrorCharge.RTac.Apply.
Require Import MirrorCharge.RTac.Cancellation.
Require Import MirrorCharge.RTac.Intro.
Require Import MirrorCharge.RTac.EApply.
Require Import MirrorCharge.RTac.Instantiate.
Require Import Coq.Arith.Peano_dec.
Fixpoint mkStars n P Q : expr typ func :=
match n with
| 0 => mkStar tySasn P Q
| S n => mkStar tySasn (mkStars n P Q) (mkStars n P Q)
end.
Definition cancelTest n :=
mkForall tySasn tyProp
(mkForall tySasn tyProp
(mkEntails tySasn (mkStars n (Var 0) (Var 1)) (mkStars n (Var 1) (Var 0)))).
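(* cancelTest n is the reified goal  forall P Q, stars |-- stars',  where
   stars is a balanced tree of 2^n copies of P * Q and stars' is the same
   tree with P and Q swapped; it is used below to exercise the cancellation
   tactic. *)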
Section blurb.
Context {fs : Environment}.
Time Eval vm_compute in typeof_expr nil nil (cancelTest 10).
Check THEN.
Check runOnGoals.
Time Eval vm_compute in
(THEN (REPEAT 10 (INTRO typ func))
(runOnGoals (CANCELLATION typ func tySasn is_pure)))
nil nil 0 0 (CTop nil nil) (ctx_empty (expr := expr typ func)) (cancelTest 10).
Fixpoint search_NoDup
    {A} (A_dec: forall a b: A, {a=b}+{a=b->False}) (l: list A) : option (NoDup l) :=
  match l with
  | nil => Some (NoDup_nil A)
  | a::l' =>
    match In_dec A_dec a l' with
    | left _ => None
    | right notin =>
      match search_NoDup A_dec l' with
      | Some pf => Some (NoDup_cons _ notin pf)
      | None => None
      end
    end
  end.
(*
Definition list_notin_set lst s :=
fold_right (fun a acc => andb (SS.for_all (fun b => negb (string_dec a b)) s) acc) true lst.
Definition method_specI : stac typ (expr typ func) subst :=
fun tus tvs s lst e =>
match e with
| mkEntails [l, mkProgEq [mkProg [P]], mkMethodSpec [C, m, mkVarList [args], mkString [r], p, q]] =>
match C, m with
| Inj (inl (inr (pString Cname))), Inj (inl (inr (pString Mname))) =>
match SM.find Cname (p_classes P) with
| Some Class =>
match SM.find Mname (c_methods Class) with
| Some Method =>
match search_NoDup Coq.Strings.String.string_dec args with
| Some pf =>
match eq_nat_dec (length args) (length (m_params Method)) with
| left pf' =>
if list_notin_set args (modifies (m_body Method)) then
More tus tvs s lst
mkEntails [l, mkProgEq [mkProg [P]],
mkTriple [mkApplyTruncSubst [tyAsn, p, mkSubstList [mkVarList [args], mkExprList [map E_var (m_params Method)]] ], mkCmd [m_body Method],
mkApplyTruncSubst [tyAsn, q, mkSubstList [mkVarList [r::args], mkConsExprList [App fEval (mkExpr [m_ret Method]), mkExprList[map E_var (m_params Method)]]] ]]]
else
@Fail _ _ _
| right _ => @Fail _ _ _
end
| None => @Fail _ _ _
end
| None => @Fail _ _ _
end
| None => @Fail _ _ _
end
| _, _ => @Fail _ _ _
end
| _ => @Fail _ _ _
end.
*)
Require Import MirrorCharge.Java.Semantics.
(** Skip **)
Definition skip_lemma : lemma typ (expr typ func) (expr typ func).
reify_lemma reify_imp rule_skip.
Defined.
Print skip_lemma.
Lemma skip_lemma_sound :
lemmaD (exprD'_typ0 (T:=Prop)) nil nil skip_lemma.
Proof.
unfold lemmaD; simpl; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast in * ; simpl in *.
apply rule_skip. apply H.
Qed.
Example test_skip_lemma : test_lemma skip_lemma. Admitted.
Definition skip_lemma2 : lemma typ (expr typ func) (expr typ func).
reify_lemma reify_imp rule_skip2.
Defined.
Print skip_lemma2.
Example test_skip_lemma2 : test_lemma skip_lemma2. Admitted.
Definition seq_lemma (c1 c2 : cmd) : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@rule_seq c1 c2).
Defined.
Print seq_lemma.
Lemma seq_lemma_sound c1 c2 :
lemmaD (exprD'_typ0 (T:=Prop)) nil nil (seq_lemma c1 c2).
Proof.
unfold lemmaD; simpl; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast in * ; simpl in *.
eapply rule_seq; [apply H | apply H0].
Qed.
Example test_seq_lemma (c1 c2 : cmd) : test_lemma (seq_lemma c1 c2). Admitted.
Definition if_lemma (e : dexpr) (c1 c2 : cmd) : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@rule_if e c1 c2).
Defined.
Require Import ExtLib.Tactics.
(*
Lemma if_lemma_sound e c1 c2 :
lemmaD (exprD'_typ0 (T:=Prop)) nil nil (if_lemma e c1 c2).
Proof.
remember (exprD nil nil (evalDExpr e) tyExpr).
Check rule_if.
unfold lemmaD; simpl.
unfold lemmaD', exprD'_typ0.
unfold le
unfold if_lemma.
destruct o.
unfold exprD in Heqo. simpl in Heqo.
remember (ExprDsimul.ExprDenote.exprD' nil nil tyVal (evalDExpr e)).
destruct o; inv_all; subst; try congruence.
unfold lemmaD, lemmaD', exprD'_typ0, ExprDsimul.ExprDenote.exprD'; simpl in *; intros.
unfold ExprDsimul.ExprDenote.exprD' in *; simpl in *; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast in *; simpl in *.
unfold OpenFunc.typ2_cast_bin, OpenFunc.typ3_cast_bin in *; simpl in *.
unfold exprT, OpenT in *; simpl in *.
rewrite <- Heqo0.
unfold ExprDsimul.ExprDenote.exprD' in Heqo0. simpl in Heqo0.
rewrite <- Heqo0.
repeat red.
setoid_rewrite <- Heqo.
rewrite <- Heqo.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast in * ; simpl in *.
eapply rule_if; [eapply H | eapply H0].
+ unfold lemmaD; simpl; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast in * ; simpl in *.
eapply rule_if; [eapply H | eapply H0].
+ unfold lemmaD; simpl; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast in * ; simpl in *.
eapply rule_if; [eapply H | eapply H0].
Print if_lemma.
vm_compute.
unfold lemmaD'. simpl.
unfold exprD'_typ0. simpl.
unfold ExprDsimul.ExprDenote.exprD'.
simpl.
unfold if_lemma. simpl.
apply rule_skip. apply H.
Qed.
*)
Example test_if_lemma e (c1 c2 : cmd) : test_lemma (if_lemma e c1 c2). Admitted.
Definition read_lemma (x y : var) (f : field) : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@rule_read_fwd x y f).
Defined.
Lemma read_lemma_sound x y f :
lemmaD (exprD'_typ0 (T:=Prop)) nil nil (read_lemma x y f).
Proof.
(*
unfold lemmaD; simpl; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast, OpenFunc.typ3_cast_bin, OpenFunc.typ2_cast_bin, eq_rect_r in * ; simpl in *.
eapply rule_read_fwd; [eapply H | eapply H0].
*)
admit.
Qed.
Example test_read_lemma x y f : test_lemma (read_lemma x y f). Admitted.
Set Printing Width 140.
Definition write_lemma (x : var) (f : field) (e : dexpr) : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@rule_write_fwd x f e).
Defined.
(*
Lemma write_lemma_sound x f e :
lemmaD (exprD'_typ0 (T:=Prop)) nil nil (write_lemma x f e).
Proof.
induction e.
Check evalDExpr.
unfold lemmaD, lemmaD'; simpl; intros.
unfold exprT_App, exprT_Inj, Rcast_val, Rcast, OpenFunc.typ3_cast_bin, OpenFunc.typ2_cast_bin, eq_rect_r,
fPointsto, typ2_cast_bin, BaseFunc.mkString; simpl.
unfold exprD'_typ0; simpl.
unfold ExprDsimul.ExprDenote.exprD'; simpl.
vm_compute.
eapply rule_write_fwd; [eapply H | eapply H0].
Qed.
*)
Example test_write_lemma x f e : test_lemma (write_lemma x f e). Admitted.
Definition assign_lemma (x : var) (e : dexpr) : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@rule_assign_fwd x e).
Defined.
Print assign_lemma.
Example test_assign_lemma x e : test_lemma (assign_lemma x e). Admitted.
Definition alloc_lemma (x : var) (C : class) : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@rule_alloc_fwd x C).
Defined.
Example test_alloc_lemma x C : test_lemma (alloc_lemma x C). Admitted.
Definition pull_exists_lemma : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@pull_exists val).
Defined.
Example test_pull_exists_lemma : test_lemma pull_exists_lemma. Admitted.
Eval vm_compute in pull_exists_lemma.
Definition ent_exists_right_lemma : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp (@ent_left_exists val).
Defined.
Example test_pull_exists_lemma2 : test_lemma ent_exists_right_lemma. Admitted.
Definition eq_to_subst_lemma : lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp eq_to_subst.
Defined.
Eval vm_compute in eq_to_subst_lemma.
Example test_eq_lemma : test_lemma (eq_to_subst_lemma). Admitted.
Check rule_static_complete.
Definition scall_lemma (x : Lang.var) (C : class) (m : string) (es : list dexpr)
: lemma typ (expr typ func) (expr typ func).
Proof.
reify_lemma reify_imp rule_static_complete.
Qed.
Print scall_lemma.
Print pull_exists_lemma.
Example test_pull_exists : test_lemma (pull_exists_lemma). Admitted.
Require Import MirrorCharge.ModularFunc.BaseFunc.
Definition fieldLookupTac : rtac typ (expr typ func) :=
fun tus tvs n m c s e =>
match e with
| App (App (App (Inj (inr pFieldLookup)) (Inj (inr (pProg P)))) C) f =>
match baseS C with
| Some C' =>
match C' with
| pString C'' =>
match class_lookup C'' P with
| Some Class =>
match @exprUnify (ctx_subst c) typ func _ _ _ _ _ 3
tus tvs 0 f (mkFields (c_fields Class)) tyVarList s with
| Some s => Solved s
| None => Fail
end
| None => Fail
end
| _ => Fail
end
| None => Fail
end
| _ => Fail
end.
Definition FIELD_LOOKUP := fieldLookupTac.
Require Import ExtLib.Tactics.
Lemma FIELD_LOOKUP_sound : rtac_sound FIELD_LOOKUP.
Proof.
unfold rtac_sound, rtac_spec; intros.
unfold FIELD_LOOKUP, fieldLookupTac in H.
destruct g; subst; try apply I.
forward.
simpl in H10.
forward.
SearchAbout exprUnify'.
Print exprUnify'.
Print ExprUnify_common.unify_sound_ind.
pose proof (exprUnify_sound).
specialize(H13 (ctx_subst ctx) typ func _ _ _ _ _ _ _ _ _ _ 3).
red in H13.
red in H13.
apply H13 with (tv' := nil) in H10; clear H13; [|assumption].
forward_reason; split; auto.
forward. simpl in H13.
simpl in H15.
unfold Ctx.propD, exprD'_typ0 in H15.
forward; inv_all; subst.
simpl in H15.
autorewrite with exprD_rw in H15; [|apply _];
simpl in H15; forward; inv_all; subst.
autorewrite with exprD_rw in H0.
simpl in H0; forward; inv_all; subst.
autorewrite with exprD_rw in H2; [|apply _];
simpl in H2; forward; inv_all; subst.
autorewrite with exprD_rw in H2; [|apply _];
simpl in H2; forward; inv_all; subst.
unfold funcAs in H2. simpl in H2.
forward; inv_all; subst.
pose proof (pctxD_substD H12 H14).
destruct H as [? [? ?]].
specialize (H13 _ _ _ H5 eq_refl H).
forward; inv_all; subst.
destruct H6 as [? [? ?]].
pose proof (substD_pctxD _ H10 H14 H6).
destruct H13 as [? [? ?]].
simpl in *. unfold Expr_expr.
rewrite H13.
split. admit.
intros. unfold exprT_App, exprT_Inj in *; simpl in *.
destruct e0; inv_all; try congruence.
destruct f; try congruence.
destruct s1; try congruence.
destruct s1; try congruence.
destruct s1; try congruence.
destruct s1; try congruence.
destruct s1; try congruence.
inv_all; subst. simpl in *.
Check Ap_pctxD.
gather_facts.
eapply Pure_pctxD; eauto.
intros.
specialize (H7 _ _ H8).
destruct H7 as [? ?]. specialize (H15 HList.Hnil). simpl in H15.
rewrite H15; clear H15.
unfold field_lookup. exists c; split; [|reflexivity].
autorewrite with exprD_rw in H3; [|apply _];
simpl in H2; forward; inv_all; subst.
simpl.
unfold eq_rect_r. simpl.
Require Import ExtLib.Data.String.
Lemma class_lookup_sound s p c (H : class_lookup s p = Some c) : In (s, c) (p_classes p).
Proof.
destruct p. induction p_classes; simpl in *.
* unfold class_lookup in H. simpl in H; congruence.
* unfold class_lookup in H. simpl in H.
destruct a; simpl in *. consider (s ?[ eq ] s0); intros; subst.
+ left. rewrite rel_dec_eq_true in H; inv_all; subst; [reflexivity| apply _|reflexivity].
+ right. rewrite rel_dec_neq_false in H; [apply IHp_classes; apply H | apply _ | apply H0].
Qed.
apply class_lookup_sound.
apply H9.
apply _.
Qed.
Require Import MirrorCharge.ModularFunc.ListFunc.
Definition foldTac (e : expr typ func) (args : list (expr typ func))
: expr typ func :=
match listS e with
| Some (pFold t u) =>
match args with
| f :: acc :: (Inj (inr (pFields fs)))::nil =>
fold_right (fun x acc => beta (beta (App (App f (mkString x)) acc))) acc fs
| _ => apps e args
end
| _ => apps e args
end.
Require Import MirrorCore.Lambda.ExprTac.
Lemma foldTacOk : partial_reducer_ok foldTac.
Proof.
unfold partial_reducer_ok; intros.
unfold foldTac.
remember (listS e); destruct o; [| exists val; tauto].
destruct l; try (exists val; tauto).
destruct es; try (exists val; tauto).
destruct es; try (exists val; tauto).
destruct es; try (exists val; tauto).
destruct e2; try (exists val; tauto).
destruct f; try (exists val; tauto).
destruct j; try (exists val; tauto).
destruct es; try (exists val; tauto).
destruct e; simpl in Heqo; try congruence.
destruct f; simpl in Heqo; try congruence.
destruct s; simpl in Heqo; try congruence.
destruct s; simpl in Heqo; try congruence.
destruct s; simpl in Heqo; try congruence.
destruct s; simpl in Heqo; try congruence.
inv_all; subst.
autorewrite with exprD_rw in H; simpl in H; forward; inv_all; subst.
autorewrite with exprD_rw in H; simpl in H; forward; inv_all; subst; [|apply _].
autorewrite with exprD_rw in H; simpl in H; forward; inv_all; subst; [|apply _].
autorewrite with exprD_rw in H0; simpl in H0; forward; inv_all; subst; [|apply _].
autorewrite with exprD_rw in H2; simpl in H2; forward; inv_all; subst; [|apply _].
unfold funcAs in H2.
Opaque type_cast.
simpl in H2. forward; inv_all; subst. red in r. inv_all.
inversion r; subst.
rewrite (UIP_refl r) in H4. unfold Rcast in H4; simpl in H4.
inversion H4; unfold eq_rect_r in H6; simpl in H6.
subst. clear H4.
clear H2 r.
Opaque beta.
simpl.
unfold exprT_App, eq_rect_r. simpl.
cut (exists val' : exprT tus tvs (typD t),
ExprDsimul.ExprDenote.exprD' tus tvs t
(fold_right
(fun (x : string) (acc : expr typ func) =>
beta (beta (App (App e0 (mkString x)) acc))) e1 l) = Some val' /\
(forall (us : hlist typD tus) (vs : hlist typD tvs),
fold_right (e4 us vs) (e3 us vs) (exprT_Inj tus tvs l us vs) = val' us vs)).
intros [? [? ?]].
eexists; split; [eassumption | intros; apply H4].
induction l; simpl; intros.
+ exists e3; tauto.
+ destruct IHl as [? [? ?]].
eexists; split; [|intros; reflexivity].
Lemma exprD'_remove_beta tus tvs t e de (H : exprD' tus tvs e t = Some de) :
exprD' tus tvs (beta e) t = Some de.
Proof.
pose proof (beta_sound tus tvs e t).
unfold exprD' in *. simpl in *. forward; inv_all; subst.
Require Import FunctionalExtensionality.
f_equal. symmetry.
apply functional_extensionality; intros.
apply functional_extensionality; intros.
apply H2.
Qed.
do 2 (apply exprD'_remove_beta).
unfold exprD'. simpl.
red_exprD; [|apply _].
pose proof (exprD_typeof_Some).
specialize (H5 _ _ _ _ _ _ _ _ _ _ _ _ _ H2). rewrite H5; clear H5.
red_exprD; [|apply _].
forward; inv_all; subst.
unfold mkString. forward.
red_exprD; [|apply _].
Transparent type_cast.
unfold funcAs. simpl.
f_equal. unfold exprT_App; simpl.
unfold eq_rect_r. simpl.
apply functional_extensionality; intros.
apply functional_extensionality; intros.
rewrite H4. reflexivity.
Qed.
SearchAbout partial_reducer_ok.
Print partial_reducer_ok.
Print apps_reducer.
Print full_reducer_ok.
Print full_reducer.
Check @idred.
SearchAbout idred.
Print idred.
Print idred'.
Check @beta_all.
Definition FOLD := SIMPLIFY (typ := typ) (fun _ _ _ _ => (beta_all (fun _ => foldTac))).
Require Import ExtLib.Tactics.Consider.
Require Import ExtLib.Tactics.
Require Import MirrorCore.Lambda.RedAll.
Print partial_reducer.
(*
Lemma foldTacOk2 : partial_reducer_ok foldTac.
Proof.
unfold full_reducer_ok; intros.
Print var_termsP.
Print partial_reducer_ok.
unfold exprT, OpenT in P.
unfold foldTac.
remember (listS e). destruct o.
Focus 2.
simpl.
Qed.
*)
Lemma FOLD_sound : rtac_sound FOLD.
Proof.
admit.
(*
unfold FOLD.
apply SIMPLIFY_sound.
intros; simpl.
forward.
rewrite <- H.
simpl.
unfold Ctx.propD, exprD'_typ0 in H3; forward; inv_all; subst.
destruct (beta_all_sound foldTacOk _ _ e0 H3) as [v [H4 H5]].
apply beta_all_sound.
Check beta_all_sound.
(* beta_all_sound is missing *)
*)
Qed.
Check apps.
Definition BETA := SIMPLIFY (typ := typ) (fun _ _ _ _ => beta_all (fun _ => @apps typ func)).
Lemma BETA_sound : rtac_sound BETA.
Proof.
unfold BETA.
apply SIMPLIFY_sound.
intros; simpl; forward.
(*
SearchAbout full_reducer.
assert (full_reducer_ok (fun _ => apps (sym := func))). {
clear.
intros e vars tus tvs tus' tvs' P Hvars es t targs Hexpr.
}
unfold full_reducer_ok.
Print full_reducer.
pose proof (beta_all_sound).
SearchAbout beta_all.
rewrite <- H.*)
admit.
Qed.
Definition THEN' := @MirrorCore.RTac.Then.THEN typ (expr typ func).
Require Import MirrorCharge.RTac.Minify.
Let EAPPLY lem := THEN' (EAPPLY typ func lem) (MINIFY typ func).
Definition THEN (r1 r2 : rtac typ (expr typ func)) :=
THEN (THEN (THEN (INSTANTIATE typ func) (runOnGoals r1)) (runOnGoals (INSTANTIATE typ func))) (runOnGoals r2).
Definition EQSUBST := THEN (EAPPLY eq_to_subst_lemma) (SUBST typ func).
(*
Notation "'ap_eq' '[' x ',' y ']'" :=
(ap (T := Fun stack) (ap (T := Fun stack) (pure (T := Fun stack) (@eq val)) x) y).
*)
Require Import MirrorCharge.ModularFunc.OpenFunc.
Require Import MirrorCharge.ModularFunc.BaseFunc.
Require Import MirrorCharge.ModularFunc.EmbedFunc.
Definition match_ap_eq (e : expr typ func) : bool :=
match e with
| App emb (App (App f (App (App g (App h e)) x)) y) =>
match embedS emb, open_funcS f, open_funcS g, open_funcS h, baseS e with
| Some (eilf_embed _ _), Some (of_ap _ _), Some (of_ap _ _), Some (of_const _), Some (pEq _) => true
| _, _, _, _, _ => false
end
| _ => false
end.
Definition PULLEQL := PULLCONJUNCTL typ func match_ap_eq ilops.
(*
THEN (INSTANTIATE typ func subst) (runOnGoals (THEN (THEN (TRY FIELD_LOOKUP)
(runOnGoals (CANCELLATION typ func subst tySpec (fun _ => false)))) (runOnGoals FOLD))) ::
solve_entailment :: nil).
*)
Require Import MirrorCharge.AutoSetoidRewrite.
Require Import MirrorCharge.SetoidRewrite.Base.
Require Import MirrorCharge.SetoidRewrite.ILSetoidRewrite.
Require Import MirrorCharge.SetoidRewrite.BILSetoidRewrite.
Definition spec_respects (e : expr typ func) (_ : list (RG (expr typ func)))
(rg : RG (expr typ func)) : m (expr typ func) :=
match e with
| Inj (inr pTriple) =>
rg_bind (unifyRG (@rel_dec (expr typ func) _ _) rg
(RGrespects (RGflip (RGinj (fEntails tySasn)))
(RGrespects (RGinj (fEq tyCmd))
(RGrespects (RGinj (fEntails tySasn))
(RGinj (fEntails tySpec))))))
(fun _ => rg_ret fTriple)
| _ => rg_fail
end.
Definition step_unfold vars rw :=
setoid_rewrite vars _ (fEntails : typ -> expr typ func) rw
(sr_combine il_respects
(sr_combine (@il_respects_reflexive typ func _ _ _ ilops _ _)
(sr_combine bil_respects
(sr_combine eq_respects
(sr_combine spec_respects refl)))))
(fun _ => rw_fail).
Definition STEP_REWRITE rw : rtac typ (expr typ func) :=
fun tus tvs lus lvs c s e =>
match step_unfold (getVars c) rw tyProp e with
| Some (e', _) => More s (GGoal e')
| _ => More s (GGoal e)
end.
Definition PULL_TRIPLE_EXISTS : rtac typ (expr typ func) :=
THEN (THEN (EAPPLY pull_exists_lemma) (INTRO typ func)) BETA.
Definition solve_entailment (rw : rewriter (typ := typ) (func := func)) : rtac typ (expr typ func) :=
THEN (INSTANTIATE typ func)
(FIRST (SOLVE (CANCELLATION typ func tySasn is_pure) ::
(THEN (THEN (THEN (THEN PULLEQL (REPEAT 1000 EQSUBST))
(STEP_REWRITE rw)) (REPEAT 1000 (INTRO typ func)))
(CANCELLATION typ func tySasn is_pure)::
nil))).
Definition solve_alloc rw : rtac typ (expr typ func) :=
THEN (INSTANTIATE typ func)
(FIRST (SOLVE (CANCELLATION typ func tySpec (fun _ => false)) ::
FIELD_LOOKUP ::
THEN FOLD (solve_entailment rw) :: nil)).
Check MINIFY.
Print rtacK.
Check runOnGoals (SUBST typ func).
Check THENK.
Print rtacK.
Definition simStep (rw : rewriter (typ := typ) (func := func)) (r : rtac typ (expr typ func)) :=
THEN (THEN (THEN (THEN (SUBST typ func)
(TRY PULL_TRIPLE_EXISTS)) (STEP_REWRITE rw)) (REPEAT 10 PULL_TRIPLE_EXISTS)) r.
Require Import MirrorCharge.RTac.Minify.
Fixpoint tripleE (c : cmd) rw : rtac typ (expr typ func) :=
match c with
| cskip => simStep rw (THEN (EAPPLY skip_lemma) (solve_entailment rw))
| calloc x C => simStep rw (THEN (EAPPLY (alloc_lemma x C))
(FIRST (solve_alloc rw::solve_entailment rw::nil)))
| cseq c1 c2 => simStep rw (THEN' (EAPPLY (seq_lemma c1 c2))
(THENK (runOnGoals (TRY (tripleE c1 rw))) (THENK (MINIFY typ func) (runOnGoals (tripleE c2 rw)))))
| cassign x e => simStep rw (THEN (EAPPLY (assign_lemma x e)) (solve_entailment rw))
| cread x y f => simStep rw (THEN (EAPPLY (read_lemma x y f)) (solve_entailment rw))
| cwrite x f e => simStep rw (THEN (EAPPLY (write_lemma x f e)) (solve_entailment rw))
| _ => IDTAC
end.
Definition symE rw : rtac typ (expr typ func) :=
(fun tus tvs n m ctx s e =>
(match e return rtac typ (expr typ func) with
| App (App (Inj f) G) H =>
match ilogicS f, H with
| Some (ilf_entails tySpec), (* tySpec is a pattern, should be checked for equality with tySpec *)
App (App (App (Inj (inr pTriple)) P) Q) (Inj (inr (pCmd c))) =>
tripleE c rw
| _, _ => FAIL
end
| _ => FAIL
end) tus tvs n m ctx s e).
Definition runTac rw :=
(THEN (THEN (REPEAT 1000 (INTRO typ func)) (symE rw))
(INSTANTIATE typ func)).
Lemma runTac_sound rw : rtac_sound (runTac rw).
Proof.
admit.
Qed.
Definition mkPointsto (x : expr typ func) (f : field) (e : expr typ func) : expr typ func :=
mkAp tyVal tyAsn
(mkAp tyString (tyArr tyVal tyAsn)
(mkAp tyVal (tyArr tyString (tyArr tyVal tyAsn))
(mkConst (tyArr tyVal (tyArr tyString (tyArr tyVal tyAsn)))
fPointsto)
x)
(mkConst tyString (mkString f)))
e.
Require Import Java.Semantics.OperationalSemantics.
Require Import Java.Logic.SpecLogic.
Require Import Java.Logic.AssertionLogic.
Require Import Java.Examples.ListClass.
Require Import Charge.Logics.ILogic.
Fixpoint seq_skip n :=
match n with
| 0 => cskip
| S n => cseq cskip (seq_skip n)
end.
Require Import ExtLib.Structures.Applicative.
Local Instance Applicative_Fun A : Applicative (Fun A) :=
{ pure := fun _ x _ => x
; ap := fun _ _ f x y => (f y) (x y)
}.
Definition testSkip n : Prop :=
forall (G : spec) (P : sasn), G |-- triple P P (seq_skip n).
Lemma INTRO_sound : rtac_sound (INTRO typ func).
Proof.
admit.
Qed.
Require Import MirrorCharge.RTac.Tactics.
Check IDTAC_sound.
Ltac rtac_result reify term_table tac :=
let name := fresh "e" in
match goal with
| |- ?P =>
reify_aux reify term_table P name;
let t := eval vm_compute in (typeof_expr nil nil name) in
let goal := eval unfold name in name in
match t with
| Some ?t =>
let goal_result := constr:(run_tac tac (GGoal name)) in
let result := eval vm_compute in goal_result in
idtac result
| None => idtac "expression " goal "is ill typed" t
end
end.
Lemma test_skip_lemma3 : testSkip 10.
Proof.
idtac "start".
unfold testSkip; simpl.
Time run_rtac reify_imp term_table (@runTac_sound rw_fail).
Time Qed.
Definition test_alloc : expr typ func :=
mkEntails tySpec (mkProgEq (mkProg ListProg))
(mkTriple (mkTrue tySasn) (mkCmd (cseq (calloc "x" "NodeC") cskip)) (mkFalse tySasn)).
Require Import Charge.Logics.BILogic.
Lemma test_alloc_correct :
prog_eq ListProg |-- triple empSP lfalse ((calloc "x" "NodeC");;Skip).
Proof.
Time run_rtac reify_imp term_table (@runTac_sound rw_fail).
unfold open_func_symD. simpl.
admit.
Qed.
Lemma test_read : ltrue |--
triple
(ap_pointsto [("o": var), ("f" : field), pure (T := Fun (Lang.stack)) (vint 3)] **
ap_pointsto [("o": var), ("g" : field), pure (T := Fun (Lang.stack)) (vint 4)])
(ap_pointsto [("o": var), ("f": field), pure (T := Fun (Lang.stack)) (vint 3)] **
(ap_pointsto [("o": var), ("g": field), pure (T := Fun (Lang.stack)) (vint 4)]))
(cseq (cread "x" "o" "f") (cseq (cread "y" "o" "g") cskip)).
Proof.
Time run_rtac reify_imp term_table (@runTac_sound rw_fail).
Qed.
Lemma test_write :
ltrue |--
triple
(ap_pointsto [("o": var), ("f" : field), pure (T := Fun (Lang.stack)) (vint 3)])
(ap_pointsto [("o": var), ("f": field), pure (T := Fun (Lang.stack)) (vint 4)])
(cseq (cwrite "o" "f" (E_val (vint 4))) cskip).
Proof.
Time run_rtac reify_imp term_table (@runTac_sound rw_fail).
Qed.
Require Import BinInt.
Fixpoint mkSwapPre n : sasn :=
match n with
| 0 => empSP
| S n => ap_pointsto [("o": Lang.var), (append "f" (nat2string10 n) : field),
(eval (E_val (vint (Z.of_nat n))))] **
mkSwapPre n
end.
Fixpoint mkSwapPostAux n m :=
match n with
| 0 => empSP
| S n => ap_pointsto [("o": Lang.var), (append "f" (nat2string10 n) : field),
(eval (E_val (vint (Z.of_nat (m - (S n))))))] **
mkSwapPostAux n m
end.
Definition mkSwapPost n := mkSwapPostAux n n.
Fixpoint mkRead n c :=
match n with
| 0 => c
| S n => cseq (cread ((append "x" (nat2string10 n):Lang.var)) ("o":Lang.var) ((append "f" (nat2string10 n)):field))
(mkRead n c)
end.
Fixpoint mkWriteAux n m c :=
match n with
| 0 => c
| S n => cseq (cwrite ("o":Lang.var) (append "f" (nat2string10 n)) (E_var (append "x" (nat2string10 (m - (S n))))))
(mkWriteAux n m c)
end.
Definition mkWrite n c := mkWriteAux n n c.
Definition mkSwapProg (n : nat) (c : cmd) := mkRead n (mkWrite n c).
Definition mkSwap n :=
ltrue |-- triple (mkSwapPre n) (mkSwapPost n) (mkSwapProg n cskip).
Set Printing Depth 100.
Lemma test_swap : mkSwap 20.
Proof.
Opaque ap.
unfold mkSwap, mkSwapPre, mkSwapPost, mkSwapProg, mkSwapPostAux, mkRead, mkWrite, mkWriteAux.
Time run_rtac reify_imp term_table (@runTac_sound rw_fail).
Time Qed.
End blurb.
|
{"author": "jesper-bengtson", "repo": "MirrorCharge", "sha": "cb0fe1da80be70ba4b744d4178a4e6e3afa38e62", "save_path": "github-repos/coq/jesper-bengtson-MirrorCharge", "path": "github-repos/coq/jesper-bengtson-MirrorCharge/MirrorCharge-cb0fe1da80be70ba4b744d4178a4e6e3afa38e62/MirrorCharge!/src/MirrorCharge/Java/SymEx.v"}
|
import numpy as np
import pandas as pd
from scipy.integrate import odeint
from scipy import interpolate
#import pressure_estimation
def func(x, *params):
y = np.zeros_like(x)
for i in range(0, len(params), 3):
ctr = params[i]
amp = params[i+1]
wid = params[i+2]
y = y + amp * np.exp( -((x - ctr)/wid)**2)
#print("y:" + str(y))
return y
### ODE
def heart_ode(y, t, Rp, Ra, Rin, Ca, Cv, Vd, Emax, Emin):
#print("ODE time:", t )
Vlv, Pa, Pv = y
Plv_0 = Plv(Vlv,Vd,Emax,Emin,t)
Qin_0 = Qin(Plv_0,Pv, Rin)
Qa_0 = Qa(Plv_0,Pa,Ra)
Qp_0 = Qp(Pa,Pv,Rp)
dydt = [Qin_0-Qa_0, (Qa_0-Qp_0)/Ca, (Qp_0-Qin_0)/Cv]
return dydt
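# The system integrated above, written out for reference:
#   dVlv/dt = Qin - Qa
#   dPa/dt  = (Qa - Qp) / Ca
#   dPv/dt  = (Qp - Qin) / Cv
# with the flows Qin, Qa, Qp given by the resistance/valve laws below.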
def Qa(Plv,Pa,Ra):
if (Plv>Pa):
return (Plv - Pa)/Ra
else:
return int(0)
def Qin(Plv,Pv, Rin):
if (Pv>Plv):
return (Pv - Plv)/Rin
else:
return int(0)
def Qp(Pa,Pv,Rp):
return (Pa - Pv)/Rp
def Plv(Vlv,Vd,Emax,Emin,t):
return Elastance(Emax,Emin,t)*(Vlv-Vd)
def E_pat2(t):
heart_cycle = int(t/T)
#print("heart Cycle" + str(heart_cycle))
t = t - heart_cycle * T
popt2=[0.62,0.61,0.09,0.46,0.82,0.17]
fit2 = func(t, *popt2)
#print(type(fit3))
#plt.plot(x, np.roll(fit3/max(fit3), -np.argmax(fit3)+142) , 'r-', linewidth = 3, alpha =0.6, label = "Patient 1")
return fit2
### Elastance function
def Elastance(Emax,Emin,t):
return (E_pat2(t) * (Emax-Emin) + Emin)
#return (Esin(t) * (Emax-Emin) + Emin)
### Solving the ODE
def compute_ode(Rp,Ra,Rin,Ca,Cv,Vd,Emax,Emin,t,start_v,start_pa,start_pv):
y0 = [start_v, start_pa, start_pv]
sol = odeint(heart_ode, y0, t, args = (Rp,Ra,Rin,Ca,Cv,Vd,Emax,Emin,))
result_Vlv = sol[:, 0]
result_Pa = sol[:, 1]
result_Pv = sol[:, 2]
return (result_Vlv, result_Pa, result_Pv)
def init_glob_para(HC):
global T
global Tsys
global Tir
    T = HC # s, IM dataset
    Tsys = 0.3*np.sqrt(T) # s, Samar (2005)
    #Tsys = 0.3
    Tir = 0.5*Tsys # s, IM dataset
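# Minimal usage sketch; the parameter values below are illustrative
# assumptions only, not values from any particular dataset.
if __name__ == '__main__':
    init_glob_para(0.8)                                # 0.8 s heart cycle
    t = np.linspace(0, 8 * 0.8, 4000)                  # eight cardiac cycles
    Vlv, Pa, Pv = compute_ode(Rp=1.0, Ra=0.05, Rin=0.01, Ca=2.0, Cv=100.0,
                              Vd=10.0, Emax=2.0, Emin=0.06, t=t,
                              start_v=120.0, start_pa=80.0, start_pv=5.0)
    print("Final Vlv: %.2f, Pa: %.2f, Pv: %.2f" % (Vlv[-1], Pa[-1], Pv[-1]))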
|
{"hexsha": "0fe1a2e54ca2a4c239aac9f64012135ccf7d94d5", "size": 2132, "ext": "py", "lang": "Python", "max_stars_repo_path": "5_simulation/ode_solver_pat2.py", "max_stars_repo_name": "xi2pi/elastance-function", "max_stars_repo_head_hexsha": "ac3422b55a1958fe0ce579a2b49a977545159ccd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-10T23:31:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T08:22:47.000Z", "max_issues_repo_path": "5_simulation/ode_solver_pat2.py", "max_issues_repo_name": "xi2pi/elastance-function", "max_issues_repo_head_hexsha": "ac3422b55a1958fe0ce579a2b49a977545159ccd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "5_simulation/ode_solver_pat2.py", "max_forks_repo_name": "xi2pi/elastance-function", "max_forks_repo_head_hexsha": "ac3422b55a1958fe0ce579a2b49a977545159ccd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4421052632, "max_line_length": 118, "alphanum_fraction": 0.5530018762, "include": true, "reason": "import numpy,from scipy", "num_tokens": 814}
|
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torch import tensor, float32
import json
from collections import defaultdict
# PyTorch representation of the cleaned dataset
class DatasetModel(Dataset):
def __init__(self, df, vectorizer):
self.df = df
self._vectorizer = vectorizer
self._max_seq_length = max(map(len, self.df.predictor)) + 2
self.train_df = self.df[self.df.split == 'train']
self.train_size = len(self.train_df)
self.valid_df = self.df[self.df.split == 'valid']
self.valid_size = len(self.valid_df)
self.test_df = self.df[self.df.split == 'test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'valid': (self.valid_df, self.valid_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
        # class weights (inverse frequency, for class-imbalance correction)
        class_counts = self.train_df.target.value_counts().to_dict()
        def sort_key(item):
            return self._vectorizer.target_vocab.lookup_token(item[0])
        sorted_counts = sorted(class_counts.items(), key=sort_key)
        frequencies = [count for _, count in sorted_counts]
        self.class_weights = 1.0 / tensor(frequencies, dtype=float32)
    # loads the data and creates the vectorizer
@classmethod
def make_vectorizer(cls, path: str):
df = pd.read_csv(path)
train_df = df[df.split == 'train']
return cls(df, PredictorVectorizer.from_dataframe(train_df))
    def get_vectorizer(self):
        return self._vectorizer
def save_vectorizer(self, vectorizer_filepath):
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def set_split(self, split='train'):
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
    # entry point for a single data point in PyTorch
def __getitem__(self, index):
"index - индекс точки данных"
row = self._target_df.iloc[index]
predictor_vector, vec_length = self._vectorizer.vectorize(row.predictor, self._max_seq_length)
target_index = self._vectorizer.target_vocab.lookup_token(row.target)
return {'x_data': predictor_vector,
'y_target': target_index,
'x_length': vec_length}
def get_num_batches(self, batch_size):
return len(self) // batch_size
# vectorizer that keeps the two vocabularies in correspondence and applies them
class PredictorVectorizer:
    def __init__(self, char_vocab, target_vocab):
        """
        Arguments:
            char_vocab (Vocabulary) - maps sequence tokens to indices
            target_vocab - maps the target (category) to indices
        """
self.char_vocab = char_vocab
self.target_vocab = target_vocab
    def vectorize(self, predictor, vector_length=-1):
        """
        Arguments:
            predictor - the token sequence to vectorize
            vector_length - length of the output index vector
        """
indices = [self.char_vocab.begin_seq_index]
indices.extend(self.char_vocab.lookup_token(token)
for token in predictor)
indices.append(self.char_vocab.end_seq_index)
if vector_length < 0:
vector_length = len(indices)
out_vector = np.zeros(vector_length, dtype=np.int64)
out_vector[:len(indices)] = indices
out_vector[len(indices):] = self.char_vocab.mask_index
return out_vector, len(indices)
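    # Illustrative example: for tokens ['a', 'b'] and vector_length=6,
    # vectorize returns [begin, idx_a, idx_b, end, mask, mask] together with
    # the unpadded length 4.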
@classmethod
def from_dataframe(cls, df: pd.DataFrame):
char_vocab = SequenceVocabulary()
target_vocab = Vocabulary()
for index, row in df.iterrows():
tokens = row.predictor.split(' ')
for token in tokens:
char_vocab.add_token(token)
target_vocab.add_token(row.target)
return cls(char_vocab, target_vocab)
@classmethod
def from_serializable(cls, contents):
char_vocab = SequenceVocabulary.from_serializable(contents['char_vocab'])
target_vocab = Vocabulary.from_serializable(contents['target_vocab'])
return cls(char_vocab=char_vocab, target_vocab=target_vocab)
def to_serializable(self):
return {'char_vocab': self.char_vocab.to_serializable(),
'target_vocab': self.target_vocab.to_serializable()}
# maps tokens to numeric form - the low-level vocabularies
class Vocabulary:
    """
    Arguments:
        token_to_idx: dict - mapping from tokens to indices
        add_unk: bool - whether to add the UNK token
        unk_token - the UNK token to add to the vocabulary
    """
def __init__(self, token_to_idx=None, add_unk=True, unk_token='<UNK>'):
if token_to_idx is None:
token_to_idx = dict()
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
    # returns a serializable dictionary
def to_serializable(self):
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
    # builds a class instance from a serialized dictionary
@classmethod
def from_serializable(cls, contents):
return cls(**contents)
    # updates the mapping dictionaries - if the token is not found, it is added
def add_token(self, token):
if token in self._token_to_idx:
index = self._token_to_idx[token]
else:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
    # returns the token's index, or the UNK index if the token is not found
def lookup_token(self, token):
if self._add_unk:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
    # returns the token for the given index
    def lookup_index(self, index):
        if index not in self._idx_to_token:
            raise KeyError('Index (%d) is not in the vocabulary' % index)
return self._idx_to_token[index]
def __str__(self):
        return '<Vocabulary (size=%d)>' % len(self)
def __len__(self):
return len(self._token_to_idx)
# vocabulary for tokenized sequences
class SequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, unk_token='<UNK>',
mask_token="<MASK>", begin_seq_token='<BEGIN>',
end_seq_token='<END>'):
super(SequenceVocabulary, self).__init__(token_to_idx)
        self._mask_token = mask_token # for handling variable-length sequences
        self._unk_token = unk_token # marks tokens missing from the vocabulary
        self._begin_seq_token = begin_seq_token # start of a sequence
        self._end_seq_token = end_seq_token # end of a sequence
self.mask_index = self.add_token(self._mask_token)
self.unk_index = self.add_token(self._unk_token)
self.begin_seq_index = self.add_token(self._begin_seq_token)
self.end_seq_index = self.add_token(self._end_seq_token)
def to_serializable(self):
contents = super(SequenceVocabulary, self).to_serializable()
contents.update({'unk_token': self._unk_token,
'mask_token': self._mask_token,
'begin_seq_token': self._begin_seq_token,
'end_seq_token': self._end_seq_token})
return contents
def lookup_token(self, token):
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
class TrainValidSplit:
def __init__(self,
train_proportion: float,
valid_proportion: float,
test_proportion: float,
raw_df_path: str,
seed: int):
self.by_target = self.get_target_dict(raw_df_path)
self.final_list = self.make_split(self.by_target, train_proportion, valid_proportion,
test_proportion, seed)
@staticmethod
def get_target_dict(raw_df_path):
df = pd.read_csv(raw_df_path)
by_target = defaultdict(list)
for _, row in df.iterrows():
by_target[row.target].append(row.to_dict())
return by_target
@staticmethod
def make_split(by_target, train_proportion, valid_proportion, test_proportion, seed):
final_list = []
np.random.seed(seed)
for _, item_list in sorted(by_target.items()):
np.random.shuffle(item_list)
n = len(item_list)
n_train = int(train_proportion * n)
n_valid = int(valid_proportion * n)
n_test = int(test_proportion * n)
for item in item_list[:n_train]:
item['split'] = 'train'
for item in item_list[n_train:n_train + n_valid]:
item['split'] = 'valid'
for item in item_list[n_train + n_valid:]:
item['split'] = 'test'
final_list.extend(item_list)
return final_list
def save_prepared_data(self, prepared_df_path):
prepared_data = pd.DataFrame(self.final_list)
return prepared_data.to_csv(prepared_df_path, index=False, encoding='utf-8')
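# Minimal end-to-end sketch (the CSV paths below are hypothetical
# placeholders):
# splitter = TrainValidSplit(train_proportion=0.7, valid_proportion=0.15,
#                            test_proportion=0.15,
#                            raw_df_path='raw_data.csv', seed=42)
# splitter.save_prepared_data('prepared_data.csv')
# dataset = DatasetModel.make_vectorizer('prepared_data.csv')
# sample = dataset[0]  # {'x_data': ..., 'y_target': ..., 'x_length': ...}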
|
{"hexsha": "a61a876fde60c3480062cf3265e8bdb93edfb1cd", "size": 9722, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocessor.py", "max_stars_repo_name": "doksketch/happy-dating", "max_stars_repo_head_hexsha": "680c63f38fe039b6567f5fce94c3d0fa3b968019", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocessor.py", "max_issues_repo_name": "doksketch/happy-dating", "max_issues_repo_head_hexsha": "680c63f38fe039b6567f5fce94c3d0fa3b968019", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/preprocessor.py", "max_forks_repo_name": "doksketch/happy-dating", "max_forks_repo_head_hexsha": "680c63f38fe039b6567f5fce94c3d0fa3b968019", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.475177305, "max_line_length": 102, "alphanum_fraction": 0.6395803333, "include": true, "reason": "import numpy", "num_tokens": 2315}
|
import os
import os.path as osp
import sys
import argparse
import json
import numpy as np
import pandas as pd
import time
import subprocess
RESULTS_DIR = './results'
if not osp.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
def get_args():
parser = argparse.ArgumentParser(description='gkm Protein Experiments')
parser.add_argument('--datasets', type=str, required=True,
help="Where to find the datasets")
parser.add_argument('--params', type=str, required=True,
help="The paramters file")
parser.add_argument('--dict', type=str, required=True,
help="The protein dictionary file"),
parser.add_argument('--out', type=str, required=True,
help='Name of log file to save results')
return parser.parse_args()
args = get_args()
file = args.out
datasets = args.datasets
parameters = args.params
results_file = osp.join(RESULTS_DIR, file)
dict_file = args.dict
df = pd.read_csv(parameters)
experiments = df.to_dict('records')
for e in experiments:
train_file = str(e['Dataset']) + '.train.fasta'
test_file = str(e['Dataset']) + '.test.fasta'
train_file = osp.join(datasets, train_file)
test_file = osp.join(datasets, test_file)
command = ["python", "run_gkm.py",
"--dir", datasets,
"--prefix", str(e["Dataset"]),
"--outdir", "temp",
"-g", str(e["g"]),
"-m", str(e["m"]),
"--dict", dict_file,
"--results", args.out]
print(' '.join(command))
output = subprocess.check_output(command)
|
{"hexsha": "077b34ab97657959ce3db209465d11f86b14d896", "size": 1527, "ext": "py", "lang": "Python", "max_stars_repo_path": "results/other_scripts/gkm_prot_tests.py", "max_stars_repo_name": "dblakely/FastSK", "max_stars_repo_head_hexsha": "bd0d4cef89c3d7d661f4c6abc094423ab6d1c7e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "results/other_scripts/gkm_prot_tests.py", "max_issues_repo_name": "dblakely/FastSK", "max_issues_repo_head_hexsha": "bd0d4cef89c3d7d661f4c6abc094423ab6d1c7e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "results/other_scripts/gkm_prot_tests.py", "max_forks_repo_name": "dblakely/FastSK", "max_forks_repo_head_hexsha": "bd0d4cef89c3d7d661f4c6abc094423ab6d1c7e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2777777778, "max_line_length": 75, "alphanum_fraction": 0.656843484, "include": true, "reason": "import numpy", "num_tokens": 360}
|
#!/usr/bin/env python
import sys
import math
import numpy as np
ph2Kcal = 1.364
Kcal2kT = 1.688
class Microstate:
def __init__(self, state, E, count):
self.state = state
self.E = E
self.count = count
class Conformer:
def __init__(self):
self.iconf = 0
self.ires = 0
self.confid = ""
self.resid = ""
self.occ = 0.0
self.crg = 0.0
def load_from_head3lst(self, line):
fields = line.split()
self.iconf = int(fields[0]) - 1
self.confid = fields[1]
self.resid = self.confid[:3]+self.confid[5:11]
self.crg = float(fields[4])
class MSout:
def __init__(self, fname):
self.T = 273.15
self.pH = 7.0
self.Eh = 0.0
self.N_ms = 0
self.N_uniq = 0
self.lowest_E = 0.0
self.highest_E = 0.0
self.average_E = 0.0
self.fixed_iconfs = []
self.fixed_crg = 0.0
self.fixed_ne = 0.0
self.fixed_nh = 0.0
self.free_residues = [] # free residues, referred by conformer indices
self.iconf2ires = {} # from conformer index to free residue index
self.microstates = {}
self.conformers = []
self.load_msout(fname)
def load_msout(self, fname):
lines = open(fname).readlines()
# Get a valid line
while True:
line = lines.pop(0).strip()
if len(line) > 0 and line[0] != "#":
break
fields = line.split(",")
for field in fields:
key, value = field.split(":")
key = key.strip().upper()
value = float(value)
if key == "T":
self.T = value
elif key == "PH":
self.pH = value
elif key == "EH":
self.Eh = value
        # second line, confirm this is from Monte Carlo sampling
while True:
line = lines.pop(0).strip()
if len(line) > 0 and line[0] != "#":
break
key, value = line.split(":")
if key.strip() != "METHOD" or value.strip() != "MONTERUNS":
print("This file %s is not a valid microstate file" % fname)
sys.exit(-1)
        # Third line, fixed conformer indices
while True:
line = lines.pop(0).strip()
if len(line) > 0 and line[0] != "#":
break
_, iconfs = line.split(":")
self.fixed_iconfs = [int(i) for i in iconfs.split()]
# 4th line, free residues
while True:
line = lines.pop(0).strip()
if len(line) > 0 and line[0] != "#":
break
_, residues_str = line.split(":")
residues = residues_str.split(";")
self.free_residues = []
for f in residues:
if f.strip():
self.free_residues.append([int(i) for i in f.split()])
for i_res in range(len(self.free_residues)):
for iconf in self.free_residues[i_res]:
self.iconf2ires[iconf] = i_res
# find the next MC record
found_mc = False
newmc = False
self.N_ms = 0
for line in lines:
if line.find("MC:") == 0: # ms starts
found_mc = True
newmc = True
continue
elif newmc:
f1, f2 = line.split(":")
current_state = [int(c) for c in f2.split()]
newmc = False
continue
elif found_mc:
fields = line.split(",")
if len(fields) >= 3:
state_e = float(fields[0])
count = int(fields[1])
flipped = [int(c) for c in fields[2].split()]
for ic in flipped:
ir = self.iconf2ires[ic]
current_state[ir] = ic
ms = Microstate(list(current_state), state_e, count)
key = ",".join(["%d" % i for i in ms.state])
if key in self.microstates:
self.microstates[key].count += ms.count
else:
self.microstates[key] = ms
        # find N_ms, lowest, highest, average E
self.N_ms = 0
E_sum = 0.0
self.lowest_E = next(iter(self.microstates.values())).E
self.highest_E = next(iter(self.microstates.values())).E
msvalues = self.microstates.values()
self.N_uniq = len(msvalues)
for ms in msvalues:
self.N_ms += ms.count
E_sum += ms.E * ms.count
if self.lowest_E > ms.E:
self.lowest_E = ms.E
if self.highest_E < ms.E:
self.highest_E = ms.E
self.average_E = E_sum / self.N_ms
def groupms_byenergy(microstates, ticks):
"""
This function takes in a list of microstates and a list of energy numbers (N values), divide the microstates into N
bands by using the energy number as lower boundaries. The list of energy will be sorted from small to large.
"""
N = len(ticks)
ticks.sort()
    ticks.append(1.0e100)  # add a big number as the right-most boundary
resulted_bands = [[] for i in range(N)]
for ms in microstates:
it = -1
for itick in range(N):
if ticks[itick] <= ms.E < ticks[itick+1]:
it = itick
break
if it >= 0:
resulted_bands[it].append(ms)
return resulted_bands
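def _example_groupms_byenergy():
    """A minimal sketch (not part of the original script): band three toy
    microstates with two lower boundaries."""
    toy = [Microstate([0], -5.0, 10), Microstate([1], -1.0, 3), Microstate([2], 2.5, 1)]
    bands = groupms_byenergy(toy, [-10.0, 0.0])
    # band 0 holds E in [-10, 0), band 1 holds E >= 0
    assert [len(b) for b in bands] == [2, 1]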
def groupms_byiconf(microstates, iconfs):
"""
This function takes in a list of microstates and a list of conformer indicies, divide microstates into two groups:
the first one is those contain one of the given conformers, the second one is those contain none of the listed conformers.
"""
ingroup = []
outgroup = []
for ms in microstates:
contain = False
for ic in iconfs:
if ic in ms.state:
ingroup.append(ms)
contain = True
break
if not contain:
outgroup.append(ms)
return ingroup, outgroup
def groupms_byconfid(microstates, confids):
"""
Group conformers by tge conformer IDs. IDs are in a list and ID is considered as a match as long as it is a
substring of the conformer name. The selected microstates must have all conformers and returned in the first group,
and the rest are in the second group.
"""
ingroup = []
outgroup = []
for ms in microstates:
contain = True
names = [conformers[ic].confid for ic in ms.state]
for confid in confids:
innames = False
for name in names:
if confid in name:
innames = True
break
contain = contain and innames
if contain:
ingroup.append(ms)
else:
outgroup.append(ms)
return ingroup, outgroup
def ms_energy_stat(microstates):
"""
Given a list of microstates, find the lowest energy, average energy, and highest energy
"""
ms = next(iter(microstates))
    lowest_E = highest_E = ms.E
    N_ms = 0
    total_E = 0.0
    for ms in microstates:
        if lowest_E > ms.E:
            lowest_E = ms.E
        elif highest_E < ms.E:
            highest_E = ms.E
        N_ms += ms.count
        total_E += ms.E*ms.count
    average_E = total_E/N_ms
    return lowest_E, average_E, highest_E
def ms_convert2occ(microstates):
"""
Given a list of microstates, convert to conformer occupancy of conformers appeared at least once in the microstates.
"""
occurance = {} # occurance of conformer, as a dictionary
occ = {}
N_ms = 0
for ms in microstates:
N_ms += ms.count
for ic in ms.state:
if ic in occurance:
occurance[ic] += ms.count
else:
occurance[ic] = ms.count
for key in occurance.keys():
occ[key] = occurance[key]/N_ms
return occ
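def _example_ms_convert2occ():
    """Sketch (not in the original script): conformer 0 appears in both toy
    states (occupancy 1.0); conformers 1 and 2 split the counts 3:1."""
    toy = [Microstate([0, 1], -1.0, 3), Microstate([0, 2], 0.0, 1)]
    assert ms_convert2occ(toy) == {0: 1.0, 1: 0.75, 2: 0.25}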
def ms_counts(microstates):
"""
Calculate total counts of microstates
"""
N_ms = 0
for ms in microstates:
N_ms += ms.count
return N_ms
def ms_charge(ms):
"Compute microstate charge"
crg = 0.0
for ic in ms.state:
crg += conformers[ic].crg
return crg
def ms_convert2sumcrg(microstates, free_res):
"""
Given a list of microstates, convert to net charge of each free residue.
"""
iconf2ires = {}
for i_res in range(len(free_res)):
for iconf in free_res[i_res]:
iconf2ires[iconf] = i_res
charges_total = [0.0 for i in range(len(free_res))]
N_ms = 0
for ms in microstates:
N_ms += ms.count
for ic in ms.state:
ir = iconf2ires[ic]
charges_total[ir] += conformers[ic].crg * ms.count
charges = [x/N_ms for x in charges_total]
return charges
def read_conformers():
conformers = []
lines = open("head3.lst").readlines()
lines.pop(0)
for line in lines:
conf = Conformer()
conf.load_from_head3lst(line)
conformers.append(conf)
return conformers
def e2occ(energies):
"Given a list of energy values in unit Kacl/mol, calculate the occupancy by Boltzmann Distribution."
e = np.array(energies)
e = e - min(e)
Pi_raw = np.exp(-Kcal2kT*e)
Pi_sum = sum(Pi_raw)
Pi_norm = Pi_raw/Pi_sum
return Pi_norm
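def _example_e2occ():
    """Sketch (not in the original script): Boltzmann occupancies for two
    conformers separated by one pH unit of energy (ph2Kcal Kcal/mol)."""
    occ = e2occ([0.0, ph2Kcal])
    # exp(-Kcal2kT * 1.364) ~= 0.10, so the occupancies are roughly 0.909 / 0.091
    assert abs(occ[0] - 0.909) < 0.01 and abs(occ[1] - 0.091) < 0.01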
def bhata_distance(prob1, prob2):
d_max = 10000.0 # Max possible value set to this
    p1 = np.array(prob1) / sum(prob1)
    p2 = np.array(prob2) / sum(prob2)
if len(p1) != len(p2):
d = d_max
else:
bc = sum(np.sqrt(p1 * p2))
# print(bc, np.exp(-d_max))
if bc <= np.exp(-d_max):
d = d_max
else:
d = -np.log(bc)
return d
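def _example_bhata_distance():
    """Sketch (not in the original script): the distance is 0 for identical
    distributions and capped at d_max for fully disjoint ones."""
    assert bhata_distance([0.5, 0.5], [0.5, 0.5]) == 0.0
    assert bhata_distance([1.0, 0.0], [0.0, 1.0]) == 10000.0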
def whatchanged_conf(msgroup1, msgroup2):
"Given two group of microstates, calculate what changed at conformer level."
occ1 = ms_convert2occ(msgroup1)
occ2 = ms_convert2occ(msgroup2)
all_keys = set(occ1.keys())
all_keys |= set(occ2.keys())
all_keys = list(all_keys)
all_keys.sort()
diff_occ = {}
for key in all_keys:
if key in occ1:
p1 = occ1[key]
else:
p1 = 0.0
if key in occ2:
p2 = occ2[key]
else:
p2 = 0.0
diff_occ[key] = p2 - p1
return diff_occ
def whatchanged_res(msgroup1, msgroup2, free_res):
"Return a list of Bhatachaya Distance of free residues."
occ1 = ms_convert2occ(msgroup1)
occ2 = ms_convert2occ(msgroup2)
bhd = []
for res in free_res:
p1 = []
p2 = []
for ic in res:
if ic in occ1:
p1.append(occ1[ic])
else:
p1.append(0.0)
if ic in occ2:
p2.append(occ2[ic])
else:
p2.append(0.0)
bhd.append(bhata_distance(p1, p2))
return bhd
conformers = read_conformers()
if __name__ == "__main__":
msout = MSout("ms_out/pH4eH0ms.txt")
# e_step = (msout.highest_E - msout.lowest_E)/20
# ticks = [msout.lowest_E + e_step*(i) for i in range(20)]
# ms_in_bands = groupms_byenergy(msout.microstates.values(), ticks)
# print([len(band) for band in ms_in_bands])
# netural, charged = groupms_byiconf(msout.microstates.values(), [12, 13, 14, 15])
# l_E, a_E, h_E = ms_energy_stat(msout.microstates.values())
# print(l_E, a_E, h_E)
# charge over energy bands
# e_step = (msout.highest_E - msout.lowest_E) / 20
# ticks = [msout.lowest_E + e_step*(i+1) for i in range(19)]
# ms_in_bands = groupms_byenergy(msout.microstates.values(), ticks)
# for band in ms_in_bands:
# band_total_crg = 0.0
# for ms in band:
# band_total_crg += ms_charge(ms)
# print(band_total_crg/ms_counts(band))
# netural, charged = groupms_byiconf(msout.microstates.values(), [12, 13, 14, 15])
# diff_occ = whatchanged_conf(netural, charged)
# for key in diff_occ.keys():
# print("%3d, %s: %6.3f" % (key, conformers[key].confid, diff_occ[key]))
# diff_bhd = whatchanged_res(netural, charged, msout.free_residues)
# for ir in range(len(msout.free_residues)):
# print("%s: %6.4f" % (conformers[msout.free_residues[ir][0]].resid, diff_bhd[ir]))
# charges = ms_convert2sumcrg(msout.microstates.values(), msout.free_residues)
# for ir in range(len(msout.free_residues)):
# print("%s: %6.4f" % (conformers[msout.free_residues[ir][0]].resid, charges[ir]))
microstates = list(msout.microstates.values())
glu35_charged, _ = groupms_byconfid(microstates, ["GLU-1A0035"])
print(len(microstates))
print(len(glu35_charged))
|
{"hexsha": "87c9679bf26844c79f2c522c4555931069adb57e", "size": 12887, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/ms_analysis.py", "max_stars_repo_name": "umeshkhaniya/Stable-MCCE", "max_stars_repo_head_hexsha": "b037a417e722f46030fdd5e24e5bb44513440559", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-03-03T03:02:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T09:54:14.000Z", "max_issues_repo_path": "bin/ms_analysis.py", "max_issues_repo_name": "umeshkhaniya/Stable-MCCE", "max_issues_repo_head_hexsha": "b037a417e722f46030fdd5e24e5bb44513440559", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 89, "max_issues_repo_issues_event_min_datetime": "2019-07-28T13:22:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T18:06:25.000Z", "max_forks_repo_path": "bin/ms_analysis.py", "max_forks_repo_name": "umeshkhaniya/Stable-MCCE", "max_forks_repo_head_hexsha": "b037a417e722f46030fdd5e24e5bb44513440559", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-07-28T14:01:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T00:16:48.000Z", "avg_line_length": 29.2222222222, "max_line_length": 126, "alphanum_fraction": 0.5555986653, "include": true, "reason": "import numpy", "num_tokens": 3485}
|
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: Adrien Suau (adrien.suau@cerfacs.fr)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Implementation of the group_commutator for :math:`SU(d)` matrices."""
import typing
import numpy
import qtoolkit.maths.matrix.su2.group_commutator as gc_su2
import qtoolkit.utils.types as qtypes
def group_commutator(
U: qtypes.SUdMatrix
) -> typing.Tuple[qtypes.SUdMatrix, qtypes.SUdMatrix]:
"""Finds :math:`V, W \\in U(d) \\mid U = V W V^\\dagger W^\\dagger`.
.. note::
The implementation of this method is based on `this implementation by
Paul Pham \
<https://github.com/cryptogoth/skc-python/blob/master/skc/group_factor.py#L75>`.
:param U: The unitary matrix in :math:`U(d)` to decompose.
:return: a tuple containing (:math:`V`, :math:`W`).
"""
dim = U.shape[0]
# We diagonalise the matrix.
eigvals, eigvecs = numpy.linalg.eig(U)
    Vt = numpy.identity(dim, dtype=complex)  # numpy.complex was removed in NumPy 1.24
    Wt = numpy.identity(dim, dtype=complex)
# We construct the 2*2 diagonal matrices from the eigenvalues.
for i in range(dim // 2):
U_i = numpy.diag(eigvals[2 * i : 2 * (i + 1)])
V_i, W_i = gc_su2.group_commutator(U_i)
a, b = 2 * i, 2 * i + 1
Vt[a : b + 1, a : b + 1] = V_i
Wt[a : b + 1, a : b + 1] = W_i
V, W = eigvecs @ Vt @ eigvecs.T.conj(), eigvecs @ Wt @ eigvecs.T.conj()
return V, W
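if __name__ == "__main__":
    # Minimal self-check (a sketch, not part of the original module): draw a
    # random 4x4 unitary via QR, rescale it into SU(4), and report how well
    # the returned V, W reproduce U through the commutator identity.
    rng = numpy.random.default_rng(0)
    A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
    Q, R = numpy.linalg.qr(A)
    U = Q @ numpy.diag(numpy.diag(R) / numpy.abs(numpy.diag(R)))
    U = U / numpy.linalg.det(U) ** (1 / 4)  # normalise the determinant to 1
    V, W = group_commutator(U)
    print("||V W V+ W+ - U|| =", numpy.linalg.norm(V @ W @ V.conj().T @ W.conj().T - U))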
|
{"hexsha": "16e924ae2eeebb12e8f96413b7d33463b3315d62", "size": 3031, "ext": "py", "lang": "Python", "max_stars_repo_path": "qtoolkit/maths/matrix/sud/group_commutator.py", "max_stars_repo_name": "nelimee/qtoolkit", "max_stars_repo_head_hexsha": "1e99bd7d3a143a327c3bb92595ea88ec12dbdb89", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-12-30T04:50:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-25T12:26:02.000Z", "max_issues_repo_path": "qtoolkit/maths/matrix/sud/group_commutator.py", "max_issues_repo_name": "nelimee/qtoolkit", "max_issues_repo_head_hexsha": "1e99bd7d3a143a327c3bb92595ea88ec12dbdb89", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qtoolkit/maths/matrix/sud/group_commutator.py", "max_forks_repo_name": "nelimee/qtoolkit", "max_forks_repo_head_hexsha": "1e99bd7d3a143a327c3bb92595ea88ec12dbdb89", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-08T15:59:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-08T15:59:46.000Z", "avg_line_length": 42.0972222222, "max_line_length": 87, "alphanum_fraction": 0.6611679314, "include": true, "reason": "import numpy", "num_tokens": 826}
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import moldesign as mdt
from moldesign import units as u
from .helpers import get_data_path
import pytest
from .molecule_fixtures import *
def test_inchain_residue_mutation_in_protein(pdb3aid):
_mutate_and_check(pdb3aid, 3, 'ALA',
{'C', 'CA', 'CB', 'HA', 'HB1', 'HB2', 'HB3', 'HN2', 'N', 'O'})
def test_nterm_residue_mutate_protein(pdb3aid):
_mutate_and_check(pdb3aid, 0, 'MET',
{'CE', 'HE1', 'C', 'N', 'HA', 'HB2', 'HE3', 'HG2', 'CG', 'CA', 'HG3',
'O', 'HB3', 'HN2', 'CB', 'HE2'})
def test_cterm_residue_mutate_protein(pdb3aid):
cterm = pdb3aid.chains['A'].c_terminal
_mutate_and_check(pdb3aid, cterm.index, 'LEU',
{'HD11', 'HD23', 'C', 'N', 'HD22', 'HA', 'HB2', 'CG', 'CA', 'CD2', 'HD21',
'O', 'CD1', 'HD12', 'HB3', 'HN2', 'HD13', 'CB', 'HG'})
def _mutate_and_check(mol, residx, resname, allatoms):
newmol = mdt.mutate_residues(mol, {mol.residues[residx]: resname})
assert newmol.num_chains == mol.num_chains
assert mol.num_residues == newmol.num_residues
foundnew = False
for i, (res, newres) in enumerate(zip(mol.residues, newmol.residues)):
if i == residx:
foundnew = True
assert newres.resname == resname
assert newres.name == resname+str(newres.pdbindex)
atomnames = set(atom.name for atom in newres)
assert len(atomnames) == newres.num_atoms
assert atomnames.issubset(allatoms)
else:
assert res.name == newres.name
assert res.num_atoms == newres.num_atoms
for oldatom, newatom in zip(res, newres):
assert oldatom.name == newatom.name
assert oldatom.atnum == newatom.atnum
if not foundnew:
assert oldatom.pdbindex == newatom.pdbindex
def test_mutate_docstring_dict_example(pdb3aid):
mol = pdb3aid
assert mol.residues[5].resname != 'ALA'
mut = mdt.mutate_residues(mol, {mol.residues[5]: 'ALA'}) # mutate residue 5 to ALA
assert mut.residues[5].resname == 'ALA'
def test_mutation_nomenclature_string_only(pdb3aid):
mol = pdb3aid
res25 = mol.get_residues(pdbindex=25)
assert len(res25) == 2
assert [r.resname for r in res25] == ['ASP', 'ASP']
    mut = mdt.mutate_residues(mol, 'D25M')  # Mutate ASP25 to MET25
assert mut.get_residues()
mut25 = mut.get_residues(pdbindex=25)
assert len(mut25) == 2
assert [r.resname for r in mut25] == ['MET', 'MET']
def test_mutation_topology(pdb1yu8):
""" Test the topology of the backbone atoms for a mutated molecule. """
molecule = pdb1yu8
mutation_residues = ["X.13G"]
mutated_molecule = mdt.mutate_residues(molecule, mutation_residues)
# Check that the number of bonds for backbone atoms match.
for res, mut_res in zip(molecule.residues, mutated_molecule.residues):
if not res.backbone:
continue
for atom in res.backbone:
bonds = [bond for bond in molecule.bond_graph[atom] if bond.name in res.backbone]
mut_atom = mutated_molecule.chains["X"].residues[mut_res.name].atoms[atom.name]
mut_bonds = mutated_molecule.bond_graph[mut_atom]
mut_bonds = [bond for bond in mutated_molecule.bond_graph[mut_atom] \
if bond.name in mut_res.backbone]
assert len(bonds) == len(mut_bonds)
@pytest.mark.screening
def test_multiple_mutations(pdb3aid):
mol = pdb3aid
mut = mdt.mutate_residues(mol, ['A.2S', 'B.3S']) # Mutate Chain A res 2 and B 3 to SER
assert [r.resname for r in mut.chains['A'].get_residues(pdbindex=2)] == ['SER']
assert [r.resname for r in mut.chains['B'].get_residues(pdbindex=3)] == ['SER']
def test_solvate_small_molecule_boxsize(benzene):
newmol = mdt.add_water(benzene, min_box_size=15.0*u.angstrom)
assert newmol.num_atoms > 50 # who knows how many? more than benzene though
def test_seawater_solvation_small_molecule(benzene):
newmol = mdt.add_water(benzene,
min_box_size=20.0*u.angstrom,
ion_concentration=0.6*u.molar)
assert newmol.num_atoms > 50 # who knows how many? more than benzene though
assert len(newmol.get_atoms(name='Cl')) == 3 # TODO: check that this is correct molarity
assert len(newmol.get_atoms(name='Na')) == 3 # TODO: check that this is correct molarity
@pytest.mark.screening
def test_solvation_alternative_ions(benzene):
newmol = mdt.add_water(benzene,
min_box_size=20.0*u.angstrom,
ion_concentration=0.6*u.molar,
positive_ion='Rb',
negative_ion='I')
assert newmol.num_atoms > 50 # who knows how many? more than benzene though
assert len(newmol.get_atoms(name='Rb')) == 3 # TODO: check that this is correct molarity
assert len(newmol.get_atoms(name='I')) == 3 # TODO: check that this is correct molarity
def test_solvate_protein_padding(pdb1yu8):
newmol = mdt.add_water(pdb1yu8, padding=5.0*u.angstrom)
assert newmol.num_atoms > pdb1yu8.num_atoms
oldmol = mdt.Molecule(newmol.residues[:pdb1yu8.num_residues])
assert oldmol.same_topology(pdb1yu8, verbose=True)
np.testing.assert_allclose(pdb1yu8.positions.value_in(u.angstrom),
oldmol.positions.value_in(u.angstrom),
atol=1e-3)
|
{"hexsha": "82ad69f93697bb2e3ebb91de69b7a31670866059", "size": 6277, "ext": "py", "lang": "Python", "max_stars_repo_path": "moldesign/_tests/test_pdbfixer_xface.py", "max_stars_repo_name": "Autodesk/molecular-design-toolkit", "max_stars_repo_head_hexsha": "5f45a47fea21d3603899a6366cb163024f0e2ec4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 147, "max_stars_repo_stars_event_min_datetime": "2016-07-15T18:53:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T04:36:39.000Z", "max_issues_repo_path": "moldesign/_tests/test_pdbfixer_xface.py", "max_issues_repo_name": "cherishyli/molecular-design-toolkit", "max_issues_repo_head_hexsha": "5f45a47fea21d3603899a6366cb163024f0e2ec4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 151, "max_issues_repo_issues_event_min_datetime": "2016-07-15T21:35:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-10T08:57:29.000Z", "max_forks_repo_path": "moldesign/_tests/test_pdbfixer_xface.py", "max_forks_repo_name": "cherishyli/molecular-design-toolkit", "max_forks_repo_head_hexsha": "5f45a47fea21d3603899a6366cb163024f0e2ec4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2016-08-02T00:04:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-02T10:05:04.000Z", "avg_line_length": 39.9808917197, "max_line_length": 96, "alphanum_fraction": 0.6527003346, "include": true, "reason": "import numpy", "num_tokens": 1767}
|
import numpy as np
from numpy.linalg import slogdet, solve
from numpy import log, pi
import pandas as pd
from scipy.special import expit
from .constants import mass_pion
from .kinematics import momentum_transfer_cm, cos0_cm_from_lab, omega_cm_from_lab
from .constants import omega_lab_cusp, dsg_label, DesignLabels
from sklearn.gaussian_process.kernels import RBF
import gsum as gm
def order_transition_old(n, n_inf, omega):
return n + (n_inf - n) * expit((omega-190)/20)
def order_transition_lower_orders(n, omega):
omega1 = 180.
omega2 = 240.
omegam = (omega1 + omega2) / 2.
f = 1. / (1 + np.exp(4 * np.log(3) * (omega - omegam) / (omega1 - omega2)))
if n % 2 == 0:
return (1 - f / 2.) * n
elif n % 2 == 1:
return (1 - f / 2.) * n - 5 * f / 2.
raise ValueError('n must be an integer')
def order_transition_truncation(n, omega, n_inf):
omega1 = 180.
omega2 = 240.
omegam = (omega1 + omega2) / 2.
f = 1. / (1 + np.exp(4 * np.log(3) * (omega - omegam) / (omega1 - omega2)))
return n - (n - n_inf) * f
def expansion_parameter(X, breakdown):
X = np.atleast_2d(X)
return np.squeeze((X[:, 0] + mass_pion) / breakdown)
def expansion_parameter_phillips(breakdown, factor=1):
return np.sqrt(mass_pion * factor / breakdown)
def expansion_parameter_cm(X, breakdown, mass, factor=1.):
X = np.atleast_2d(X)
omega_lab, _ = X.T
omega_cm = omega_cm_from_lab(omega_lab, mass=mass)
num = omega_cm + mass_pion
num = num * factor
# num = (omega_cm + mass_pion) / 2
return np.squeeze(np.sqrt(num / breakdown))
def expansion_parameter_momentum_transfer_cm(X, breakdown, mass, include_correction=False):
X = np.atleast_2d(X)
omega_lab, cos0_lab = X.T
cos0_lab = np.cos(np.deg2rad(cos0_lab))
omega_cm = omega_cm_from_lab(omega_lab, mass=mass)
cos0_cm = cos0_cm_from_lab(omega_lab, mass, cos0_lab)
# q = momentum_transfer_cm(omega_cm, cos0_cm)
num = omega_cm + mass_pion
# num = (omega_cm + mass_pion) / 2
if include_correction:
# height = 200
# omega_width = 50
height = 150
omega_width = 150
cos0_width = 1
lorentz = height / (
((omega_lab - omega_lab_cusp) / omega_width) ** 2 + ((cos0_lab - 1) / cos0_width) ** 2 + 1
)
num += lorentz
from scipy.special import softmax, logsumexp
# num = softmax([q, omega_cm], axis=0)
# num = logsumexp([q, omega_cm], axis=0)
# num = (q + omega_cm) / 2.
# return np.squeeze(num / breakdown)
return np.squeeze(np.sqrt(num / breakdown))
def compute_expansion_summation_matrix(Q, first_omitted_order):
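    # Annotation (my reading, not original docs): entry (i, j) below equals
    # Q_i^k Q_j^k / (1 - Q_i Q_j) = sum_{n >= k} (Q_i Q_j)^n, i.e. the
    # geometric-series kernel over all omitted orders n >= k, with
    # k = first_omitted_order.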
Q_mat = Q[:, None] * Q
Q_to_omitted = Q ** first_omitted_order
return Q_to_omitted[:, None] * Q_to_omitted / (1 - Q_mat)
def coefficients(y, ratio, ref=1, orders=None):
"""Returns the coefficients of a power series
Parameters
----------
y : array, shape = (n_samples, n_curves)
ratio : scalar or array, shape = (n_samples,)
ref : scalar or array, shape = (n_samples,)
orders : 1d array, optional
The orders at which y was computed. Defaults to 0, 1, ..., n_curves-1
Returns
-------
An (n_samples, n_curves) array of the extracted coefficients
"""
if y.ndim != 2:
raise ValueError('y must be 2d')
if orders is None:
orders = np.arange(y.shape[-1])
if orders.shape[-1] != y.shape[-1]:
        raise ValueError('y and orders must have the same length')
ref, ratio, orders = np.atleast_1d(ref, ratio, orders)
ref = ref[:, None]
ratio = ratio[:, None]
# Make coefficients
coeffs = np.diff(y, axis=-1) # Find differences
coeffs = np.insert(coeffs, 0, y[..., 0], axis=-1) # But keep leading term
coeffs = coeffs / (ref * ratio**orders) # Scale each order appropriately
return coeffs
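def _example_coefficients():
    """Sketch (not in the original module): recover known coefficients from toy
    order-by-order partial sums y_n = ref * sum_{m <= n} c_m Q^m."""
    Q = np.array([0.3, 0.5])
    ref = np.array([2.0, 4.0])
    c_true = np.array([[1.0, -0.5, 0.25], [0.8, 0.1, -0.3]])
    orders = np.arange(3)
    partials = np.cumsum(c_true * ref[:, None] * Q[:, None] ** orders, axis=-1)
    np.testing.assert_allclose(coefficients(partials, Q, ref, orders), c_true)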
def compute_idx_mat(n):
idx = np.arange(n)
idx_rows, idx_cols = np.broadcast_arrays(idx[:, None], idx)
idx_mat = np.dstack([idx_rows, idx_cols])
return idx_mat
def p_sq_grad_coeff_mat(n):
n_rows = int(n * (n + 1) / 2)
idx_mat = compute_idx_mat(n)
idx_vec = idx_mat[np.triu_indices(n)]
p_sq_grad = np.zeros((n_rows, n))
for i in range(n):
p_sq_grad[:, i] = np.sum(idx_vec == i, axis=1)
return p_sq_grad
def p_sq_grad_idx_mat(n):
idx_mat = compute_idx_mat(n)
idx1, idx2 = np.triu_indices(idx_mat.shape[0])
idx_mat_tri = idx_mat[idx1, idx2, :]
n_rows = int(n * (n + 1) / 2)
idx_mat = np.zeros((n_rows, n), dtype=int)
for i in range(n):
mask = np.any(idx_mat_tri == i, axis=1)
idx_mat[mask, i] = np.arange(np.sum(mask), dtype=int)
return idx_mat
def quadratic(x, A, b, c, flat=True):
R"""Computes a multivariate quadratic function.
Parameters
----------
x : array, shape = (p,)
The input variables
    A : array, shape = (N, p(p+1)/2)
        The flattened (upper-triangular) quadratic coefficients
    b : array, shape = (N, p)
        The linear coefficients
    c : array, shape = (N,)
        The constant term
    flat : bool, optional
        If True, `A` holds flattened upper-triangular coefficients;
        if False, `A` has shape (N, p, p)
Returns
-------
array, shape = (N,)
"""
if flat:
x = np.atleast_1d(x)
x_sq = x[:, None] * x
x_quad = x_sq[np.triu_indices_from(x_sq)]
quad = A @ x_quad
else:
quad = np.einsum('...ij,i,j', A, x, x)
return quad + b @ x + c
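def _example_quadratic():
    """Sketch (not in the original module): the flat and full quadratic forms
    agree. For p = 2 the flat layout is [x1^2, x1*x2, x2^2] (upper triangle),
    so the off-diagonal coefficient must be doubled when flattening."""
    x = np.array([1.0, 2.0])
    A_full = np.array([[[1.0, 0.5], [0.5, 2.0]]])  # one symmetric quadratic form
    A_flat = A_full[0][np.triu_indices(2)][None, :] * np.array([1.0, 2.0, 1.0])
    b = np.array([[0.0, 1.0]])
    c = np.array([3.0])
    np.testing.assert_allclose(quadratic(x, A_flat, b, c, flat=True),
                               quadratic(x, A_full, b, c, flat=False))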
def grad_quadratic(x, A, b, c, flat=True):
R"""Computes the gradient of a multivariate quadratic function.
Parameters
----------
x : array, shape = (p,)
The input variables
A : array, shape = (N, p(p+1)/2)
The flattened quadratic coefficients
b : array, shape = (N, p)
The linear coefficients
c : array, shape = (N,)
The constant term
    flat : bool, optional
        See ``quadratic``
Returns
-------
array, shape = (p, N)
"""
if flat:
x = np.atleast_1d(x)
n = len(x)
coeff_mat = p_sq_grad_coeff_mat(n)
idx_mat = p_sq_grad_idx_mat(n)
x_sq_grad = coeff_mat * x[idx_mat]
quad = A @ x_sq_grad
else:
A_trans = np.swapaxes(A, -1, -2)
quad = (A + A_trans) @ x
return (quad + b).T
def quad_ratio(x, An, bn, cn, Ad, bd, cd, flat=True):
R"""Computes the ratio of multivariate quadratic functions.
Parameters
----------
x : array, shape = (p,)
The input variables
An : array, shape = (N, p, p)
The quadratic coefficients of the numerator
bn : array, shape = (N, p)
The linear coefficients of the numerator
cn : array, shape = (N,)
The constant term of the numerator
Ad : array, shape = (N, p, p)
The quadratic coefficients of the denominator
bd : array, shape = (N, p)
The linear coefficients of the denominator
cd : array, shape = (N,)
The constant term of the denominator
    flat : bool, optional
        See ``quadratic``
Returns
-------
array, shape = (N,)
"""
return quadratic(x, An, bn, cn, flat=flat) / quadratic(x, Ad, bd, cd, flat=flat)
def grad_quad_ratio(x, An, bn, cn, Ad, bd, cd, flat=True):
R"""Computes the gradient of the ratio of multivariate quadratic functions.
Parameters
----------
x : array, shape = (p,)
The input variables
An : array, shape = (N, p, p)
The quadratic coefficients of the numerator
bn : array, shape = (N, p)
The linear coefficients of the numerator
cn : array, shape = (N,)
The constant term of the numerator
Ad : array, shape = (N, p, p)
The quadratic coefficients of the denominator
bd : array, shape = (N, p)
The linear coefficients of the denominator
cd : array, shape = (N,)
The constant term of the denominator
    flat : bool, optional
        See ``quadratic``
Returns
-------
array, shape = (p, N)
"""
fn = quadratic(x, An, bn, cn, flat=flat)
grad_fn = grad_quadratic(x, An, bn, cn, flat=flat)
fd = quadratic(x, Ad, bd, cd, flat=flat)
grad_fd = grad_quadratic(x, Ad, bd, cd, flat=flat)
return grad_fn / fd - fn / fd ** 2 * grad_fd
def create_linearized_matrices(x0, An, bn, cn, Ad, bd, cd, flat=True):
f0 = quad_ratio(x0, An, bn, cn, Ad, bd, cd, flat=flat)
grad_f0 = grad_quad_ratio(x0, An, bn, cn, Ad, bd, cd, flat=flat)
return f0 - x0 @ grad_f0, grad_f0.T
def posterior_precision_linear(X, cov_data, prec_p):
R"""Computes the posterior precision for parameters under a linear Gaussian model
X : np.ndarray, shape = (n_data, n_features)
The feature matrix
cov_data : np.ndarray, shape = (n_data, n_data)
The covariance matrix for the data
prec_p : np.ndarray, shape = (n_features, n_features)
The prior precision on the parameters
"""
return prec_p + X.T @ solve(cov_data, X)
def shannon_expected_utility(X, cov_data, prec_p):
R"""Computes the expected utility using the Shannon information, or the KL divergence
X : np.ndarray, shape = (n_data, n_features)
The feature matrix
cov_data : np.ndarray, shape = (n_data, n_data)
The covariance matrix for the data
prec_p : np.ndarray, shape = (n_features, n_features)
The prior precision on the parameters
"""
_, log_det = slogdet(prec_p + X.T @ solve(cov_data, X)) # The negative of log |V|
_, log_det_prec = slogdet(prec_p) # The negative of log |V_0|
return 0.5 * (log_det - log_det_prec)
# p = prec_p.shape[0]
# return 0.5 * (- p * log(2 * pi) - p + log_det)
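def _example_shannon_expected_utility():
    """Sketch (not in the original module): two unit-noise measurements of one
    parameter with unit prior precision give utility log(3)/2."""
    X = np.ones((2, 1))
    util = shannon_expected_utility(X, np.eye(2), np.eye(1))
    np.testing.assert_allclose(util, 0.5 * np.log(3.0))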
def create_observable_set(df, cov_exp=0., p0_proton=None, cov_p_proton=None, p0_neutron=None,
cov_p_neutron=None, scale_dsg=True, p_transform=None, expts_info=None):
from compton import proton_pol_vec_mean, neutron_pol_vec_mean, proton_pol_vec_std, neutron_pol_vec_std
proton_pol_cov = np.diag(proton_pol_vec_std)
neutron_pol_cov = np.diag(neutron_pol_vec_std)
groups = df.groupby(['observable', 'nucleon', 'order'])
compton_obs = {}
lin_vec = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6']
quad_vec = [col for col in df.columns if col[0] == 'C']
for (obs, nucleon, order), index in groups.groups.items():
if obs == 'crosssection':
obs = 'dsg'
df_i = df.loc[index]
df_n = df_i[df_i['is_numerator'] == 1]
df_d = df_i[df_i['is_numerator'] == 0]
cov_p = None
p0 = None
if nucleon == DesignLabels.proton:
cov_p = cov_p_proton
if cov_p_proton is None:
cov_p = proton_pol_cov
p0 = p0_proton
if p0_proton is None:
p0 = proton_pol_vec_mean
elif nucleon == DesignLabels.neutron:
cov_p = cov_p_neutron
if cov_p_neutron is None:
cov_p = neutron_pol_cov
p0 = p0_neutron
if p0_neutron is None:
p0 = neutron_pol_vec_mean
else:
raise ValueError('nucleon must be Proton or Neutron')
obs_kwargs = dict(
omega_lab=df_n['omegalab [MeV]'].values,
degrees_lab=df_n['thetalab [deg]'].values,
quad_n=df_n[quad_vec].values,
lin_n=df_n[lin_vec].values,
const_n=df_n['A'].values,
quad_d=df_d[quad_vec].values,
lin_d=df_d[lin_vec].values,
const_d=df_d['A'].values,
# name=obs if obs != 'crosssection' else 'dsg',
name=obs,
order=order,
nucleon=nucleon,
cov_p=cov_p,
trans_mat=p_transform,
)
compton_obs[obs, nucleon, order, 'nonlinear'] = ComptonObservable(**obs_kwargs)
if expts_info is None:
try:
cov_exp_i = cov_exp[obs, nucleon]
except (TypeError, IndexError):
if np.atleast_1d(cov_exp).ndim == 1:
cov_exp_i = cov_exp * np.eye(len(df_n))
else:
cov_exp_i = cov_exp.copy()
# if obs == 'crosssection' and scale_dsg:
if (obs == 'dsg' or obs == dsg_label) and scale_dsg:
pred_i = compton_obs[obs, nucleon, order, 'nonlinear'](p0)
cov_exp_i *= pred_i[:, None] * pred_i
else:
if nucleon == DesignLabels.proton:
cov_exp_i = expts_info[obs].cov_proton
elif nucleon == DesignLabels.neutron:
cov_exp_i = expts_info[obs].cov_neutron
else:
raise ValueError('nucleon must be Proton or Neutron')
compton_obs[obs, nucleon, order, 'linear'] = ComptonObservable(**obs_kwargs, p0=p0, cov_data=cov_exp_i)
return compton_obs
class ComptonObservable:
R"""
"""
def __init__(self, omega_lab, degrees_lab, quad_n, lin_n, const_n, quad_d, lin_d, const_d, order, name, nucleon,
p0=None, cov_data=None, cov_p=None, trans_mat=None):
self.omega_lab = omega_lab
self.degrees_lab = degrees_lab
self.order = order
self.name = name
self.nucleon = nucleon
self.linearized = False
self.quad_n = quad_n
self.lin_n = lin_n
self.const_n = const_n
self.quad_d = quad_d
self.lin_d = lin_d
self.const_d = const_d
self.p0 = p0
self.cov_data = cov_data
self.cov_p = cov_p
if cov_p is not None:
self.prec_p = np.linalg.inv(cov_p)
else:
self.prec_p = None
self.n_data = len(self.quad_n)
self.trans_mat = trans_mat
if (cov_p is not None) and not np.allclose(cov_p, cov_p.T):
print(f'Warning: Parameter covariance is not symmetric. name={name}, nucleon={nucleon}')
if (cov_data is not None) and not np.allclose(cov_data, cov_data.T):
print(f'Warning: Data covariance is not symmetric. name={name}, nucleon={nucleon}')
if p0 is not None:
self.linearized = True
const, lin = create_linearized_matrices(p0, quad_n, lin_n, const_n, quad_d, lin_d, const_d, flat=True)
self.const_approx = const
self.lin_approx = lin
if trans_mat is not None:
self.lin_approx = lin @ np.linalg.inv(trans_mat)
self.pred = self.prediction_linear
else:
self.const_approx = None
self.lin_approx = None
self.pred = self.prediction_ratio
def __call__(self, p):
return self.pred(p)
def prediction_ratio(self, p):
if self.trans_mat is not None:
p = np.linalg.inv(self.trans_mat) @ p
p_sq = p[:, None] * p
p_quad = p_sq[np.triu_indices_from(p_sq)]
num = self.quad_n @ p_quad + self.lin_n @ p + self.const_n
den = self.quad_d @ p_quad + self.lin_d @ p + self.const_d
return num / den
def prediction_linear(self, p):
return self.lin_approx @ p + self.const_approx
def utility_linear(self, idx, p_idx=None):
R"""Computes the expected shannon utility under the linear model assumption
Parameters
----------
idx : int or array
The data set index used to denote the design of the experiment
p_idx : int or array, optional
The subset of theory parameters used in the computation of the expected utility. Defaults to `None`,
which uses all theory parameters in the utility
Returns
-------
expected_utility : float
"""
X = self.lin_approx[idx]
cov = self.cov_data[idx][:, idx]
p_precision = self.prec_p
if p_idx is not None:
X = X[:, p_idx]
p_precision = p_precision[p_idx][:, p_idx]
return shannon_expected_utility(X, cov, p_precision)
def correlation_matrix(self, idx, p_idx=None):
X = self.lin_approx[idx]
cov = self.cov_data[idx][:, idx]
p_precision = self.prec_p
if p_idx is not None:
X = X[:, p_idx]
p_precision = p_precision[p_idx][:, p_idx]
# print(np.count_nonzero(cov - cov.T))
post_cov = np.linalg.inv(posterior_precision_linear(X, cov, p_precision))
post_stds = np.sqrt(np.diag(post_cov))
# print(post_cov)
return post_stds[:, None]**(-1) * post_cov * post_stds**(-1)
def __repr__(self):
name = f'{self.name}({self.order}, {self.nucleon})'
if self.p0 is not None:
name += f' about {self.p0}'
return name
class ConvergenceAnalyzer:
exp_param_funcs = {
'sum': expansion_parameter_cm,
        # note: __init__ applies factor=0.5 for 'halfsum', so bind it here too
        'halfsum': lambda *args, **kwargs: expansion_parameter_cm(*args, **kwargs, factor=0.5),
'sumsq': lambda *args, **kwargs: expansion_parameter_cm(*args, **kwargs) ** 2,
'phillips': expansion_parameter_phillips,
}
def __init__(
self, name, nucleon, X, y, orders, train, ref, breakdown, excluded, exp_param='sum',
delta_transition=True, degrees_zeros=None, omega_zeros=None,
degrees_deriv_zeros=None, omega_deriv_zeros=None, **kwargs
):
from compton import DesignLabels, mass_proton, mass_neutron
self.name = name
self.X = X
self.y = y
self.orders = orders
self.train = train
self.excluded = excluded
self.ref = ref
self.breakdown = breakdown
self.kwargs = kwargs
self.degrees_zeros = degrees_zeros
self.omega_zeros = omega_zeros
self.degrees_deriv_zeros = degrees_deriv_zeros
self.omega_deriv_zeros = omega_deriv_zeros
# from gsum import cartesian
# self.X_zeros = cartesian(self.omega_zeros, self.degrees_zeros)
self.exp_param = exp_param
self.exp_param_func = self.exp_param_funcs[exp_param]
if nucleon == DesignLabels.proton:
mass = mass_proton
elif nucleon == DesignLabels.neutron:
mass = mass_neutron
else:
raise ValueError('nucleon must be DesignLabels.proton or DesignLabels.neutron')
self.nucleon = nucleon
self.mass = mass
included = ~ np.isin(orders, excluded)
self.included = included
if delta_transition:
from compton.constants import order_map
# order_map = {0: 0, 2: 1, 3: 2, 4: 3}
# ord_vals = np.array([order_transition(order, order_map[order], X[:, 0]) for order in orders]).T
ord_vals = np.array([order_transition_lower_orders(order, X[:, 0]) for order in orders]).T
else:
ord_vals = np.array([np.broadcast_to(order, X.shape[0]) for order in orders]).T
self.ord_vals = ord_vals
if exp_param == 'sum':
Q = expansion_parameter_cm(X, breakdown, mass=mass)
elif exp_param == 'halfsum':
Q = expansion_parameter_cm(X, breakdown, mass=mass, factor=0.5)
elif exp_param == 'sumsq':
Q = expansion_parameter_cm(X, breakdown, mass=mass)**2
elif exp_param == 'phillips':
Q = np.broadcast_to(expansion_parameter_phillips(breakdown), X.shape[0])
        else:
            raise ValueError(f'unknown exp_param: {exp_param!r}')
self.Q = Q
self.c = c = coefficients(y, Q, ref, ord_vals)
self.c_included = c[:, included]
self.X_train = self.X[train]
self.y_train = self.y[train][:, included]
self.c_train = c[train][:, included]
gp = gm.ConjugateGaussianProcess(**kwargs)
# print(self.c_train.shape)
gp.fit(self.X_train, self.c_train)
print('Fit kernel:', gp.kernel_)
self.cbar = np.sqrt(gp.cbar_sq_mean_)
print('cbar mean:', self.cbar)
self.gp = gp
def compute_conditional_cov(self, X, gp=None):
if gp is None:
gp = gm.ConjugateGaussianProcess(**self.kwargs)
gp.fit(self.X_train, self.c_train)
if self.degrees_zeros is None and self.omega_zeros is None:
return gp.cov(X)
[ls_omega, ls_degrees] = gp.kernel_.k1.get_params()['length_scale']
std = np.sqrt(gp.cbar_sq_mean_)
w = X[:, [0]]
t = X[:, [1]]
import gptools
kern_omega = gptools.SquaredExponentialKernel(
initial_params=[1, ls_omega], fixed_params=[True, True])
kern_theta = gptools.SquaredExponentialKernel(
initial_params=[1, ls_degrees], fixed_params=[True, True])
gp_omega = gptools.GaussianProcess(kern_omega)
gp_theta = gptools.GaussianProcess(kern_theta)
# gp_omega.add_data(np.array([[0], [0]]), np.array([0, 0]), n=np.array([0, 1]))
if self.omega_zeros is not None or self.omega_deriv_zeros is not None:
w_z = []
n_w = []
if self.omega_zeros is not None:
w_z.append(self.omega_zeros)
n_w.append(np.zeros(len(self.omega_zeros)))
if self.omega_deriv_zeros is not None:
w_z.append(self.omega_deriv_zeros)
n_w.append(np.ones(len(self.omega_deriv_zeros)))
w_z = np.concatenate(w_z)[:, None]
n_w = np.concatenate(n_w)
print(w_z, n_w)
gp_omega.add_data(w_z, np.zeros(w_z.shape[0]), n=n_w)
_, K_omega = gp_omega.predict(w, np.zeros(w.shape[0]), return_cov=True)
else:
K_omega = gp_omega.compute_Kij(w, w, np.zeros(w.shape[0]), np.zeros(w.shape[0]))
if self.degrees_zeros is not None or self.degrees_deriv_zeros is not None:
t_z = []
n_t = []
if self.degrees_zeros is not None:
t_z.append(self.degrees_zeros)
n_t.append(np.zeros(len(self.degrees_zeros)))
if self.degrees_deriv_zeros is not None:
t_z.append(self.degrees_deriv_zeros)
n_t.append(np.ones(len(self.degrees_deriv_zeros)))
t_z = np.concatenate(t_z)[:, None]
n_t = np.concatenate(n_t)
gp_theta.add_data(t_z, np.zeros(t_z.shape[0]), n=n_t)
_, K_theta = gp_theta.predict(t, np.zeros(t.shape[0]), return_cov=True)
else:
K_theta = gp_theta.compute_Kij(t, t, np.zeros(t.shape[0]), np.zeros(t.shape[0]))
# kernel_omega = RBF(ls_omega)
# kernel_theta = RBF(ls_degrees)
# if self.omega_zeros is not None:
#
# w_z = np.atleast_1d(self.omega_zeros)[:, None]
#
# K_omega = kernel_omega(w) - kernel_omega(w, w_z) @ np.linalg.solve(kernel_omega(w_z), kernel_omega(w_z, w))
# else:
# K_omega = kernel_omega(w)
#
# if self.degrees_zeros is not None:
# t_z = np.atleast_1d(self.degrees_zeros)[:, None]
# K_theta = kernel_theta(t) - kernel_theta(t, t_z) @ np.linalg.solve(kernel_theta(t_z), kernel_theta(t_z, t))
# else:
# K_theta = kernel_theta(t)
return std**2 * K_omega * K_theta
# X_z = self.X_zeros
# K_nn = gp.cov(X)
# K_zz = gp.cov(X_z)
# K_nz = gp.cov(X, X_z)
# K_zn = gp.cov(X_z, X)
# return K_nn - K_nz @ np.linalg.solve(K_zz, K_zn)
def plot_coefficient_slices(self, omegas, thetas, axes=None):
import matplotlib.pyplot as plt
assert len(omegas) == len(thetas)
n = len(omegas)
if axes is None:
fig, axes = plt.subplots(n, 2, figsize=(3.4, 1.2 * n), sharex='col', sharey=True)
fig = plt.gcf()
ymax_w = 0
ymax_t = 0
color_list = ['Oranges', 'Greens', 'Blues', 'Reds', 'Purples', 'Greys']
cmaps = [plt.get_cmap(name) for name in color_list]
# colors = [cmap(0.65 - 0.1 * (i == 0)) for i, cmap in enumerate(cmaps)]
colors = ['k', plt.get_cmap('Greens')(0.8), plt.get_cmap('Blues')(0.65), plt.get_cmap('Reds')(0.6)]
# cbar = self.cbar
# linestyles = ['-', '--', '-.', ':']
# linestyles = ['-', (0, (5, 1)), (0, (3, 1, 1, 1)), (0, (1, 1))]
linestyles = [(0, (5, 1)), '-', (0, (3, 1, 1, 1)), (0, (1, 1)), ]
linewidths = [1, 1.0, 1.1, 1.1]
cov = self.compute_conditional_cov(self.X)
std = np.sqrt(np.diag(cov))
# cov = self.gp.cov(self.X)
for i, (omega_i, theta_i) in enumerate(zip(omegas, thetas)):
ax_w, ax_t = axes[i]
omega_mask = self.X[:, 1] == theta_i
theta_mask = self.X[:, 0] == omega_i
omega_vals = self.X[omega_mask, 0]
theta_vals = self.X[theta_mask, 1]
std_omega = np.sqrt(np.diag(cov[omega_mask][:, omega_mask]))
std_theta = np.sqrt(np.diag(cov[theta_mask][:, theta_mask]))
orders = self.orders
c_w = self.c[omega_mask]
c_t = self.c[theta_mask]
for j, n in enumerate(orders):
ax_w.plot(
omega_vals, c_w[:, j], color=colors[j], label=f'$c_{{{n}}}$',
ls=linestyles[j], lw=linewidths[j], zorder=j/10
)
ax_t.plot(
theta_vals, c_t[:, j], color=colors[j], label=f'$c_{{{n}}}$',
ls=linestyles[j], lw=linewidths[j], zorder=j/10
)
ax_w.axhline(0, 0, 1, c='k', lw=1, zorder=-1)
ax_t.axhline(0, 0, 1, c='k', lw=1, zorder=-1)
bbox = dict(boxstyle='round', facecolor='w')
ax_w.text(
0.93, 0.9, fr'$\theta = {theta_i}^\circ$', transform=ax_w.transAxes,
bbox=bbox, ha='right', va='top',
)
ax_t.text(
0.93, 0.9, fr'$\omega = {omega_i}\,$MeV', transform=ax_t.transAxes,
bbox=bbox, ha='right', va='top',
)
# ax_w.axhline(-2 * cbar, 0, 1, c='lightgrey', lw=1, zorder=-1)
# ax_w.axhline(+2 * cbar, 0, 1, c='lightgrey', lw=1, zorder=-1)
# ax_t.axhline(-2 * cbar, 0, 1, c='lightgrey', lw=1, zorder=-1)
# ax_t.axhline(+2 * cbar, 0, 1, c='lightgrey', lw=1, zorder=-1)
std_lw = 1.2
# ax_w.plot(omega_vals, + 2 * std_omega, c='lightgrey', lw=std_lw, zorder=-1, label=r'$2\sigma$')
# ax_w.plot(omega_vals, - 2 * std_omega, c='lightgrey', lw=std_lw, zorder=-1)
# ax_t.plot(theta_vals, + 2 * std_theta, c='lightgrey', lw=std_lw, zorder=-1, label=r'$2\sigma$')
# ax_t.plot(theta_vals, - 2 * std_theta, c='lightgrey', lw=std_lw, zorder=-1)
ax_w.fill_between(
omega_vals, + 2 * std_omega, - 2 * std_omega, facecolor='0.92', lw=0.7,
zorder=-1, label=r'$2\sigma$', edgecolor='0.6'
)
ax_t.fill_between(
theta_vals, + 2 * std_theta, - 2 * std_theta, facecolor='0.92', lw=0.7,
zorder=-1, label=r'$2\sigma$', edgecolor='0.6'
)
ymax_w = np.max(np.abs(ax_w.get_ylim()))
ymax_t = np.max(np.abs(ax_t.get_ylim()))
ymax = np.max([ymax_w, ymax_t])
if ymax > 4.2 * np.max(std):
ymax = 4.2 * np.max(std)
if self.nucleon == 'Neutron':
title = f'{self.name}, {self.nucleon}'
else:
title = f'{self.name}'
# with plt.rc_context({"text.usetex": True, "text.latex.preview": True}):
axes[0, 0].text(
0.07, 0.9, title, transform=axes[0, 0].transAxes,
bbox=bbox, ha='left', va='top',
)
# plt.draw()
ax_w.set_ylim(-ymax, ymax)
ax_t.set_ylim(-ymax, ymax)
        ax_w.set_xlabel(r'$\omega_{\mathrm{lab}}\,$[MeV]')
        ax_t.set_xlabel(r'$\theta_{\mathrm{lab}}\,$[deg]')
ax_w.set_xticks([100, 200, 300])
ax_w.set_xticks([50, 150, 250], minor=True)
ax_t.set_xticks([60, 120])
ax_t.set_xticks([30, 90, 150], minor=True)
ax_w.set_xlim(self.X[:, 0].min(), 340)
ax_t.set_xlim(self.X[:, 1].min(), self.X[:, 1].max())
# fig.suptitle(f'{self.name}, {self.nucleon}')
# axes[0, 0].set_title(f'{self.name}, {self.nucleon}')
from matplotlib.ticker import AutoMinorLocator, MaxNLocator
axes[0, 0].yaxis.set_major_locator(MaxNLocator(3))
axes[0, 0].yaxis.set_minor_locator(AutoMinorLocator(2))
for ax in axes.ravel():
ax.tick_params(right=True, top=True, which='both')
fig.set_constrained_layout_pads(w_pad=1 / 72, h_pad=1 / 72)
plt.draw()
upper_right_display = axes[0, 1].transAxes.transform((1, 1))
upper_right_axes00 = axes[0, 0].transAxes.inverted().transform(upper_right_display)
axes[0, 0].legend(
loc='lower left', bbox_to_anchor=(0, 1.03, upper_right_axes00[0], 0), borderaxespad=0, ncol=5,
mode='expand',
columnspacing=0,
handletextpad=0.5,
# handlelength=1.2,
fancybox=False,
)
return axes
def log_marginal_likelihood(self, **kwargs):
included = self.included
orders = self.orders
ord_vals = self.ord_vals
ref = self.ref
train = self.train
breakdown = self.breakdown
X = self.X
mass = self.mass
if self.exp_param == 'sum' or self.exp_param == 'halfsum':
Q = expansion_parameter_cm(X, breakdown, mass=mass, **kwargs)
elif self.exp_param == 'sumsq':
Q = expansion_parameter_cm(X, breakdown, mass=mass, **kwargs)**2
elif self.exp_param == 'phillips':
Q = expansion_parameter_phillips(breakdown, **kwargs)
Q = np.broadcast_to(Q, X.shape[0])
        else:
            raise ValueError(f'unknown exp_param: {self.exp_param!r}')
coeffs = coefficients(self.y, Q, self.ref, ord_vals)
coeffs = coeffs[self.train][:, included]
gp = gm.ConjugateGaussianProcess(**self.kwargs)
gp.fit(self.X_train, coeffs)
# K = self.compute_conditional_cov(self.X_train, gp)
# alpha = np.linalg.solve(K, coeffs)
# coeff_log_like = -0.5 * np.einsum('ik,ik->k', coeffs, alpha) - \
# 0.5 * np.linalg.slogdet(2 * np.pi * K)[-1]
# coeff_log_like = coeff_log_like.sum()
coeff_log_like = gp.log_marginal_likelihood_value_
orders_in = orders[included]
n = len(orders_in)
try:
ref_train = ref[train]
except TypeError:
ref_train = ref
det_factor = np.sum(
n * np.log(np.abs(ref_train)) +
np.sum(ord_vals[train][:, included], axis=1) * np.log(np.abs(Q[train]))
)
y_log_like = coeff_log_like - det_factor
return y_log_like
class RBFJump(RBF):
R"""An RBF Kernel that creates draws with a jump discontinuity in the function and all of its derivatives.
    See the scikit-learn documentation for info on the original RBF kernel.
The interesting new parameter is jump, which must have the same dimension as length_scale.
This is the location of the jump, and the space with X < jump will be separated from X > jump.
Thus, if dimension i has no jump, then one must set `jump[i] = np.inf`.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), jump=None):
super().__init__(length_scale=length_scale, length_scale_bounds=length_scale_bounds)
self.jump = jump
def __call__(self, X, Y=None, eval_gradient=False):
# if eval_gradient:
# raise ValueError('gradients not implemented for jump kernel yet')
K = super().__call__(X, Y=Y, eval_gradient=eval_gradient)
if self.jump is None:
return K
if Y is None:
Y = X
mask_X = np.any(X > self.jump, axis=1)
mask_Y = np.any(Y > self.jump, axis=1)
# We want to find all pairs (x, x') where one is > jump and the other is < jump.
# These points should be uncorrelated with one another.
# We can use the XOR (exclusive or) operator to find all such pairs.
zeros_mask = mask_X[:, None] ^ mask_Y
if eval_gradient:
K, dK = K
K[zeros_mask] = 0.
dK[zeros_mask] = 0.
return K, dK
K[zeros_mask] = 0.
return K
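def _example_rbf_jump():
    """Sketch (not in the original module): points on opposite sides of the
    jump are uncorrelated, so the kernel matrix is block diagonal."""
    kern = RBFJump(length_scale=1.0, jump=np.array([0.0]))
    K = kern(np.array([[-1.0], [-0.5], [0.5], [1.0]]))
    assert np.all(K[:2, 2:] == 0.0) and np.all(K[2:, :2] == 0.0)
    assert K[0, 1] > 0.0 and K[2, 3] > 0.0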
from sklearn.gaussian_process.kernels import Kernel
class ConditionalKernel(RBFJump):
R"""
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-05, 100000.0), jump=None, X=None, dim=None):
super().__init__(length_scale=length_scale, length_scale_bounds=length_scale_bounds, jump=jump)
# self.k = k
self.X = X
self.dim = dim
def __call__(self, X, Y=None, eval_gradient=False):
X_cond = self.X
if self.dim is not None:
X = X[:, [self.dim]]
if Y is not None:
Y = Y[:, [self.dim]]
if self.X is None:
return super().__call__(X, Y, eval_gradient)
K_nn = super().__call__(X, Y, eval_gradient)
K_oo = super().__call__(X_cond, eval_gradient=eval_gradient)
K_no = super().__call__(X, X_cond, eval_gradient=False)
if eval_gradient:
K_nn, dK_nn = K_nn
K_oo, dK_oo = K_oo
# K_no, dK_no = K_no
alpha = np.linalg.solve(K_oo, K_no.T)
K = K_nn - K_no @ alpha
if eval_gradient:
# print(dK_oo.shape, K_no.shape)
dK = dK_nn
# d_alpha = np.linalg.solve(dK_oo.T, K_no)
# dK = dK_nn - np.einsum('ij,kjl,ilk', K_no, d_alpha)
# dK -= 2 * np.einsum('ijk,il->ilk', dK_no, alpha)
return K, dK
return K
# def diag(self, X):
# return self.k.diag(X)
#
# def is_stationary(self):
# return self.k.is_stationary()
#
# def __repr__(self):
# return repr(self.k)
|
{"hexsha": "5dce49741ed41519529ab27946095fd786abe07f", "size": 33825, "ext": "py", "lang": "Python", "max_stars_repo_path": "compton/convergence.py", "max_stars_repo_name": "buqeye/compton-scattering", "max_stars_repo_head_hexsha": "867703fc21e75155af50d543b61f794dc5bfe5a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-15T19:09:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-22T15:24:27.000Z", "max_issues_repo_path": "compton/convergence.py", "max_issues_repo_name": "buqeye/compton-scattering", "max_issues_repo_head_hexsha": "867703fc21e75155af50d543b61f794dc5bfe5a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compton/convergence.py", "max_forks_repo_name": "buqeye/compton-scattering", "max_forks_repo_head_hexsha": "867703fc21e75155af50d543b61f794dc5bfe5a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8315677966, "max_line_length": 121, "alphanum_fraction": 0.5785365854, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 9367}
|
from __future__ import absolute_import
import os.path
import numpy as np
from PIL import Image
import Levenshtein
from ocrd_utils import (
getLogger, concat_padded,
coordinates_for_segment,
polygon_from_bbox,
points_from_polygon,
MIMETYPE_PAGE
)
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import to_xml, TextEquivType, CoordsType, GlyphType, WordType
from ocrd import Processor
from .. import get_ocrd_tool
from .ocrolib import lstm, load_object, midrange
from .common import (
pil2array,
check_line
)
LOG = getLogger('processor.OcropyRecognize')
def resize_keep_ratio(image, baseheight=48):
scale = baseheight / image.height
wsize = round(image.width * scale)
    image = image.resize((wsize, baseheight), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
return image, scale
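def _example_resize_keep_ratio():
    """Sketch (not part of the original processor): a 100x20 line image is
    scaled to height 48 with the aspect ratio preserved."""
    img = Image.new('L', (100, 20), color=255)
    resized, scale = resize_keep_ratio(img)
    assert resized.size == (240, 48) and scale == 2.4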
# from ocropus-rpred process1, but without input files and without lineest/dewarping
def recognize(image, pad, network, check=True):
line = pil2array(image)
binary = np.array(line <= midrange(line), np.uint8)
raw_line = line.copy()
# validate:
if np.prod(line.shape) == 0:
raise Exception('image dimensions are zero')
if np.amax(line) == np.amin(line):
raise Exception('image is blank')
if check:
report = check_line(binary)
if report:
raise Exception(report)
# recognize:
line = lstm.prepare_line(line, pad)
pred = network.predictString(line)
# getting confidence
result = lstm.translate_back(network.outputs, pos=1)
scale = len(raw_line.T)*1.0/(len(network.outputs)-2*pad)
clist = []
rlist = []
confidlist = []
for r, c in result:
if c != 0:
confid = network.outputs[r, c]
c = network.l2s([c])
r = (r-pad)*scale
confidlist.append(confid)
clist.append(c)
rlist.append(r)
return str(pred), clist, rlist, confidlist
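# Sketch of standalone use (hedged: requires a trained ocropy model on disk;
# the path below is a placeholder):
#   network = load_object('/path/to/model.pyrnn.gz', verbose=0)
#   text, chars, positions, confids = recognize(line_image, pad=16, network=network)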
class OcropyRecognize(Processor):
def __init__(self, *args, **kwargs):
self.ocrd_tool = get_ocrd_tool()
kwargs['ocrd_tool'] = self.ocrd_tool['tools']['ocrd-cis-ocropy-recognize']
kwargs['version'] = self.ocrd_tool['version']
super(OcropyRecognize, self).__init__(*args, **kwargs)
# from ocropus-rpred:
self.network = load_object(self.get_model(), verbose=1)
for x in self.network.walk():
x.postLoad()
for x in self.network.walk():
if isinstance(x, lstm.LSTM):
x.allocate(5000)
self.pad = 16 # ocropus-rpred default
def get_model(self):
"""Search for the model file. First checks if
parameter['model'] is a valid readeable file and returns it.
If not, it checks if the model can be found in the
dirname(__file__)/models/ directory."""
canread = lambda p: os.path.isfile(p) and os.access(p, os.R_OK)
model = self.parameter['model']
if canread(model):
return model
ocropydir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(ocropydir, 'models', model)
if canread(path):
return path
raise FileNotFoundError("cannot find model: " + model)
def process(self):
"""Recognize lines / words / glyphs of the workspace.
Open and deserialise each PAGE input file and its respective image,
then iterate over the element hierarchy down to the requested
``textequiv_level``. If any layout annotation below the line level
already exists, then remove it (regardless of ``textequiv_level``).
Set up Ocropy to recognise each text line (via coordinates into
the higher-level image, or from the alternative image; the image
must have been binarised/grayscale-normalised, deskewed and dewarped
already). Rescale and pad the image, then recognize.
Create new elements below the line level, if necessary.
Put text results and confidence values into new TextEquiv at
``textequiv_level``, and make the higher levels consistent with that
up to the line level (by concatenation joined by whitespace).
If a TextLine contained any previous text annotation, then compare
that with the new result by aligning characters and computing the
Levenshtein distance. Aggregate these scores for each file and print
the line-wise and the total character error rates (CER).
Produce a new output file by serialising the resulting hierarchy.
"""
maxlevel = self.parameter['textequiv_level']
# LOG.info("Using model %s in %s for recognition", model)
for (n, input_file) in enumerate(self.input_files):
LOG.info("INPUT FILE %i / %s", n, input_file.pageId or input_file.ID)
pcgts = page_from_file(self.workspace.download_file(input_file))
page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)
page = pcgts.get_Page()
page_image, page_coords, _ = self.workspace.image_from_page(
page, page_id)
LOG.info("Recognizing text in page '%s'", page_id)
# region, line, word, or glyph level:
regions = page.get_TextRegion()
if not regions:
LOG.warning("Page '%s' contains no text regions", page_id)
self.process_regions(regions, maxlevel, page_image, page_coords)
# update METS (add the PAGE file):
file_id = input_file.ID.replace(self.input_file_grp,
self.output_file_grp)
if file_id == input_file.ID:
file_id = concat_padded(self.output_file_grp, n)
file_path = os.path.join(self.output_file_grp,
file_id + '.xml')
out = self.workspace.add_file(
ID=file_id,
file_grp=self.output_file_grp,
pageId=input_file.pageId,
local_filename=file_path,
mimetype=MIMETYPE_PAGE,
content=to_xml(pcgts))
LOG.info('created file ID: %s, file_grp: %s, path: %s',
file_id, self.output_file_grp, out.local_filename)
def process_regions(self, regions, maxlevel, page_image, page_coords):
edits = 0
lengs = 0
for region in regions:
region_image, region_coords = self.workspace.image_from_segment(
region, page_image, page_coords)
LOG.info("Recognizing text in region '%s'", region.id)
textlines = region.get_TextLine()
if not textlines:
LOG.warning("Region '%s' contains no text lines", region.id)
else:
edits_, lengs_ = self.process_lines(textlines, maxlevel, region_image, region_coords)
edits += edits_
lengs += lengs_
if lengs > 0:
LOG.info('CER: %.1f%%', 100.0 * edits / lengs)
def process_lines(self, textlines, maxlevel, region_image, region_coords):
edits = 0
lengs = 0
for line in textlines:
line_image, line_coords = self.workspace.image_from_segment(
line, region_image, region_coords)
LOG.info("Recognizing text in line '%s'", line.id)
if line.get_TextEquiv():
linegt = line.TextEquiv[0].Unicode
else:
linegt = ''
LOG.debug("GT '%s': '%s'", line.id, linegt)
# remove existing annotation below line level:
line.set_TextEquiv([])
line.set_Word([])
if line_image.size[1] < 16:
LOG.debug("ERROR: bounding box is too narrow at line %s", line.id)
continue
# resize image to 48 pixel height
final_img, scale = resize_keep_ratio(line_image)
# process ocropy:
try:
linepred, clist, rlist, confidlist = recognize(
final_img, self.pad, self.network, check=True)
except Exception as err:
                LOG.error('error processing line "%s": %s', line.id, err)
continue
LOG.debug("OCR '%s': '%s'", line.id, linepred)
edits += Levenshtein.distance(linepred, linegt)
lengs += len(linegt)
words = [x.strip() for x in linepred.split(' ') if x.strip()]
word_r_list = [[0]] # r-positions of every glyph in every word
word_conf_list = [[]] # confidences of every glyph in every word
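            # Sketch of the bookkeeping below: for clist = ['a', 'b', ' ', 'c']
            # with r-positions [r0, r1, r2, r3], the loop yields
            # word_r_list = [[0, r0, r1], [r2, r3]], so word k spans from
            # word_r_list[k][0] to word_r_list[k][-1] in line coordinates.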
            if words:
w_no = 0
found_char = False
for i, c in enumerate(clist):
if c != ' ':
found_char = True
word_conf_list[w_no].append(confidlist[i])
word_r_list[w_no].append(rlist[i])
if c == ' ' and found_char:
if i == 0:
word_r_list[0][0] = rlist[i]
elif i+1 <= len(clist)-1 and clist[i+1] != ' ':
word_conf_list.append([])
word_r_list.append([rlist[i]])
w_no += 1
else:
word_conf_list = [[0]]
word_r_list = [[0, line_image.width]]
# conf for each word
wordsconf = [(min(x)+max(x))/2 for x in word_conf_list]
# conf for the line
line_conf = (min(wordsconf) + max(wordsconf))/2
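            # Midrange heuristic: confidences are aggregated as (min+max)/2, so
            # glyph confidences [0.6, 0.9, 0.8] give a word confidence of 0.75.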
# line text
line.add_TextEquiv(TextEquivType(
Unicode=linepred, conf=line_conf))
if maxlevel in ['word', 'glyph']:
for word_no, word_str in enumerate(words):
word_points = points_from_polygon(
coordinates_for_segment(
np.array(polygon_from_bbox(
word_r_list[word_no][0] / scale,
0,
word_r_list[word_no][-1] / scale,
0 + line_image.height)),
line_image,
line_coords))
word_id = '%s_word%04d' % (line.id, word_no)
word = WordType(id=word_id, Coords=CoordsType(word_points))
line.add_Word(word)
word.add_TextEquiv(TextEquivType(
Unicode=word_str, conf=wordsconf[word_no]))
if maxlevel == 'glyph':
for glyph_no, glyph_str in enumerate(word_str):
glyph_points = points_from_polygon(
coordinates_for_segment(
np.array(polygon_from_bbox(
word_r_list[word_no][glyph_no] / scale,
0,
word_r_list[word_no][glyph_no+1] / scale,
0 + line_image.height)),
line_image,
line_coords))
glyph_id = '%s_glyph%04d' % (word.id, glyph_no)
glyph = GlyphType(id=glyph_id, Coords=CoordsType(glyph_points))
word.add_Glyph(glyph)
glyph.add_TextEquiv(TextEquivType(
Unicode=glyph_str, conf=word_conf_list[word_no][glyph_no]))
return edits, lengs
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.montgomery32_2e127m1_4limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition opp :
{ opp : feBW_small -> feBW_small
| forall a, phiM_small (opp a) = F.opp (phiM_small a) }.
Proof.
Set Ltac Profiling.
Time synthesize_opp ().
Show Ltac Profile.
Time Defined.
Print Assumptions opp.
#!/usr/bin/env python
import os
from time import time
from typing import Generator, Tuple
import numpy as np
import click
import json
from .lib import *
from cloudvolume import CloudVolume
from cloudvolume.lib import Vec, yellow
from chunkflow.lib.aws.sqs_queue import SQSQueue
from chunkflow.lib.bounding_boxes import BoundingBox, BoundingBoxes
from chunkflow.chunk import Chunk
from chunkflow.chunk.affinity_map import AffinityMap
from chunkflow.chunk.segmentation import Segmentation
from chunkflow.chunk.image.convnet.inferencer import Inferencer
from chunkflow.lib.utils import coordinates2bbox
# import operator functions
from .aggregate_skeleton_fragments import AggregateSkeletonFragmentsOperator
from .cloud_watch import CloudWatchOperator
from .read_precomputed import ReadPrecomputedOperator
from .downsample_upload import DownsampleUploadOperator
from .log_summary import load_log, print_log_statistics
from .mask import MaskOperator
from .mesh import MeshOperator
from .mesh_manifest import MeshManifestOperator
from .neuroglancer import NeuroglancerOperator
from .normalize_section_contrast import NormalizeSectionContrastOperator
from .normalize_section_shang import NormalizeSectionShangOperator
from .plugin import Plugin
from .read_pngs import read_png_images
from .write_precomputed import WritePrecomputedOperator
from .write_pngs import WritePNGsOperator
from .setup_env import setup_environment
from .skeletonize import SkeletonizeOperator
from .view import ViewOperator
@main.command('generate-tasks')
@click.option('--layer-path', '-l',
type=str, default=None,
help='dataset layer path to fetch dataset information.')
@click.option('--mip', '-m',
type=int, default=None, help='mip level of the dataset layer.')
@click.option('--roi-start', '-s',
type=int, default=None, nargs=3, callback=default_none,
help='(z y x), start of the chunks')
@click.option('--roi-stop', '-r',
type=int, nargs=3, default=None, callback=default_none,
help='stop coordinate of region of interest')
@click.option('--roi-size', '-z',
type=int, nargs=3, default=None, callback=default_none,
help='size of region of interest')
@click.option('--chunk-size', '-c',
type=int, required=True, nargs=3,
help='(z y x), size/shape of chunks')
@click.option('--grid-size', '-g',
type=int, default=None, nargs=3, callback=default_none,
help='(z y x), grid size of output blocks')
@click.option('--file-path', '-f', default=None,
              type=click.Path(writable=True, dir_okay=False, resolve_path=True),
              help='output tasks as a numpy array formatted as npy.')
@click.option('--queue-name', '-q',
type=str, default=None, help='sqs queue name')
@click.option('--respect-chunk-size/--respect-stop',
default=True, help="""for the last bounding box, \
make the chunk size consistent or cut off at the stopping boundary.""")
@click.option('--aligned-block-size', '-a',
type=int, default=None, nargs=3, callback=default_none,
help='force alignment of block size. Note that the alignment start from (0, 0, 0).')
@click.option('--task-index-start', '-i',
type=int, default=None, help='starting index of task list.')
@click.option('--task-index-stop', '-p',
type=int, default=None, help='stop index of task list.')
@click.option('--disbatch/--no-disbatch', '-d',
default=False, help='use disBatch environment variable or not')
@generator
def generate_tasks(
        layer_path: str, mip: int, roi_start: tuple, roi_stop: tuple,
        roi_size: tuple, chunk_size: tuple, grid_size: tuple, file_path: str,
        queue_name: str, respect_chunk_size: bool, aligned_block_size: tuple,
        task_index_start: int, task_index_stop: int, disbatch: bool):
    """Generate tasks."""
    if mip is None:
        mip = state['mip']
    assert mip >= 0
bboxes = BoundingBoxes.from_manual_setup(
chunk_size, layer_path=layer_path,
roi_start=roi_start, roi_stop=roi_stop,
roi_size=roi_size, mip=mip, grid_size=grid_size,
respect_chunk_size=respect_chunk_size,
aligned_block_size=aligned_block_size
)
print('total number of tasks: ', len(bboxes))
    if task_index_start is not None:
if task_index_stop is None:
task_index_stop = task_index_start + 1
bboxes = [*bboxes[task_index_start:task_index_stop]]
logging.info(f'selected task indexes from {task_index_start} to {task_index_stop}')
elif disbatch:
assert 'DISBATCH_REPEAT_INDEX' in os.environ
disbatch_index = int(os.environ['DISBATCH_REPEAT_INDEX'])
bboxes = [bboxes[disbatch_index],]
logging.info(f'selected a task with disBatch index {disbatch_index}')
# write out as a file
# this could be used for iteration in slurm cluster.
if file_path:
if not file_path.endswith('.npy'):
            file_path += f'{len(bboxes)}.npy'
bboxes.to_file(file_path)
if queue_name is not None:
queue = SQSQueue(queue_name)
queue.send_message_list(bboxes)
else:
bbox_num = len(bboxes)
for bbox_index, bbox in enumerate(bboxes):
task = get_initial_task()
task['bbox'] = bbox
task['bbox_index'] = bbox_index
task['bbox_num'] = bbox_num
task['log']['bbox'] = bbox.to_filename()
yield task
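# Illustrative invocation (a sketch; the layer path and queue name are
# hypothetical, and the usual `chunkflow` entry point is assumed):
#   chunkflow generate-tasks --layer-path file:///data/my-layer --mip 0 \
#       --roi-start 0 0 0 --roi-size 128 1024 1024 \
#       --chunk-size 64 512 512 --queue-name my-queue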
@main.command('skip-task')
@click.option('--pre', '-e', required=True, type=str,
              help='the pre part of the result file path')
@click.option('--post', '-t', required=True, type=str,
              help='the post part of the result file path. Normally includes the file extension.')
@click.option('--grow-size', '-g', default=None, type=int, callback=default_none,
              help='expand or shrink the bounding box. Currently, cloud-volume Bbox only supports symmetric growth.')
@operator
def skip_task(tasks: Generator, pre: str, post: str, grow_size: int):
"""if a result file already exists, skip this task."""
for task in tasks:
bbox = task['bbox'].clone()
if grow_size is not None:
bbox.grow(grow_size)
file_name = pre + bbox.to_filename() + post
if os.path.exists(file_name):
            print('the result file already exists, skip this task')
task = None
yield task
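# Naming sketch (assumes cloud-volume style bbox filenames such as
# "0-64_0-512_0-512"): with --pre /results/ and --post .h5 the operator looks
# for /results/0-64_0-512_0-512.h5 and skips the task when that file exists.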
@main.command('skip-all-zero')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--pre', '-e', type=str, default=None,
              help='pre-path of a file. Used to keep a trace that this task was executed.')
@click.option('--post', '-t', type=str, default=None,
              help='post-path of a file. Normally includes the extension of the result file.')
@click.option('--grow-size', '-g', type=int, default=None,
              help='change the bounding box of the chunk if it does not match the final result file name.')
@operator
def skip_all_zero(tasks, input_chunk_name: str, pre: str, post: str, grow_size: int):
"""if chunk has all zero, skip this task."""
for task in tasks:
if task is not None:
chunk = task[input_chunk_name]
if not np.any(chunk):
print('all zero chunk, skip this task')
if pre is not None:
bbox = chunk.bbox.clone()
if grow_size is not None:
# bbox.grow(grow_size)
                        # currently, cloud-volume does not support a negative grow size
bbox.minpt -= grow_size
bbox.maxpt += grow_size
fname = os.path.join(pre, f'{bbox.to_filename()}{post}')
print('create an empty file as mark: ', fname)
with open(fname, 'a'):
os.utime(fname, None)
# label task as None and task will be skipped
task = None
yield task
@main.command('setup-env')
@click.option('--volume-start', required=True, nargs=3, type=int,
help='start coordinate of output volume in mip 0')
@click.option('--volume-stop', default=None, type=int, nargs=3, callback=default_none,
help='stop coordinate of output volume (noninclusive like python coordinate) in mip 0.')
@click.option('--volume-size', '-s',
default=None, type=int, nargs=3, callback=default_none,
help='size of output volume.')
@click.option('--layer-path', '-l',
type=str, required=True, help='the path of output volume.')
@click.option('--max-ram-size', '-r',
default=15, type=int, help='the maximum ram size (GB) of worker process.')
@click.option('--output-patch-size', '-z',
type=int, required=True, nargs=3, help='output patch size.')
@click.option('--input-patch-size', '-i',
type=int, default=None, nargs=3, callback=default_none,
help='input patch size.')
@click.option('--channel-num', '-c',
type=int, default=1,
help='output patch channel number. It is 3 for affinity map.')
@click.option('--dtype', '-d', type=click.Choice(['uint8', 'float16', 'float32']),
default='float32', help='output numerical precision.')
@click.option('--output-patch-overlap', '-o',
type=int, default=None, nargs=3, callback=default_none,
help='overlap of patches. default is 50% overlap')
@click.option('--crop-chunk-margin', '-c',
type=int, nargs=3, default=None,
callback=default_none, help='size of margin to be cropped.')
@click.option('--mip', '-m', type=click.IntRange(min=0, max=3), default=0,
help='the output mip level (default is 0).')
@click.option('--thumbnail-mip', '-b', type=click.IntRange(min=5, max=16), default=6,
help='mip level of thumbnail layer.')
@click.option('--max-mip', '-x', type=click.IntRange(min=5, max=16), default=8,
help='maximum MIP level for masks.')
@click.option('--queue-name', '-q',
type=str, default=None, help='sqs queue name.')
@click.option('--visibility-timeout', '-t',
type=int, default=3600, help='visibility timeout of the AWS SQS queue.')
@click.option('--thumbnail/--no-thumbnail', default=True, help='create thumbnail or not.')
@click.option('--encoding', '-e',
type=click.Choice(['raw', 'jpeg', 'compressed_segmentation',
'fpzip', 'kempressed']), default='raw',
help='Neuroglancer precomputed block compression algorithm.')
@click.option('--voxel-size', '-v', type=int, nargs=3, default=(40, 4, 4),
help='voxel size or resolution of mip 0 image.')
@click.option('--overwrite-info/--no-overwrite-info', default=False,
help='normally we should avoid overwriting info file to avoid errors.')
@generator
def setup_env(volume_start, volume_stop, volume_size, layer_path,
max_ram_size, output_patch_size, input_patch_size, channel_num, dtype,
output_patch_overlap, crop_chunk_margin, mip, thumbnail_mip, max_mip,
queue_name, visibility_timeout, thumbnail, encoding, voxel_size,
overwrite_info):
"""Setup convolutional net inference environment."""
bboxes = setup_environment(
state['dry_run'], volume_start, volume_stop, volume_size, layer_path,
max_ram_size, output_patch_size, input_patch_size, channel_num, dtype,
output_patch_overlap, crop_chunk_margin, mip, thumbnail_mip, max_mip,
thumbnail, encoding, voxel_size, overwrite_info)
if queue_name is not None and not state['dry_run']:
queue = SQSQueue(queue_name, visibility_timeout=visibility_timeout)
queue.send_message_list(bboxes)
else:
for bbox in bboxes:
task = get_initial_task()
task['bbox'] = bbox
task['log']['bbox'] = bbox.to_filename()
yield task
@main.command('cloud-watch')
@click.option('--name',
type=str,
default='cloud-watch',
help='name of this operator')
@click.option('--log-name',
type=str,
default='chunkflow',
help='name of the speedometer')
@operator
def cloud_watch(tasks, name, log_name):
"""Real time speedometer in AWS CloudWatch."""
operator = CloudWatchOperator(log_name=log_name, name=name)
for task in tasks:
if task is not None:
operator(task['log'])
yield task
@main.command('create-info')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME,
help="create info for this chunk.")
@click.option('--output-layer-path', '-l', type=str, default="file://.",
help='path of output layer.')
@click.option('--channel-num', '-c', type=int, default=1, help='number of channels')
@click.option('--layer-type', '-t',
type=click.Choice(['image', 'segmentation']),
default=None, help='type of layer. either image or segmentation.')
@click.option('--data-type', '-d',
              type=click.Choice(['uint8', 'uint32', 'uint64', 'float32']),
              default=None, help='data type of the array')
@click.option('--encoding', '-e',
type=click.Choice(['raw', 'jpeg', 'compressed_segmentation',
'kempressed', 'npz', 'fpzip', 'npz_uint8']),
default='raw', help='compression algorithm.')
@click.option('--voxel-size', '-s', required=True, type=int, nargs=3,
help='voxel size with unit of nm')
@click.option('--voxel-offset', '-o', default=(0,0,0), type=int, nargs=3,
help='voxel offset of array')
@click.option('--volume-size', '-v',
type=int, nargs=3, default=None, callback=default_none,
help='total size of the volume.')
@click.option('--block-size', '-b',
type=int, nargs=3, required=True,
help='chunk size of each file.')
@click.option('--factor', '-f',
type=int, nargs=3, default=(2,2,2),
help='hierarchical downsampling factor')
@click.option('--max-mip', '-m',
type=int, default=0,
help = 'maximum mip level.')
@operator
def create_info(tasks, input_chunk_name: str, output_layer_path: str, channel_num: int,
                layer_type: str, data_type: str, encoding: str, voxel_size: tuple,
                voxel_offset: tuple, volume_size: tuple, block_size: tuple,
                factor: tuple, max_mip: int):
"""Create metadata for Neuroglancer Precomputed volume."""
for task in tasks:
if task is not None:
if input_chunk_name in task:
chunk = task[input_chunk_name]
if chunk.ndim == 3:
channel_num = 1
elif chunk.ndim == 4:
channel_num = chunk.shape[0]
else:
raise ValueError('chunk dimension can only be 3 or 4')
voxel_offset = chunk.voxel_offset
volume_size = chunk.shape
data_type = chunk.dtype.name
if layer_type is None:
if np.issubdtype(chunk.dtype, np.uint8) or \
np.issubdtype(chunk.dtype, np.float32) or \
np.issubdtype(chunk.dtype, np.float16):
layer_type = 'image'
else:
layer_type = 'segmentation'
assert volume_size is not None
assert data_type is not None
info = CloudVolume.create_new_info(
channel_num, layer_type=layer_type,
data_type=data_type,
encoding=encoding,
resolution=voxel_size[::-1],
voxel_offset=voxel_offset[::-1],
volume_size=volume_size[::-1],
chunk_size=block_size[::-1],
factor=Vec(factor),
max_mip=max_mip)
vol = CloudVolume(output_layer_path, info=info)
vol.commit_info()
yield task
@main.command('fetch-task-from-file')
@click.option('--file-path', '-f',
type=click.Path(file_okay=True, dir_okay=False, exists=True,
readable=True, resolve_path=True),
help='file contains bounding boxes or tasks.')
@click.option('--job-index', '-i',
type=int, default=None,
help='index of task in the tasks.')
@click.option('--slurm-job-array/--no-slurm-job-array',
default=False, help='use the slurm job array '+
'environment variable to identify task index.')
@click.option('--granularity', '-g',
type=int, default=1, help='number of tasks to do in one run.')
@generator
def fetch_task_from_file(file_path: str, job_index: int, slurm_job_array: bool, granularity: int):
    """Fetch tasks from a file of bounding boxes."""
    if slurm_job_array:
job_index = int(os.environ['SLURM_ARRAY_TASK_ID'])
assert job_index is not None
bbox_array = np.load(file_path)
task_start = job_index * granularity
task_stop = min(bbox_array.shape[0], task_start + granularity)
for idx in range(task_start, task_stop):
bbox = BoundingBox.from_list(bbox_array[idx, :])
task = get_initial_task()
task['bbox'] = bbox
yield task
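# Index arithmetic sketch: with --granularity 4 and SLURM_ARRAY_TASK_ID=2,
# task_start = 8 and task_stop = min(bbox_array.shape[0], 12), i.e. the third
# array job processes bounding boxes 8..11.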
@main.command('fetch-task-from-sqs')
@click.option('--queue-name', '-q',
type=str, default=None, help='sqs queue name')
@click.option('--visibility-timeout', '-v',
type=int, default=None,
help='visibility timeout of sqs queue; default is using the timeout of the queue.')
@click.option('--num', '-n', type=int, default=-1,
              help='fetch a limited number of tasks. ' +
              'This is useful in a local cluster to control the elapsed time per run. ' +
              'A negative value means no limit.')
@click.option('--retry-times', '-r',
type=int, default=30,
help='the times of retrying if the queue is empty.')
@generator
def fetch_task_from_sqs(queue_name, visibility_timeout, num, retry_times):
"""Fetch task from queue."""
    # This operator is actually a generator:
    # it replaces the incoming task stream with a completely new one and loops over it!
queue = SQSQueue(queue_name,
visibility_timeout=visibility_timeout,
retry_times=retry_times)
    while num != 0:
task_handle, bbox_str = queue.handle_and_message
if task_handle is None:
return
num -= 1
print('get task: ', bbox_str)
bbox = BoundingBox.from_filename(bbox_str)
# record the task handle to delete after the processing
task = get_initial_task()
task['queue'] = queue
task['task_handle'] = task_handle
task['bbox'] = bbox
task['log']['bbox'] = bbox.to_filename()
yield task
@main.command('aggregate-skeleton-fragments')
@click.option('--name', type=str, default='aggregate-skeleton-fragments',
help='name of operator')
@click.option('--input-name', '-i', type=str, default='prefix',
help='input prefix name in task stream.')
@click.option('--prefix', '-p', type=str, default=None,
help='prefix of skeleton fragments.')
@click.option('--fragments-path', '-f', type=str, required=True,
help='storage path of skeleton fragments.')
@click.option('--output-path', '-o', type=str, default=None,
help='storage path of aggregated skeletons.')
@operator
def aggregate_skeleton_fragments(tasks, name, input_name, prefix, fragments_path, output_path):
"""Merge skeleton fragments."""
if output_path is None:
output_path = fragments_path
operator = AggregateSkeletonFragmentsOperator(fragments_path, output_path)
if prefix:
operator(prefix)
else:
for task in tasks:
if task is not None:
start = time()
operator(task[input_name])
task['log']['timer'][name] = time() - start
yield task
@main.command('create-chunk')
@click.option('--name',
type=str,
default='create-chunk',
help='name of operator')
@click.option('--size', '-s',
type=int, nargs=3, default=(64, 64, 64), help='the size of created chunk')
@click.option('--dtype',
type=click.Choice(
['uint8', 'uint32', 'uint16', 'float32', 'float64']),
default='uint8', help='the data type of chunk')
@click.option('--all-zero/--not-all-zero', default=False, help='all zero or not.')
@click.option('--voxel-offset', '-t',
type=int, nargs=3, default=(0, 0, 0), help='offset in voxel number.')
@click.option('--voxel-size', '-e',
type=int, nargs=3, default=(1,1,1), help='voxel size in nm')
@click.option('--output-chunk-name', '-o',
type=str, default="chunk", help="name of created chunk")
@operator
def create_chunk(tasks, name, size, dtype, all_zero, voxel_offset, voxel_size, output_chunk_name):
"""Create a fake chunk for easy test."""
print("creating chunk: ", output_chunk_name)
for task in tasks:
if task is not None:
task[output_chunk_name] = Chunk.create(
size=size, dtype=np.dtype(dtype),
all_zero = all_zero,
voxel_offset=voxel_offset,
voxel_size=voxel_size)
yield task
@main.command('read-nrrd')
@click.option('--name', type=str, default='read-nrrd',
help='read nrrd file from local disk.')
@click.option('--file-name', '-f', required=True,
type=click.Path(exists=True, dir_okay=False),
help='read chunk from NRRD file')
@click.option('--voxel-offset', '-v', type=int, nargs=3, default=None, callback=default_none,
help='global offset of this chunk')
@click.option('--voxel-size', '-s', type=int, nargs=3, default=None, callback=default_none,
help='physical size of voxels. The unit is assumed to be nm.')
@click.option('--dtype', '-d',
type=click.Choice(['uint8', 'uint32', 'uint64', 'float32', 'float64', 'float16']),
help='convert to data type')
@click.option('--output-chunk-name', '-o', type=str, default='chunk',
help='chunk name in the global state')
@operator
def read_nrrd(tasks, name: str, file_name: str, voxel_offset: tuple,
voxel_size: tuple, dtype: str, output_chunk_name: str):
"""Read NRRD file."""
for task in tasks:
if task is not None:
start = time()
task[output_chunk_name] = Chunk.from_nrrd(
file_name,
dtype=dtype,
voxel_offset=voxel_offset,
voxel_size=voxel_size)
task['log']['timer'][name] = time() - start
yield task
@main.command('write-nrrd')
@click.option('--name', type=str, default='write-nrrd', help='name of operator')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--file-name', '-f', default=None,
type=click.Path(dir_okay=False, resolve_path=True),
help='file name of NRRD file.')
@operator
def write_nrrd(tasks, name, input_chunk_name, file_name):
"""Write chunk as a NRRD file."""
for task in tasks:
if task is not None:
task[input_chunk_name].to_nrrd(file_name)
yield task
@main.command('read-pngs')
@click.option('--path-prefix', '-p',
required=True, type=str,
help='directory path prefix of png files.')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME,
help='output chunk name')
@click.option('--cutout-offset', '-c',
type=int, default=(0,0,0), nargs=3,
help='cutout chunk from an offset')
@click.option('--volume-offset', '-t',
type=int, nargs=3, default=(0,0,0),
              help='the offset of the png image volume; can be negative.')
@click.option('--voxel-size', '-x', type=int, nargs=3, default=None, callback=default_none,
help='physical size of voxels. the unit is assumed to be nm.')
@click.option('--chunk-size', '-s',
type=int, nargs=3, default=None, callback=default_none,
help='cutout chunk size')
@operator
def read_pngs(tasks, path_prefix, output_chunk_name, cutout_offset,
volume_offset, voxel_size, chunk_size):
"""Read a serials of png files."""
for task in tasks:
if task is not None:
if chunk_size is None:
assert 'bbox' in task, "no chunk_size, we are looking for bounding box in task"
bbox = task['bbox']
else:
bbox = BoundingBox.from_delta(cutout_offset, chunk_size)
task[output_chunk_name] = read_png_images(
path_prefix, bbox,
volume_offset=volume_offset,
voxel_size=voxel_size)
yield task
@main.command('read-tif')
@click.option('--name', type=str, default='read-tif',
help='read tif file from local disk.')
@click.option('--file-name', '-f', required=True,
type=click.Path(exists=True, dir_okay=False),
help='read chunk from TIFF file.')
@click.option('--voxel-offset', '-v', type=int, nargs=3, callback=default_none,
help='global offset of this chunk')
@click.option('--voxel-size', '-s', type=int, nargs=3, default=None, callback=default_none,
help='physical size of voxels. The unit is assumed to be nm.')
@click.option('--dtype', '-d',
type=click.Choice(['uint8', 'uint32', 'uint64', 'float32', 'float64', 'float16']),
help='convert to data type')
@click.option('--output-chunk-name', '-o', type=str, default='chunk',
help='chunk name in the global state')
@operator
def read_tif(tasks, name: str, file_name: str, voxel_offset: tuple,
voxel_size: tuple, dtype: str, output_chunk_name: str):
"""Read tiff files."""
for task in tasks:
if task is not None:
start = time()
task[output_chunk_name] = Chunk.from_tif(
file_name,
dtype=dtype,
voxel_offset=voxel_offset,
voxel_size=voxel_size)
task['log']['timer'][name] = time() - start
yield task
@main.command('write-tif')
@click.option('--name', type=str, default='write-tif', help='name of operator')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--file-name', '-f', default=None,
type=click.Path(dir_okay=False, resolve_path=True),
              help='file name of the tif file; the extension should be .tif or .tiff')
@operator
def write_tif(tasks, name, input_chunk_name, file_name):
"""Write chunk as a TIF file."""
for task in tasks:
if task is not None:
task[input_chunk_name].to_tif(file_name)
yield task
@main.command('read-h5')
@click.option('--name', type=str, default='read-h5',
help='read file from local disk.')
@click.option('--file-name', '-f', type=str, required=True,
              help='read chunk from file; supports .h5')
@click.option('--dataset-path', '-d', type=str, default=None,
help='the dataset path inside HDF5 file.')
@click.option('--dtype', '-e',
type=click.Choice(['float32', 'float64', 'uint32', 'uint64', 'uint8']),
default=None, help='transform data type.')
@click.option('--voxel-offset', '-v', type=int, nargs=3, default=None,
callback=default_none, help='voxel offset of the dataset in hdf5 file.')
@click.option('--voxel-size', '-x', type=int, nargs=3, default=None,
callback=default_none, help='physical size of voxels. The unit is assumed to be nm.')
@click.option('--cutout-start', '-t', type=int, nargs=3, callback=default_none,
help='cutout voxel offset in the array')
@click.option('--cutout-stop', '-p', type=int, nargs=3, callback=default_none,
              help='cutout stop coordinate.')
@click.option('--cutout-size', '-s', type=int, nargs=3, callback=default_none,
help='cutout size of the chunk.')
@click.option('--zero-filling/--no-zero-filling', default=False, type=bool,
help='if no such file, fill with zero.')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME,
help='chunk name in the global state')
@operator
def read_h5(tasks, name: str, file_name: str, dataset_path: str,
dtype: str, voxel_offset: tuple, voxel_size: tuple, cutout_start: tuple,
cutout_stop: tuple, cutout_size: tuple, zero_filling: bool, output_chunk_name: str):
"""Read HDF5 files."""
for task in tasks:
if task is not None:
start = time()
            if 'bbox' in task and cutout_start is None:
                bbox = task['bbox']
                print('bbox: ', bbox)
                cutout_start_tmp = bbox.minpt
                cutout_stop_tmp = bbox.maxpt
                cutout_size_tmp = cutout_stop_tmp - cutout_start_tmp
            else:
                # fall back to the command-line cutout parameters
                cutout_start_tmp = cutout_start
                cutout_stop_tmp = cutout_stop
                cutout_size_tmp = cutout_size
chunk = Chunk.from_h5(
file_name,
dataset_path=dataset_path,
voxel_offset=voxel_offset,
voxel_size=voxel_size,
cutout_start=cutout_start_tmp,
cutout_size=cutout_size_tmp,
cutout_stop=cutout_stop_tmp,
zero_filling = zero_filling,
dtype=dtype,
)
if dtype is not None:
chunk = chunk.astype(dtype)
task[output_chunk_name] = chunk
# make a bounding box for others operators to follow
if 'bbox' not in task:
task['bbox'] = chunk.bbox
task['log']['timer'][name] = time() - start
yield task
@main.command('write-h5')
@click.option('--input-chunk-name', '-i',
type=str, default='chunk', help='input chunk name')
@click.option('--file-name', '-f',
type=click.Path(dir_okay=True, resolve_path=False), required=True,
help='file name of hdf5 file.')
@click.option('--chunk-size', '-s', type=int, nargs=3,
default=None, callback=default_none,
help='save the big volume as chunks.')
@click.option('--compression', '-c', type=click.Choice(["gzip", "lzf", "szip"]),
default="gzip", help="compression used in the dataset.")
@click.option('--with-offset/--without-offset', default=True, type=bool,
help='add voxel_offset dataset or not.')
@click.option('--voxel-size', '-v',
default=None, type=int, callback=default_none, nargs=3,
help='voxel size of this chunk.'
)
@operator
def write_h5(tasks, input_chunk_name, file_name, chunk_size, compression, with_offset, voxel_size):
"""Write chunk to HDF5 file."""
for task in tasks:
if task is not None:
task[input_chunk_name].to_h5(
file_name, with_offset,
chunk_size=chunk_size,
compression=compression,
voxel_size=voxel_size)
yield task
@main.command('write-pngs')
@click.option('--name', type=str, default='write-pngs', help='name of operator')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--output-path', '-o',
              type=str, default='./pngs/', help='output path of saved 2d images formatted as png.')
@operator
def write_pngs(tasks, name, input_chunk_name, output_path):
"""Save as 2D PNG images."""
operator = WritePNGsOperator(output_path=output_path,
name=name)
for task in tasks:
if task is not None:
operator(task[input_chunk_name])
yield task
@main.command('skeletonize')
@click.option('--name', '-n', type=str, default='skeletonize',
help='create centerlines of objects in a segmentation chunk.')
@click.option('--input-chunk-name', '-i', type=str, default=DEFAULT_CHUNK_NAME,
help='input chunk name.')
@click.option('--output-name', '-o', type=str, default='skeletons')
@click.option('--voxel-size', type=int, nargs=3, required=True,
help='voxel size of segmentation chunk (zyx order)')
@click.option('--output-path', type=str, required=True,
help='output path with protocols, such as file:///bucket/my/path')
@operator
def skeletonize(tasks, name, input_chunk_name, output_name, voxel_size, output_path):
"""Skeletonize the neurons/objects in a segmentation chunk"""
operator = SkeletonizeOperator(output_path, name=name)
for task in tasks:
if task is not None:
seg = task[input_chunk_name]
skels = operator(seg, voxel_size)
task[output_name] = skels
yield task
@main.command('delete-task-in-queue')
@click.option('--name', type=str, default='delete-task-in-queue',
help='name of this operator')
@operator
def delete_task_in_queue(tasks, name):
"""Delete the task in queue."""
for task in tasks:
if task is not None:
if state['dry_run']:
print('skip deleting task in queue!')
else:
queue = task['queue']
task_handle = task['task_handle']
queue.delete(task_handle)
print('deleted task {} in queue: {}'.format(
task_handle, queue.queue_name))
yield task
@main.command('delete-chunk')
@click.option('--name', type=str, default='delete-var', help='delete variable/chunk in task')
@click.option('--chunk-name', '-c',
              type=str, required=True, help='the name of the chunk to be deleted')
@operator
def delete_chunk(tasks, name, chunk_name):
"""Delete a Chunk in task to release RAM"""
for task in tasks:
if task is not None:
logging.info(f'delete chunk: {chunk_name}')
del task[chunk_name]
yield task
@main.command('read-precomputed')
@click.option('--name',
type=str, default='read-precomputed', help='name of this operator')
@click.option('--volume-path', '-v',
type=str, required=True, help='volume path')
@click.option('--mip', '-m',
type=int, default=None, help='mip level of the cutout.')
@click.option('--expand-margin-size', '-e',
type=int, nargs=3, default=(0, 0, 0),
help='include surrounding regions of output bounding box.')
@click.option('--chunk-start', '-s',
type=int, nargs=3, default=None, callback=default_none,
help='chunk offset in volume.')
@click.option('--chunk-size', '-z',
type=int, nargs=3, default=None, callback=default_none,
help='cutout chunk size.')
@click.option('--fill-missing/--no-fill-missing',
default=False, help='fill the missing chunks in input volume with zeros ' +
'or not, default is false')
@click.option('--validate-mip',
type=int, default=None, help='validate chunk using higher mip level')
@click.option('--blackout-sections/--no-blackout-sections',
              default=False, help='blackout some sections. ' +
              'The section ids json file should be named blackout_section_ids.json. Default is False.')
@click.option(
'--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME,
    help='Variable name to store the cutout to for later retrieval. '
    + 'Chunkflow operators by default operate on a variable named "chunk", but' +
    ' sometimes you may need to have a secondary volume to work on.'
)
@operator
def read_precomputed(tasks, name, volume_path, mip, chunk_start, chunk_size, expand_margin_size,
fill_missing, validate_mip, blackout_sections, output_chunk_name):
"""Cutout chunk from volume."""
if mip is None:
mip = state['mip']
assert mip >= 0
operator = ReadPrecomputedOperator(
volume_path,
mip=mip,
expand_margin_size=expand_margin_size,
fill_missing=fill_missing,
validate_mip=validate_mip,
blackout_sections=blackout_sections,
dry_run=state['dry_run'],
name=name)
for task in tasks:
if task is not None:
if 'bbox' in task:
bbox = task['bbox']
else:
# use bounding box of volume
if chunk_start is None:
chunk_start = operator.vol.mip_bounds(mip).minpt[::-1]
else:
chunk_start = Vec(*chunk_start)
if chunk_size is None:
chunk_stop = operator.vol.mip_bounds(mip).maxpt[::-1]
chunk_size = chunk_stop - chunk_start
else:
chunk_size = Vec(*chunk_size)
bbox = BoundingBox.from_delta(chunk_start, chunk_size)
start = time()
assert output_chunk_name not in task
task[output_chunk_name] = operator(bbox)
task['log']['timer'][name] = time() - start
task['cutout_volume_path'] = volume_path
yield task
@main.command('remap-segmentation')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name.')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME, help='output chunk name.')
@operator
def remap_segmentation(tasks, input_chunk_name, output_chunk_name):
"""Renumber a serials of chunks."""
# state['remap_start_id'] = 0
start_id = 0
for task in tasks:
if task is not None:
seg = task[input_chunk_name]
assert seg.is_segmentation
if not isinstance(seg, Segmentation):
seg = Segmentation.from_chunk(seg)
seg, start_id = seg.remap(start_id)
task[output_chunk_name] = seg
yield task
@main.command('evaluate-segmentation')
@click.option("--segmentation-chunk-name",
"-s",
type=str,
default="chunk",
help="chunk name of segmentation")
@click.option("--groundtruth-chunk-name",
"-g",
type=str,
default="groundtruth")
@click.option('--output', '-o',
type=str, default='seg_score',
help='segmentation evaluation result name.')
@operator
def evaluate_segmentation(tasks, segmentation_chunk_name,
groundtruth_chunk_name, output):
"""Evaluate segmentation by split/merge error.
"""
for task in tasks:
if task is not None:
seg = Segmentation(task[segmentation_chunk_name])
groundtruth = Segmentation(task[groundtruth_chunk_name])
task[output] = seg.evaluate(groundtruth)
yield task
@main.command('downsample-upload')
@click.option('--name',
type=str, default='downsample-upload', help='name of operator')
@click.option('--input-chunk-name', '-i',
type=str, default='chunk', help='input chunk name')
@click.option('--volume-path', '-v', type=str, help='path of output volume')
@click.option('--factor', '-f', type=int, nargs=3, default=(2, 2, 2),
help='downsampling factor in z,y,x.')
@click.option('--chunk-mip', '-c', type=int, default=None, help='input chunk mip level')
@click.option('--start-mip', '-s',
type=int, default=None, help='the start uploading mip level.')
@click.option('--stop-mip', '-p',
type=int, default=5, help='stop mip level. the indexing follows python style and ' +
'the last index is exclusive.')
@click.option('--fill-missing/--no-fill-missing',
default=True, help='fill missing or not when there is all zero blocks.')
@operator
def downsample_upload(tasks, name, input_chunk_name, volume_path,
factor, chunk_mip, start_mip, stop_mip, fill_missing):
"""Downsample chunk and upload to volume."""
if chunk_mip is None:
chunk_mip = state['mip']
operator = DownsampleUploadOperator(
volume_path,
factor=factor,
chunk_mip=chunk_mip,
start_mip=start_mip,
stop_mip=stop_mip,
fill_missing=fill_missing,
name=name)
for task in tasks:
if task is not None:
start = time()
operator(task[input_chunk_name])
task['log']['timer'][name] = time() - start
yield task
@main.command('gaussian-filter')
@click.option('--name', type=str, default='gaussian-filter', help='name of operator')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--sigma', '-s',
type=int, default=1, help='standard deviation of gaussian kernel')
@operator
def gaussian_filter(tasks, name, input_chunk_name, sigma):
"""2D Gaussian blurring operated in-place."""
for task in tasks:
if task is not None:
start = time()
chunk = task[input_chunk_name]
chunk.gaussian_filter_2d(sigma)
task['log']['timer'][name] = time() - start
yield task
@main.command('log-summary')
@click.option('--log-dir', '-l',
type=click.Path(exists=True, dir_okay=True, readable=True),
default='./log', help='directory of json log files.')
@click.option('--output-size', '-s',
              type=int, nargs=3, default=None, callback=default_none,
              help='output size for each task. Will be used for computing speed.')
@generator
def log_summary(log_dir, output_size):
"""Compute the statistics of large scale run."""
df = load_log(log_dir)
print_log_statistics(df, output_size=output_size)
task = get_initial_task()
yield task
@main.command('normalize-intensity')
@click.option('--name', type=str, default='normalize-intensity', help='name of operator')
@click.option('--input-chunk-name', '-i', type=str,
default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--output-chunk-name', '-o', type=str,
default=DEFAULT_CHUNK_NAME, help='output chunk name')
@operator
def normalize_intensity(tasks, name, input_chunk_name, output_chunk_name):
"""transform gray image to float (-1:1). x=(x-127.5) - 1.0"""
for task in tasks:
if task is not None:
start = time()
chunk = task[input_chunk_name]
assert np.issubdtype(chunk.dtype, np.uint8)
chunk = chunk.astype('float32')
chunk /= 127.5
chunk -= 1.0
task[output_chunk_name] = chunk
task['log']['timer'][name] = time() - start
yield task
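# Worked example of the mapping above: uint8 value 0 maps to 0/127.5 - 1.0
# = -1.0, 255 maps to +1.0, and mid-gray 127 maps to about -0.004.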
@main.command('normalize-contrast-nkem')
@click.option('--name', type=str, default='normalize-contrast-nkem',
help='name of operator.')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME, help='output chunk name')
@click.option('--levels-path', '-p', type=str, required=True,
help='the path of section histograms.')
@click.option('--lower-clip-fraction', '-l', type=float, default=0.01,
help='lower intensity fraction to clip out.')
@click.option('--upper-clip-fraction', '-u', type=float, default=0.01,
help='upper intensity fraction to clip out.')
@click.option('--minval', type=int, default=1,
help='the minimum intensity of transformed chunk.')
@click.option('--maxval', type=int, default=255,
help='the maximum intensity of transformed chunk.')
@operator
def normalize_contrast_nkem(tasks, name, input_chunk_name, output_chunk_name,
levels_path, lower_clip_fraction,
upper_clip_fraction, minval, maxval):
"""Normalize the section contrast using precomputed histograms."""
operator = NormalizeSectionContrastOperator(
levels_path,
lower_clip_fraction=lower_clip_fraction,
upper_clip_fraction=upper_clip_fraction,
minval=minval, maxval=maxval, name=name)
for task in tasks:
if task is not None:
start = time()
task[output_chunk_name] = operator(task[input_chunk_name])
task['log']['timer'][name] = time() - start
yield task
@main.command('normalize-section-shang')
@click.option('--name',
type=str,
              default='normalize-section-shang',
help='name of operator.')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME, help='output chunk name')
@click.option('--nominalmin',
type=float,
default=None,
help='targeted minimum of transformed chunk.')
@click.option('--nominalmax',
type=float,
default=None,
help='targeted maximum of transformed chunk.')
@click.option('--clipvalues',
type=bool,
default=False,
help='clip transformed values to be within the target range.')
@operator
def normalize_section_shang(tasks, name, input_chunk_name, output_chunk_name,
nominalmin, nominalmax, clipvalues):
"""Normalize voxel values based on slice min/max within the chunk, Shang's method.
The transformed chunk has floating point values.
"""
operator = NormalizeSectionShangOperator(
nominalmin=nominalmin,
nominalmax=nominalmax,
clipvalues=clipvalues,
name=name)
for task in tasks:
if task is not None:
start = time()
task[output_chunk_name] = operator(task[input_chunk_name])
task['log']['timer'][name] = time() - start
yield task
@main.command('plugin')
@click.option('--name',
type=str,
default='plugin-1',
help='name of plugin. Multiple plugins should have different names.')
@click.option('--input-names', '-i',
type=str, default=None, help='input names with delimiter of comma')
@click.option('--output-names', '-o',
type=str, default=None, help='output names with dilimiter of comma')
@click.option('--file', '-f', type=str, help='''python file to call.
If it is just a name rather than full path,
we\'ll look for it in the plugin folder.''')
@click.option('--args', '-a',
type=str, default=None,
              help='arguments of the plugin; this string should be interpreted inside the plugin.')
@operator
def plugin(tasks, name: str, input_names: str, output_names: str, file: str, args: str):
"""Insert custom program as a plugin.
The custom python file should contain a callable named "exec" such that
a call of `exec(chunk, args)` can be made to operate on the chunk.
"""
operator = Plugin(file, name=name)
for task in tasks:
if task is not None:
start = time()
if input_names is not None:
input_name_list = input_names.split(',')
inputs = [task[i] for i in input_name_list]
else:
inputs = []
outputs = operator(inputs, args=args)
if outputs is not None:
output_name_list = output_names.split(',')
assert len(outputs) == len(output_name_list)
for output_name, output in zip(output_name_list, outputs):
task[output_name] = output
else:
assert output_names is None
task['log']['timer'][name] = time() - start
yield task
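# A minimal plugin sketch (hypothetical file my_plugin.py; only the "exec"
# callable is part of the documented contract above, and returning a list is
# assumed so the outputs can be zipped with --output-names):
#
#   def exec(chunk, args=None):
#       # invert a uint8 image chunk and return it as the single output
#       chunk.array[:] = 255 - chunk.array
#       return [chunk]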
@main.command('connected-components')
@click.option('--name', type=str, default='connected-components',
help='threshold a map and get the labels.')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME,
help='input chunk name')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME,
help='output chunk name')
@click.option('--threshold', '-t', type=float, default=None,
help='threshold to cut the map.')
@click.option('--connectivity', '-c',
type=click.Choice(['6', '18', '26']),
default='6', help='number of neighboring voxels used. Default is 6.')
@operator
def connected_components(tasks, name, input_chunk_name, output_chunk_name,
threshold, connectivity):
"""Threshold the probability map to get a segmentation."""
connectivity = int(connectivity)
for task in tasks:
if task is not None:
start = time()
task[output_chunk_name] = task[input_chunk_name].connected_component(
threshold=threshold, connectivity=connectivity)
            task['log']['timer'][name] = time() - start
yield task
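# Connectivity reminder: 6 counts face-adjacent voxels only, 18 adds
# edge-adjacent voxels, and 26 also includes corner-adjacent voxels.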
@main.command('copy-var')
@click.option('--name', type=str, default='copy-var-1', help='name of step')
@click.option('--from-name',
type=str,
default='chunk',
help='Variable to be (shallow) copied/"renamed"')
@click.option('--to-name', type=str, default='chunk', help='New variable name')
@operator
def copy_var(tasks, name, from_name, to_name):
"""Copy a variable to a new name.
"""
for task in tasks:
if task is not None:
task[to_name] = task[from_name]
yield task
@main.command('inference')
@click.option('--name', type=str, default='inference',
help='name of this operator')
@click.option('--convnet-model', '-m',
type=str, default=None, help='convnet model path or type.')
@click.option('--convnet-weight-path', '-w',
type=str, default=None, help='convnet weight path')
@click.option('--input-patch-size', '-s',
type=int, nargs=3, required=True, help='input patch size')
@click.option('--output-patch-size', '-z', type=int, nargs=3, default=None,
callback=default_none, help='output patch size')
@click.option('--output-patch-overlap', '-v', type=int, nargs=3,
default=(4, 64, 64), help='patch overlap')
@click.option('--output-crop-margin', type=int, nargs=3,
default=None, callback=default_none, help='margin size of output cropping.')
@click.option('--patch-num', '-n', default=None, callback=default_none,
type=int, nargs=3, help='patch number in z,y,x.')
@click.option('--num-output-channels', '-c',
type=int, default=3, help='number of output channels')
@click.option('--dtype', '-d', type=click.Choice(['float32', 'float16']),
default='float32', help="""Even if we perform inference using float16,
the result will still be converted to float32.""")
@click.option('--framework', '-f',
type=click.Choice(['universal', 'identity', 'pytorch']),
default='universal', help='inference framework')
@click.option('--batch-size', '-b',
type=int, default=1, help='mini batch size of input patch.')
@click.option('--bump', type=click.Choice(['wu', 'zung']), default='wu',
              help="bump function type (only 'wu' is supported for now).")
@click.option('--mask-output-chunk/--no-mask-output-chunk', default=False,
help='mask output chunk will make the whole chunk like one output patch. '
+ 'This will also work with non-aligned chunk size.')
@click.option('--mask-myelin-threshold', '-y', default=None, type=float,
              help='mask myelin if the network output has a myelin channel.')
@click.option('--input-chunk-name', '-i',
type=str, default='chunk', help='input chunk name')
@click.option('--output-chunk-name', '-o',
type=str, default='chunk', help='output chunk name')
@operator
def inference(tasks, name, convnet_model, convnet_weight_path, input_patch_size,
output_patch_size, output_patch_overlap, output_crop_margin, patch_num,
num_output_channels, dtype, framework, batch_size, bump, mask_output_chunk,
mask_myelin_threshold, input_chunk_name, output_chunk_name):
"""Perform convolutional network inference for chunks."""
with Inferencer(
convnet_model,
convnet_weight_path,
input_patch_size=input_patch_size,
output_patch_size=output_patch_size,
num_output_channels=num_output_channels,
output_patch_overlap=output_patch_overlap,
output_crop_margin=output_crop_margin,
patch_num=patch_num,
framework=framework,
dtype=dtype,
batch_size=batch_size,
bump=bump,
mask_output_chunk=mask_output_chunk,
mask_myelin_threshold=mask_myelin_threshold,
dry_run=state['dry_run']) as inferencer:
for task in tasks:
if task is not None:
if 'log' not in task:
task['log'] = {'timer': {}}
start = time()
task[output_chunk_name] = inferencer(
task[input_chunk_name])
task['log']['timer'][name] = time() - start
task['log']['compute_device'] = inferencer.compute_device
yield task
@main.command('mask')
@click.option('--name', type=str, default='mask', help='name of this operator')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME, help='output chunk name')
@click.option('--volume-path', '-v',
type=str, required=True, help='mask volume path')
@click.option('--mip', '-m',
type=int, default=5, help='mip level of mask')
@click.option('--inverse/--no-inverse',
              default=False,
              help='invert the mask or not. Default is False. ' +
              'The mask will be multiplied with the chunk.')
@click.option('--fill-missing/--no-fill-missing',
default=False,
help='fill missing blocks with black or not. ' +
'default is False.')
@operator
def mask(tasks, name, input_chunk_name, output_chunk_name, volume_path,
mip, inverse, fill_missing):
"""Mask the chunk. The mask could be in higher mip level and we
will automatically upsample it to the same mip level with chunk.
"""
operator = MaskOperator(volume_path,
mip,
state['mip'],
inverse=inverse,
fill_missing=fill_missing,
name=name)
for task in tasks:
if task is not None:
start = time()
task[output_chunk_name] = operator(task[input_chunk_name])
# Note that mask operation could be used several times,
# this will only record the last masking operation
task['log']['timer'][name] = time() - start
yield task
@main.command('mask-out-objects')
@click.option('--name', '-n', type=str, default='mask-out-objects',
help='remove some objects in segmentation chunk.')
@click.option('--input-chunk-name', '-i', type=str, default=DEFAULT_CHUNK_NAME)
@click.option('--output-chunk-name', '-o', type=str, default=DEFAULT_CHUNK_NAME)
@click.option('--dust-size-threshold', '-d', type=int, default=None,
help='eliminate small objects with voxel number less than threshold.')
@click.option('--selected-obj-ids', '-s', type=str, default=None,
              help="""a list of segment ids to mesh. This is for sparse meshing.
    The ids should be separated by commas without spaces, such as "34,56,78,90".
    It can also be a json file containing a list of ids; the json file path should
    include the protocol, such as "gs://bucket/my/json/file/path".""")
@operator
def mask_out_objects(tasks, name, input_chunk_name, output_chunk_name,
dust_size_threshold, selected_obj_ids):
"""Mask out objects in a segmentation chunk."""
    if isinstance(selected_obj_ids, str) and selected_obj_ids.endswith('.json'):
        # assume that ids is a json file in the storage path
        json_storage = Storage(os.path.dirname(selected_obj_ids))
        ids_str = json_storage.get_file(os.path.basename(selected_obj_ids))
        selected_obj_ids = set(json.loads(ids_str))
        assert len(selected_obj_ids) > 0
        logging.info(f'number of selected objects: {len(selected_obj_ids)}')
    elif isinstance(selected_obj_ids, str):
        # comma-separated ids, as described in the option help
        selected_obj_ids = set(int(x) for x in selected_obj_ids.split(','))
for task in tasks:
if task is not None:
seg = task[input_chunk_name]
if not isinstance(seg, Segmentation):
assert isinstance(seg, Chunk)
assert seg.is_segmentation
seg = Segmentation.from_chunk(seg)
seg.mask_fragments(dust_size_threshold)
seg.mask_except(selected_obj_ids)
task[output_chunk_name] = seg
yield task
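# Example (hypothetical ids): --selected-obj-ids 34,56 keeps only segments 34
# and 56; everything else in the chunk is masked out.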
@main.command('crop-margin')
@click.option('--name',
type=str,
default='crop-margin',
help='name of this operator')
@click.option('--margin-size', '-m',
type=int, nargs=3, default=None, callback=default_none,
              help='crop the chunk margin. ' +
              'The default is None and will use the bbox as the cropping range.')
@click.option('--input-chunk-name', '-i',
type=str, default='chunk', help='input chunk name.')
@click.option('--output-chunk-name', '-o',
type=str, default='chunk', help='output chunk name.')
@operator
def crop_margin(tasks, name, margin_size,
input_chunk_name, output_chunk_name):
"""Crop the margin of chunk."""
for task in tasks:
if task is not None:
start = time()
if margin_size:
task[output_chunk_name] = task[input_chunk_name].crop_margin(
margin_size=margin_size)
else:
                # use the output bbox for cropping
task[output_chunk_name] = task[
input_chunk_name].cutout(task['bbox'].to_slices())
task['log']['timer'][name] = time() - start
yield task
@main.command('mesh')
@click.option('--name', type=str, default='mesh', help='name of operator')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='name of chunk needs to be meshed.')
@click.option('--mip', '-m',
type=int, default=None, help='mip level of segmentation chunk.')
@click.option('--voxel-size', '-v', type=int, nargs=3, default=None, callback=default_none,
help='voxel size of the segmentation. zyx order.')
@click.option('--output-path', '-o', type=str, default='file:///tmp/mesh/',
help='output path of meshes, follow the protocol rule of CloudVolume. \
The path will be adjusted if there is a info file with precomputed format.')
@click.option('--output-format', '-t', type=click.Choice(['ply', 'obj', 'precomputed']),
default='precomputed', help='output format, could be one of ply|obj|precomputed.')
@click.option('--simplification-factor', '-f', type=int, default=100,
help='mesh simplification factor.')
@click.option('--max-simplification-error', '-e', type=int, default=40,
help='max simplification error.')
@click.option('--manifest/--no-manifest', default=False, help='create manifest file or not.')
@click.option('--shard/--no-shard', default=False, help='combine meshes as one file')
@operator
def mesh(tasks, name, input_chunk_name, mip, voxel_size, output_path, output_format,
simplification_factor, max_simplification_error, manifest, shard):
"""Perform meshing for segmentation chunk."""
if mip is None:
mip = state['mip']
operator = MeshOperator(
output_path,
output_format,
mip=mip,
voxel_size=voxel_size,
simplification_factor=simplification_factor,
max_simplification_error=max_simplification_error,
manifest=manifest,
shard=shard,
)
for task in tasks:
if task is not None:
start = time()
operator( task[input_chunk_name] )
task['log']['timer'][name] = time() - start
yield task
@main.command('mesh-manifest')
@click.option('--name', type=str, default='mesh-manifest', help='name of operator')
@click.option('--input-name', '-i', type=str, default='prefix', help='input key name in task.')
@click.option('--prefix', '-p', type=int, default=None, help='prefix of meshes.')
@click.option('--disbatch/--no-disbatch', default=False, help='use disBatch task index as prefix')
@click.option('--digits', '-d', type=int, default=1, help='number of digits of prefix')
@click.option('--volume-path', '-v', type=str, required=True, help='cloudvolume path of dataset layer.' +
              ' The mesh directory will be automatically figured out using the info file.')
@operator
def mesh_manifest(tasks, name, input_name, prefix, disbatch, digits, volume_path):
"""Generate mesh manifest files."""
operator = MeshManifestOperator(volume_path)
if prefix:
operator(prefix, digits)
elif disbatch:
assert 'DISBATCH_REPEAT_INDEX' in os.environ
prefix = os.environ['DISBATCH_REPEAT_INDEX']
operator(prefix, digits)
elif input_name:
for task in tasks:
start = time()
operator(task[input_name], digits)
task['log']['timer'][name] = time() - start
yield task
else:
logging.error('requires one of parameters: prefix, input_name, disbatch')
@main.command('neuroglancer')
@click.option('--name', type=str, default='neuroglancer',
help='name of this operator')
@click.option('--voxel-size', '-v',
nargs=3, type=int, default=None, callback=default_none,
help='voxel size of chunk')
@click.option('--port', '-p', type=int, default=None, help='port to use')
@click.option('--inputs', '-i', type=str, default='chunk',
help='a list of chunk names separated by comma.')
@operator
def neuroglancer(tasks, name, voxel_size, port, inputs):
"""Visualize the chunk using neuroglancer."""
operator = NeuroglancerOperator(name=name, port=port, voxel_size=voxel_size)
for task in tasks:
if task is not None:
operator(task, selected=inputs)
yield task
@main.command('quantize')
@click.option('--name', type=str, default='quantize', help='name of this operator')
@click.option('--input-chunk-name', '-i', type=str, default='chunk', help = 'input chunk name')
@click.option('--output-chunk-name', '-o', type=str, default='chunk', help= 'output chunk name')
@operator
def quantize(tasks, name, input_chunk_name, output_chunk_name):
"""Transorm the last channel to uint8."""
for task in tasks:
if task is not None:
aff = task[input_chunk_name]
aff = AffinityMap(aff)
assert isinstance(aff, AffinityMap)
quantized_image = aff.quantize()
task[output_chunk_name] = quantized_image
yield task
@main.command('write-precomputed')
@click.option('--name', type=str, default='write-precomputed', help='name of this operator')
@click.option('--volume-path', '-v', type=str, required=True, help='volume path')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME, help='input chunk name')
@click.option('--mip', '-m',
type=int, default=None, help="mip level to write")
@click.option('--upload-log/--no-upload-log',
default=False, help='the log will be put inside volume-path')
@click.option('--create-thumbnail/--no-create-thumbnail',
default=False, help='create thumbnail or not. ' +
'the thumbnail is a downsampled and quantized version of the chunk.')
@click.option('--intensity-threshold', '-t', type=float,
              default=None, help='do not save anything if all voxel intensities are below this threshold.'
)
@operator
def write_precomputed(tasks, name, volume_path, input_chunk_name, mip, upload_log, create_thumbnail, intensity_threshold):
"""Save chunk to volume."""
if mip is None:
mip = state['mip']
operator = WritePrecomputedOperator(
volume_path,
mip,
upload_log=upload_log,
create_thumbnail=create_thumbnail,
name=name
)
for task in tasks:
if task is not None:
# the time elapsed was recorded internally
chunk = task[input_chunk_name]
if intensity_threshold is not None and np.all(chunk.array < intensity_threshold):
pass
else:
operator(chunk, log=task.get('log', {'timer': {}}))
task['output_volume_path'] = volume_path
yield task
@main.command('threshold')
@click.option('--name', type=str, default='threshold',
help='threshold a map and get the labels.')
@click.option('--input-chunk-name', '-i',
type=str, default=DEFAULT_CHUNK_NAME,
help='input chunk name')
@click.option('--output-chunk-name', '-o',
type=str, default=DEFAULT_CHUNK_NAME,
help='output chunk name')
@click.option('--threshold', '-t', type=float, default=0.5,
help='threshold to cut the map.')
@operator
def threshold(tasks, name, input_chunk_name, output_chunk_name,
threshold):
"""Threshold the probability map."""
for task in tasks:
if task is not None:
start = time()
logging.info('Segment probability map using a threshold...')
task[output_chunk_name] = task[input_chunk_name].threshold(threshold)
task['log']['timer'][name] = time() - start
yield task
@main.command('channel-voting')
@click.option('--name', type=str, default='channel-voting', help='name of operator')
@click.option('--input-chunk-name', type=str, default=DEFAULT_CHUNK_NAME)
@click.option('--output-chunk-name', type=str, default=DEFAULT_CHUNK_NAME)
@operator
def channel_voting(tasks, name, input_chunk_name, output_chunk_name):
"""all channels vote to get a uint8 volume. The channel with max intensity wins."""
for task in tasks:
task[output_chunk_name] = task[input_chunk_name].channel_voting()
yield task
@main.command('view')
@click.option('--name', type=str, default='view', help='name of this operator')
@click.option('--image-chunk-name',
type=str,
default='chunk',
help='image chunk name in the global state')
@click.option('--segmentation-chunk-name',
type=str,
default=None,
help='segmentation chunk name in the global state')
@operator
def view(tasks, name, image_chunk_name, segmentation_chunk_name):
"""Visualize the chunk using cloudvolume view in browser."""
operator = ViewOperator(name=name)
for task in tasks:
if task is not None:
operator(task[image_chunk_name],
seg=segmentation_chunk_name)
yield task
if __name__ == '__main__':
main()
|
{"hexsha": "cf32bd34a11df55a41df8b1367563d74cc081f97", "size": 67881, "ext": "py", "lang": "Python", "max_stars_repo_path": "chunkflow/flow/flow.py", "max_stars_repo_name": "julesberman/chunkflow", "max_stars_repo_head_hexsha": "c6af0d036bc2f308c64c591d49c94c414c569241", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chunkflow/flow/flow.py", "max_issues_repo_name": "julesberman/chunkflow", "max_issues_repo_head_hexsha": "c6af0d036bc2f308c64c591d49c94c414c569241", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chunkflow/flow/flow.py", "max_forks_repo_name": "julesberman/chunkflow", "max_forks_repo_head_hexsha": "c6af0d036bc2f308c64c591d49c94c414c569241", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.2914540816, "max_line_length": 122, "alphanum_fraction": 0.6160339418, "include": true, "reason": "import numpy", "num_tokens": 15156}
|
# coding: utf-8
import numpy as np
import talib as ta
from settings import evaluete
MAKER_COST = evaluete["maker_cost"]
TAKER_COST = evaluete["taker_cost"]
IMPACT = evaluete["impact"]
SLIDE = evaluete["slide"]
LEVER = evaluete["lever"]
MAX_POSITION = evaluete["max_position"]
STOP_EARN = evaluete["stop_earn"]
STOP_LOSS = evaluete["stop_loss"]
class tatic_eva(object):
"""
tatic_eva(self, df0, eva, eva4open, eva_weight=None, flag_riskcontrol=False, flag_riskcontrol_type=0)
flag_riskcontrol 0: disable stop_earn and stop_loss
1: enable stop_earn and stop_loss
flag_riskcontrol_type 0: use const percent
1: use atrN
2: use sar sign
3: use sar follow
return: <float>list (which is profit list)
"""
def __init__(self, df0, eva, eva4open, eva_weight=None, flag_riskcontrol=False, flag_riskcontrol_type=0):
self.l_profit=[]
self.eva=eva
self.eva4open = eva4open
self.eva_weight = eva_weight
self.source = np.array(df0.close)
self.high = np.array(df0.highest)
self.low = np.array(df0.lowest)
self.flag_riskcontrol = flag_riskcontrol
self.flag_riskcontrol_type = flag_riskcontrol_type
self.riskControl_profit = []
def get_pst_ctl(self, now_eva, pre_eva, eva_weight, evaw_col):
i,j,pc=int(now_eva),int(pre_eva),0
if i in eva_weight.keys() and j in eva_weight.keys():#evaw_has_key
pc = eva_weight[i][evaw_col[j]]
else:#evaw_not_has_key
pc=0
return pc
def get_evaw_col(self, eva_weight):
"""
eva_weight={#-3 -1 1 3
-3:[-0.2, -0.05, -0.3, -0.7],
-1:[-0.1, -0.05, -0.2, -0.5],
1:[0.5, 0.2, 0.05, 0.1],
3:[0.7, 0.3, 0.05, 0.2],
}
"""
i,j=0,0
evaw_col={}
for i in eva_weight.keys():
evaw_col[i]=j
j+=1
return evaw_col
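    # Worked example (assuming insertion-ordered dicts, Python 3.7+): for the
    # eva_weight table shown in the docstring above, get_evaw_col returns
    # {-3: 0, -1: 1, 1: 2, 3: 3}, i.e. each signal key is mapped to its column
    # index, so eva_weight[now][evaw_col[pre]] looks up the position-control
    # value for a (now, pre) signal transition in get_pst_ctl.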
    def get_open_price(self, time, SLtype):
        """
        get_open_price(self, time, SLtype)
        SLtype 1 : open a long (buy), price slides up
               0 : open a short (sell), price slides down
        return: <float>
        """
        if SLtype==1:
            price = self.source[time]*(1+SLIDE)
        if SLtype==0:
            price = self.source[time]*(1-SLIDE)
        return price
    def get_close_price(self, time, SLtype):
        """
        get_close_price(self, time, SLtype)
        SLtype 1 : close a long (sell), price slides down
               0 : close a short (buy), price slides up
        return: <float>
        """
        if SLtype==1:
            price = self.source[time]*(1-SLIDE)
        if SLtype==0:
            price = self.source[time]*(1+SLIDE)
        return price
    def get_open_sign(self, time):
        """
        get_open_sign(self, time)
        return 0: goto short condition
               1: goto long condition
              -1: goto zero condition
        """
        if self.eva4open[time]==-1:
            return 0
        elif self.eva4open[time]==1:
            return 1
        else:
            return -1
def do(self):
return self.loop_time()
def loop_time(self):
evaw_col = self.get_evaw_col(self.eva_weight)
leva = len(self.eva)
for i in range(leva):
if i==0 or i==leva-1:#ig_first_time || ig_end_time
self.l_profit.append(0)
continue
open_sign = self.get_open_sign(i)
if open_sign==1:
eva_profit = self.loop_eva_long(i, leva, evaw_col)
self.l_profit.append(np.sum(eva_profit))
elif open_sign==0:
eva_profit = self.loop_eva_short(i, leva, evaw_col)
self.l_profit.append(np.sum(eva_profit))
elif open_sign==-1:
self.l_profit.append(0)
return self.l_profit
def loop_eva_long(self, time, leva, evaw_col):
"""
loop_eva_long(self, time, leva, evaw_col)
time : now time
"""
i_pc = 0
eva_profit = []
        open_price = self.get_open_price(time, 1)
for j in range(time+1, leva):
j_pc = self.get_pst_ctl(self.eva[j], self.eva[j-1], self.eva_weight, evaw_col)#get position control value
            profit = get_profit(open_price, self.get_close_price(j, 1), 1, flag_rate=True)
            if j_pc==-1 or j_pc+i_pc<0 or j==leva-1 or (self.flag_riskcontrol and self.riskControl(profit, open_price, time, j, 1)):#full_out_long || L_min_position || end_bar || risk control triggered
eva_profit.append(profit*i_pc)
break
elif j_pc==1 or j_pc+i_pc>=MAX_POSITION:#full_in_long || EB_max_position
j_pc=MAX_POSITION-i_pc
                open_price = (i_pc*open_price + j_pc*self.get_open_price(j, 1)) / (i_pc+j_pc)
i_pc=MAX_POSITION
elif j_pc<0:#out_long
eva_profit.append(profit*-j_pc)
i_pc+=j_pc
elif j_pc>0:#in_long
                open_price = (i_pc*open_price + j_pc*self.get_open_price(j, 1)) / (i_pc+j_pc)
i_pc+=j_pc
return eva_profit
def loop_eva_short(self, time, leva, evaw_col):
"""
loop_eva_short(self, time, leva, evaw_col)
time : now time
"""
i_pc = 0
eva_profit = []
        open_price = self.get_open_price(time, 0)
for j in range(time+1, leva):
j_pc = self.get_pst_ctl(self.eva[j], self.eva[j-1], self.eva_weight, evaw_col)
            profit = get_profit(open_price, self.get_close_price(j, 0), 0, flag_rate=True)
            if j_pc==1 or j_pc+i_pc>0 or j==leva-1 or (self.flag_riskcontrol and self.riskControl(profit, open_price, time, j, 0)):#full_out_short || S_min_position || end_bar || risk control triggered
eva_profit.append(profit*-i_pc)
break
elif j_pc==-1 or -(j_pc+i_pc)>=MAX_POSITION:#full_in_short || EB_max_position
j_pc=-MAX_POSITION-i_pc
                open_price = (i_pc*open_price + j_pc*self.get_open_price(j, 0)) / (i_pc+j_pc)
i_pc=-MAX_POSITION
elif j_pc>0:#out_short
eva_profit.append(profit*j_pc)
i_pc+=j_pc
elif j_pc<0:#in_short
                open_price = (i_pc*open_price + j_pc*self.get_open_price(j, 0)) / (i_pc+j_pc)
i_pc+=j_pc
return eva_profit
def riskControl(self, profit, open_price, open_time, time, SLtype):
"""
        riskControl(self, profit, open_price, open_time, time, SLtype)
        self.flag_riskcontrol_type (iterable of enabled methods):
        0 : percent
        1 : atrN
        2 : sar
        3 : sarx
        4 : drawdown
"""
l=[]
if 0 in self.flag_riskcontrol_type:
l.append(self.riskControl_percent(profit))
if 1 in self.flag_riskcontrol_type:
l.append(self.riskControl_atrN(open_price, time, SLtype))
if 2 in self.flag_riskcontrol_type:
l.append(self.riskControl_sar(open_price, time, SLtype))
if 3 in self.flag_riskcontrol_type:
l.append(self.riskControl_sarx(open_price, open_time, time, SLtype))
if 4 in self.flag_riskcontrol_type:
l.append(self.riskControl_drawdown(profit, open_time, time))
if True in l:
return True
else:
return False
def riskControl_percent(self, profit):
"""
use simple const percent
"""
if profit >= STOP_EARN or profit <= STOP_LOSS:
return True
else:
return False
def riskControl_atrN(self, open_price, time, SLtype, window=14, n_earn=2.5, n_loss=.5):
"""
        riskControl_atrN(self, open_price, time, SLtype, window=14, n_earn=2.5, n_loss=.5)
        if n_earn*now_atr < now_close-open then stop_earn; default 2.5
        if n_loss*now_atr > open-now_close then stop_loss; default 0.5
        return: bool
"""
if time<window:
return False
atr = ta.ATR(self.high[time-window:time+1], self.low[time-window:time+1], self.source[time-window:time+1], timeperiod=window)
if SLtype:
if atr[-1]*n_earn < (self.source[time] - open_price) or atr[-1]*n_loss > (open_price - self.source[time]):
return True
else:
return False
elif not SLtype:
if atr[-1]*n_earn < (open_price - self.source[time]) or atr[-1]*n_loss > (self.source[time] - open_price):
return True
else:
return False
def riskControl_sar(self, open_price, time, SLtype, AF_increse=0.01, AF_max=0.1):
"""
        riskControl_sar(self, open_price, time, SLtype, AF_increse=0.01, AF_max=0.1)
        SLtype 0 : short
               1 : long
        return: bool
        note: ta.SAR returns price levels (always positive for positive
        prices), so the sign checks below effectively never trigger;
        sign-encoded direction is a SAREXT convention (see riskControl_sarx).
"""
sar = ta.SAR(self.high[:time+1], self.low[:time+1], acceleration=AF_increse, maximum=AF_max)
if SLtype==1:
if sar[-1]>=0:
return False
elif sar[-1]<0:
return True
if SLtype==0:
if sar[-1]<=0:
return False
elif sar[-1]>0:
return True
return False
def riskControl_sarx(self, open_price, open_time, time, SLtype, AF_increse=0.01, AF_max=0.1):
"""
        riskControl_sarx(self, open_price, open_time, time, SLtype, AF_increse=0.01, AF_max=0.1)
        SLtype 0 : short
               1 : long
        return: bool
"""
if SLtype==1:
sarx = ta.SAREXT(self.high[open_time:time+1], self.low[open_time:time+1], startvalue=open_price, offsetonreverse=0, accelerationinitlong=AF_increse, accelerationlong=AF_increse, accelerationmaxlong=AF_max, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0)
if sarx[-1]>=0:
return False
elif sarx[-1]<0:
return True
if SLtype==0:
            sarx = ta.SAREXT(self.high[open_time:time+1], self.low[open_time:time+1], startvalue=open_price, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0, accelerationmaxlong=AF_max, accelerationinitshort=AF_increse, accelerationshort=AF_increse, accelerationmaxshort=AF_max)
if sarx[-1]<=0:
return False
elif sarx[-1]>0:
return True
return False
def riskControl_drawdown(self, profit, open_time, time, n_drawdown=.5):
        if time == open_time+1:# first bar after opening: reset the running profit history
self.riskControl_profit=[]
return False
self.riskControl_profit.append(profit)
if self.riskControl_profit[-1]/np.max(self.riskControl_profit)<n_drawdown:
return True
return False
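# Minimal usage sketch (hypothetical data): df0 must expose close/highest/
# lowest columns, eva/eva4open are per-bar signal arrays, and eva_weight is a
# transition table like the one documented in get_evaw_col.
#
#   ev = tatic_eva(df0, eva, eva4open, eva_weight=eva_weight,
#                  flag_riskcontrol=True, flag_riskcontrol_type=(0, 1))
#   profits = ev.do()  # one profit entry per bar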
def get_profit(open_price, close_price, SLtype, maker_cost=MAKER_COST, taker_cost=TAKER_COST, impact=IMPACT, lever=LEVER, flag_rate=1, flag_mt=0):
"""
get_profit(open_price, close_price, SLtype, maker_cost=MAKER_COST, taker_cost=TAKER_COST, impact=IMPACT, lever=LEVER, flag_rate=1, flag_mt=0)
SLtype 0 : short
1 : long
    flag_rate 0 : return absolute profit
              1 : return profit rate (profit / open cost)
flag_mt 0 : taker
1 : maker
return: profit
"""
cost, profit, open_cost=0,0,0
if not flag_mt:
cost=taker_cost
else:
cost=maker_cost
if SLtype:
profit = close_price*(1-impact)*(1-cost) - open_price*(1+impact)*(1+cost)
if not flag_rate:
return profit*lever
if flag_rate:
open_cost = (open_price)*(1+impact)*(1+cost)
return profit/open_cost*lever
if not SLtype:
profit = open_price*(1-impact)*(1-cost) - close_price*(1+impact)*(1+cost)
if not flag_rate:
return profit*lever
if flag_rate:
open_cost = (open_price)*(1+impact)*(1+cost)
return profit/open_cost*lever
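# Worked example (hypothetical settings impact=0.001, taker_cost=0.002,
# lever=1): a long opened at 100 and closed at 110 gives
#   profit = 110*(1-0.001)*(1-0.002) - 100*(1+0.001)*(1+0.002) ~= 9.3700
#   rate   = profit / (100*(1+0.001)*(1+0.002))                ~= 0.0934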
def get_pure_profit(prof_rate, init_position=1.0):
pure_profit=[]
i=0
for i in prof_rate:
init_position*=(1+i)
pure_profit.append(init_position)
return pure_profit
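# Example: get_pure_profit([0.10, -0.05]) compounds an initial position of 1.0
# into [1.1, 1.045].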
def get_benchmark_profit(source, source_shift_1):
change, bprofit=[],[]
change = np.nan_to_num(source - source_shift_1)
bprofit = change/source
return bprofit
def get_beta(tatic_profit, benchmark_profit):
cov, m_cov, var, beta=0,0,0,0
m_cov = np.cov((tatic_profit, benchmark_profit))
cov = m_cov[0][1]
var = np.var(benchmark_profit)
beta = cov/var
return beta
def get_alpha(tatic_profit, benchmark_profit, riskfree_profit=0.01):
beta, alpha=0,0
beta = get_beta(tatic_profit, benchmark_profit)
alpha = np.mean(tatic_profit - (riskfree_profit + beta*(benchmark_profit-riskfree_profit)))
return alpha
def get_maxDrawDown(pure_profit, window=90):
    # rolling max drawdown over a sliding window (avoids shadowing the
    # built-in min/max names)
    dd_list = []
    for i in range(len(pure_profit)-window):
        w_min = np.min(pure_profit[i:i+window])
        w_max = np.max(pure_profit[i:i+window])
        dd_list.append(1-w_min/w_max)
    maxDD = np.max(dd_list)
    return maxDD
def get_sharpe(tatic_profit, riskfree_profit):
tf_mean, t_vol, sharpe = 0,0,0
tf_mean = np.mean(tatic_profit - riskfree_profit)
t_vol = np.std(tatic_profit)
sharpe = tf_mean/t_vol
return sharpe
def get_sortino(tatic_profit, riskfree_profit):
tf_mean, t_vol_dw, sortino=0,0,0
tf_mean = np.mean(tatic_profit - riskfree_profit)
t_vol_dw = np.std(tatic_profit[tatic_profit<0])
sortino = tf_mean/t_vol_dw
return sortino
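# Note: sortino divides the same mean excess return by the std of the losing
# periods only, so it penalizes downside volatility exclusively. With no
# losing periods np.std of the filtered array is nan, and with exactly one it
# is 0, so the ratio degenerates in those cases.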
def get_winLose_analysis(l_profit):
trade_time = len(l_profit)
win_time = len(l_profit[l_profit>0])
lose_time = len(l_profit[l_profit<0])
victories = win_time/trade_time
winEarn_rate = np.mean(l_profit[l_profit>0])
loseLoss_rate = np.mean(l_profit[l_profit<0])
odds = np.abs(winEarn_rate/loseLoss_rate)
    max_win_time=0
    max_lose_time=0
    slp=np.sign(l_profit)
    j=0
    k=0
    for s in slp:
        if s>0:    # winning bar: extend win streak, reset lose streak
            j+=1
            k=0
        elif s<0:  # losing bar: extend lose streak, reset win streak
            k+=1
            j=0
        else:      # flat bar: reset both streaks
            j=0
            k=0
        max_win_time=max(max_win_time, j)
        max_lose_time=max(max_lose_time, k)
return [trade_time, win_time, lose_time, victories, winEarn_rate, loseLoss_rate, odds, max_win_time, max_lose_time]
def get_backtest(profit, benchmark_profit, riskfree_profit=0.01, drawdown_window=90, flag_rtn=0):
"""
    flag_rtn 0: print all details
             1: return detail list
"""
profit = np.array(profit)
benchmark_profit = np.array(benchmark_profit)
positive_profit = np.sum(profit[profit>0])
    negative_profit = np.sum(profit[profit<0])
pure_profit = get_pure_profit(profit)
maxDrawDown = get_maxDrawDown(pure_profit, window=drawdown_window)
beta = get_beta(profit, benchmark_profit)
alpha = get_alpha(profit, benchmark_profit, riskfree_profit)
sharpe = get_sharpe(profit, riskfree_profit)
sortino = get_sortino(profit, riskfree_profit)
trade_time, win_time, lose_time, victories, winEarn_rate, loseLoss_rate, odds, max_win_time, max_lose_time = get_winLose_analysis(profit)
if not flag_rtn:#print detail
print('total positive profit:', positive_profit)
        print('total negative profit:', negative_profit)
print('max Profit: ', np.max(profit))
print('min Profit: ', np.min(profit))
print('mean Profit: ', np.mean(profit))
print('var profit: ', np.var(profit))
print('pure profit max: ', np.max(pure_profit))
print('pure profit volatility:', np.var(pure_profit))
print('maxDrawDown: ', maxDrawDown)
print('beta: ', beta)
print('alpha: ', alpha)
print('sharpe: ', sharpe)
print('sortino: ', sortino)
print('total trade time: ', trade_time)
print('total win time: ', win_time)
print('total lose time: ', lose_time)
print('max continue win time:', max_win_time)
print('max continue lose time:', max_lose_time)
print('victories: ', victories)
print('win earn rate: ', winEarn_rate)
print('lose loss rate: ', loseLoss_rate)
print('odds: ', odds)
else:
        return [positive_profit, negative_profit, pure_profit, beta, alpha, maxDrawDown, sharpe, sortino, trade_time, win_time, lose_time, victories, winEarn_rate, loseLoss_rate, odds, max_win_time, max_lose_time]
|
{"hexsha": "b2099af20730ae29f4350ce5f34a843518b3270e", "size": 17023, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluete.py", "max_stars_repo_name": "gLhookniano/AlgTradeTest", "max_stars_repo_head_hexsha": "ab9bb92afe3c4ce3516fcaec0e401c2dad405080", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-31T08:30:20.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-31T08:30:20.000Z", "max_issues_repo_path": "evaluete.py", "max_issues_repo_name": "gLhookniano/AlgTradeTest", "max_issues_repo_head_hexsha": "ab9bb92afe3c4ce3516fcaec0e401c2dad405080", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-31T09:34:50.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-31T09:34:50.000Z", "max_forks_repo_path": "evaluete.py", "max_forks_repo_name": "gLhookniano/AlgTradeT", "max_forks_repo_head_hexsha": "ab9bb92afe3c4ce3516fcaec0e401c2dad405080", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6086021505, "max_line_length": 287, "alphanum_fraction": 0.579980027, "include": true, "reason": "import numpy", "num_tokens": 4585}
|
from aip import AipNlp
import pandas as pd
import numpy as np
import time
# Enter your Baidu AI credentials here
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
def isPostive(text):
    try:
        if client.sentimentClassify(text)['items'][0]['positive_prob']>0.5:
            return "积极"  # positive
        else:
            return "消极"  # negative
    except:
        # default to "积极" (positive) when the API call fails
        return "积极"
# Read the input file; adjust the path as needed
file_path = 'mlxg.xls'
data = pd.read_excel(file_path,encoding='utf-8')
moods = []
count = 0
for i in data['微博内容']:  # '微博内容' = Weibo post text column
    moods.append(isPostive(i))
    count+=1
    print("Analyzed so far: "+str(count))
data['情感倾向'] = pd.Series(moods)  # '情感倾向' = sentiment label column
# Overwrites the input file in place
data.to_excel(file_path)
print("分析完成,已保存")
'''
# A simple keyword-based classifier (kept for reference) :P
def fenlei(text):
xf = ['抽奖',"抽一个","抽一位","买","通贩"]
cz = ["画","实物","返图","合集","摸鱼","漫","自制","攻略","授权","草稿","绘"]
gj = ["hz","狗粉丝","狗女儿"]
for j in cz:
if j in text:
return "创作"
for i in xf:
if i in text:
return "消费"
for k in gj:
if k in text:
return "攻击"
return "其他"
'''
|
{"hexsha": "6dc93cde2750b81f4abb9adf099b8b5448179552", "size": 1110, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis.py", "max_stars_repo_name": "huihui7987/weibo-topic-spider", "max_stars_repo_head_hexsha": "a7e93f1a8fac4146be36b8a594b7977fbac019f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 188, "max_stars_repo_stars_event_min_datetime": "2020-02-01T12:27:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:01:12.000Z", "max_issues_repo_path": "analysis.py", "max_issues_repo_name": "kingking888/weibo-topic-spider", "max_issues_repo_head_hexsha": "e703a439c2e3e73b13ef129a3ff68f97808d6fbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2020-02-01T04:04:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-24T05:19:14.000Z", "max_forks_repo_path": "analysis.py", "max_forks_repo_name": "kingking888/weibo-topic-spider", "max_forks_repo_head_hexsha": "e703a439c2e3e73b13ef129a3ff68f97808d6fbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2020-02-01T12:34:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T13:46:56.000Z", "avg_line_length": 19.1379310345, "max_line_length": 76, "alphanum_fraction": 0.518018018, "include": true, "reason": "import numpy", "num_tokens": 407}
|
[STATEMENT]
lemma has_one_imp_equal:
assumes "\<one> \<in> I"
shows "I = R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I = R
[PROOF STEP]
by (metis assms lideal subset multiplicative.right_unit subsetI subset_antisym)
|
{"llama_tokens": 94, "file": "Grothendieck_Schemes_Comm_Ring", "length": 1}
|
!
!
! AMG4PSBLAS version 1.0
! Algebraic Multigrid Package
! based on PSBLAS (Parallel Sparse BLAS version 3.7)
!
! (C) Copyright 2021
!
! Salvatore Filippone
! Pasqua D'Ambra
! Fabio Durastante
!
! Redistribution and use in source and binary forms, with or without
! modification, are permitted provided that the following conditions
! are met:
! 1. Redistributions of source code must retain the above copyright
! notice, this list of conditions and the following disclaimer.
! 2. Redistributions in binary form must reproduce the above copyright
! notice, this list of conditions, and the following disclaimer in the
! documentation and/or other materials provided with the distribution.
! 3. The name of the AMG4PSBLAS group or the names of its contributors may
! not be used to endorse or promote products derived from this
! software without specific written permission.
!
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
! ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
! TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
! PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AMG4PSBLAS GROUP OR ITS CONTRIBUTORS
! BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
! POSSIBILITY OF SUCH DAMAGE.
!
!
!
! The aggregator object hosts the aggregation method for building
! the multilevel hierarchy.
!
module amg_s_base_aggregator_mod
use amg_base_prec_type, only : amg_sml_parms, amg_saggr_data
use psb_base_mod, only : psb_sspmat_type, psb_lsspmat_type, psb_s_vect_type, &
& psb_s_base_vect_type, psb_slinmap_type, psb_spk_, &
& psb_ls_csr_sparse_mat, psb_ls_coo_sparse_mat, &
& psb_s_csr_sparse_mat, psb_s_coo_sparse_mat, &
& psb_ipk_, psb_epk_, psb_lpk_, psb_desc_type, psb_i_base_vect_type, &
& psb_erractionsave, psb_error_handler, psb_success_, psb_toupper
!
!
!
!> \class amg_s_base_aggregator_type
!!
!! It is the data type containing the basic interface definition for
!! building a multigrid hierarchy by aggregation. The base object carries
!! almost no state; it is intended to be essentially an abstract type.
!!
!!
!! type amg_s_base_aggregator_type
!! end type
!!
!!
!! Methods:
!!
!! bld_tprol - Build a tentative prolongator
!!
!! mat_bld - Build prolongator/restrictor and coarse matrix ac
!!
!! mat_asb - Convert prolongator/restrictor/coarse matrix
!! and fix their descriptor(s)
!!
!! update_next - Transfer information to the next level; default is
!! to do nothing, i.e. aggregators at different
!! levels are independent.
!!
!! default - Apply defaults
!!  set_aggr_type   - For aggregators that have internal options.
!! fmt - Return a short string description
!! descr - Print a more detailed description
!!
!! cseti, csetr, csetc - Set internal parameters, if any
!
type amg_s_base_aggregator_type
! Do we want to purge explicit zeros when aggregating?
logical :: do_clean_zeros
contains
procedure, pass(ag) :: bld_tprol => amg_s_base_aggregator_build_tprol
procedure, pass(ag) :: mat_bld => amg_s_base_aggregator_mat_bld
procedure, pass(ag) :: mat_asb => amg_s_base_aggregator_mat_asb
procedure, pass(ag) :: bld_map => amg_s_base_aggregator_bld_map
procedure, pass(ag) :: update_next => amg_s_base_aggregator_update_next
procedure, pass(ag) :: clone => amg_s_base_aggregator_clone
procedure, pass(ag) :: free => amg_s_base_aggregator_free
procedure, pass(ag) :: default => amg_s_base_aggregator_default
procedure, pass(ag) :: descr => amg_s_base_aggregator_descr
procedure, pass(ag) :: sizeof => amg_s_base_aggregator_sizeof
procedure, pass(ag) :: set_aggr_type => amg_s_base_aggregator_set_aggr_type
procedure, nopass :: fmt => amg_s_base_aggregator_fmt
procedure, pass(ag) :: cseti => amg_s_base_aggregator_cseti
procedure, pass(ag) :: csetr => amg_s_base_aggregator_csetr
procedure, pass(ag) :: csetc => amg_s_base_aggregator_csetc
generic, public :: set => cseti, csetr, csetc
procedure, nopass :: xt_desc => amg_s_base_aggregator_xt_desc
end type amg_s_base_aggregator_type
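  !
  ! A minimal sketch (hypothetical, not part of this library) of how a
  ! concrete aggregator could extend this base type and override a method:
  !
  !   type, extends(amg_s_base_aggregator_type) :: my_aggregator_type
  !   contains
  !     procedure, pass(ag) :: bld_tprol => my_aggregator_build_tprol
  !   end type my_aggregator_type
  !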
abstract interface
subroutine amg_s_soc_map_bld(iorder,theta,clean_zeros,a,desc_a,nlaggr,ilaggr,info)
import :: psb_sspmat_type, psb_desc_type, psb_spk_, psb_ipk_, psb_lpk_
implicit none
integer(psb_ipk_), intent(in) :: iorder
logical, intent(in) :: clean_zeros
type(psb_sspmat_type), intent(in) :: a
type(psb_desc_type), intent(in) :: desc_a
real(psb_spk_), intent(in) :: theta
integer(psb_lpk_), allocatable, intent(out) :: ilaggr(:),nlaggr(:)
integer(psb_ipk_), intent(out) :: info
end subroutine amg_s_soc_map_bld
end interface
interface amg_ptap_bld
subroutine amg_s_ptap_bld(a_csr,desc_a,nlaggr,parms,ac,&
& coo_prol,desc_cprol,coo_restr,info,desc_ax)
import :: psb_s_csr_sparse_mat, psb_sspmat_type, psb_desc_type, &
& psb_s_coo_sparse_mat, amg_sml_parms, psb_spk_, psb_ipk_, psb_lpk_
implicit none
type(psb_s_csr_sparse_mat), intent(inout) :: a_csr
type(psb_desc_type), intent(inout) :: desc_a
integer(psb_lpk_), intent(inout) :: nlaggr(:)
type(amg_sml_parms), intent(inout) :: parms
type(psb_s_coo_sparse_mat), intent(inout) :: coo_prol, coo_restr
type(psb_desc_type), intent(inout) :: desc_cprol
type(psb_sspmat_type), intent(out) :: ac
integer(psb_ipk_), intent(out) :: info
type(psb_desc_type), intent(inout), optional :: desc_ax
end subroutine amg_s_ptap_bld
end interface amg_ptap_bld
interface amg_rap
subroutine amg_s_rap(a_csr,desc_a,nlaggr,parms,ac,&
& coo_prol,desc_cprol,coo_restr,info)
import :: psb_s_csr_sparse_mat, psb_sspmat_type, psb_desc_type, &
& psb_s_coo_sparse_mat, amg_sml_parms, psb_spk_, psb_ipk_, psb_lpk_
implicit none
type(psb_s_csr_sparse_mat), intent(inout) :: a_csr
type(psb_desc_type), intent(inout) :: desc_a
integer(psb_lpk_), intent(inout) :: nlaggr(:)
type(amg_sml_parms), intent(inout) :: parms
type(psb_s_coo_sparse_mat), intent(inout) :: coo_prol, coo_restr
type(psb_desc_type), intent(inout) :: desc_cprol
type(psb_sspmat_type), intent(out) :: ac
integer(psb_ipk_), intent(out) :: info
end subroutine amg_s_rap
end interface amg_rap
contains
subroutine amg_s_base_aggregator_cseti(ag,what,val,info,idx)
Implicit None
! Arguments
class(amg_s_base_aggregator_type), intent(inout) :: ag
character(len=*), intent(in) :: what
integer(psb_ipk_), intent(in) :: val
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_), intent(in), optional :: idx
! Do nothing
info = 0
end subroutine amg_s_base_aggregator_cseti
subroutine amg_s_base_aggregator_csetr(ag,what,val,info,idx)
Implicit None
! Arguments
class(amg_s_base_aggregator_type), intent(inout) :: ag
character(len=*), intent(in) :: what
real(psb_spk_), intent(in) :: val
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_), intent(in), optional :: idx
! Do nothing
info = 0
end subroutine amg_s_base_aggregator_csetr
subroutine amg_s_base_aggregator_csetc(ag,what,val,info,idx)
Implicit None
! Arguments
class(amg_s_base_aggregator_type), intent(inout) :: ag
character(len=*), intent(in) :: what
character(len=*), intent(in) :: val
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_), intent(in), optional :: idx
! Set clean zeros, or do nothing.
select case (psb_toupper(trim(what)))
case('AGGR_CLEAN_ZEROS')
select case (psb_toupper(trim(val)))
case('TRUE','T')
ag%do_clean_zeros = .true.
case('FALSE','F')
ag%do_clean_zeros = .false.
end select
end select
info = 0
end subroutine amg_s_base_aggregator_csetc
subroutine amg_s_base_aggregator_update_next(ag,agnext,info)
implicit none
class(amg_s_base_aggregator_type), target, intent(inout) :: ag, agnext
integer(psb_ipk_), intent(out) :: info
!
! Base version does nothing.
!
info = 0
end subroutine amg_s_base_aggregator_update_next
subroutine amg_s_base_aggregator_clone(ag,agnext,info)
implicit none
class(amg_s_base_aggregator_type), intent(inout) :: ag
class(amg_s_base_aggregator_type), allocatable, intent(inout) :: agnext
integer(psb_ipk_), intent(out) :: info
info = 0
if (allocated(agnext)) then
call agnext%free(info)
if (info == 0) deallocate(agnext,stat=info)
end if
if (info /= 0) return
allocate(agnext,source=ag,stat=info)
end subroutine amg_s_base_aggregator_clone
subroutine amg_s_base_aggregator_free(ag,info)
implicit none
class(amg_s_base_aggregator_type), intent(inout) :: ag
integer(psb_ipk_), intent(out) :: info
info = psb_success_
return
end subroutine amg_s_base_aggregator_free
subroutine amg_s_base_aggregator_default(ag)
implicit none
class(amg_s_base_aggregator_type), intent(inout) :: ag
! Only one default setting
ag%do_clean_zeros = .true.
return
end subroutine amg_s_base_aggregator_default
function amg_s_base_aggregator_fmt() result(val)
implicit none
character(len=32) :: val
val = "Default aggregator "
end function amg_s_base_aggregator_fmt
function amg_s_base_aggregator_sizeof(ag) result(val)
implicit none
class(amg_s_base_aggregator_type), intent(in) :: ag
integer(psb_epk_) :: val
val = 1
end function amg_s_base_aggregator_sizeof
function amg_s_base_aggregator_xt_desc() result(val)
implicit none
logical :: val
val = .false.
end function amg_s_base_aggregator_xt_desc
subroutine amg_s_base_aggregator_descr(ag,parms,iout,info)
implicit none
class(amg_s_base_aggregator_type), intent(in) :: ag
type(amg_sml_parms), intent(in) :: parms
integer(psb_ipk_), intent(in) :: iout
integer(psb_ipk_), intent(out) :: info
write(iout,*) 'Aggregator object type: ',ag%fmt()
call parms%mldescr(iout,info)
return
end subroutine amg_s_base_aggregator_descr
subroutine amg_s_base_aggregator_set_aggr_type(ag,parms,info)
implicit none
class(amg_s_base_aggregator_type), intent(inout) :: ag
type(amg_sml_parms), intent(in) :: parms
integer(psb_ipk_), intent(out) :: info
! Do nothing
return
end subroutine amg_s_base_aggregator_set_aggr_type
!
!> Function bld_tprol:
!! \memberof amg_s_base_aggregator_type
!! \brief Build a tentative prolongator.
!! The routine will map the local matrix entries to aggregates.
!! The mapping is stored in ILAGGR; for each local row index I,
!! ILAGGR(I) contains the index of the aggregate to which index I
!! will contribute, in global numbering.
!! Many aggregations produce a binary tentative prolongator, but some
!! do not, hence we also need the OP_PROL output.
!! AG_DATA is passed here just in case some of the
!! aggregators need it internally; most of them will ignore it.
!!
!! \param ag The input aggregator object
!! \param parms The auxiliary parameters object
!! \param ag_data Auxiliary global aggregation info
!! \param a The local matrix part
!! \param desc_a The descriptor
!! \param ilaggr Output aggregation map
!! \param nlaggr Sizes of ilaggr on all processes
!! \param t_prol  The tentative prolongator operator
!! \param info Return code
!!
!
subroutine amg_s_base_aggregator_build_tprol(ag,parms,ag_data,&
& a,desc_a,ilaggr,nlaggr,t_prol,info)
use psb_base_mod
implicit none
class(amg_s_base_aggregator_type), target, intent(inout) :: ag
type(amg_sml_parms), intent(inout) :: parms
type(amg_saggr_data), intent(in) :: ag_data
type(psb_sspmat_type), intent(inout) :: a
type(psb_desc_type), intent(inout) :: desc_a
integer(psb_lpk_), allocatable, intent(out) :: ilaggr(:),nlaggr(:)
type(psb_lsspmat_type), intent(out) :: t_prol
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_) :: err_act
character(len=20) :: name='s_base_aggregator_build_tprol'
call psb_erractionsave(err_act)
info = psb_err_missing_override_method_
call psb_errpush(info,name)
goto 9999
call psb_erractionrestore(err_act)
return
9999 call psb_error_handler(err_act)
return
end subroutine amg_s_base_aggregator_build_tprol
!
!> Function mat_bld
!! \memberof amg_s_base_aggregator_type
!! \brief Build prolongator/restrictor/coarse matrix.
!!
!!
!! \param ag The input aggregator object
!! \param parms The auxiliary parameters object
!! \param a The local matrix part
!! \param desc_a The descriptor
!! \param ilaggr Aggregation map
!! \param nlaggr Sizes of ilaggr on all processes
!! \param ac On output the coarse matrix
!! \param t_prol   On input, the tentative prolongator operator
!! \param op_prol  On output, the final prolongator
!! \param op_restr On output, the restrictor operator;
!! in many cases it is the transpose of the prolongator.
!! \param info Return code
!!
subroutine amg_s_base_aggregator_mat_bld(ag,parms,a,desc_a,ilaggr,nlaggr,&
& ac,desc_ac,op_prol,op_restr,t_prol,info)
use psb_base_mod
implicit none
class(amg_s_base_aggregator_type), target, intent(inout) :: ag
type(amg_sml_parms), intent(inout) :: parms
type(psb_sspmat_type), intent(in) :: a
type(psb_desc_type), intent(inout) :: desc_a
integer(psb_lpk_), intent(inout) :: ilaggr(:), nlaggr(:)
type(psb_lsspmat_type), intent(inout) :: t_prol
type(psb_sspmat_type), intent(out) :: op_prol, ac,op_restr
type(psb_desc_type), intent(inout) :: desc_ac
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_) :: err_act
character(len=20) :: name='s_base_aggregator_mat_bld'
call psb_erractionsave(err_act)
info = psb_err_missing_override_method_
call psb_errpush(info,name)
goto 9999
call psb_erractionrestore(err_act)
return
9999 call psb_error_handler(err_act)
return
end subroutine amg_s_base_aggregator_mat_bld
!
!> Function mat_asb
!! \memberof amg_s_base_aggregator_type
!! \brief Assemble prolongator/restrictor/coarse matrix and fix their descriptors.
!!
!!
!! \param ag The input aggregator object
!! \param parms The auxiliary parameters object
!! \param a The local matrix part
!! \param desc_a The descriptor
!! \param ac        The coarse matrix, assembled on output
!! \param op_prol On input, the tentative prolongator operator, on output
!! the final prolongator
!! \param op_restr On output, the restrictor operator;
!! in many cases it is the transpose of the prolongator.
!! \param info Return code
!!
subroutine amg_s_base_aggregator_mat_asb(ag,parms,a,desc_a,&
& ac,desc_ac, op_prol,op_restr,info)
use psb_base_mod
implicit none
class(amg_s_base_aggregator_type), target, intent(inout) :: ag
type(amg_sml_parms), intent(inout) :: parms
type(psb_sspmat_type), intent(in) :: a
type(psb_desc_type), intent(inout) :: desc_a
type(psb_sspmat_type), intent(inout) :: op_prol,ac,op_restr
type(psb_desc_type), intent(inout) :: desc_ac
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_) :: err_act
character(len=20) :: name='s_base_aggregator_mat_asb'
call psb_erractionsave(err_act)
info = psb_err_missing_override_method_
call psb_errpush(info,name)
goto 9999
call psb_erractionrestore(err_act)
return
9999 call psb_error_handler(err_act)
return
end subroutine amg_s_base_aggregator_mat_asb
!
!> Function bld_map
!! \memberof amg_s_base_aggregator_type
!! \brief Build linear map between hierarchy levels
!!
!!
!! \param ag The input aggregator object
!! \param desc_a The fine space descriptor
!! \param desc_ac The coarse space descriptor
!! \param ilaggr Aggregation map vector
!! \param nlaggr Sizes of ilaggr on all processes
!! \param op_prol The prolongator operator
!! \param op_restr The restrictor operator
!! \param map The output map
!! \param info Return code
!!
subroutine amg_s_base_aggregator_bld_map(ag,desc_a,desc_ac,ilaggr,nlaggr,&
& op_restr,op_prol,map,info)
use psb_base_mod
implicit none
class(amg_s_base_aggregator_type), target, intent(inout) :: ag
type(psb_desc_type), intent(in), target :: desc_a, desc_ac
integer(psb_lpk_), intent(inout) :: ilaggr(:), nlaggr(:)
type(psb_sspmat_type), intent(inout) :: op_restr, op_prol
type(psb_slinmap_type), intent(out) :: map
integer(psb_ipk_), intent(out) :: info
integer(psb_ipk_) :: err_act
character(len=20) :: name='s_base_aggregator_bld_map'
call psb_erractionsave(err_act)
!
! Copy the prolongation/restriction matrices into the descriptor map.
! op_restr => PR^T i.e. restriction operator
! op_prol => PR i.e. prolongation operator
!
! WARNING: need to check whether the copy into IOP_RESTR/IOP_PROL
! is safe or not.
!
! This default implementation reuses desc_a/desc_ac through
! pointers in the map structure.
!
map = psb_linmap(psb_map_aggr_,desc_a,&
& desc_ac,op_restr,op_prol,ilaggr,nlaggr)
call psb_erractionrestore(err_act)
return
9999 call psb_error_handler(err_act)
return
end subroutine amg_s_base_aggregator_bld_map
end module amg_s_base_aggregator_mod
|
{"hexsha": "2c07fc4ac0fc943361561dfceb9c387f06296bcd", "size": 19255, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "amgprec/amg_s_base_aggregator_mod.f90", "max_stars_repo_name": "sfilippone/amg4psblas", "max_stars_repo_head_hexsha": "45fabb5214b27d6c67cdf8f6a82277374a059e34", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-04-12T16:39:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T13:35:54.000Z", "max_issues_repo_path": "amgprec/amg_s_base_aggregator_mod.f90", "max_issues_repo_name": "sfilippone/amg4psblas", "max_issues_repo_head_hexsha": "45fabb5214b27d6c67cdf8f6a82277374a059e34", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-19T10:21:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T12:59:38.000Z", "max_forks_repo_path": "amgprec/amg_s_base_aggregator_mod.f90", "max_forks_repo_name": "sfilippone/amg4psblas", "max_forks_repo_head_hexsha": "45fabb5214b27d6c67cdf8f6a82277374a059e34", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9783037475, "max_line_length": 87, "alphanum_fraction": 0.6718254999, "num_tokens": 5433}
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from solarescape_env import SolarescapeEnv
import pygame
from ple import PLE
import random
# Get the environment and extract the number of actions.
game = SolarescapeEnv(width=856, height=856, dt=1)
game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
p = PLE(game, fps=30, frame_skip = 3, num_steps = 1,
force_fps = False, display_screen=False)
# number of discrete actions; used to size the network's output layer
nb_actions = len(game.getActions())
print(nb_actions)
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + (16, 4)))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
# NOTE: keras-rl expects a gym-style environment; we assume here that the PLE
# wrapper `p` exposes a compatible interface.
ENV_NAME = 'solarescape'  # name used only to label the weights file
dqn.fit(p, nb_steps=50000, visualize=True, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(p, nb_episodes=5, visualize=True)
|
{"hexsha": "f7fb00f9c1bd6ed9ba70af36b34cda97083dc27d", "size": 1889, "ext": "py", "lang": "Python", "max_stars_repo_path": "learningAgentKeras.py", "max_stars_repo_name": "kaiobarb/solarescape", "max_stars_repo_head_hexsha": "18f2c432a48e4b2fe9dc116ec7b9190ee5637401", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-28T04:38:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-28T04:38:05.000Z", "max_issues_repo_path": "learningAgentKeras.py", "max_issues_repo_name": "kaiobarb/solarescape", "max_issues_repo_head_hexsha": "18f2c432a48e4b2fe9dc116ec7b9190ee5637401", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learningAgentKeras.py", "max_forks_repo_name": "kaiobarb/solarescape", "max_forks_repo_head_hexsha": "18f2c432a48e4b2fe9dc116ec7b9190ee5637401", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1403508772, "max_line_length": 93, "alphanum_fraction": 0.7617787189, "include": true, "reason": "import numpy", "num_tokens": 485}
|
#!/usr/bin/env python
"""Tests and validates classes from :py:mod:`plastid.genomics.genome_array`,
these being |GenomeArray|, |SparseGenomeArray| and |BAMGenomeArray|,
using test data found in plastid.test.data.
This module additionally contains utilities to generate other test datasets.
To do so, please see the documentation for :py:func:`create_dataset`. Note
that this requires Bowtie, Tophat, and Samtools as external dependencies.
"""
import copy
import tempfile
import os
import subprocess
import functools
import re
import unittest
import warnings
import pysam
import numpy.random
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from Bio import SeqIO
import plastid.util.services.exceptions
from plastid.readers.bed import BED_Reader
from plastid.genomics.genome_array import (
GenomeArray,
SparseGenomeArray,
BigWigGenomeArray,
BAMGenomeArray,
ThreePrimeMapFactory,
FivePrimeMapFactory,
CenterMapFactory,
five_prime_map,
three_prime_map,
center_map,
)
from plastid.genomics.roitools import GenomicSegment, SegmentChain
from plastid.genomics.seqtools import random_seq
from plastid.util.io.openers import NullWriter
from plastid.util.services.decorators import skip_if_abstract
from plastid.util.services.mini2to3 import cStringIO
#===============================================================================
# INDEX: annotations/data used in unit tests and in generation of test datasets
#===============================================================================
# parameters to flesh out unit tests
# these are used by AbstractGenomeArrayHelper.set_class_parameters below
_GENOME_ARRAY_PARAMS = {
"test_class": GenomeArray,
"empty_regions": ["splice", "introns"],
"native_format": "bowtie",
}
_SPARSE_GENOME_ARRAY_PARAMS = {
"test_class": SparseGenomeArray,
"empty_regions": ["splice", "introns"],
"native_format": "bowtie",
}
_BIGWIG_GENOME_ARRAY_PARAMS = {
"test_class": BigWigGenomeArray,
"empty_regions": ["introns"],
"native_format": "bigwig",
}
_BAM_GENOME_ARRAY_PARAMS = {
"test_class": BAMGenomeArray,
"empty_regions": ["introns"],
"native_format": "BAM",
}
# descriptions of mapping configurations that we will use in test datasets
_SAMPLE_PAT = re.compile(r"(center|fiveprime|threeprime)_([0-9]+)")
_SAMPLE_BASES = [
'center_0',
'center_12',
'fiveprime_0',
'fiveprime_15',
'threeprime_0',
'threeprime_15',
]
_GA_MAP_FUNCTIONS = {
"fiveprime": five_prime_map,
"threeprime": three_prime_map,
"center": center_map,
}
_BAM_MAP_RULES = {
"fiveprime_0": FivePrimeMapFactory(),
"threeprime_0": ThreePrimeMapFactory(),
"center_0": CenterMapFactory(),
"fiveprime_15": FivePrimeMapFactory(15),
"threeprime_15": ThreePrimeMapFactory(15),
"center_12": CenterMapFactory(12),
}
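# A minimal usage sketch (hypothetical file name; BAMGenomeArray/set_mapping
# usage assumed from the plastid API exercised in the tests below):
#
#   ga = BAMGenomeArray([pysam.AlignmentFile("alignments.bam")])
#   ga.set_mapping(_BAM_MAP_RULES["fiveprime_15"])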
# constants/names of files in test datasets, to use in test cases
# or to generate using methods below
#
# all filenames are relative to a base_folder that is passed to individual functions
# these files will be created by create_test_dataset(), below
_TEST_FILES = {
"variable_step_fw": os.path.join("wig", "variable_step_fw.wig"),
"variable_step_rc": os.path.join("wig", "variable_step_rc.wig"),
"bedgraph_fw": os.path.join("wig", "bedgraph_fw.wig"),
"bedgraph_rc": os.path.join("wig", "bedgraph_rc.wig"),
"juncs": os.path.join("ebwt", "chrA.juncs"),
"bowtie_index": os.path.join("ebwt", "chrA"),
"bed": os.path.join("bed", "chrA.bed"),
"reads": os.path.join("fasta", "chrA_reads.fa"),
"bowtie": os.path.join("align", "chrA_unspliced.bowtie"),
"bam": os.path.join("align", "chrA_tophat.bam"),
"genome": os.path.join("fasta", "chrA.fa"),
}
# annotation data
TEST_CHR_BED = """chrA 100 1100 unique_plus 0 + -1 -1 0,0,0 1 1000, 0,
chrA 100 1100 unique_minus 0 - -1 -1 0,0,0 1 1000, 0,
chrA 1200 2250 entire_repeat_region_plus 0 + -1 -1 0,0,0 1 1050, 0,
chrA 1200 2250 entire_repeat_region_minus 0 - -1 -1 0,0,0 1 1050, 0,
chrA 1200 1700 repeat_1_plus 0 + -1 -1 0,0,0 1 500, 0,
chrA 1200 1700 repeat_1_minus 0 - -1 -1 0,0,0 1 500, 0,
chrA 1750 2250 repeat_2_plus 0 + -1 -1 0,0,0 1 500, 0,
chrA 1750 2250 repeat_2_minus 0 - -1 -1 0,0,0 1 500, 0,
chrA 2350 2475 splice_plus 100 + -1 -1 0,0,0 2 25,25, 0,100,
chrA 2350 2475 splice_minus 100 - -1 -1 0,0,0 2 25,25, 0,100,
chrA 2375 2450 intron_plus 0 + -1 -1 0,0,0 1 75, 0,
chrA 2375 2450 intron_minus 0 - -1 -1 0,0,0 1 75, 0,""".replace(
" ", "\t"
)
TEST_CHR_JUNCS = """chrA 2374 2450 +
chrA 2374 2450 -""".replace(" ", "\t")
# miscellaneous constants
STRAND_KEYS = {"+": "fw", "-": "rc"}
DEFAULT_READS_PER_REGION = 1000
DEFAULT_READ_LENGTH = 30
#===============================================================================
# INDEX: Helper functions for unit tests and test dataset creation methods below
#===============================================================================
def tearDownModule():
"""Remove test dataset files after unit tests are complete"""
cleanup_resources()
def fetch_regions():
"""Parses test regions of interest for synthetic genomes
Returns
-------
list<SegmentChain>
"""
return list(BED_Reader(cStringIO.StringIO(TEST_CHR_BED), return_type=SegmentChain))
def _read_count_vectors(base_folder):
"""Read count vectors from a synthetic datasets
generated by :py:method:`create_test_dataset`
Parameters
----------
base_folder : str
        path to base folder passed to :py:func:`create_test_dataset`
Returns
-------
dict : dict of numpy.ndarrays of count data
"""
dtmp = {}
for k in _SAMPLE_BASES:
for strand_key in ("fw", "rc"):
dtmp["%s_%s" % (k, strand_key)] = numpy.loadtxt(
os.path.join(base_folder, "count_vectors", "%s_%s.txt" % (k, strand_key))
)
return dtmp
def _read_bowtie_files_to_genome_arrays(base_folder, test_class=GenomeArray):
"""Construct |GenomeArray| s from bowtie files
Parameters
----------
base_folder : str
path to base folder passed to create_test_dataset()
test_class : class
Subclass of |MutableGenomeArray| (e.g. |GenomeArray| or |SparseGenomeArray| to test)
Returns
-------
dict : dict of |GenomeArray| s of mapped read alignments from bowtie
"""
gnds = {}
for k in _SAMPLE_BASES:
mapping, offset = _SAMPLE_PAT.search(k).groups()
trans_key = "nibble" if mapping == "center" else "offset"
trans_args = {trans_key: int(offset)}
gnds[k] = test_class()
with open(os.path.join(base_folder, _TEST_FILES["bowtie"])) as fh:
gnds[k].add_from_bowtie(fh, _GA_MAP_FUNCTIONS[k.split("_")[0]], **trans_args)
return gnds
def _get_ivc_numpy_counts(ivc, count_vec):
"""Fetches appropriately-spliced counts at each position in an ROI from a numpy array
Parameters
----------
ivc : |SegmentChain|
SegmentChain describing region of interest
count_vec : numpy.ndarray
numpy.ndarray, in coordinates matching those of ivc
Returns
-------
numpy.ndarray : numpy.ndarray of counts each each position in ivc
"""
counts = []
for iv in ivc:
counts.extend(count_vec[iv.start:iv.end])
if ivc.spanning_segment.strand == "-":
counts = counts[::-1]
return numpy.array(counts)
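# Example: for a minus-strand chain covering chrA:0-2 and chrA:5-7 over
# count_vec = numpy.arange(10), the spliced counts [0, 1, 5, 6] are reversed
# to [6, 5, 1, 0] so they read 5'->3' along the chain.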
#===============================================================================
# INDEX: unittest suites
#===============================================================================
class AbstractGenomeArrayHelper(unittest.TestCase):
"""Abstract base class for various types of |AbstractGenomeArray| test cases"""
set_up = False
@staticmethod
def set_class_parameters(
cls, params, test_folder=resource_filename("plastid", "test/data/mini"), tol=1e-8
):
"""Set class parameters on the creation of the first instance.
This is a bit of a hack because we need to set class parameters.
We can't do this in a ``setUpClass`` method, because ``setUpClass`` only
accepts a single parameter (the class). We don't want to do this in
``__init__`` either, because unittest calls ``__init__`` once per test run,
and these operations are expensive. So, instead we define this method,
and call it from ``__init__`` if and only if ``cls.set_up == False``
Parameters
----------
cls : class
class that is a subclass of :py:class:`unittest.TestCase`,
to which parameters will be appended
params : dict
Parameters specific to the set-up of test suites for specific
types of GenomeArrays
test_folder : str or :py:class:`Resource`
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
cls.test_folder = test_folder
cls.tol = tol
cls.test_class = params["test_class"]
cls.native_format = params["native_format"]
cls.count_vecs = _read_count_vectors(cls.test_folder)
cls.regions = fetch_regions()
cls.region_classes = {
"unique": [X for X in cls.regions if "unique" in X.get_name()],
"repeat":
[X for X in cls.regions if "entire" not in X.get_name() and "repeat" in X.get_name()],
"introns": [X for X in cls.regions if "intron" in X.get_name()],
"splice": [X for X in cls.regions if "splice" in X.get_name()],
"entire": [X for X in cls.regions if "entire" in X.get_name()],
}
cls.region_classes["empty"] = []
cls.empty_names = []
for k in params["empty_regions"]:
my_regions = cls.region_classes[k]
cls.region_classes["empty"].extend(my_regions)
cls.empty_names.extend([X.get_name() for X in my_regions])
cls.expected_unnorm_sum = 0
#for region in set(cls.regions) - set(cls.region_classes["empty"]) - set(cls.region_classes["entire"]):
read_regions = [
X for X in cls.regions
if all([X.get_name() not in cls.empty_names, "entire" not in X.get_name()])
]
for region in read_regions:
vec_key = "fw" if region.strand == "+" else "rc"
cls.expected_unnorm_sum += _get_ivc_numpy_counts(
region, cls.count_vecs["fiveprime_0_%s" % vec_key]
).sum()
cls.set_up = True
def __init__(
self,
methodName='runTest',
params={},
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
the first instance is made, and then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
|AbstractGenomeArray| subclasses. Don't change these
test_folder : str or :py:class:`Resource`
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
unittest.TestCase.__init__(self, methodName=methodName)
# only do setup if __init__ is called by a subclass
if "Abstract" not in self.__class__.__name__:
            if not self.__class__.set_up:
AbstractGenomeArrayHelper.set_class_parameters(
self.__class__, params=params, test_folder=test_folder, tol=tol
)
@skip_if_abstract
def test_chroms(self):
for v in self.gnds.values():
self.assertEqual(set(v.chroms()), set(["chrA"]))
@skip_if_abstract
def test_strands(self):
possible_strands = set(["+", "-", "."])
for v in self.gnds.values():
self.assertGreaterEqual(len(set(v.strands()) & possible_strands), 0)
@skip_if_abstract
def test_test_class(self):
# Assure all genome arrays tested are of correct subclass
for k, v in self.gnds.items():
self.assertTrue(
isinstance(v, self.test_class),
"Test %s: instance is of wrong class (expected: %s, found %s)" %
(k, self.test_class.__name__, v.__class__.__name__)
)
@skip_if_abstract
def test_native_import_positionwise_equality_unique_regions(self):
for k in _SAMPLE_BASES:
for region in self.region_classes["unique"]:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = numpy.array(region.get_counts(self.gnds[k]))
self.assertGreater(gnd_counts.sum(), 0, "Region is empty in sample %s" % k)
known_counts = _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, strand_key)]
)
max_err = max(abs(gnd_counts - known_counts))
msg1 = "Positionwise count difference (%s) exceeded tolerance (%s) for '%s' file import for sample test '%s'" % (
self.tol, max_err, self.native_format, k
)
self.assertLessEqual(max_err, self.tol, msg1)
sum_diff = abs(known_counts.sum() - gnd_counts.sum())
msg2 = "Error in difference of total counts (%s) exceeded tolerance (%s) for '%s' import for sample test %s" % (
sum_diff, self.tol, self.native_format, k
)
self.assertLessEqual(sum_diff, self.tol, msg2)
@skip_if_abstract
def test_native_import_positionwise_equality_repeat_regions(self):
# test sums of position-wise vectors for repeat regions
plus_repeat = [X for X in self.region_classes["repeat"] if X.spanning_segment.strand == "+"]
minus_repeat = [
X for X in self.region_classes["repeat"] if X.spanning_segment.strand == "-"
]
lengths = set([X.length for X in plus_repeat + minus_repeat])
self.assertEqual(len(lengths), 1)
for k in _SAMPLE_BASES:
plus_vec = numpy.zeros(plus_repeat[0].length)
minus_vec = numpy.zeros(plus_repeat[0].length)
known_plus_vec = numpy.zeros(plus_repeat[0].length)
known_minus_vec = numpy.zeros(plus_repeat[0].length)
for region in plus_repeat:
plus_vec += region.get_counts(self.gnds[k])
known_plus_vec += _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, "fw")]
)
for region in minus_repeat:
minus_vec += region.get_counts(self.gnds[k])
known_minus_vec += _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, "rc")]
)
self.assertGreater(plus_vec.sum(), 0)
self.assertGreater(minus_vec.sum(), 0)
self.assertTrue(
(abs(known_plus_vec - plus_vec) <= self.tol).all(),
"Positionwise count difference exceeded tolerance %s for %s import on sample test %s on plus strand"
% (self.tol, self.native_format, k)
)
self.assertTrue(
(abs(known_minus_vec - minus_vec) <= self.tol).all(),
"Positionwise count difference exceeded tolerance %s for %s import for sample test %s on minus strand"
% (self.tol, self.native_format, k)
)
@skip_if_abstract
def test_native_import_empty_regions(self):
# test regions that should be empty (e.g. introns and splicing)
for k in _SAMPLE_BASES:
for region in self.region_classes["empty"]:
self.assertEqual(
sum(region.get_counts(self.gnds[k])), 0,
"Found counts in region that should be empty for sample test %s" % k
)
@skip_if_abstract
def variablestep_and_bedgraph_export_helper(
self, wiggle_type, export_function, input_class=None, **kwargs
):
"""Helper function to evaluate tests on variable step wiggle or BED export
Parameters
----------
wiggle_type : str
Type of wiggle file. "variable_step" or "bedgraph"
export_function : function
unbound method defining export type (e.g. GenomeArray.to_variable_step, BAMGenomeArray.to_bedgraph)
input_class : subclass of |MutableAbstractGenomeArray| or None
Class into which exported wiggle or bedgraph files will be read. If None, defaults to self.test_class
kwargs : keyword arguments
"""
if input_class is None:
input_class = self.test_class
for k, v in self.gnds.items():
fw_out = tempfile.NamedTemporaryFile(mode="w", delete=False)
rc_out = tempfile.NamedTemporaryFile(mode="w", delete=False)
export_function(v, fw_out, "test", "+", **kwargs)
export_function(v, rc_out, "test", "-", **kwargs)
fw_out.close()
rc_out.close()
new_gnd = input_class()
with open(fw_out.name) as fh:
new_gnd.add_from_wiggle(fh, "+")
with open(rc_out.name) as fh:
new_gnd.add_from_wiggle(fh, "-")
self.assertGreater(v.lengths()["chrA"], 0)
self.assertGreater(new_gnd.lengths()["chrA"], 0)
ivplus = GenomicSegment("chrA", 0, v.lengths()["chrA"], "+")
ivminus = GenomicSegment("chrA", 0, v.lengths()["chrA"], "-")
# test equality of what was exported with current state of GenomeArray
self.assertTrue(
                (abs(new_gnd[ivplus] - v[ivplus]) <= self.tol).all(),
"%s wiggle output on plus strand failed positionwise tolerance %s for test %s" %
(wiggle_type, self.tol, k)
)
self.assertGreater(
new_gnd[ivplus].sum(), 0,
"No counts found for %s reimport test %s" % (wiggle_type, k)
)
self.assertTrue(
                (abs(new_gnd[ivminus] - v[ivminus]) <= self.tol).all(),
"%s wiggle output on minus strand failed positionwise tolerance %s for test %s" %
(wiggle_type, self.tol, k)
)
self.assertGreater(
new_gnd[ivminus].sum(), 0,
"No counts found for %s reimport test %s" % (wiggle_type, k)
)
# ground-truth test against numpy arrays for unique regions
for region in self.region_classes["unique"]:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = numpy.array(region.get_counts(new_gnd))
self.assertGreater(
gnd_counts.sum(), 0, "Reimported region is empty in sample %s" % k
)
known_counts = _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, strand_key)]
)
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference (%s) exceeded tolerance (%s) for %s reimport after export from class '%s' for sample '%s'"
                    % (max_err, self.tol, self.native_format, self.test_class, k)
)
os.remove(fw_out.name)
os.remove(rc_out.name)
@skip_if_abstract
def test_unnormalized_sum(self):
for k, v in self.gnds.items():
v.set_normalize(False)
found = v.sum()
expected = self.expected_unnorm_sum
err = abs(found - expected)
err_msg = "Observed error (%s) in unnormalized sum (observed %s; expected %s) greater than tolerance (%s) for sample '%s'" % (
err, found, expected, self.tol, k
)
self.assertLessEqual(err, self.tol, err_msg)
@skip_if_abstract
def test_normalize_not_change_sum(self):
for k, v in self.gnds.items():
v.set_normalize(True)
found_sum = v.sum()
err_msg = "Normalize flag changed sum to %s from %s for sample '%s'" % (
found_sum, self.expected_unnorm_sum, k
)
err = abs(found_sum - self.expected_unnorm_sum)
self.assertLessEqual(err, self.tol, err_msg)
v.set_normalize(False)
@skip_if_abstract
def test_set_and_reset_sum(self):
expected_unnorm_sum2 = 50000
for k, v in self.gnds.items():
v.set_sum(expected_unnorm_sum2)
v.set_normalize(False)
found = v.sum()
err = abs(found - expected_unnorm_sum2)
err_msg = "Observed error (%s) in sample '%s' set unnormalized sum (observed %s; expected %s) greater than tolerance %s" % (
err, k, found, expected_unnorm_sum2, self.tol
)
self.assertLessEqual(err, self.tol, err_msg)
v.set_normalize(True)
found = v.sum()
err = abs(found - expected_unnorm_sum2)
err_msg = "Observed error (%s) in sample '%s' set normalized sum (observed %s; expected %s) greater than tolerance %s" % (
err, k, found, expected_unnorm_sum2, self.tol
)
self.assertLessEqual(err, self.tol, err_msg)
v.set_normalize(False)
v.reset_sum()
found = v.sum()
err = abs(found - self.expected_unnorm_sum)
err_msg = "Observed error (%s) in sample '%s' reset sum (observed %s; expected %s) greater than tolerance %s" % (
err, k, found, self.expected_unnorm_sum, self.tol
)
self.assertLessEqual(err, self.tol, err_msg)
@skip_if_abstract
def test_regionwise_normalize_and_sum(self):
expected_unnorm_sum2 = 50000
for k, v in self.gnds.items():
v.set_normalize(False)
v.reset_sum()
# add an order of magnitude to account for summing
tol = self.tol * 10
# exclude repeat regions, because those will align differently than they were generated
# remove "empty" also, because this will include spliced regions for some tests,
# as necessary
nonrepeat_nonempty = [
X for X in self.regions
if all(["repeat" not in X.get_name(),
X.get_name() not in self.empty_names])
]
for region in nonrepeat_nonempty: #set(self.regions) - set(self.region_classes["repeat"]) - set(self.region_classes["empty"]):
# Make sure baseline number is ok
found_region_sum = sum(region.get_counts(v))
expected_region_unnorm = _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, STRAND_KEYS[region.strand])]
).sum()
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
# Test normalize
v.set_normalize(True)
expected_region_norm = float(expected_region_unnorm) / v.sum() * 10**6
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_norm)
self.assertLessEqual(
err, tol,
"Found normalized region sum (%s) different from expected (%s) more than error (observed %s; tolerance %s) for sample '%s'"
% (found_region_sum, expected_region_norm, err, tol, k)
)
# Test reversibility
v.set_normalize(False)
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found re-unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
# Set sum, no normalization
v.set_sum(expected_unnorm_sum2)
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found post-global-sum-set unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
# Add normalization on top of set sum
v.set_normalize(True)
expected_region_norm2 = float(expected_region_unnorm) / expected_unnorm_sum2 * 10**6
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_norm2)
self.assertLessEqual(
err, tol,
"Found post-global-sum-set normalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_norm2, tol)
)
# Reset sum, keep normalization
v.reset_sum()
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_norm)
self.assertLessEqual(
err, tol,
"Found post-reset normalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_norm, tol)
)
# Revert all
v.set_normalize(False)
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
@skip_if_abstract
def test_get_genomicsegment_roi_order_false(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
seg = region.spanning_segment
strand_key = STRAND_KEYS[region.spanning_segment.strand]
#gnd_counts = self.gnds[k].__getitem__(seg,roi_order=False)
gnd_counts = self.gnds[k].get(seg, roi_order=False)
known_counts = self.count_vecs["%s_%s" % (k, strand_key)][seg.start:seg.end]
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s __getitem__ with roi_order==False for sample test %s"
% (self.tol, max_err, self.native_format, k)
)
@skip_if_abstract
def test_get_genomicsegment_roi_order_true(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
seg = region.spanning_segment
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = self.gnds[k].get(seg, roi_order=True)
known_counts = self.count_vecs["%s_%s" % (k, strand_key)][seg.start:seg.end]
if seg.strand == "-":
known_counts = known_counts[::-1]
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s __getitem__ with roi_order==True for sample test %s"
% (self.tol, max_err, self.native_format, k)
)
@skip_if_abstract
def test_getitem_genomicsegment(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
seg = region.spanning_segment
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = self.gnds[k].__getitem__(seg)
known_counts = self.count_vecs["%s_%s" % (k, strand_key)][seg.start:seg.end]
if seg.strand == "-":
known_counts = known_counts[::-1]
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s __getitem__ with roi_order==True for sample test %s"
% (self.tol, max_err, self.native_format, k)
)
@skip_if_abstract
def test_getitem_segmentchain(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = self.gnds[k][region] # test
self.assertGreater(gnd_counts.sum(), 0, "Region is empty in sample %s" % k)
known_counts = _get_ivc_numpy_counts(region, self.count_vecs["%s_%s" % (k, strand_key)])
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s import for sample test %s"
% (self.tol, max_err, self.native_format, k)
)
class AbstractExportableGenomeArrayHelper(AbstractGenomeArrayHelper):
@skip_if_abstract
def test_variablestep_export(self):
self.variablestep_and_bedgraph_export_helper(
"variable_step", self.test_class.to_variable_step
)
@skip_if_abstract
def test_bedgraph_export(self):
self.variablestep_and_bedgraph_export_helper("bedgraph", self.test_class.to_bedgraph)
@attr(test="unit")
@attr(speed="slow")
class TestGenomeArray(AbstractExportableGenomeArrayHelper):
"""Test case for :py:class:`GenomeArray`"""
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
        the first instance is made, with the results then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
|AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
AbstractGenomeArrayHelper.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
        if not self.__class__.has_gnds:
self.__class__.gnds = _read_bowtie_files_to_genome_arrays(
self.test_folder, self.test_class
)
self.__class__.has_gnds = True
#TestGenomeArray.setUpClassOnlyOnce()
def test_setitem_genomicsegment_scalar(self):
ga = GenomeArray({"chrA": 2000})
segplus = GenomicSegment("chrA", 50, 100, "+")
segminus = GenomicSegment("chrA", 50, 100, "-")
# scalar set
ga[segplus] = 52
ga[segminus] = 342
        self.assertTrue(
            (ga._chroms["chrA"]["+"][50:100] == 52).all(),
            "%s failed scalar genomicsegment __setitem__ for plus strand." % self.test_class.__name__
        )
        self.assertTrue(
            (ga._chroms["chrA"]["-"][50:100] == 342).all(),
            "%s failed scalar genomicsegment __setitem__ for minus strand." % self.test_class.__name__
        )
self.assertEqual(ga.sum(), 52 * len(segplus) + 342 * len(segminus))
def test_setitem_genomicsegment_vector(self):
ga = GenomeArray({"chrA": 2000})
segplus = GenomicSegment("chrA", 50, 100, "+")
segminus = GenomicSegment("chrA", 50, 100, "-")
# vector set
r1 = numpy.random.randint(0, high=242, size=50)
r2 = numpy.random.randint(0, high=242, size=50)
ga[segplus] = r1
ga[segminus] = r2
        self.assertTrue(
            (ga._chroms["chrA"]["+"][50:100] == r1).all(),
            "%s failed vector genomicsegment __setitem__ for plus strand." % self.test_class.__name__
        )
        self.assertTrue(
            (ga._chroms["chrA"]["-"][50:100] == r2[::-1]).all(),
            "%s failed vector genomicsegment __setitem__ for minus strand." % self.test_class.__name__
        )
self.assertEqual(ga.sum(), r1.sum() + r2.sum())
def test_setitem_segmentchain_scalar(self):
ga = GenomeArray({"chrA": 2000})
pluschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "+"),
GenomicSegment("chrA", 150, 732, "+"),
GenomicSegment("chrA", 1800, 2500, "+"),
)
minuschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "-"),
GenomicSegment("chrA", 150, 732, "-"),
GenomicSegment("chrA", 1800, 2500, "-"),
)
ga[pluschain] = 31
ga[minuschain] = 424
for seg in pluschain:
self.assertTrue((ga._chroms[seg.chrom][seg.strand][seg.start:seg.end] == 31).all())
for seg in minuschain:
self.assertTrue((ga._chroms[seg.chrom][seg.strand][seg.start:seg.end] == 424).all())
self.assertEqual(ga.sum(), 31 * pluschain.length + 424 * minuschain.length)
def test_setitem_segmentchain_vector(self):
ga = GenomeArray({"chrA": 2000})
pluschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "+"),
GenomicSegment("chrA", 150, 732, "+"),
GenomicSegment("chrA", 1800, 2500, "+"),
)
minuschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "-"),
GenomicSegment("chrA", 150, 732, "-"),
GenomicSegment("chrA", 1800, 2500, "-"),
)
plusvec = numpy.random.randint(0, high=250, size=pluschain.length)
minusvec = numpy.random.randint(0, high=250, size=minuschain.length)
ga[pluschain] = plusvec
ga[minuschain] = minusvec
x = 0
for seg in pluschain:
subvec = ga._chroms["chrA"]["+"][seg.start:seg.end]
self.assertTrue((subvec == plusvec[x:x + len(subvec)]).all())
x += len(subvec)
x = 0
for seg in minuschain:
subvec = ga._chroms["chrA"]["-"][seg.start:seg.end][::-1]
self.assertTrue(
(subvec == minusvec[len(minusvec) - x - len(subvec):len(minusvec) - x]).all()
)
x += len(subvec)
self.assertEqual(ga.sum(), plusvec.sum() + minusvec.sum())
def variablestep_and_bed_import_helper(self, wiggle_type):
"""Helper function to evaluate tests on variable step wiggle or BEDgraph import
Parameters
----------
wiggle_type : str
Type of wiggle file. "variable_step" or "bedgraph"
"""
gnd = self.test_class()
with open(os.path.join(self.test_folder, _TEST_FILES["%s_%s" % (wiggle_type, "fw")])) as fh:
gnd.add_from_wiggle(fh, "+")
with open(os.path.join(self.test_folder, _TEST_FILES["%s_%s" % (wiggle_type, "rc")])) as fh:
gnd.add_from_wiggle(fh, "-")
# Make sure imported counts are nonzero
self.assertGreater(gnd.sum(), 0, "Import of %s yielded no counts!" % wiggle_type)
chrA_len = gnd.lengths()["chrA"]
for strand, trackstub, label in [("+", "fw", "plus"), ("-", "rc", "minus")]:
my_vec = self.count_vecs["fiveprime_0_%s" % trackstub]
vec_len = len(my_vec)
empty_iv = GenomicSegment("chrA", vec_len, chrA_len, strand)
nonempty_iv = GenomicSegment("chrA", 0, vec_len, strand)
nonempty_vec = gnd.get(nonempty_iv, roi_order=False)
# make sure count vector has requisite counts
self.assertGreater(my_vec.sum(), 0)
# make sure sums are equal
            self.assertEqual(my_vec.sum(), nonempty_vec.sum())
# make sure all regions after count vector are empty
            self.assertEqual(
gnd[empty_iv].sum(), 0, "Found counts in region that should be empty."
)
# make sure all positions in regions up to count vector are correct
max_err = abs(my_vec - nonempty_vec).max()
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference %s exceeded tolerance %s for wiggle import on %s strand"
% (max_err, self.tol, label)
)
def test_bedgraph_import(self):
self.variablestep_and_bed_import_helper("bedgraph")
def test_variablestep_import(self):
self.variablestep_and_bed_import_helper("variable_step")
def test_genome_wide_scalar_plus_equals_sum(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd += 5
self.assertEqual(gnd.sum(), 5 * expected_length)
gnd -= 5
self.assertEqual(gnd.sum(), 0)
def test_genome_wide_scalar_plus_equals_not_change_length(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd += 5
self.assertEqual(len(gnd), expected_length)
def test_genome_wide_scalar_plus_sum(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
# add scalar
gnd2 = gnd + 5
self.assertEqual(gnd2.sum(), 5 * expected_length)
self.assertEqual(gnd.sum(), 0)
# add scalar to occupied gnd
gnd3 = gnd2 + 1
self.assertEqual(gnd3.sum(), 6 * expected_length)
def test_genome_scalar_times_equals_sum(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd *= 5
self.assertEqual(gnd.sum(), 0)
gnd += 1
gnd *= 5
self.assertEqual(gnd.sum(), 5 * expected_length)
def test_genome_wide_scalar_times_equals_not_change_length(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd *= 5
self.assertEqual(len(gnd), expected_length)
def test_genome_wide_scalar_times(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
gnd += 1
self.assertEqual(gnd.sum(), expected_length)
gnd2 = gnd * 2
self.assertEqual(gnd2.sum(), 2 * expected_length)
self.assertEqual(gnd.sum(), expected_length)
def test_genome_wide_array_add_same_size(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd1 = self.test_class(chroms)
gnd2 = self.test_class(chroms)
gnd1 += 1
gnd2 += 3
gnd3 = gnd1 + gnd2
        self.assertEqual(gnd3.sum(), gnd2.sum() + gnd1.sum())
iv1plus = GenomicSegment("chrA", 0, 1000, "+")
iv1minus = GenomicSegment("chrA", 0, 1000, "+")
self.assertTrue((gnd3[iv1plus] == gnd2[iv1plus] + gnd1[iv1plus]).all())
self.assertTrue((gnd3[iv1minus] == gnd2[iv1minus] + gnd1[iv1minus]).all())
def test_genome_wide_array_multiply_same_size(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd1 = self.test_class(chroms)
gnd2 = self.test_class(chroms)
gnd1 += 2
gnd2 += 3
gnd3 = gnd1 * gnd2
iv1plus = GenomicSegment("chrA", 0, 1000, "+")
iv1minus = GenomicSegment("chrA", 0, 1000, "+")
self.assertTrue((gnd3[iv1plus] == gnd2[iv1plus] * gnd1[iv1plus]).all())
self.assertTrue((gnd3[iv1minus] == gnd2[iv1minus] * gnd1[iv1minus]).all())
def test_iadd_no_normalize(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd = self.test_class(chroms)
gnd.set_normalize(False)
self.assertEqual(0, gnd.sum())
iv1plus = GenomicSegment("chrA", 0, 1000, "+")
iv2plus = GenomicSegment("chrA", 0, 500, "+")
gnd[iv1plus] += 1
self.assertEqual(1000, gnd.sum())
gnd[iv1plus] += 1
self.assertEqual(2000, gnd.sum())
gnd[iv2plus] += 1
self.assertEqual(2500, gnd.sum())
def test_iadd_with_normalize_raises_warning(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd = self.test_class(chroms)
gnd.set_normalize(True)
def my_func(ga):
ga.set_normalize(True)
ga[GenomicSegment("chrA", 0, 1000, "+")] += 5
# manually reset registry before test
plastid.util.services.exceptions.pl_once_registry = {}
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
my_func(gnd)
got_warning = False
for w in warns:
if "turning off normalization" in str(w.message):
got_warning = True
self.assertTrue(got_warning)
def test_eq(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd1 = self.test_class(chroms)
gnd2 = self.test_class(chroms)
self.assertEqual(gnd1, gnd2)
# diff chrom nonzero positions
gnd2[GenomicSegment("chrC", 500, 1000, "-")] = 200
self.assertNotEqual(gnd1, gnd2)
# same chroms, nonzero positions, and values
gnd1[GenomicSegment("chrC", 500, 1000, "-")] = 200
self.assertEqual(gnd1, gnd2)
# same chroms and nonzero positions, diff values
gnd1[GenomicSegment("chrC", 500, 1000, "-")] += 200
self.assertNotEqual(gnd1, gnd2)
def test_setters_and_getters(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd = self.test_class(chroms)
#generate random read data set
for chr_name, chr_len in chroms.items():
for strand in ("+", "-"):
num_iters = numpy.random.randint(50, 100) #000)
starts = numpy.random.randint(0, chr_len - 50, size=num_iters)
ends = [X + numpy.random.randint(0, 50) for X in starts]
vals = numpy.random.randint(0, 100, size=num_iters)
for i in range(num_iters):
iv = GenomicSegment(chr_name, int(starts[i]), int(ends[i]), strand)
gnd[iv] = vals[i]
        self.assertEqual(
len(gnd), 2 * sum(chroms.values()),
"Chromosome lengths incorrect: %s vs %s" % (len(gnd), 2 * sum(chroms.values()))
)
# auto-grow
iv1 = GenomicSegment("chrA", 10000, 11000, "+")
iv2 = GenomicSegment("chrA", 10500, 11000, "+")
iv3 = GenomicSegment("chrA", int(5 * 1e5) + 10500, int(5 * 1e5) + 11000, "+")
iv4 = GenomicSegment("chrB", int(5 * 1e5) + 10500, int(5 * 1e5) + 11000, "+")
gnd[iv1] = 1
        self.assertEqual(sum(gnd[iv1]), 1000, "Auto-grow failed during set")
        gnd[iv2] += 1
        self.assertEqual(sum(gnd[iv1]), 1500, "+= failed during set")
        self.assertEqual(sum(gnd[iv2]), 1000)
        self.assertGreater(gnd.lengths()["chrA"], 1000, "Auto-grow failed chrA")
        gnd[iv3] += 1
        self.assertEqual(sum(gnd[iv3]), 500, "+= failed during set")
        self.assertEqual(sum(gnd[iv4]), 0, "Found counts where none were expected")
# setters & getters
iv1 = GenomicSegment("chrA", 200, 500, "+")
oldvals = copy.deepcopy(gnd[iv1])
gnd[iv1] = 5
newvals = copy.deepcopy(gnd[iv1])
self.assertTrue((oldvals != newvals).any(), "Set failed")
self.assertEqual(newvals.sum(), 5 * len(newvals))
self.assertEqual(newvals.sum(), 5 * len(iv1))
newvals2 = copy.deepcopy(gnd[iv1])
self.assertTrue((newvals2 == newvals).all(), "Set failed")
newvals = newvals2 = None
gnd[iv1] = oldvals
new_old = gnd[iv1]
self.assertTrue((new_old == oldvals).all(), "Set valed")
# scalar add
gnd_plus5 = gnd + 5
for iv in (iv1, iv2, iv3, iv4):
self.assertTrue(
(gnd_plus5[iv] == (gnd[iv] + 5)).all(),
"Scalar addition globally failed interval test"
)
# FIXME- what is len of a sparse array?
diff = abs(gnd_plus5.sum() - (gnd.sum() + 5 * len(gnd)))
self.assertLess(
diff, self.tol,
"Error in genome-wide scalar addition (%s) exceeded tolerance %s. Raw vals: %s vs %s " %
(diff, self.tol, gnd_plus5.sum(), gnd.sum() + 5 * len(gnd))
)
# scalar multiply
gnd3 = gnd * 3
for iv in (iv1, iv2, iv3, iv4):
self.assertTrue(
(gnd3[iv] == (gnd[iv] * 3)).all(),
"Scalar multip.lication globally failed interal test"
)
        quotient = abs(1.0 - gnd3.sum() / (3 * gnd.sum()))
        self.assertLess(
            quotient, self.tol,
            "Error in scalar multiplication (%s) exceeded tolerance %s" % (quotient, self.tol)
)
# genome-wide multiply
gnd4 = gnd + gnd3
gndmul = gnd3 * gnd4
for iv in (iv1, iv2, iv3, iv4):
self.assertTrue(
                (abs(gndmul[iv] - gnd3[iv] * gnd4[iv]) <= self.tol).all(),
                "Error in genome-wide multiply exceeded tolerance %s" % self.tol
)
# genome-wide add
is_ok = True
for iv in (iv1, iv2, iv3):
is_ok &= sum(gnd4[iv]) > 0
self.assertTrue(
(gnd4[iv] == gnd[iv] + gnd3[iv]).all(),
"Error in genome-wide addition exceeded tolerance %s" % self.tol
)
self.assertGreater(sum(gnd4[iv]), 0)
def test_nonzero(self):
excluded_set = set([])
for region in self.region_classes["repeat"] + self.region_classes["splice"]:
excluded_set |= region.get_position_set()
for k, v in self.gnds.items():
nz_dict = v.nonzero()
for strand in v.strands():
strand_key = STRAND_KEYS[strand]
expected = self.count_vecs["%s_%s" % (k, strand_key)].nonzero()[0]
found = nz_dict["chrA"][strand]
self.assertEqual(set(expected) - excluded_set, set(found) - excluded_set)
@attr(test="unit")
@attr(speed="slow")
class TestSparseGenomeArray(TestGenomeArray):
"""Test suite for |SparseGenomeArray|"""
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_SPARSE_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
        the first instance is made, with the results then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
            |AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
TestGenomeArray.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
@attr(test="unit")
@attr(speed="slow")
class TestBigWigGenomeArray(AbstractGenomeArrayHelper):
"""Test suite for |SparseGenomeArray|"""
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_BIGWIG_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-3
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
        the first instance is made, with the results then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
            |AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
AbstractGenomeArrayHelper.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
        if not self.__class__.has_gnds:
TestBigWigGenomeArray.gnds = TestBigWigGenomeArray.read_bigwig_files()
self.__class__.has_gnds = True
@staticmethod
def read_bigwig_files():
"""Read bigwig files into a dictionary
"""
dtmp = {}
for k in _SAMPLE_BASES:
dtmp[k] = ga = BigWigGenomeArray(fill=0)
fw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_fw.bw" % k)
rc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_rc.bw" % k)
ga.add_from_bigwig(fw, "+")
ga.add_from_bigwig(rc, "-")
return dtmp
def test_multiple_same_strand_sum(self):
# should see sum double
bigwigfile = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_fw.bw")
bw = BigWigGenomeArray(fill=0)
bw.add_from_bigwig(bigwigfile, "+")
self.assertLessEqual(abs(bw.sum() - 4000), self.tol)
bw.add_from_bigwig(bigwigfile, "+")
self.assertLessEqual(abs(bw.sum() - 8000), self.tol)
bw.add_from_bigwig(bigwigfile, "+")
self.assertLessEqual(abs(bw.sum() - 12000), self.tol)
def test_multiple_same_strand_fetch(self):
bigwigfw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_fw.bw")
bigwigrc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_rc.bw")
wigfw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_fw.wig")
wigrc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_rc.wig")
bw = BigWigGenomeArray(fill=0)
bw.add_from_bigwig(bigwigfw, "+")
bw.add_from_bigwig(bigwigfw, "+")
bw.add_from_bigwig(bigwigrc, "-")
bw.add_from_bigwig(bigwigrc, "-")
ga = GenomeArray(bw.lengths())
with open(wigfw) as fh:
ga.add_from_wiggle(fh, "+")
with open(wigrc) as fh:
ga.add_from_wiggle(fh, "-")
for chrom, length in bw.lengths().items():
for strand in bw.strands():
seg = GenomicSegment(chrom, 0, length, strand)
maxdiff = abs(bw[seg] - 2 * ga[seg]).max()
msg = "Maximum difference for multiple_strand_fetch (%s) exceeds tolerance (%s)" % (
maxdiff, self.tol
)
self.assertLessEqual(maxdiff, self.tol, msg)
def test_to_genome_array(self):
for test, orig in self.gnds.items():
fw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_fw.wig" % test)
rc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_rc.wig" % test)
expected = GenomeArray()
with open(fw) as fh:
expected.add_from_wiggle(fh, "+")
with open(rc) as fh:
expected.add_from_wiggle(fh, "-")
found = orig.to_genome_array()
for chrom, length in expected.lengths().items():
for strand in ("+", "-"):
seg = GenomicSegment(chrom, 0, length, strand)
diffvec = abs(orig[seg] - found[seg])
diffmax = diffvec.max()
msg1 = "Maximum difference between exported GenomeArray and BigWigGenomeArray (%s) exceeds tolerance (%s) for test '%s' strand '%s'" % (
diffmax, self.tol, test, strand
)
self.assertLessEqual(diffmax, self.tol, msg1)
for chrom, length in expected.lengths().items():
for strand in ("+", "-"):
seg = GenomicSegment(chrom, 0, length, strand)
diffvec = abs(expected[seg] - found[seg])
diffmax = diffvec.max()
msg1 = "Maximum difference between exported GenomeArray and wiggle-imported array (%s) exceeds tolerance (%s) for test '%s' strand '%s'" % (
diffmax, self.tol, test, strand
)
self.assertLessEqual(diffmax, self.tol, msg1)
class FakeDict(object):
"""Creates a dictionary-like object that provies dictionary-like access
to a BAMGenomeArray under various mapping rules, as if it were a collection
of separate GenomeArrays. This is only a convenience class to allow us to
re-use functions in the |AbstractGenomeArrayHelper| test suite in
|TestBAMGenomeArray|
"""
def __init__(self, bga, map_functions=_BAM_MAP_RULES):
"""Create a FakeDict
Parameters
----------
bga : |BAMGenomeArray|
map_functions : dict
Dictionary mapping descriptive names to mapping functions,
such as those made by :py:func:`plastid.genomics.genome_array.FivePrimeMapFactory`
"""
self.bga = bga
self.map_functions = map_functions
def __getitem__(self, key):
self.bga.set_mapping(self.map_functions[key])
return self.bga
def items(self):
for k in self.map_functions:
yield (k, self[k])
def values(self):
        # must use the key to trigger map setting in __getitem__
for k in self.map_functions:
yield self[k]
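# Illustrative sketch (not part of the original suite; `bga` is hypothetical):
# FakeDict lets a single BAMGenomeArray masquerade as a dict of genome arrays,
# one per mapping rule, so the shared test helpers above can iterate over it.
def _example_fakedict_usage(bga):  # pragma: no cover
    fake = FakeDict(bga)
    for name, ga in fake.items():
        # `ga` is always the same BAMGenomeArray; __getitem__ has just switched
        # its mapping function to the rule registered under `name`
        print(name, ga.sum())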
@attr(test="unit")
@attr(speed="slow")
class TestBAMGenomeArray(AbstractExportableGenomeArrayHelper):
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_BAM_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
        the first instance is made, with the results then stored in class attributes
Parameters
----------
methodName : str
            Name of method being run. Required by :py:class:`unittest.TestCase`
        params : dict
            Parameters specific to the set-up of test suites for specific
            |AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
AbstractGenomeArrayHelper.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
        if not self.__class__.has_gnds:
bga = BAMGenomeArray(
[pysam.Samfile(os.path.join(self.test_folder, _TEST_FILES["bam"]), "rb")]
)
TestBAMGenomeArray.gnds = FakeDict(bga)
TestBAMGenomeArray.bga = bga
self.__class__.has_gnds = True
def test_open_str_filename(self):
z = BAMGenomeArray(os.path.join(self.test_folder, _TEST_FILES["bam"]))
self.assertEqual(z.sum(), self.bga.sum())
def test_open_multi_list(self):
v = [os.path.join(self.test_folder, _TEST_FILES["bam"])] * 2
z = BAMGenomeArray(v)
self.assertEqual(z.sum(), 2 * self.bga.sum())
def test_open_multi_filename(self):
f = os.path.join(self.test_folder, _TEST_FILES["bam"])
z = BAMGenomeArray(f, f)
self.assertEqual(z.sum(), 2 * self.bga.sum())
def mutable_conversion_helper(self, new_class):
"""Helper function to test conversion of |BAMGenomeArray| to various |MutableAbstractGenomeArray| types
Parameters
----------
new_class : class
Non-abstract subclass of |MutableAbstractGenomeArray|
"""
ivplus = GenomicSegment("chrA", 0, self.bga.lengths()["chrA"], "+")
ivminus = GenomicSegment("chrA", 0, self.bga.lengths()["chrA"], "-")
for k, v in self.gnds.items():
new_gnd = v.to_genome_array(array_type=new_class)
for iv in (ivplus, ivminus):
self.assertGreater(v[iv].sum(), 0)
self.assertGreater(new_gnd[iv].sum(), 0)
max_err = max(abs(v[iv] - new_gnd[iv]))
err_message = "%s BAMGenomeArray conversion to %s error %s exceeded tolerance %s." % (
k, new_class.__name__, max_err, self.tol
)
self.assertLess(max_err, self.tol, err_message)
def test_to_genome_array(self):
self.mutable_conversion_helper(GenomeArray)
def test_to_sparse_genome_array(self):
self.mutable_conversion_helper(SparseGenomeArray)
def variablestep_and_bedgraph_export_helper(self, wiggle_type, export_function):
# override function so we can test window size parameters in export
for window_size in (1, 2, 5, 10, 25, 100, 500, 1000, 10000):
AbstractGenomeArrayHelper.variablestep_and_bedgraph_export_helper(
self,
wiggle_type,
export_function,
input_class=GenomeArray,
window_size=window_size
)
def test_add_remove_filter(self):
        # add a filter that keeps only minus-strand reads and require plus-strand
        # regions to be zero; then remove the filter and watch the counts come back
bga = self.bga
def minus_only_filter(read):
return read.is_reverse
entire_iv_plus = GenomicSegment("chrA", 0, bga.lengths()["chrA"], "+")
entire_iv_minus = GenomicSegment("chrA", 0, bga.lengths()["chrA"], "-")
# fetch counts & check
pre_plus = bga[entire_iv_plus]
self.assertGreater(pre_plus.sum(), 0)
pre_minus = bga[entire_iv_minus]
self.assertGreater(pre_minus.sum(), 0)
# add filter, re-fetch
bga.add_filter("minus_only", minus_only_filter)
post_plus = bga[entire_iv_plus]
post_minus = bga[entire_iv_minus]
self.assertEqual(post_plus.sum(), 0)
self.assertFalse((post_plus == pre_plus).all())
self.assertEqual(post_minus.sum(), pre_minus.sum())
self.assertTrue((post_minus == pre_minus).all())
# remove_filter, re_fetch
bga.remove_filter("minus_only")
post_post_plus = bga[entire_iv_plus]
post_post_minus = bga[entire_iv_minus]
self.assertEqual(post_post_plus.sum(), pre_plus.sum())
self.assertTrue((post_post_plus == pre_plus).all())
self.assertEqual(post_post_minus.sum(), pre_minus.sum())
self.assertTrue((post_post_minus == pre_minus).all())
#===============================================================================
# INDEX: tools for generating test datasets with known results
#===============================================================================
def _detect_or_create_folders(base_folder):
"""Creates and tests folder hierarchy needed for unit/integrative tests below.
Parameters
----------
base_folder : str
path to base folder in which test data will be created
"""
sub_folders = ["fasta", "ebwt", "count_vectors", "align", "wig", "bed"]
if not os.path.isdir(base_folder):
os.mkdir(base_folder)
for name in sub_folders:
sf = os.path.join(base_folder, name)
if not os.path.isdir(sf):
os.mkdir(sf)
# BED file for use later
with open(os.path.join(base_folder, _TEST_FILES["bed"]), "w") as fout:
fout.write(TEST_CHR_BED)
fout.close()
# .juncs file for tophat
with open(os.path.join(base_folder, _TEST_FILES["juncs"]), "w") as fout:
fout.write(TEST_CHR_JUNCS)
fout.close()
def detect_base_folder(func):
"""Decorator function to ensure that folders required by functions below exist.
For this decorator to work, the function it wraps MUST require base_folder
as its first parameter.
Parameters
----------
func : Function
Function to decorate
Returns
-------
Function : wrapped function
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
_detect_or_create_folders(args[0])
return func(*args, **kwargs)
return new_func
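# Illustrative sketch (hypothetical helper, not in the original file): any
# data-generation function decorated with @detect_base_folder may assume the
# folder hierarchy exists, provided base_folder is its first positional argument.
@detect_base_folder
def _example_decorated_helper(base_folder):  # pragma: no cover
    # base_folder and its subfolders ("fasta", "ebwt", ...) now exist
    return os.path.join(base_folder, "fasta")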
@detect_base_folder
def create_synthetic_genome(base_folder):
"""Create a synthetic genome with unique and multimapping regions annotated
as in TEST_CHR_BED
Parameters
----------
base_folder : str
path to base folder in which test data will be created
        Genome will be saved as base_folder/fasta/chrA.fa
Returns
-------
str : synthetic genome sequence
"""
ustart = 100
ulength = 1000
spacer_length = 100
#uend = ustart + ulength
#dstart = uend + spacer_length
dlength = 500
dspacer = 50
#dstart = uend + spacer_length
#dend = dstart + dlength + dspacer + dlength
#splice_start = dend + spacer_length
splice_flank_length = 25
splice_spacer = 75
# generate sequence
unique_region = random_seq(ulength)
duplicated_repeat = random_seq(dlength)
duplicated_region = duplicated_repeat + ("N" * dspacer) + duplicated_repeat
splice_region = random_seq(splice_flank_length) + ("N" * splice_spacer
) + random_seq(splice_flank_length)
# write to FASTA
total_genome = (
"N" * ustart
) + unique_region + "N" * spacer_length + duplicated_region + "N" * spacer_length + splice_region + "N" * 100
with open(os.path.join(base_folder, _TEST_FILES["genome"]), "w") as fh:
fh.write(">%s\n%s\n" % ("chrA", total_genome))
fh.close()
return total_genome
@detect_base_folder
def create_bowtie_index(
base_folder, bowtie_location=os.path.join(os.path.sep, "usr", "local", "bin")
):
"""Build bowtie indices to enable alignments in bowtie and tophat
against synthetic genome. Creates bowtie indices in base_folder/ebwt.
Requires a FASTA file of the synthetic genome in base_folder/fasta/chrA.fa,
so run create_synthetic_genome() first
Parameters
----------
base_folder : str
path to base folder in which test data will be created
bowtie_location : str
path to folder containing bowtie-build
Returns
-------
int : exit status of bowtie-build
"""
unspliced_args = [
os.path.join(bowtie_location, "bowtie-build"),
os.path.join(base_folder, _TEST_FILES["genome"]),
os.path.join(base_folder, _TEST_FILES["bowtie_index"])
]
unspliced_exit = subprocess.call(unspliced_args)
return unspliced_exit #| spliced_exit
def _ndarray_to_variable_step(count_vec, fh, name):
"""Write a numpy.ndarray to a variableStep wiggle file
Parameters
----------
count_vec : numpy.ndarray
vector of counts
fh : file-like
open filehandle
name : str
Track name
"""
fh.write("track type=wiggle_0 name=%s\n" % name)
fh.write("variableStep chrom=chrA span=1\n")
for i in count_vec.nonzero()[0]:
val = count_vec[i]
fh.write("%s\t%s\n" % (i + 1, val))
def _ndarray_to_bedgraph(count_vec, fh, name):
"""Write a numpy.ndarray to a BEDGraph file
Parameters
----------
count_vec : numpy.ndarray
vector of counts
fh : file-like
open filehandle
name : str
Track name
"""
fh.write("track type=bedGraph name=%s\n" % name)
last_val = count_vec[0]
start_i = 0
for i, val in enumerate(count_vec):
if val != last_val:
fh.write("%s\t%s\t%s\t%s\n" % ("chrA", start_i, i, last_val))
start_i = i
last_val = val
fh.write("%s\t%s\t%s\t%s\n" % ("chrA", start_i, i + 1, last_val))
@detect_base_folder
def generate_reads(
base_folder, reads_per_region=DEFAULT_READS_PER_REGION, read_length=DEFAULT_READ_LENGTH
):
"""Generates 30-nucleotide reads from a genome created by create_synthetic_genome,
choosing from uniquely-mapping and multimapping regions annotated in
TEST_CHR_BED. 10000 reads are generated for each region type. Reads are
returned in FASTA format. Also saves a numpy array of how many reads are expected
to align to each nucleotide position in the synthetic genome, if reads are mapped
at their 5' ends.
Parameters
----------
base_folder : str
path to base folder in which test data will be created.
        Reads will be saved as base_folder/fasta/chrA_reads.fa.
        Count vectors will be saved as various text files in
        base_folder/count_vectors
reads_per_region : int
Number of reads to generate in each region
read_length : int
Length of reads to generate
Returns
-------
dict : dictionary of numpy.ndarrays corresponding to expected number of counts
at each genomic position, under various read alignment mapping rules
"""
with open(os.path.join(base_folder, "fasta", "chrA.fa")) as fh:
genome = SeqIO.to_dict(SeqIO.parse(fh), "fasta")
len_A = len(genome["chrA"])
# TODO : align
count_vectors = {
"fiveprime_0_fw": numpy.zeros(len_A).astype(int),
"fiveprime_15_fw": numpy.zeros(len_A).astype(int),
"threeprime_0_fw": numpy.zeros(len_A).astype(int),
"threeprime_15_fw": numpy.zeros(len_A).astype(int),
"center_0_fw": numpy.zeros(len_A),
"center_12_fw": numpy.zeros(len_A),
"fiveprime_0_rc": numpy.zeros(len_A).astype(int),
"fiveprime_15_rc": numpy.zeros(len_A).astype(int),
"threeprime_0_rc": numpy.zeros(len_A).astype(int),
"threeprime_15_rc": numpy.zeros(len_A).astype(int),
"center_0_rc": numpy.zeros(len_A),
"center_12_rc": numpy.zeros(len_A),
} # yapf: disable
with open(os.path.join(base_folder, _TEST_FILES["reads"]), "w") as read_fh:
regions = filter(lambda x: "intron" not in x.get_name()\
and "entire" not in x.get_name(),
fetch_regions())
for region in regions:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
my_seq = region.get_sequence(genome)
# choose 5' read locations
read_locs = numpy.random.randint(
0, high=len(my_seq) - read_length + 1, size=reads_per_region
)
# generate FASTA File
# and save read positions to count vectors under various alignment mapping rules
for n, loc in enumerate(read_locs):
# write reads
read_fh.write(
">%s_%s\n%s\n" % (region.get_name(), n, my_seq[loc:loc + read_length])
)
_, position, _ = region.get_genomic_coordinate(loc)
# populate 5' and 3' mapped count vectors
for offset in (0, 15):
_, position, _ = region.get_genomic_coordinate(loc + offset)
count_vectors["fiveprime_%s_%s" % (offset, strand_key)][position] += 1
_, position, _ = region.get_genomic_coordinate(loc + read_length - offset - 1)
count_vectors["threeprime_%s_%s" % (offset, strand_key)][position] += 1
# populate center-mapped count vectors
read_positions = region.get_subchain(loc, loc + read_length).get_position_list()
assert len(read_positions) == read_length
for pos in read_positions:
count_vectors["center_0_%s" % strand_key][pos] += 1.0 / len(read_positions)
assert len(read_positions[12:-12]) == read_length - 24
for pos in read_positions[12:-12]:
count_vectors["center_12_%s" % strand_key][pos] += 1.0 / (
len(read_positions) - 24
)
for k, v in count_vectors.items():
numpy.savetxt(os.path.join(base_folder, "count_vectors", "%s.txt" % k), v)
# export 5' mapped BEDGraph files
with open(os.path.join(base_folder, _TEST_FILES["bedgraph_fw"]), "w") as bedgraph_fw:
_ndarray_to_bedgraph(count_vectors["fiveprime_0_fw"], bedgraph_fw, base_folder)
with open(os.path.join(base_folder, _TEST_FILES["bedgraph_rc"]), "w") as bedgraph_rc:
_ndarray_to_bedgraph(count_vectors["fiveprime_0_rc"], bedgraph_rc, base_folder)
# export 5' mapped variableStep wiggle files
with open(os.path.join(base_folder, _TEST_FILES["variable_step_fw"]), "w") as vs_fw:
_ndarray_to_variable_step(count_vectors["fiveprime_0_fw"], vs_fw, base_folder)
with open(os.path.join(base_folder, _TEST_FILES["variable_step_rc"]), "w") as vs_rc:
_ndarray_to_variable_step(count_vectors["fiveprime_0_rc"], vs_rc, base_folder)
return count_vectors
@detect_base_folder
def perform_alignments(
base_folder,
bowtie_location=os.path.join(os.path.sep, "usr", "local", "bin"),
tophat_location=os.path.join(os.path.sep, "usr", "local", "bin"),
samtools_location=os.path.join(os.path.sep, "usr", "local", "bin"),
):
"""Perform alignments of generated reads against synthetic genome,
in both Tophat and Bowtie so that both BAM and bowtie input may be tested.
Note: spliced reads will not align in bowtie.
Read alignments will be placed in base_folder/align
Requires a bowtie index of the synthetic genome in base_folder/ebwt, and
    synthetic reads to align in base_folder/fasta/chrA_reads.fa,
    so run create_bowtie_index() and generate_reads() first.
Parameters
----------
base_folder : str
path to base folder in which test data will be created.
bowtie_location : str
path to folder containing bowtie executable
tophat_location : str
path to folder containing tophat executable
samtools_location : str
path to folder containing samtools executable
Returns
-------
int : ORed exit status of bowtie and tophat (0 if both successful; 1 otherwise)
"""
# align with no mismatches, choosing 1 random alignment from repeat regions
bowtie_args = [
os.path.join(bowtie_location, "bowtie"), "-v0", "-k1", "--best", "-f", "--un",
os.path.join(base_folder, "align", "chrA_unspliced_unaligned.fa"),
os.path.join(base_folder, _TEST_FILES["bowtie_index"]),
os.path.join(base_folder, _TEST_FILES["reads"]),
os.path.join(base_folder, _TEST_FILES["bowtie"])
]
# align with no mismatches, choosing 1 random alignment from repeat regions
tophat_args = [
os.path.join(bowtie_location, "tophat"),
"--bowtie1",
"--read-mismatches=0",
"--min-intron-length=20",
"--library-type=fr-firststrand",
"--raw-juncs",
os.path.join(base_folder, _TEST_FILES["juncs"]),
"--no-novel-juncs",
"-o",
os.path.join(base_folder, "align", "tophat"),
os.path.join(base_folder, _TEST_FILES["bowtie_index"]),
os.path.join(base_folder, _TEST_FILES["reads"]),
]
samtools_multi_args = [
os.path.join(samtools_location, "samtools"), "view", "-F", "256",
os.path.join(base_folder, "align", "tophat", "accepted_hits.bam"), "-b", "-o",
os.path.join(base_folder, "align", "chrA_tophat.bam")
]
samtools_index_args = [
os.path.join(samtools_location, "samtools"), "index",
os.path.join(base_folder, "align", "chrA_tophat.bam")
]
bowtie_exit = subprocess.call(bowtie_args)
tophat_exit = subprocess.call(tophat_args)
samtools_exit_1 = subprocess.call(samtools_multi_args)
samtools_exit_2 = subprocess.call(samtools_index_args)
return bowtie_exit | tophat_exit | samtools_exit_1 | samtools_exit_2
def create_dataset(
base_folder,
bowtie_location=os.path.join(os.path.sep, "usr", "local", "bin"),
tophat_location=os.path.join(os.path.sep, "usr", "local", "bin"),
samtools_location=os.path.join(os.path.sep, "usr", "local", "bin"),
):
"""Create a ground-truth dataset for testing |GenomeArray|
This dataset includes a synthetic genome of random sequence, containing
uniquely-mapping and multimapping regions; short sequence reads generated
from these regions; alignments of these reads made in bowtie and tophat;
and saved numpy tables indicating how many counts should appear at each
genomic position under various mapping rules and offsets.
Parameters
----------
base_folder : str
path to base folder in which test data will be created.
bowtie_location : str
path to folder containing bowtie executable
tophat_location : str
path to folder containing tophat executable
samtools_location : str
path to folder containing samtools executable
Returns
-------
dict: dictionary containing the genome sequence, count vectors,
and alignment status
"""
dtmp = {}
dtmp["base_folder"] = base_folder
dtmp["genome_str"] = create_synthetic_genome(base_folder)
dtmp["aligned"] = False
if create_bowtie_index(base_folder, bowtie_location=bowtie_location) == 0:
dtmp.update(generate_reads(base_folder))
if perform_alignments(base_folder, bowtie_location=bowtie_location,
tophat_location=tophat_location,
samtools_location=samtools_location) == 0:
dtmp["aligned"] = True
return dtmp
return dtmp
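# Illustrative sketch (the path is hypothetical): regenerating the ground-truth
# dataset consumed by the test suites above.
def _example_create_dataset():  # pragma: no cover
    dtmp = create_dataset("/tmp/plastid_mini_test_data")
    if dtmp["aligned"]:
        print("alignments complete; generated keys:", sorted(dtmp.keys()))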
|
{"hexsha": "04be15ba5578baa6bcaa9a43b79d648a5dd3c6dd", "size": 76671, "ext": "py", "lang": "Python", "max_stars_repo_path": "plastid/test/unit/genomics/test_genome_array.py", "max_stars_repo_name": "joshuagryphon/plastid", "max_stars_repo_head_hexsha": "e63a818e33766b01d84b3ac9bc9f55e6a1ece42f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2016-04-05T09:58:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T11:58:30.000Z", "max_issues_repo_path": "plastid/test/unit/genomics/test_genome_array.py", "max_issues_repo_name": "joshuagryphon/plastid", "max_issues_repo_head_hexsha": "e63a818e33766b01d84b3ac9bc9f55e6a1ece42f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 49, "max_issues_repo_issues_event_min_datetime": "2015-09-15T19:50:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-06T18:17:35.000Z", "max_forks_repo_path": "plastid/test/unit/genomics/test_genome_array.py", "max_forks_repo_name": "joshuagryphon/plastid", "max_forks_repo_head_hexsha": "e63a818e33766b01d84b3ac9bc9f55e6a1ece42f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-02-08T09:38:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-16T02:32:46.000Z", "avg_line_length": 38.0501240695, "max_line_length": 160, "alphanum_fraction": 0.5953489585, "include": true, "reason": "import numpy", "num_tokens": 18776}
|
import os
import sys
import numpy as np
from datetime import datetime
from functools import wraps
from time import time
def stop_watch(func):
@wraps(func)
def wrapper(*args, **kargs):
start = time()
log = "[START] {}: {}() | PID: {} ({})".format(sys.argv[0], func.__qualname__, os.getpid(), datetime.today())
print(log)
result = func(*args, **kargs)
elapsed_time = int(time() - start)
        minutes, sec = divmod(elapsed_time, 60)
        hour, minutes = divmod(minutes, 60)
        log = "[FINISH] {}: {}() | PID: {} ({}) >> [Time]: {:0>2}:{:0>2}:{:0>2}".format(
            sys.argv[0], func.__qualname__, os.getpid(), datetime.today(), hour, minutes, sec)
print(log)
return result
return wrapper
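# Illustrative sketch (hypothetical function): anything decorated with
# @stop_watch logs start/finish timestamps, the PID, and the elapsed wall time.
@stop_watch
def _example_timed_job(n):  # pragma: no cover
    return sum(range(n))
# Calling _example_timed_job(10**7) prints lines like:
#   [START] script.py: _example_timed_job() | PID: 1234 (2020-03-04 ...)
#   [FINISH] script.py: _example_timed_job() | PID: 1234 (...) >> [Time]: 00:00:01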
def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtype
if col_type != 'object' and col_type != 'datetime64[ns]':
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float32) # feather-format cannot accept float16
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage: {:.2f} MB -> {:.2f} MB (Decreased by {:.1f}%)'.format(
start_mem, end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
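# Illustrative sketch (assumes pandas is installed; the DataFrame is made up):
# downcast a small frame to the narrowest dtypes that can hold its values.
def _example_reduce_mem_usage():  # pragma: no cover
    import pandas as pd
    df = pd.DataFrame({"a": [0, 1, 2], "b": [0.5, 1.5, 2.5]})
    df = reduce_mem_usage(df)  # "a" fits np.int8; "b" becomes np.float32
    return df.dtypes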
def predict_chunk(model, test):
initial_idx = 0
chunk_size = 1000000
current_pred = np.zeros(len(test))
while initial_idx < test.shape[0]:
final_idx = min(initial_idx + chunk_size, test.shape[0])
idx = range(initial_idx, final_idx)
current_pred[idx] = model.predict(test.iloc[idx], num_iteration=model.best_iteration)
initial_idx = final_idx
return current_pred
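# Illustrative sketch (model and test_df are hypothetical): predict_chunk
# expects a fitted LightGBM-style model (anything exposing .predict and
# .best_iteration) and a feature DataFrame; it scores one million rows at a time.
def _example_predict_chunk(model, test_df):  # pragma: no cover
    preds = predict_chunk(model, test_df)
    assert len(preds) == len(test_df)
    return preds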
|
{"hexsha": "f3a5c70c632d9aa7c024166164e712b6f72881dd", "size": 2711, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/common.py", "max_stars_repo_name": "j20232/kaggle_earthquake", "max_stars_repo_head_hexsha": "47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/common.py", "max_issues_repo_name": "j20232/kaggle_earthquake", "max_issues_repo_head_hexsha": "47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/common.py", "max_forks_repo_name": "j20232/kaggle_earthquake", "max_forks_repo_head_hexsha": "47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7076923077, "max_line_length": 118, "alphanum_fraction": 0.5647362597, "include": true, "reason": "import numpy", "num_tokens": 734}
|
!----------------------------------------------------------------------------- best with 100 columns
!> finite element discretizations
module modBasicFEM
public
contains
!> find Laplacian operator
!> NOTE: A must be pre-initialized to contain the temporary CSR matrix with duplications
subroutine findLaplacian(grid,A,isDirichlet)
use modPolyFeGrid
use modPolyGrid
use modSMQ
use modSparse
class(polyFeGrid),intent(inout)::grid !< the grid
class(linEq),intent(inout)::A !< the discretized linear system
logical,intent(in),optional::isDirichlet(grid%nN) !< whether each node is Dirichlet node
integer::nnz,maxNNZ
integer,allocatable::iA(:),jA(:)
double precision,allocatable::vA(:)
call grid%up()
maxNNZ=size(grid%iNE,1)**2*grid%nC
allocate(iA(maxNNZ)) ! COO temporary matrix
allocate(jA(maxNNZ))
allocate(vA(maxNNZ))
nnz=0
do i=1,grid%nC
select case(grid%sE(i))
case(TET)
do j=1,TET_N
if(present(isDirichlet))then
if(isDirichlet(grid%iNE(j,i)))then
cycle
end if
end if
do k=1,TET_N
nnz=nnz+1
iA(nnz)=grid%iNE(j,i)
jA(nnz)=grid%iNE(k,i)
vA(nnz)=0d0
do l=1,size(TET_QW)
vA(nnz)=vA(nnz)+dot_product(matmul(grid%invJ(:,:,l,i),TET_GRAD_QP(:,j,l)),&
& matmul(grid%invJ(:,:,l,i),TET_GRAD_QP(:,k,l)))*&
& grid%detJ(l,i)*TET_QW(l)
end do
end do
end do
case(TET10)
do j=1,TET10_N
if(present(isDirichlet))then
if(isDirichlet(grid%iNE(j,i)))then
cycle
end if
end if
do k=1,TET10_N
nnz=nnz+1
iA(nnz)=grid%iNE(j,i)
jA(nnz)=grid%iNE(k,i)
vA(nnz)=0d0
do l=1,size(TET10_QW)
vA(nnz)=vA(nnz)+dot_product(matmul(grid%invJ(:,:,l,i),TET10_GRAD_QP(:,j,l)),&
& matmul(grid%invJ(:,:,l,i),TET10_GRAD_QP(:,k,l)))*&
& grid%detJ(l,i)*TET10_QW(l)
end do
end do
end do
case default
end select
end do
if(present(isDirichlet))then
do i=1,grid%nN
if(isDirichlet(i))then
nnz=nnz+1
iA(nnz)=i
jA(nnz)=i
vA(nnz)=1d0
end if
end do
end if
call A%setCOO(iA,jA,vA,nnz,job=CSR_CLEAN_SORT)
deallocate(iA,jA,vA)
end subroutine
!> find volumetric source discretization factor
subroutine findVolSrc(grid,src)
use modPolyFeGrid
use modPolyGrid
use modSMQ
class(polyFeGrid),intent(inout)::grid !< the grid
double precision,allocatable,intent(inout)::src(:) !< the discretized source factor
call grid%up()
if(.not.allocated(src))then
allocate(src(grid%nN))
end if
src(:)=0d0
do i=1,grid%nC
select case(grid%sE(i))
case(TET)
do j=1,TET_N
do l=1,size(TET_QW)
src(grid%iNE(j,i))=src(grid%iNE(j,i))+TET_SHAPE_QP(j,l)*grid%detJ(l,i)*TET_QW(l)
end do
end do
case(TET10)
do j=1,TET10_N
do l=1,size(TET10_QW)
src(grid%iNE(j,i))=src(grid%iNE(j,i))+TET10_SHAPE_QP(j,l)*grid%detJ(l,i)*TET10_QW(l)
end do
end do
case default
end select
end do
end subroutine
!> find surface source (i.e. Neumann BC) discretization factor
subroutine findSurfSrc(grid,src)
use modPolyFeGrid
use modPolyGrid
use modSMQ
class(polyFeGrid),intent(inout)::grid !< the grid
double precision,allocatable,intent(inout)::src(:) !< the discretized source factor
call grid%up()
if(.not.allocated(src))then
allocate(src(grid%nN))
end if
src(:)=0d0
do i=grid%nC+1,grid%nE
select case(grid%sE(i))
case(TRI)
do j=1,TRI_N
do l=1,size(TRI_QW)
src(grid%iNE(j,i))=src(grid%iNE(j,i))+TRI_SHAPE_QP(j,l)*grid%detJ(l,i)*TRI_QW(l)
end do
end do
case(TRI6)
do j=1,TRI6_N
do l=1,size(TRI6_QW)
src(grid%iNE(j,i))=src(grid%iNE(j,i))+TRI6_SHAPE_QP(j,l)*grid%detJ(l,i)*TRI6_QW(l)
end do
end do
case default
end select
end do
end subroutine
end module
|
{"hexsha": "539016d7a005dd5ffb883c0807e79105aa75e8e0", "size": 4428, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/libfosolvers/FEM/basicFEM.f90", "max_stars_repo_name": "mianzhi/fosolvers", "max_stars_repo_head_hexsha": "be4877a9cccd7bf6b97d4e01c58e10634684415d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2015-08-05T14:10:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T10:50:06.000Z", "max_issues_repo_path": "src/libfosolvers/FEM/basicFEM.f90", "max_issues_repo_name": "mianzhi/fosolvers", "max_issues_repo_head_hexsha": "be4877a9cccd7bf6b97d4e01c58e10634684415d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-11-24T15:46:54.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-30T13:30:29.000Z", "max_forks_repo_path": "src/libfosolvers/FEM/basicFEM.f90", "max_forks_repo_name": "mianzhi/fosolvers", "max_forks_repo_head_hexsha": "be4877a9cccd7bf6b97d4e01c58e10634684415d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-08-23T03:10:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-01T05:17:19.000Z", "avg_line_length": 29.3245033113, "max_line_length": 100, "alphanum_fraction": 0.5447154472, "num_tokens": 1401}
|
import numpy as np
import torch
import torch.nn
from .nnutils import Network, one_hot, extract
class QNet(Network):
def __init__(self, n_features, n_actions, n_hidden_layers=1, n_units_per_layer=32):
super().__init__()
self.n_actions = n_actions
self.layers = []
if n_hidden_layers == 0:
self.layers.extend([torch.nn.Linear(n_features, n_actions)])
else:
self.layers.extend([torch.nn.Linear(n_features, n_units_per_layer), torch.nn.ReLU()])
self.layers.extend(
[torch.nn.Linear(n_units_per_layer, n_units_per_layer),
torch.nn.ReLU()] * (n_hidden_layers - 1))
self.layers.extend([torch.nn.Linear(n_units_per_layer, n_actions)])
self.model = torch.nn.Sequential(*self.layers)
def forward(self, z):
return self.model(z)
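A minimal usage sketch (sizes are hypothetical; assumes nnutils.Network is a torch.nn.Module subclass, as its use with Sequential suggests):

net = QNet(n_features=4, n_actions=3, n_hidden_layers=2, n_units_per_layer=32)
z = torch.randn(8, 4)  # batch of 8 feature vectors
q = net(z)             # -> tensor of shape (8, 3), one Q-value per action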
|
{"hexsha": "7e883e083b9bdf2eaf1bc78f3c870a14abb0d75b", "size": 867, "ext": "py", "lang": "Python", "max_stars_repo_path": "markov_abstr/gridworld/models/qnet.py", "max_stars_repo_name": "camall3n/regenernet", "max_stars_repo_head_hexsha": "c1b23624bf8ded7c1dadb858de90f58838586413", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-08-09T18:34:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T06:23:36.000Z", "max_issues_repo_path": "markov_abstr/gridworld/models/qnet.py", "max_issues_repo_name": "camall3n/regenernet", "max_issues_repo_head_hexsha": "c1b23624bf8ded7c1dadb858de90f58838586413", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "markov_abstr/gridworld/models/qnet.py", "max_forks_repo_name": "camall3n/regenernet", "max_forks_repo_head_hexsha": "c1b23624bf8ded7c1dadb858de90f58838586413", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-09T18:34:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T18:34:43.000Z", "avg_line_length": 33.3461538462, "max_line_length": 97, "alphanum_fraction": 0.6412918108, "include": true, "reason": "import numpy", "num_tokens": 193}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 10:08:24 2020
@author: hannes
"""
#General imports
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import numpy as np
import skimage as skimage
"""
NOTE:
In order to generate the image sequence we can run the following command in a terminal:
ffmpeg -i "barium_cloud_1_movie.mp4" -f image2 "video-frame%05d.png"
"""
def readAllFiles(path):
"""Function which opens every subdir and reads the logFiles
Input arguments:
path = path to main data directory
"""
images = os.listdir(path)
sortedImages = sorted(images)
cropBox = [65,200,160,610]
scaling = 15
dataSmall = []
dataBig = []
for entry in sortedImages:
if os.path.isfile(os.path.join(path, entry)):
image = mpimg.imread(path + entry)
image = image[cropBox[0]:cropBox[1],cropBox[2]:cropBox[3],0]
image = (image + 1)*(-1) + 2.4
image = np.where(image < 1.4, image, 0)
image = np.where(image > 1, 0, image)
image = image*scaling
image = np.pad(image,((50,50),(0,0)))
image = skimage.filters.gaussian(image,2)
newImage = skimage.transform.resize(image,[600,600])
plt.imshow(newImage, origin='lower',vmin=-1, vmax=15)
plt.colorbar()
            plt.savefig(savePath + entry)  # relies on the module-level savePath set in __main__
plt.close()
dataBig.append(newImage)
dataSmall.append(image)
return dataSmall, dataBig
if __name__ == "__main__":
"""Entry in barium cloud preprocessing Program
No Input
"""
print('Starting barium cloud data generation\n')
basePath = '/home/hannes/MasterThesisCode/AdaptiveSamplingIntermittentComms/src/Data/BariumCloudImages/Raw/'
savePath = '/home/hannes/MasterThesisCode/AdaptiveSamplingIntermittentComms/src/Data/BariumCloudImages/Processed/'
dataSmall, dataBig = readAllFiles(basePath)
np.savez(savePath + 'BariumCloudDataSmall', data=dataSmall)
np.savez(savePath + 'BariumCloudDataBig', data=dataBig)
    print('Successfully executed barium cloud data generation\n')
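To read the arrays back in a later session (a sketch; np.savez appends the .npz extension to the paths used above):

archive = np.load(savePath + 'BariumCloudDataSmall.npz')
dataSmall = archive['data']  # stacked frames, one 2-D array per image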
|
{"hexsha": "35e328eb78014fabbd3a0e9796442715aa1ba084", "size": 2206, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Utilities/BariumCloudPreprocessing.py", "max_stars_repo_name": "hsiehScalAR/AdaptiveSamplingIntermittentComms", "max_stars_repo_head_hexsha": "5aec4677fbe3f3bf19213ae6abe4d5ea8b4d052c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Utilities/BariumCloudPreprocessing.py", "max_issues_repo_name": "hsiehScalAR/AdaptiveSamplingIntermittentComms", "max_issues_repo_head_hexsha": "5aec4677fbe3f3bf19213ae6abe4d5ea8b4d052c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Utilities/BariumCloudPreprocessing.py", "max_forks_repo_name": "hsiehScalAR/AdaptiveSamplingIntermittentComms", "max_forks_repo_head_hexsha": "5aec4677fbe3f3bf19213ae6abe4d5ea8b4d052c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2345679012, "max_line_length": 118, "alphanum_fraction": 0.6400725295, "include": true, "reason": "import numpy", "num_tokens": 564}
|
[STATEMENT]
lemma lemma_2_8_i1:
"a \<in> supremum A \<Longrightarrow> a r\<rightarrow> b \<in> infimum ((\<lambda> x . x r\<rightarrow> b)`A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> supremum A \<Longrightarrow> a r\<rightarrow> b \<in> infimum ((\<lambda>x. x r\<rightarrow> b) ` A)
[PROOF STEP]
by (fact pseudo_hoop_dual.lemma_2_8_i)
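Read in conventional notation, with r\<rightarrow> the right residuum, the lemma states: if $a \in \sup A$ then $a \rightarrow b \in \inf\{x \rightarrow b \mid x \in A\}$, i.e. residuation into a fixed $b$ turns suprema into infima; the proof is the order dual of lemma_2_8_i.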
|
{"llama_tokens": 147, "file": "PseudoHoops_PseudoHoops", "length": 1}
|
from .provider_test import ProviderTest
from gunpowder import (
    Array,
    ArrayKey,
    ArrayKeys,
    ArraySpec,
    Batch,
    BatchProvider,
    BatchRequest,
    Coordinate,
    MergeProvider,
    RandomLocation,
    Roi,
    build,
)
import numpy as np
from gunpowder.pipeline import PipelineRequestError
class ExampleSourceRandomLocation(BatchProvider):
def __init__(self, array):
self.array = array
self.roi = Roi((-200, -20, -20), (1000, 100, 100))
self.data_shape = (60, 60, 60)
self.voxel_size = (20, 2, 2)
x = np.linspace(-10, 49, 60).reshape((-1, 1, 1))
self.data = x + x.transpose([1, 2, 0]) + x.transpose([2, 0, 1])
def setup(self):
self.provides(self.array, ArraySpec(roi=self.roi, voxel_size=self.voxel_size))
def provide(self, request):
batch = Batch()
spec = request[self.array].copy()
spec.voxel_size = Coordinate((20, 2, 2))
start = (request[self.array].roi.get_begin() / self.voxel_size) + (10, 10, 10,)
end = (request[self.array].roi.get_end() / self.voxel_size) + (10, 10, 10)
data_slices = tuple(map(slice, start, end))
data = self.data[data_slices]
batch.arrays[self.array] = Array(data=data, spec=spec)
return batch
class CustomRandomLocation(RandomLocation):
def __init__(self, array, *args, **kwargs):
super().__init__(*args, **kwargs)
self.array = array
# only accept random locations that contain (0, 0, 0)
def accepts(self, request):
return request.array_specs[self.array].roi.contains((0, 0, 0))
class TestRandomLocation(ProviderTest):
def test_output(self):
a = ArrayKey("A")
b = ArrayKey("B")
source_a = ExampleSourceRandomLocation(a)
source_b = ExampleSourceRandomLocation(b)
pipeline = (source_a, source_b) + MergeProvider() + CustomRandomLocation(a)
with build(pipeline):
for i in range(10):
batch = pipeline.request_batch(
BatchRequest(
{
a: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
b: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
}
)
)
self.assertTrue(0 in batch.arrays[a].data)
self.assertTrue(0 in batch.arrays[b].data)
# Request a ROI with the same shape as the entire ROI
full_roi_a = Roi((0, 0, 0), source_a.roi.get_shape())
full_roi_b = Roi((0, 0, 0), source_b.roi.get_shape())
batch = pipeline.request_batch(
BatchRequest(
{a: ArraySpec(roi=full_roi_a), b: ArraySpec(roi=full_roi_b)}
)
)
def test_random_seed(self):
raw = ArrayKey("RAW")
pipeline = ExampleSourceRandomLocation(raw) + CustomRandomLocation(raw)
with build(pipeline):
seeded_sums = []
unseeded_sums = []
for i in range(10):
batch_seeded = pipeline.request_batch(
BatchRequest(
{raw: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))},
random_seed=10,
)
)
seeded_sums.append(batch_seeded[raw].data.sum())
batch_unseeded = pipeline.request_batch(
BatchRequest({raw: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))})
)
unseeded_sums.append(batch_unseeded[raw].data.sum())
self.assertEqual(len(set(seeded_sums)), 1)
self.assertGreater(len(set(unseeded_sums)), 1)
def test_impossible(self):
a = ArrayKey("A")
b = ArrayKey("B")
null_key = ArrayKey("NULL")
source_a = ExampleSourceRandomLocation(a)
source_b = ExampleSourceRandomLocation(b)
pipeline = (
(source_a, source_b) + MergeProvider() + CustomRandomLocation(null_key)
)
with build(pipeline):
with self.assertRaises(PipelineRequestError):
batch = pipeline.request_batch(
BatchRequest(
{
a: ArraySpec(roi=Roi((0, 0, 0), (200, 20, 20))),
b: ArraySpec(roi=Roi((1000, 100, 100), (220, 22, 22))),
}
)
)
|
{"hexsha": "46c4b2a97ceb2077366e92ba1bb426b9bbad0887", "size": 4599, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/cases/random_location.py", "max_stars_repo_name": "trivoldus28/gunpowder", "max_stars_repo_head_hexsha": "97e9e64709fb616e2c47567b22d5f11a9234fe48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2017-05-03T22:27:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T19:07:28.000Z", "max_issues_repo_path": "tests/cases/random_location.py", "max_issues_repo_name": "trivoldus28/gunpowder", "max_issues_repo_head_hexsha": "97e9e64709fb616e2c47567b22d5f11a9234fe48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 102, "max_issues_repo_issues_event_min_datetime": "2017-06-09T10:11:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T13:56:37.000Z", "max_forks_repo_path": "tests/cases/random_location.py", "max_forks_repo_name": "trivoldus28/gunpowder", "max_forks_repo_head_hexsha": "97e9e64709fb616e2c47567b22d5f11a9234fe48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 43, "max_forks_repo_forks_event_min_datetime": "2017-04-25T20:25:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T19:07:34.000Z", "avg_line_length": 32.6170212766, "max_line_length": 87, "alphanum_fraction": 0.5325070668, "include": true, "reason": "import numpy", "num_tokens": 1122}
|
"""Provides a data proxy for deferring access to data from a mongoDB query."""
from bson.objectid import ObjectId
import numpy as np
import pymongo
# Inspired by https://github.com/SciTools/iris/blob/master/lib/iris/fileformats/netcdf.py#L418.
class MongoDBDataProxy:
"""A proxy to the data of a single TileDB array attribute."""
__slots__ = ("shape", "dtype", "host", "port", "db_name", "collection_name", "obj_id")
def __init__(self, shape, dtype,
host, port, db_name, collection_name, obj_id):
self.shape = shape
self.dtype = dtype
self.host = host
self.port = port
self.db_name = db_name
self.collection_name = collection_name
self.obj_id = obj_id
@property
def ndim(self):
return len(self.shape)
def _str_to_num(self, num_str):
"""Convert a number expressed as a string to an int or a float."""
try:
result = int(num_str)
except ValueError:
result = float(num_str)
return result
def _load_data(self, data_dict):
"""Convert the data-containing dict back into a (possibly masked) NumPy array."""
data = data_dict["data"]
try:
mask = data_dict["mask"]
fill_value = self._str_to_num(data_dict["fill_value"])
except KeyError:
data = np.array(data)
else:
data = np.ma.masked_array(data, mask=mask, fill_value=fill_value)
return data.reshape(self.shape).astype(self.dtype)
    def __getitem__(self, keys):
        # Set up a client connection, fetch the document, then close the
        # client so repeated indexing does not leak connections.
        mdb_client = pymongo.MongoClient(self.host, self.port)
        try:
            db = mdb_client[self.db_name]
            collection = db[self.collection_name]
            document = collection.find_one({"_id": ObjectId(self.obj_id)})
            data = self._load_data(document)
        finally:
            mdb_client.close()
        return data[keys]
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in state.items():
setattr(self, key, value)
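A usage sketch (host, port, database, and the ObjectId string are hypothetical): the proxy stores only metadata, pickles cheaply via __getstate__/__setstate__, and touches MongoDB only when indexed.

proxy = MongoDBDataProxy(shape=(4, 5), dtype="float64",
                         host="localhost", port=27017,
                         db_name="mydb", collection_name="arrays",
                         obj_id="0123456789ab0123456789ab")
chunk = proxy[1:3, :]  # connects, loads the document, returns a 2x5 slice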
|
{"hexsha": "fede0828c39328da7781c617b8e1ca9510c99dec", "size": 2112, "ext": "py", "lang": "Python", "max_stars_repo_path": "metadatabase/data_proxy.py", "max_stars_repo_name": "informatics-lab/metadatabase", "max_stars_repo_head_hexsha": "380cfd683cc28d57bfc20b1965ed884541e63a6c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "metadatabase/data_proxy.py", "max_issues_repo_name": "informatics-lab/metadatabase", "max_issues_repo_head_hexsha": "380cfd683cc28d57bfc20b1965ed884541e63a6c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-01T13:09:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-17T14:23:34.000Z", "max_forks_repo_path": "metadatabase/data_proxy.py", "max_forks_repo_name": "informatics-lab/metadatabase", "max_forks_repo_head_hexsha": "380cfd683cc28d57bfc20b1965ed884541e63a6c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-10T23:55:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T23:55:58.000Z", "avg_line_length": 33.0, "max_line_length": 95, "alphanum_fraction": 0.6264204545, "include": true, "reason": "import numpy", "num_tokens": 485}
|
import argparse
import copy
import json
import os
import random
import torch
import sys
import numpy as np
import multiprocessing as mp
from audio_conditioned_unet.dataset import iterate_dataset, load_dataset, NonSequentialDatasetWrapper
from audio_conditioned_unet.network import ConditionalUNet
from audio_conditioned_unet.utils import load_game_config
from time import gmtime, strftime
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train Script for ISMIR 2020')
parser.add_argument('--film_layers', nargs='+', help='list of block indices where conditioning should be applied', type=int)
parser.add_argument('--augment', help='activate data augmentation', default=False, action='store_true')
parser.add_argument('--tempo_augment', help='activate tempo augmentation', default=False, action='store_true')
parser.add_argument('--train_set', help='path to train dataset.', type=str)
parser.add_argument('--val_set', help='path to validation dataset.', type=str)
parser.add_argument('--batch_size', help='batch size.', type=int, default=4)
parser.add_argument('--seq_len', help='sequence length for training', type=int, default=16)
parser.add_argument('--log_root', help='path to log directory', type=str, default="runs")
parser.add_argument('--dump_root', help='name for the stored network', type=str, default="params")
parser.add_argument('--tag', help='additional tag', type=str, default="")
parser.add_argument('--n_encoder_layers', '--enc', help='number of encoding layers.', type=int, default=4)
parser.add_argument('--n_filters_start', '--filters', help='number of initial filters.', type=int, default=8)
parser.add_argument('--rnn_size', help='number of rnn units.', type=int, default=128)
parser.add_argument('--spec_enc', help='number of hidden units for the dense layer before the rnn', type=int, default=32)
parser.add_argument('--rnn_layer', help='number of rnn layer.', type=int, default=1)
parser.add_argument('--learning_rate', "--lr", help='learning rate.', type=float, default=1e-4)
parser.add_argument('--scale_factor', help='sheet image scale factor.', type=int, default=3)
parser.add_argument('--weight_decay', help='weight decay value.', type=float, default=1e-5)
parser.add_argument('--param_path', help='load network weights', type=str, default=None)
parser.add_argument('--no_save', help='do not save parameters', default=False, action='store_true')
parser.add_argument('--use_lstm', help='if set use LSTM otherwise no long-term temporal context is used', default=False, action='store_true')
parser.add_argument('--all_tempi', help='use all tempi for augmentation', default=False, action='store_true')
parser.add_argument('--config', help='path to config.', type=str)
parser.add_argument('--patience', help='patience before decreasing the learning rate.', type=int, default=5)
parser.add_argument('--seed', help='random seed.', type=int, default=4711)
parser.add_argument('--audio_encoder', help='audio encoder', type=str, default="CBEncoder")
parser.add_argument('--clip_grads', help='gradient clipping value', type=float, default=None)
args = parser.parse_args()
# set random seed and variables for reproducibility https://pytorch.org/docs/stable/notes/randomness.html
# unfortunately there is still some source of non-determinism possibly e.g. due to nn.Upsample
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# makes training/evaluation very slow
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# apparently the code gets stuck without this line when computing the spectrograms
mp.set_start_method('spawn', force=True)
config = load_game_config(args.config)
train_path = args.train_set
val_path = args.val_set
time_stamp = strftime("%Y%m%d_%H%M%S", gmtime()) + "_{}".format(args.tag)
if not os.path.exists(args.log_root):
os.mkdir(args.log_root)
if not os.path.exists(args.dump_root):
os.mkdir(args.dump_root)
dump_path = os.path.join(args.dump_root, time_stamp)
if not os.path.exists(dump_path):
os.mkdir(dump_path)
train_parameters = dict(
num_epochs=100,
batch_size=args.batch_size,
max_patience=args.patience*2, # max patience before stopping training is twice the patience used to reduce learn rate
lr=args.learning_rate,
dump_path=dump_path,
augment=args.augment,
tempo_augment=args.tempo_augment,
seq_len=args.seq_len
)
log_dir = os.path.join(args.log_root, time_stamp)
log_writer = SummaryWriter(log_dir=log_dir)
text = ""
arguments = np.sort([arg for arg in vars(args)])
for arg in arguments:
text += "**{}:** {}<br>".format(arg, getattr(args, arg))
for key in train_parameters.keys():
text += "**{}:** {}<br>".format(key, train_parameters[key])
log_writer.add_text("run_config", text)
log_writer.add_text("cmd", " ".join(sys.argv))
net_config = {'film_layers': args.film_layers,
'n_encoder_layers': args.n_encoder_layers,
'n_filters_start': args.n_filters_start,
'rnn_size': args.rnn_size,
'rnn_layer': args.rnn_layer,
'use_lstm': args.use_lstm,
'audio_encoder': args.audio_encoder,
'spec_enc': args.spec_enc}
# store the network configuration
with open(os.path.join(dump_path, 'net_config.json'), "w") as f:
json.dump(net_config, f)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
network = ConditionalUNet(net_config)
if args.param_path is not None:
print('Loading model from {}'.format(args.param_path))
network.load_state_dict(torch.load(args.param_path, map_location=lambda storage, location: storage))
# no augmentation on the validation set
val_config = copy.deepcopy(config)
val_config['tempo_factors'] = [1000]
n_frames = network.perf_encoder.n_input_frames
train_dataset = load_dataset(train_path, config, n_frames=n_frames, augment=train_parameters['augment'],
scale_factor=args.scale_factor, all_tempi=args.all_tempi)
val_dataset = load_dataset(val_path, val_config, n_frames=n_frames, augment=False, scale_factor=args.scale_factor)
specs = [train_dataset.performances[elem][1000]['spec'] for elem in train_dataset.performances]
means = np.mean(np.concatenate(specs, axis=-1), axis=1)
stds = np.std(np.concatenate(specs, axis=-1), axis=1)
network.perf_encoder.set_stats(means, stds)
if not args.use_lstm:
# dataset wrapper to use same script for sequential and non sequential case
train_dataset = NonSequentialDatasetWrapper(train_dataset)
val_dataset = NonSequentialDatasetWrapper(val_dataset)
print(f"Putting model to {device}")
network.to(device)
optim = torch.optim.Adam(network.parameters(), lr=train_parameters['lr'], weight_decay=args.weight_decay)
scheduler = ReduceLROnPlateau(optim, mode="min", patience=args.patience, factor=0.5, verbose=True)
patience = train_parameters['max_patience']
min_loss = np.infty
batch_size = train_parameters['batch_size']
for epoch in range(train_parameters['num_epochs']):
tr_stats = iterate_dataset(network, optim, train_dataset, batch_size, seq_len=train_parameters['seq_len'],
train=True, device=device, threshold=0.5, clip_grads=args.clip_grads)
tr_loss, tr_prec, tr_rec = tr_stats['loss'], tr_stats['precision'], tr_stats['recall']
val_stats = iterate_dataset(network, None, val_dataset, batch_size=batch_size, seq_len=train_parameters['seq_len'],
train=False, device=device, threshold=0.5)
val_loss, val_prec, val_rec = val_stats['loss'], val_stats['precision'], val_stats['recall']
scheduler.step(val_loss)
if val_loss < min_loss:
min_loss = val_loss
            color = '\033[92m'
patience = train_parameters['max_patience']
if not args.no_save:
print("Store best model...")
torch.save(network.state_dict(), os.path.join(train_parameters['dump_path'], "best_model.pt"))
else:
color = '\033[91m'
patience -= 1
# store latest model
if not args.no_save:
            torch.save(network.state_dict(), os.path.join(train_parameters['dump_path'], "latest_model.pt"))
tr_f1 = 2*(tr_prec*tr_rec)/(tr_prec + tr_rec) if tr_prec > 0 and tr_rec > 0 else 0
val_f1 = 2*(val_prec*val_rec)/(val_prec + val_rec) if val_prec > 0 and val_rec > 0 else 0
log_writer.add_scalar('training/loss', tr_loss, epoch)
log_writer.add_scalar('training/precision', tr_prec, epoch)
log_writer.add_scalar('training/recall', tr_rec, epoch)
log_writer.add_scalar('training/f1', tr_f1, epoch)
log_writer.add_scalar('training/lr', optim.param_groups[0]['lr'], epoch)
log_writer.add_scalar('validation/loss', val_loss, epoch)
log_writer.add_scalar('validation/precision', val_prec, epoch)
log_writer.add_scalar('validation/recall', val_rec, epoch)
log_writer.add_scalar('validation/f1', val_f1, epoch)
print("\n{}Epoch {} | Train Loss: {}, Precision: {}, Recall: {}\033[0m".format(color, epoch, tr_loss, tr_prec, tr_rec))
print("{}Epoch {} | Val Loss: {}, Precision: {}, Recall: {}\033[0m".format(color, epoch, val_loss, val_prec, val_rec))
|
{"hexsha": "a755e8f0d0d91c58b422a434e05b50846b312721", "size": 9864, "ext": "py", "lang": "Python", "max_stars_repo_path": "audio_conditioned_unet/train_model.py", "max_stars_repo_name": "CPJKU/audio_conditioned_unet", "max_stars_repo_head_hexsha": "68f20f5280079e99be260f9fe9933c0064eb2d7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2019-10-17T01:54:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T17:37:07.000Z", "max_issues_repo_path": "audio_conditioned_unet/train_model.py", "max_issues_repo_name": "CPJKU/audio_conditioned_unet", "max_issues_repo_head_hexsha": "68f20f5280079e99be260f9fe9933c0064eb2d7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-24T16:52:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-06T08:16:13.000Z", "max_forks_repo_path": "audio_conditioned_unet/train_model.py", "max_forks_repo_name": "CPJKU/audio_conditioned_unet", "max_forks_repo_head_hexsha": "68f20f5280079e99be260f9fe9933c0064eb2d7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-10-15T14:18:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-20T09:15:59.000Z", "avg_line_length": 46.5283018868, "max_line_length": 145, "alphanum_fraction": 0.6903892944, "include": true, "reason": "import numpy", "num_tokens": 2264}
|
import os
import os.path as osp
import pandas as pd
import numpy as np
from PIL import Image
import multiprocessing
import argparse
################################################################################
# Evaluate the performance by computing mIoU.
# It assumes that every CAM or CRF dict file has already been inferred and saved.
# For CAM, the background threshold is searched over [0.10, 0.39] in steps of 0.01.
#
# If you want to evaluate CAM performance...
# python evaluation.py --name [exp_name] --task cam --dict_dir dict
#
# Or if you want to evaluate CRF performance of certain alpha (let, alpha=4)...
# python evaluation.py --name [exp_name] --task crf --dict_dir crf/04
################################################################################
categories = ['background','aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow',
'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
def do_python_eval(predict_folder, gt_folder, name_list, num_cls, task, threshold, printlog=False):
TP = []
P = []
T = []
for i in range(num_cls):
TP.append(multiprocessing.Value('i', 0, lock=True))
P.append(multiprocessing.Value('i', 0, lock=True))
T.append(multiprocessing.Value('i', 0, lock=True))
def compare(start,step,TP,P,T,task,threshold):
for idx in range(start,len(name_list),step):
name = name_list[idx]
if task=='cam':
predict_file = os.path.join(predict_folder,'%s.npy'%name)
predict_dict = np.load(predict_file, allow_pickle=True).item()
h, w = list(predict_dict.values())[0].shape
tensor = np.zeros((21,h,w),np.float32)
for key in predict_dict.keys():
tensor[key+1] = predict_dict[key]
tensor[0,:,:] = threshold
predict = np.argmax(tensor, axis=0).astype(np.uint8)
if task=='crf':
predict_file = os.path.join(predict_folder,'%s.npy'%name)
predict_dict = np.load(predict_file, allow_pickle=True).item()
h, w = list(predict_dict.values())[0].shape
tensor = np.zeros((21,h,w),np.float32)
for key in predict_dict.keys():
tensor[key] = predict_dict[key]
predict = np.argmax(tensor, axis=0).astype(np.uint8)
gt_file = os.path.join(gt_folder,'%s.png'%name)
gt = np.array(Image.open(gt_file))
cal = gt<255 # Reject object boundary
mask = (predict==gt) * cal
for i in range(num_cls):
P[i].acquire()
P[i].value += np.sum((predict==i)*cal)
P[i].release()
T[i].acquire()
T[i].value += np.sum((gt==i)*cal)
T[i].release()
TP[i].acquire()
TP[i].value += np.sum((gt==i)*mask)
TP[i].release()
p_list = []
for i in range(8):
p = multiprocessing.Process(target=compare, args=(i,8,TP,P,T,task,threshold))
p.start()
p_list.append(p)
for p in p_list:
p.join()
IoU = []
T_TP = []
P_TP = []
FP_ALL = []
FN_ALL = []
for i in range(num_cls):
IoU.append(TP[i].value/(T[i].value+P[i].value-TP[i].value+1e-10))
T_TP.append(T[i].value/(TP[i].value+1e-10))
P_TP.append(P[i].value/(TP[i].value+1e-10))
FP_ALL.append((P[i].value-TP[i].value)/(T[i].value + P[i].value - TP[i].value + 1e-10))
FN_ALL.append((T[i].value-TP[i].value)/(T[i].value + P[i].value - TP[i].value + 1e-10))
loglist = {}
for i in range(num_cls):
loglist[categories[i]] = IoU[i] * 100
miou = np.mean(np.array(IoU))
loglist['mIoU'] = miou * 100
if printlog:
for i in range(num_cls):
if i%2 != 1:
print('%11s:%7.3f%%'%(categories[i],IoU[i]*100),end='\t')
else:
print('%11s:%7.3f%%'%(categories[i],IoU[i]*100))
print('\n======================================================')
print('%11s:%7.3f%%'%('mIoU',miou*100))
return loglist
def writedict(file, dictionary):
s = ''
for key in dictionary.keys():
sub = '%s:%s '%(key, dictionary[key])
s += sub
s += '\n'
file.write(s)
def writelog(filepath, metric, comment):
    logfile = open(filepath,'a')
import time
logfile.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
logfile.write('\t%s\n'%comment)
writedict(logfile, metric)
logfile.write('=====================================\n')
logfile.close()
def validation(type_list, name, task, dict_dir, gt_dir='./data/VOC2012/SegmentationClass', logger=None):
eval_list = './data/VOC2012/ImageSets/Segmentation/' + type_list + '.txt'
df = pd.read_csv(eval_list, names=['filename'])
name_list = df['filename'].values
pred_dir = osp.join('./experiments', name, dict_dir)
logger.info('Evaluate ' + pred_dir + ' with ' + eval_list)
max_miou = 0
max_t = 0
for i in range(30):
t = i/100.0+0.1
loglist = do_python_eval(pred_dir, gt_dir, name_list, 21, task, t, printlog=False)
        logger.info('%d/30 threshold: %.3f\tmIoU: %.3f%%'%(i, t, loglist['mIoU']))
if loglist['mIoU']>max_miou:
max_miou = loglist['mIoU']
max_t = t
return max_miou, max_t
def do_eval_from_arg(type_list, name, task, dict_dir, gt_dir='./data/VOC2012/SegmentationClass'):
eval_list = './data/VOC2012/ImageSets/Segmentation/' + type_list + '.txt'
df = pd.read_csv(eval_list, names=['filename'])
name_list = df['filename'].values
pred_dir = osp.join('./experiments', name, dict_dir)
print('Evaluate ' + pred_dir + ' with ' + eval_list)
if task=='cam':
for i in range(30):
t = i/100.0+0.1
loglist = do_python_eval(pred_dir, gt_dir, name_list, 21, task, t, printlog=False)
            print('%d/30 threshold: %.3f\tmIoU: %.3f%%'%(i, t, loglist['mIoU']))
elif task=='crf':
loglist = do_python_eval(pred_dir, gt_dir, name_list, 21, task, 0, printlog=True)
return loglist['mIoU']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--list", default="train", type=str)
parser.add_argument("--name", required=True, type=str)
parser.add_argument("--task", required=True, type=str)
parser.add_argument("--dict_dir", required=True, type=str)
parser.add_argument("--gt_dir", default='./data/VOC2012/SegmentationClass', type=str)
args = parser.parse_args()
loglist = do_eval_from_arg(args.list, args.name, args.task, args.dict_dir, gt_dir=args.gt_dir)
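For reference, the per-class score computed in do_python_eval is the Jaccard index, $\mathrm{IoU}_i = TP_i / (T_i + P_i - TP_i)$, where $T_i$ counts ground-truth pixels and $P_i$ predicted pixels of class $i$ (boundary pixels labeled 255 are excluded); mIoU is its mean over the 21 classes.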
|
{"hexsha": "381e8ed2e6ad8d070064cbd4f888eb6cea607b8f", "size": 6870, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation.py", "max_stars_repo_name": "KAIST-vilab/OC-CSE", "max_stars_repo_head_hexsha": "35703390e13621a865aef4d9b75202c8c9e5822b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-08-06T07:03:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T07:14:48.000Z", "max_issues_repo_path": "evaluation.py", "max_issues_repo_name": "KAIST-vilab/OC-CSE", "max_issues_repo_head_hexsha": "35703390e13621a865aef4d9b75202c8c9e5822b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-30T16:39:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T03:14:00.000Z", "max_forks_repo_path": "evaluation.py", "max_forks_repo_name": "KAIST-vilab/OC-CSE", "max_forks_repo_head_hexsha": "35703390e13621a865aef4d9b75202c8c9e5822b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-08-06T05:28:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T13:53:20.000Z", "avg_line_length": 38.1666666667, "max_line_length": 112, "alphanum_fraction": 0.5569141194, "include": true, "reason": "import numpy", "num_tokens": 1816}
|
import aiofiles
import asyncio
import simpleaudio as sa
import numpy as np
import struct
from datetime import datetime
import av
from av.audio.fifo import AudioFifo
'''
Cause of the stutter: blocking file-IO reads stall the process,
so the reads must be made asynchronous.
Also tests AudioFifo.
'''
async def read_header_wav_async(f):
#RIFF
await f.read(12)
#FORMAT
id_chunk = await f.read(4)
s = await f.read(4)
size_chunk, *_ = struct.unpack('<I', s)
print(id_chunk, size_chunk)
s = await f.read(16)
audio_format, num_channels, sample_rate, byte_rate, BlockAlign, BitsPerSample = struct.unpack('<hhIIhh',s)
print(audio_format, num_channels, sample_rate, byte_rate, BlockAlign, BitsPerSample)
#DATA?
#LIST?!
    # the loop condition must not be a bare True!
while id_chunk != b'data':
id_chunk = await f.read(4)
# 26
s = await f.read(4)
size_chunk, *_ = struct.unpack('<I',s)
print(id_chunk, size_chunk)
if id_chunk != b'data':
await f.read(size_chunk)
else:
break
print(id_chunk, size_chunk)
return num_channels, int(BitsPerSample/8), sample_rate, byte_rate
def read_header_wav(f):
#RIFF
f.read(12)
#FORMAT
b = f.read(4)
size_chunk, *_ = struct.unpack('<I',f.read(4))
print(b, size_chunk)
audio_format, num_channels, sample_rate, byte_rate, BlockAlign, BitsPerSample = struct.unpack('<hhIIhh',f.read(16))
print(audio_format, num_channels, sample_rate, byte_rate, BlockAlign, BitsPerSample)
#DATA?
#LIST?!
while True:
id_chunk = f.read(4)
# 26
size_chunk, *_ = struct.unpack('<I',f.read(4))
print(id_chunk, size_chunk)
if id_chunk != b'data':
f.read(size_chunk)
else:
break
print(b, size_chunk)
return num_channels, int(BitsPerSample/8), sample_rate, byte_rate
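# Cross-check sketch (assumes a plain PCM input file): the standard-library
# wave module should report the same channel count, sample width in bytes,
# and sample rate that read_header_wav extracts, e.g.
#   import wave
#   with wave.open(filename_audio, 'rb') as w:
#       print(w.getnchannels(), w.getsampwidth(), w.getframerate())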
async def parser(event_end_parse, filename_audio, audio_fifo, sample_per_frame, sample_rate):
    '''Lazy producer: only emits frames at a throttled pace, nothing else.'''
    # time needed to produce one frame
sec_per_frame = sample_per_frame/sample_rate
with av.open(filename_audio) as container:
stream = container.streams.get(audio=0)
for frame in container.decode(stream):
await asyncio.sleep(sec_per_frame*0.95)
audio_fifo.write(frame)
#print('parser: add data to queue')
    print('Finished producing')
    # notify the consumer that production is done
event_end_parse.set()
async def player(event_end_parse, event_end_play, audio_fifo, num_channels, BytsPerSample, sample_rate, sec_play:int):
    '''Consumer: just waits for data, then plays it.'''
samples_play_once = sample_rate*sec_play
event_played_frame1 = asyncio.Event()
first = True
data_wait_play = None
while True:
#print('player')
if event_end_parse.is_set() and audio_fifo.samples < samples_play_once:
            # no data left, so notify and stop
break
        #--------- read data -----------
        #print('player waiting to read; samples in queue', audio_fifo.samples)
if data_wait_play is None:
if audio_fifo.samples >= samples_play_once:
                print('player: begin reading', samples_play_once, sec_play)
frame = audio_fifo.read(samples_play_once)
print('player: get data from queue')
data_wait_play = frame.planes[0].to_bytes()
else:
await asyncio.sleep(0.001)
# #n_sample = audio_fifo.samples
# #if n_sample >= samples_play_once:
# #sec_play_real = n_sample/samples_play_once
                # # read by sample count * playback seconds
# #frame_np = frame.to_ndarray()
# #print(frame_np, frame_np.shape)
# #data_bytes = await queue.get()
else:
            #----------- playback: must not play too fast -------------
if first:
first = False
else:
await event_played_frame1.wait()
print('player: start play', datetime.now())
play_obj = sa.play_buffer(data_wait_play, num_channels, BytsPerSample, sample_rate)
            # wait for the playback duration
data_wait_play = None
#play_obj.wait_done()
await asyncio.sleep(sec_play)
            # signal that playback finished so the next frame can play
event_played_frame1.set()
print('player: end play', datetime.now())
    print('player exiting')
event_end_play.set()
# async def player(event_end_parse, event_end_play, audio_fifo, num_channels, BytsPerSample, sample_rate, sec_play:int):
#     '''Consumer: just waits for data, then plays it (earlier blocking variant).'''
#     samples_play_once = sample_rate*sec_play
#     event_played_frame1 = asyncio.Event()
#     first = True
#     data_wait_play = None
#     while True:
#         #print('player')
#         if event_end_parse.is_set() and audio_fifo.samples < samples_play_once:
#             # no data left, so notify and stop
#             break
#         #--------- read data -----------
#         #print('player waiting to read; samples in queue', audio_fifo.samples)
#         print('player: begin reading', samples_play_once, sec_play)
#         frame = audio_fifo.read(samples_play_once)
#         print('player: get data from queue')
#         data_wait_play = frame.planes[0].to_bytes()
#         print('player: start play', datetime.now())
#         play_obj = sa.play_buffer(data_wait_play, num_channels, BytsPerSample, sample_rate)
#         #play_obj.wait_done()
#         await asyncio.sleep(sec_play)
#         # signal that playback finished so the next frame can play
#         print('player: end play', datetime.now())
#         await asyncio.sleep(0.001)
#     print('player exiting')
#     event_end_play.set()
async def main(event_loop, filename_audio, sec_play:int):
#queue = asyncio.Queue()
audio_fifo = AudioFifo()
event_end_parse = asyncio.Event()
event_end_play = asyncio.Event()
#f = await aiofiles.open(filename_audio, 'rb')
    # used only to read the header parameters
with open(filename_audio, 'rb') as f:
num_channels, BytsPerSample, sample_rate, byte_rate = read_header_wav(f)
    # simply start the two coroutines; they must not be awaited here
sample_per_frame = 1024
coroutine1 = parser(event_end_parse, filename_audio, audio_fifo, sample_per_frame, sample_rate)
task1 = event_loop.create_task(coroutine1)
coroutine2 = player(event_end_parse, event_end_play, audio_fifo, num_channels, BytsPerSample, sample_rate, sec_play)
task2 = event_loop.create_task(coroutine2)
    # wait for playback to finish
    #print('finished')
await event_end_play.wait()
# data_bytes = None
# is_playing = False
# while f:
# if data_bytes is None:
# print('parser: begin parse')
# data_bytes = await f.read(byte_rate*sec_play) # 176400
# print('parser: add data to queue')
# else:
# #not None
# if is_playing:
# pass
# else:
# is_playing = True
# print('player: get data from queue')
# play_obj = sa.play_buffer(data_bytes, num_channels, BytsPerSample, sample_rate)
# data_bytes = None
# print('player: start play ')
# await asyncio.sleep(sec_play)
# print('player: end play')
# is_playing = False
#await asyncio.sleep(0.01)
if __name__ == "__main__":
filename_audio = 'D:/dataset/多瑙河之波.wav'
sec_play = 5
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
event_loop.run_until_complete(main(event_loop, filename_audio, sec_play))
except KeyboardInterrupt:
        '''exit on keypress (Ctrl-C)'''
pass
#now = event_loop.time()
# print(asyncio.Task.all_tasks())
event_loop.close()
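# On the stutter issue noted in the module docstring: besides aiofiles (which
# delegates file IO to a thread pool), Python 3.9+ offers asyncio.to_thread
# for pushing any blocking read off the event loop, e.g. (sketch):
#   data = await asyncio.to_thread(f.read, byte_rate * sec_play)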
|
{"hexsha": "88083bb429a32d9ba2132278a7cd23be4bfc6f73", "size": 7497, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo_play_parse_wav_fifo_async.py", "max_stars_repo_name": "xuqinghan/flv-extract-audio-and-video", "max_stars_repo_head_hexsha": "e4c0c42119e6ea4478817c04e21ffe341bfc4189", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo_play_parse_wav_fifo_async.py", "max_issues_repo_name": "xuqinghan/flv-extract-audio-and-video", "max_issues_repo_head_hexsha": "e4c0c42119e6ea4478817c04e21ffe341bfc4189", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo_play_parse_wav_fifo_async.py", "max_forks_repo_name": "xuqinghan/flv-extract-audio-and-video", "max_forks_repo_head_hexsha": "e4c0c42119e6ea4478817c04e21ffe341bfc4189", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3146551724, "max_line_length": 120, "alphanum_fraction": 0.606242497, "include": true, "reason": "import numpy", "num_tokens": 1978}
|
# TODO: Need to finish
# Need to test
function fit_fs_imcmc_pt!(cfs::ConstantsFS,
dfs::DataFS;
nmcmc::Int, nburn::Int,
# Args for PT:
tempers::Vector{Float64},
inits=nothing,
save_all_states=false,
randpair=0.0,
# End of PT Args.
# Args are for iMCMC:
batchprop::Float64=0.1,
batchsizes=nothing,
prior_thin::Int=2,
imcmc_burn_prop=0.6,
swap_freq::Float64=1.0,
# End of iMCMC args.
tfs::Union{Nothing, Vector{TunersFS}}=nothing,
monitors=[monitor1, monitor2],
fix::Vector{Symbol}=Symbol[],
thins::Vector{Int}=[2, nsamps_to_thin(10, nmcmc)],
ndden_samps::Int=200,
printFreq::Int=0,
checkpoint=0,
computeDIC::Bool=true, computeLPML::Bool=true,
computedden::Bool=true,
sb_ibp::Bool=false,
use_repulsive::Bool=true,
Z_marg_lamgam::Float64=1.0,
Z_marg_lamgam_decay_rate::Float64=100.0,
Z_marg_lamgam_min::Float64=0.05,
verbose::Int=1,
time_updates=false,
seed::Int=-1)
printMsg(iter::Int, msg::String) = if printFreq > 0 && iter % printFreq == 0
print(msg)
end
# How frequently to thin dden
thin_dden = nsamps_to_thin(ndden_samps, nmcmc)
# Number of temperatures
num_tempers = length(tempers)
@assert mod(num_tempers, 2) == 0
if tfs != nothing
@assert num_tempers == length(tfs)
end
# Assert swap frequency is in unit interval
@assert 0 < swap_freq <= 1.0
# Cache for swap counts
swapcounts = zeros(Int, num_tempers, num_tempers)
# Cache for pair counts
paircounts = zeros(Int, num_tempers, num_tempers)
# Set random seed if needed
if seed >= 0
Random.seed!(seed)
end
if verbose >= 1
fixed_vars_str = join(fix, ", ")
if fixed_vars_str == ""
fixed_vars_str = "nothing"
end
println("fixing: $fixed_vars_str")
println("Use stick-breaking IBP: $(sb_ibp)")
println("Z_marg_lamgam: $(Z_marg_lamgam)")
println("Z_marg_lamgam_decay_rate: $(Z_marg_lamgam_decay_rate)")
println("Z_marg_lamgam_min: $(Z_marg_lamgam_min)")
println("use_repulsive: $(use_repulsive)")
println("batchprop: $(batchprop)")
println("batchsizes: $(batchsizes)")
flush(stdout)
end
@assert printFreq >= -1
if printFreq == 0
numPrints = 10
printFreq = Int(ceil((nburn + nmcmc) / numPrints))
end
# Instantiate (but not initialize) CPO stream
if computeLPML
cpoStream = MCMC.CPOstream{Float64}()
end
# DIC
if computeDIC
dicStream = initDicStream(dfs.data)
loglikeDIC(param::DICparam) = computeLoglikeDIC(dfs.data, param)
convertStateToDicParam(s::State)::DICparam = let
_convertStateToDicParam(s, cfs.constants, dfs.data)
end
end
# Cache for data density
dden = Matrix{Vector{Float64}}[]
# Update function
function update!(states, args, iter::Int)
# Whether or not to marginalize over lambda and gamma.
# We want to do this more often at the beginning, and less at the end.
zmarg = ((Z_marg_lamgam - Z_marg_lamgam_min) *
exp(-iter/Z_marg_lamgam_decay_rate) +
Z_marg_lamgam_min) > rand()
s_arg_vec = pmap(states, args) do s, arg
tu = (arg[:c].constants.temper == 1) && time_updates
# For reproducibility.
if seed > - 1
temper_idx = findfirst(tau -> tau == arg[:c].constants.temper, tempers)
Random.seed!(iter + temper_idx + seed)
end
# Update state using trained prior
imcmc_all_params = let
iter < imcmc_burn_prop * nburn
end
update_via_trained_prior!(s, dfs, arg[:c], arg[:t],
batchprop, prior_thin,
batchsizes=batchsizes,
fix=fix, use_repulsive=use_repulsive,
Z_marg_lamgam=zmarg, sb_ibp=sb_ibp,
time_updates=time_updates, temper=1.0,
minibatch_update_all_params=imcmc_all_params,
verbose=verbose-2)
(s, arg)
end
states = [sa[1] for sa in s_arg_vec]
args = [sa[2] for sa in s_arg_vec]
# Swap Chains
if verbose > 0
println()
end
llf(s, tuner) = compute_marg_loglike(s, cfs.constants, dfs.data, tuner)
if swap_freq > rand()
swapchains!(states, llf, tempers,
paircounts=paircounts, swapcounts=swapcounts,
randpair=randpair, verbose=verbose)
end
if iter == nburn
println("swapcounts / paircounts:")
println(swapcounts ./ (paircounts .+ 1e-6))
println("Resetting swapcounts, paircounts ...")
swapcounts .= 0.0
paircounts .= 0.0
end
    # Pull out inner components for convenience
s = states[1]
c = args[1][:c]
d = dfs
ll = args[1][:ll]
    # Append loglike
append!(ll, compute_marg_loglike(s.theta, c.constants, d.data, 1.0))
if computedden && iter > nburn && (iter - nburn) % thin_dden == 0
# NOTE: `datadensity(s, c, d)` returns an (I x J) matrix of vectors of
# length g.
append!(dden, [datadensity(s.theta, c.constants, d.data)])
end
if computeLPML && iter > nburn
# Inverse likelihood for each data point
like = [[compute_like(i, n, s.theta, c.constants, d.data)
for n in 1:d.data.N[i]] for i in 1:d.data.I]
# Update (or initialize) CPO
MCMC.updateCPO(cpoStream, vcat(like...))
# Add to printMsg
printMsg(iter, " -- LPML: $(MCMC.computeLPML(cpoStream))")
end
if computeDIC && iter > nburn
# Update DIC
MCMC.updateDIC(dicStream, s.theta, updateParams,
loglikeDIC, convertStateToDicParam)
# Add to printMsg
printMsg(iter, " -- DIC: $(MCMC.computeDIC(dicStream, loglikeDIC,
paramMeanCompute))")
DICg = MCMC.DIC_gelman(ll[(nburn+1):end])
printMsg(iter, " -- DIC_gelman: $(DICg)")
end
printMsg(iter, "\n")
flush(stdout)
return states, args
end
# Create vectors of states
states = if inits == nothing
println("Using random inits!")
[let
s = genInitialState(cfs.constants, dfs.data)
s.eps .= 0.0
StateFS{Float64}(s, dfs)
end for _ in tempers]
else
@assert length(inits) == num_tempers
for _init in inits
_init.theta.eps .= 0.0
end
inits
end
# Create Args
args = [let
ll = Float64[] # FIXME
c = deepcopy(cfs)
c.constants.temper = tempers[i]
t = if tfs == nothing
TunersFS(Tuners(dfs.data.y, cfs.constants.K),
states[1].theta, dfs.X)
else
tfs[i]
end
Dict(:ll => ll, :c => c, :t => t)
end
for i in 1:num_tempers]
println("Running Gibbs sampler ...")
samples, states, args = let
MCMC.gibbs_pt(states, args, update!, monitors=monitors,
thins=thins, nmcmc=nmcmc, nburn=nburn,
printFreq=printFreq,
save_all_states=save_all_states,
printlnAfterMsg=false)
end
out = Dict(:samples => samples,
:lastState => states[1],
:inits => inits,
:c => cfs,
:d => dfs,
:lls => [arg[:ll] for arg in args],
:save_all_states => save_all_states,
:tempers => tempers,
:swap_freq => swap_freq,
:randpair => randpair,
:paircounts => paircounts,
:swapcounts => swapcounts)
if computeDIC || computeLPML
LPML = computeLPML ? MCMC.computeLPML(cpoStream) : NaN
Dmean, pD = computeDIC ? MCMC.computeDIC(dicStream, loglikeDIC,
paramMeanCompute,
return_Dmean_pD=true) : (NaN, NaN)
ll1 = args[1][:ll]
DICg = MCMC.DIC_gelman(ll1[(nburn+1):end])
metrics = Dict(:LPML => LPML,
:DIC => Dmean + pD,
:Dmean => Dmean,
:pD => pD,
:DICg => DICg)
println()
println("metrics:")
for (k, v) in metrics
out[k] = v
println("$k => $v")
end
flush(stdout)
end
if computedden
out[:dden] = dden
end
out[:nburn] = nburn
out[:nmcmc] = nmcmc
out[:batchprop] = batchprop
out[:batchsizes] = batchsizes
out[:prior_thin] = prior_thin
# Return all states if requested
if save_all_states
out[:all_last_states] = deepcopy(states)
end
return out
end
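For reference, the conventional parallel-tempering swap rule (which swapchains! presumably implements through the marginal log-likelihood llf) accepts an exchange between chains at temperatures $\tau_i, \tau_j$ with probability $\min\{1, \exp[(\tau_i - \tau_j)(\ell_j - \ell_i)]\}$, where $\ell_i$ is the log-likelihood of chain $i$'s current state.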
|
{"hexsha": "404e7715dd8e3171af5b918681e8f4a0e17f439b", "size": 9297, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Model/FeatureSelect/fit_feature_select_imcmc_pt.jl", "max_stars_repo_name": "luiarthur/CytofRepFAM.jl", "max_stars_repo_head_hexsha": "1f997d1620d74861c5bde5559ebdd1e6c449b9e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Model/FeatureSelect/fit_feature_select_imcmc_pt.jl", "max_issues_repo_name": "luiarthur/CytofRepFAM.jl", "max_issues_repo_head_hexsha": "1f997d1620d74861c5bde5559ebdd1e6c449b9e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-02-05T01:26:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-16T04:13:03.000Z", "max_forks_repo_path": "src/Model/FeatureSelect/fit_feature_select_imcmc_pt.jl", "max_forks_repo_name": "luiarthur/CytofRepFAM.jl", "max_forks_repo_head_hexsha": "1f997d1620d74861c5bde5559ebdd1e6c449b9e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7847682119, "max_line_length": 79, "alphanum_fraction": 0.5372700871, "num_tokens": 2494}
|
[STATEMENT]
lemma ideal_generated_subset2:
assumes ac: "ideal_generated {a} \<subseteq> ideal_generated {c}"
and bc: "ideal_generated {b} \<subseteq> ideal_generated {c}"
shows "ideal_generated {a,b} \<subseteq> ideal_generated {c}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ideal_generated {a, b} \<subseteq> ideal_generated {c}
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> ideal_generated {a, b} \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> ideal_generated {a, b} \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
assume x: "x \<in> ideal_generated {a, b}"
[PROOF STATE]
proof (state)
this:
x \<in> ideal_generated {a, b}
goal (1 subgoal):
1. \<And>x. x \<in> ideal_generated {a, b} \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
show " x \<in> ideal_generated {c}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> ideal_generated {c}
[PROOF STEP]
proof (cases "a=b")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. a = b \<Longrightarrow> x \<in> ideal_generated {c}
2. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
a = b
goal (2 subgoals):
1. a = b \<Longrightarrow> x \<in> ideal_generated {c}
2. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> ideal_generated {c}
[PROOF STEP]
using x bc
[PROOF STATE]
proof (prove)
using this:
x \<in> ideal_generated {a, b}
ideal_generated {b} \<subseteq> ideal_generated {c}
goal (1 subgoal):
1. x \<in> ideal_generated {c}
[PROOF STEP]
unfolding True ideal_generated_repeated
[PROOF STATE]
proof (prove)
using this:
x \<in> ideal_generated {b}
ideal_generated {b} \<subseteq> ideal_generated {c}
goal (1 subgoal):
1. x \<in> ideal_generated {c}
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
x \<in> ideal_generated {c}
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
a \<noteq> b
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
obtain k where k: "a = c * k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>k. a = c * k \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using dvd_ideal_generated_singleton[OF ac]
[PROOF STATE]
proof (prove)
using this:
c dvd a
goal (1 subgoal):
1. (\<And>k. a = c * k \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding dvd_def
[PROOF STATE]
proof (prove)
using this:
\<exists>k. a = c * k
goal (1 subgoal):
1. (\<And>k. a = c * k \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a = c * k
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
obtain k' where k': "b = c * k'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>k'. b = c * k' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using dvd_ideal_generated_singleton[OF bc]
[PROOF STATE]
proof (prove)
using this:
c dvd b
goal (1 subgoal):
1. (\<And>k'. b = c * k' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding dvd_def
[PROOF STATE]
proof (prove)
using this:
\<exists>k. b = c * k
goal (1 subgoal):
1. (\<And>k'. b = c * k' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
b = c * k'
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
obtain f where f: "sum (\<lambda>i. f i * i) {a,b} = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>f. (\<Sum>i\<in>{a, b}. f i * i) = x \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (rule obtain_sum_ideal_generated[OF x], simp)
[PROOF STATE]
proof (state)
this:
(\<Sum>i\<in>{a, b}. f i * i) = x
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
hence "x = f a * a + f b * b "
[PROOF STATE]
proof (prove)
using this:
(\<Sum>i\<in>{a, b}. f i * i) = x
goal (1 subgoal):
1. x = f a * a + f b * b
[PROOF STEP]
unfolding sum_two_elements[OF False]
[PROOF STATE]
proof (prove)
using this:
f a * a + f b * b = x
goal (1 subgoal):
1. x = f a * a + f b * b
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x = f a * a + f b * b
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
x = f a * a + f b * b
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
have "... = f a * (c * k) + f b * (c * k')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f a * a + f b * b = f a * (c * k) + f b * (c * k')
[PROOF STEP]
unfolding k k'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f (c * k) * (c * k) + f (c * k') * (c * k') = f (c * k) * (c * k) + f (c * k') * (c * k')
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
f a * a + f b * b = f a * (c * k) + f b * (c * k')
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
f a * a + f b * b = f a * (c * k) + f b * (c * k')
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
have "... = (f a * k) * c + (f b * k') * c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f a * (c * k) + f b * (c * k') = f a * k * c + f b * k' * c
[PROOF STEP]
by (simp only: mult_assoc) (simp only: mult_commute)
[PROOF STATE]
proof (state)
this:
f a * (c * k) + f b * (c * k') = f a * k * c + f b * k' * c
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
f a * (c * k) + f b * (c * k') = f a * k * c + f b * k' * c
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
have "... = (f a * k + f b * k') * c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f a * k * c + f b * k' * c = (f a * k + f b * k') * c
[PROOF STEP]
by (simp only: mult_commute) (simp only: distrib_left)
[PROOF STATE]
proof (state)
this:
f a * k * c + f b * k' * c = (f a * k + f b * k') * c
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
x = (f a * k + f b * k') * c
[PROOF STEP]
have "x = (f a * k + f b * k') * c"
[PROOF STATE]
proof (prove)
using this:
x = (f a * k + f b * k') * c
goal (1 subgoal):
1. x = (f a * k + f b * k') * c
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
x = (f a * k + f b * k') * c
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> x \<in> ideal_generated {c}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
x = (f a * k + f b * k') * c
goal (1 subgoal):
1. x \<in> ideal_generated {c}
[PROOF STEP]
unfolding ideal_generated_singleton
[PROOF STATE]
proof (prove)
using this:
x = (f a * k + f b * k') * c
goal (1 subgoal):
1. x \<in> {k * c |k. k \<in> UNIV}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> ideal_generated {c}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<in> ideal_generated {c}
goal:
No subgoals!
[PROOF STEP]
qed
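In ordinary ring notation the proof runs: from the singleton inclusions obtain $k, k'$ with $a = ck$ and $b = ck'$; write $x = f(a)\,a + f(b)\,b$, substitute, and regroup to $x = (f(a)k + f(b)k')\,c$, exhibiting $x$ as a member of the principal ideal generated by $c$.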
|
{"llama_tokens": 3276, "file": "Echelon_Form_Rings2", "length": 43}
|
function class_info=gen_class_info_nyud()
class_info=[];
class_info.class_names={
'wall'
'floor'
'cabinet'
'bed'
'chair'
'sofa'
'table'
'door'
'window'
'bookshelf'
'picture'
'counter'
'blinds'
'desk'
'shelves'
'curtain'
'dresser'
'pillow'
'mirror'
'floor mat'
'clothes'
'ceiling'
'books'
'refridgerator'
'television'
'paper'
'towel'
'shower curtain'
'box'
'whiteboard'
'person'
'night stand'
'toilet'
'sink'
'lamp'
'bathtub'
'bag'
'otherstructure'
'otherfurniture'
'otherprop'
'void'};
class_label_values=uint8([1:40 255]);
class_info.class_label_values=class_label_values;
class_info.background_label_value=uint8(1);
class_info.void_label_values=uint8(255);
class_info.mask_cmap = VOClabelcolormap(256);
class_info=process_class_info(class_info);
end
|
{"author": "guosheng", "repo": "refinenet", "sha": "0d62007bd60ba983d48acaee6ee29988c7171a91", "save_path": "github-repos/MATLAB/guosheng-refinenet", "path": "github-repos/MATLAB/guosheng-refinenet/refinenet-0d62007bd60ba983d48acaee6ee29988c7171a91/main/gen_class_info_nyud.m"}
|
#!/usr/bin/env python3
import numpy as np
import pickle
#this file is a bit messy, as both the value models and the deep CFR models
#use these methods
stateSize = 3883
#3883 is the state size
#186 is the action size
inputShape = (stateSize + 2 * 186,)
#number of possible actions, which is used for our enumeration
numActions = 2486
#used to store the maps from id,type to enumeration
idMap = {}
#gets a sequential index for the given id of the given type
#generating one if necessary
def enumId(type, id):
#get mapping for type
if not type in idMap:
idMap[type] = {
'nextNum': 0
}
typeMap = idMap[type]
#get enumerated index within the type
if not id in typeMap:
num = typeMap['nextNum']
typeMap['nextNum'] += 1
typeMap[id] = num
return typeMap[id]
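# Usage sketch: enumId('weather', 'raindance') returns 0 on its first call and
# the same index on every later call, so tensor layouts stay stable within a
# run. numToOneHot and insertSublist are used below but fall outside this
# excerpt; implementations consistent with their call sites would be (an
# assumption, not the original code):
#   def numToOneHot(num, size):
#       oneHot = np.zeros(size)
#       oneHot[int(num)] = 1
#       return oneHot
#   def insertSublist(array, n, sub):
#       array[n * len(sub):(n + 1) * len(sub)] = sub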
#turns the state into a tensor (really just an array)
#structure should be constant across runs
#as long as idMap stays the same
def stateToTensor(state):
stateTensor = np.zeros(0)
#weather section
num = 5
size = 11 # duration 0-10
weatherArray = np.zeros(num * size)
weather = state['weather']
for id in weather:
n = enumId('weather', id)
duration = weather[id]
oneHot = numToOneHot(duration, size)
insertSublist(weatherArray, n, oneHot)
stateTensor = np.concatenate([stateTensor, weatherArray])
#player section
for i in range(2):
player = state['players'][i]
playerList = np.zeros(0)
#zmove
zMove = numToOneHot(player['zMoveUsed'], 2)
playerList = np.concatenate([playerList, zMove])
#mons
num = 6
#active only, all 0s for non active:
#newly switched (2), ability (24), addedType (13), move status (12), boosts (7 * 13), volatiles (10 * 10)
activeSize = 2 + 24 + 13 + 12 + (7 * 13) + (10 * 10)
#for all:
#is active (2), details (24), status (10), hp (11), item (13)
size = 2 + 24 + 10 + 11 + 13 + activeSize
monList = np.zeros(num * size)
for monId in player['mons']:
mon = player['mons'][monId]
monPlace = enumId('mon-id' + str(i), monId)
#go through each field
sublists = []
#non-active first
isActive = monId in player['active']
sublists.append(numToOneHot(isActive, 2))
details = enumId('mon-details', mon['details'])
sublists.append(numToOneHot(details, 24))
status = enumId('mon-status', mon['status'])
sublists.append(numToOneHot(status, 10))
hp = mon['hp']
sublists.append(numToOneHot(hp, 11))
item = enumId('mon-item', mon['item'])
sublists.append(numToOneHot(item, 13))
if not isActive:
sublists.append(np.zeros(activeSize))
else:
active = player['active'][monId]
newlySwitched = bool(active['newlySwitched'])
sublists.append(numToOneHot(newlySwitched, 2))
ability = enumId('active-ability', active['ability'])
sublists.append(numToOneHot(ability, 24))
addedType = enumId('active-addedType', active['addedType'])
sublists.append(numToOneHot(addedType, 13))
moves = active['moves']
moveList = [numToOneHot(m, 3) for m in moves]
sublists.append(np.concatenate(moveList))
boosts = active['boosts']
boostList = [numToOneHot(b, 13) for b in boosts]
sublists.append(np.concatenate(boostList))
vols = active['volatiles']
volList = np.zeros(10*10)
for id in vols:
n = enumId('active-volatiles', id)
duration = numToOneHot(vols[id], 10)
insertSublist(volList, n, duration)
sublists.append(volList)
insertSublist(monList, monPlace, np.concatenate(sublists))
playerList = np.concatenate([playerList, monList])
#side conditions
scList = np.zeros(10 * 10)
for sideId in player['sideConditions']:
n = enumId('player-sc', sideId)
insertSublist(scList, n, numToOneHot(player['sideConditions'][sideId], 10))
playerList = np.concatenate([playerList, scList])
stateTensor = np.concatenate([stateTensor, playerList])
return stateTensor
#turns the action into a tensor (really just an array)
#structure should be constant across runs
#as long as idMap stays the same
#always assumes singles or doubles
def actionToTensor(action):
parts = [p.strip() for p in action.split(',')]
if len(parts) == 1:
parts.append('pass')
actionTensor = np.zeros(0)
#types of action (3) + max number of team combos in vgc (90)
partSize = 3 + 90
for p in parts:
if p == 'pass':
partList = np.zeros(partSize)
elif 'switch' in p:
target = p.split(' ')[1]
targetNum = enumId('switch-target', target)
partList = np.concatenate([numToOneHot(0, 3), numToOneHot(targetNum, 90)])
elif 'team' in p:
team = p.split(' ')[1]
teamNum = enumId('team', team)
partList = np.concatenate([numToOneHot(1, 3), numToOneHot(teamNum, 90)])
elif 'move' in p:
data = p.split(' ')
move = data[1]
moveNum = enumId('move-move', move)
if len(data) < 3:
targetNum = 0
else:
target = data[2]
targetNum = enumId('move-target', target)
            partList = np.concatenate([numToOneHot(2, 3), numToOneHot(moveNum, 4), numToOneHot(targetNum, 86)])  # 3 + 4 + 86 == partSize
actionTensor = np.concatenate([actionTensor, partList])
return actionTensor
def toInput(state, action1, action2):
return np.concatenate([stateToTensor(state), actionToTensor(action1), actionToTensor(action2)])
#these are the nice parts of the action enumeration code
#action -> number
enumActionMap = None
#number -> action
denumActionMap = None
def enumAction(action):
    if enumActionMap is None:
genActionMap()
#pass,pass is more orthogonal
if action.strip() == 'noop':
action = ' pass,pass'
#convert singles actions to a canonical form
elif ',' not in action and 'team' not in action:
action += ',pass'
#if there's a move with no target, set the target to 1
fixedAction = []
for part in action.split(','):
part = part.strip()
if 'move' in part:
components = part.split(' ')
#add a default target
if len(components) == 2:
components.append('1')
part = ' '.join(components)
fixedAction.append(part)
action = ' ' + ','.join(fixedAction)
return enumActionMap[action]
#do we ever even use denumAction()?
#this returns actions in canonical doubles form, not singles
#so if you're playing singles, cut off the ',pass' at the end
#def denumAction(n):
#if not denumActionMap:
#genActionMap()
#return denumActionMap[n]
def genActionMap():
global denumActionMap
global enumActionMap
denumActionMap = {}
enumActionMap = {}
#n is the number of unique actions
#i is the enumerated actions, which has a lot of duplicates
n = 0
#minimum number to reach all actions
for i in range(43200):
action = _denumAction(i)
if not action in enumActionMap:
denumActionMap[n] = action
enumActionMap[action] = n
n += 1
#this is the ugly part
#converts a number to a doubles action (e.g. 'pass,pass', 'move 1 2, switch 2')
#this will assign all possible actions in some lower bound (<43200)
#this function is only used for the inital action enumeration generation
#so don't call this unless you're trying to make an enumeration
def _denumAction(n):
actionType = n % 10
n = n // 10
#team
if actionType == 9:
#can pick 1-6 mons
numPicked = (n % 6) + 1
n = n // 6
team = ['0' for _ in range(6)]
for i in range(6, 0, -1):
spot = n % i
n = n // i
#find the index of the spotth 0
for j in range(6):
if team[j] == '0':
spot -= 1
if spot < 0:
team[j] = str(6 - i + 1)
break
team = team[0:numPicked]
return ' team ' + ''.join(team)
actionType1 = actionType % 3
actionType2 = actionType // 3
actions = []
for at in [actionType1, actionType2]:
#move
if at == 0:
action = n % 16
n = n // 16
move = (action % 4) + 1
target = (action // 4) - 2
#0 isn't a valid target
if target >= 0:
target += 1
actions.append('move ' + str(move) + ' ' + str(target))
#switch
elif at == 1:
target = (n % 6) + 1
n = n // 6
actions.append('switch ' + str(target))
#pass
elif at == 2:
actions.append('pass')
return ' ' + ','.join(actions)
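# Worked examples (derived from the encoding above):
#   _denumAction(8) -> ' pass,pass'  (actionType 8 -> pass for both slots)
#   _denumAction(9) -> ' team 1'     (actionType 9 -> team pick of one mon)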
#dumping/reading the id map to/from a file
def saveIdMap(filename):
idMapData = pickle.dumps(idMap)
with open(filename, 'wb') as mapFile:
mapFile.write(idMapData)
def readIdMap(filename):
global idMap
with open(filename, 'rb') as mapFile:
idMapData = mapFile.read()
idMap = pickle.loads(idMapData)
#turns a number into a one-hot representation
#0-indexed
#takes booleans too
def numToOneHot(num, size):
    if isinstance(num, bool):
        num = int(num)
xs = np.zeros(size)
xs[num] = 1
return xs
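# Examples:
#   numToOneHot(2, 4)    -> array([0., 0., 1., 0.])
#   numToOneHot(True, 2) -> array([0., 1.])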
#copies the one-hot into the list at the given position
#defaults to sizes being in multiples of the one-hot
def insertSublist(xs, pos, oneHot, size=None):
    if size is None:
size = len(oneHot)
np.put(xs, range(pos * size, (pos+1) * size), oneHot)
if __name__ == '__main__':
#this is the code that generates action.py for the full game
#this really should be worked into something prettier
genActionMap()
#full game doesn't use whitespace around action names
stripMap = {}
for action in enumActionMap:
stripMap[action.strip()] = enumActionMap[action]
i = len(stripMap)
#manually add some teams here
stripMap['|charmander|lifeorb||flareblitz,brickbreak,dragondance,outrage|Adamant|,252,,,4,252|M||||]|bulbasaur|chestoberry||gigadrain,toxic,sludgebomb,rest|Quiet|252,4,,252,,|M|,0,,,,|||]|squirtle|leftovers||fakeout,aquajet,hydropump,freezedry|Quiet|252,4,,252,,|M||||'] = i
stripMap['|charmander|leftovers||flamethrower,icebeam,dragondance,hyperbeam|Modest|,,,252,4,252|M||||]|bulbasaur|lifeorb||gigadrain,powerwhip,sludgebomb,rockslide|Adamant|252,252,,,,4|M||||]|squirtle|lifeorb||fakeout,earthquake,hydropump,freezedry|Timid|,4,,252,,252|M||||'] = i+1
print('actionMap =', stripMap)
|
{"hexsha": "1a9edc0feb6d63a6f79340ea487fd9500ef14fd0", "size": 11177, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/modelInput.py", "max_stars_repo_name": "samhippie/shallow-red", "max_stars_repo_head_hexsha": "5690cdf380c6e138e25d88e85093738951438298", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "old/modelInput.py", "max_issues_repo_name": "samhippie/shallow-red", "max_issues_repo_head_hexsha": "5690cdf380c6e138e25d88e85093738951438298", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "old/modelInput.py", "max_forks_repo_name": "samhippie/shallow-red", "max_forks_repo_head_hexsha": "5690cdf380c6e138e25d88e85093738951438298", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-13T12:53:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-13T12:53:35.000Z", "avg_line_length": 32.3971014493, "max_line_length": 284, "alphanum_fraction": 0.5873669142, "include": true, "reason": "import numpy", "num_tokens": 2988}
|
from __future__ import absolute_import, division, print_function
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=0)
args = parser.parse_args()
gpu_id = args.gpu_id # set GPU id to use
import os; os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
import numpy as np
import tensorflow as tf
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
import json
from models_shapes.nmn3_assembler import Assembler
from models_shapes.nmn3_model import NMN3ModelAtt
# Module parameters
H_im = 30
W_im = 30
num_choices = 2
embed_dim_txt = 300
embed_dim_nmn = 300
lstm_dim = 256
num_layers = 2
encoder_dropout = True
decoder_dropout = True
decoder_sampling = True
T_encoder = 15
T_decoder = 11
N = 256
# Training parameters
weight_decay = 5e-4
max_grad_l2_norm = 10
max_iter = 40000
snapshot_interval = 10000
exp_name = "shapes_gt_layout"
snapshot_dir = './exp_shapes/tfmodel/%s/' % exp_name
# Log params
log_interval = 20
log_dir = './exp_shapes/tb/%s/' % exp_name
# Data files
vocab_shape_file = './exp_shapes/data/vocabulary_shape.txt'
vocab_layout_file = './exp_shapes/data/vocabulary_layout.txt'
image_sets = ['train.large', 'train.med', 'train.small', 'train.tiny']
training_text_files = './exp_shapes/shapes_dataset/%s.query_str.txt'
training_image_files = './exp_shapes/shapes_dataset/%s.input.npy'
training_label_files = './exp_shapes/shapes_dataset/%s.output'
training_gt_layout_file = './exp_shapes/data/%s.query_layout_symbols.json'
image_mean_file = './exp_shapes/data/image_mean.npy'
# Load vocabulary
with open(vocab_shape_file) as f:
vocab_shape_list = [s.strip() for s in f.readlines()]
vocab_shape_dict = {vocab_shape_list[n]:n for n in range(len(vocab_shape_list))}
num_vocab_txt = len(vocab_shape_list)
assembler = Assembler(vocab_layout_file)
num_vocab_nmn = len(assembler.module_names)
# Load training data
training_questions = []
training_labels = []
training_images_list = []
gt_layout_list = []
for image_set in image_sets:
with open(training_text_files % image_set) as f:
training_questions += [l.strip() for l in f.readlines()]
with open(training_label_files % image_set) as f:
training_labels += [l.strip() == 'true' for l in f.readlines()]
training_images_list.append(np.load(training_image_files % image_set))
with open(training_gt_layout_file % image_set) as f:
gt_layout_list += json.load(f)
num_questions = len(training_questions)
training_images = np.concatenate(training_images_list)
# Shuffle the training data
# fix random seed for data repeatability
np.random.seed(3)
shuffle_inds = np.random.permutation(num_questions)
training_questions = [training_questions[idx] for idx in shuffle_inds]
training_labels = [training_labels[idx] for idx in shuffle_inds]
training_images = training_images[shuffle_inds]
gt_layout_list = [gt_layout_list[idx] for idx in shuffle_inds]
# number of training batches
num_batches = np.ceil(num_questions / N)
# Turn the questions into vocabulary indices
text_seq_array = np.zeros((T_encoder, num_questions), np.int32)
seq_length_array = np.zeros(num_questions, np.int32)
gt_layout_array = np.zeros((T_decoder, num_questions), np.int32)
for n_q in range(num_questions):
tokens = training_questions[n_q].split()
seq_length_array[n_q] = len(tokens)
for t in range(len(tokens)):
text_seq_array[t, n_q] = vocab_shape_dict[tokens[t]]
gt_layout_array[:, n_q] = assembler.module_list2tokens(
gt_layout_list[n_q], T_decoder)
image_mean = np.load(image_mean_file)
image_array = (training_images - image_mean).astype(np.float32)
vqa_label_array = np.array(training_labels, np.int32)
# Network inputs
text_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_batch = tf.placeholder(tf.float32, [None, H_im, W_im, 3])
expr_validity_batch = tf.placeholder(tf.bool, [None])
vqa_label_batch = tf.placeholder(tf.int32, [None])
use_gt_layout = tf.constant(True, dtype=tf.bool)
gt_layout_batch = tf.placeholder(tf.int32, [None, None])
# The model
nmn3_model = NMN3ModelAtt(image_batch, text_seq_batch,
seq_length_batch, T_decoder=T_decoder,
num_vocab_txt=num_vocab_txt, embed_dim_txt=embed_dim_txt,
num_vocab_nmn=num_vocab_nmn, embed_dim_nmn=embed_dim_nmn,
lstm_dim=lstm_dim,
num_layers=num_layers, EOS_idx=assembler.EOS_idx,
encoder_dropout=encoder_dropout,
decoder_dropout=decoder_dropout,
decoder_sampling=decoder_sampling,
num_choices=num_choices, use_gt_layout=use_gt_layout,
gt_layout_batch=gt_layout_batch)
compiler = nmn3_model.compiler
scores = nmn3_model.scores
log_seq_prob = nmn3_model.log_seq_prob
# Loss function
softmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=scores, labels=vqa_label_batch)
# The final per-sample loss, which is vqa loss for valid expr
# and invalid_expr_loss for invalid expr
final_loss_per_sample = softmax_loss_per_sample # All exprs are valid
avg_sample_loss = tf.reduce_mean(final_loss_per_sample)
seq_likelihood_loss = tf.reduce_mean(-log_seq_prob)
total_training_loss = seq_likelihood_loss + avg_sample_loss
total_loss = total_training_loss + weight_decay * nmn3_model.l2_reg
# Train with Adam
solver = tf.train.AdamOptimizer()
gradients = solver.compute_gradients(total_loss)
# Clip gradient by L2 norm
# gradients = gradients_part1+gradients_part2
gradients = [(tf.clip_by_norm(g, max_grad_l2_norm), v)
for g, v in gradients]
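# Note: tf.clip_by_norm clips each gradient tensor independently by its own
# L2 norm; clipping the combined norm across all variables would require
# tf.clip_by_global_norm instead.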
solver_op = solver.apply_gradients(gradients)
# Training operation
# Partial-run can't fetch training operations
# some workaround to make partial-run work
with tf.control_dependencies([solver_op]):
train_step = tf.constant(0)
# Write summary to TensorBoard
os.makedirs(log_dir, exist_ok=True)
log_writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())
loss_ph = tf.placeholder(tf.float32, [])
entropy_ph = tf.placeholder(tf.float32, [])
accuracy_ph = tf.placeholder(tf.float32, [])
tf.summary.scalar("avg_sample_loss", loss_ph)
tf.summary.scalar("entropy", entropy_ph)
tf.summary.scalar("avg_accuracy", accuracy_ph)
log_step = tf.summary.merge_all()
os.makedirs(snapshot_dir, exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
sess.run(tf.global_variables_initializer())
avg_accuracy = 0
accuracy_decay = 0.99
for n_iter in range(max_iter):
n_begin = int((n_iter % num_batches)*N)
n_end = int(min(n_begin+N, num_questions))
# set up input and output tensors
h = sess.partial_run_setup(
[nmn3_model.predicted_tokens, nmn3_model.entropy_reg,
scores, avg_sample_loss, train_step],
[text_seq_batch, seq_length_batch, image_batch, gt_layout_batch,
compiler.loom_input_tensor, vqa_label_batch])
# Part 0 & 1: Run Convnet and generate module layout
tokens, entropy_reg_val = sess.partial_run(h,
(nmn3_model.predicted_tokens, nmn3_model.entropy_reg),
feed_dict={text_seq_batch: text_seq_array[:, n_begin:n_end],
seq_length_batch: seq_length_array[n_begin:n_end],
image_batch: image_array[n_begin:n_end],
gt_layout_batch: gt_layout_array[:, n_begin:n_end]})
# Assemble the layout tokens into network structure
expr_list, expr_validity_array = assembler.assemble(tokens)
# all expr should be valid (since they are ground-truth)
assert(np.all(expr_validity_array))
labels = vqa_label_array[n_begin:n_end]
# Build TensorFlow Fold input for NMN
expr_feed = compiler.build_feed_dict(expr_list)
expr_feed[vqa_label_batch] = labels
# Part 2: Run NMN and learning steps
scores_val, avg_sample_loss_val, _ = sess.partial_run(
h, (scores, avg_sample_loss, train_step), feed_dict=expr_feed)
# compute accuracy
predictions = np.argmax(scores_val, axis=1)
accuracy = np.mean(np.logical_and(expr_validity_array,
predictions == labels))
avg_accuracy += (1-accuracy_decay) * (accuracy-avg_accuracy)
# Add to TensorBoard summary
if n_iter % log_interval == 0 or (n_iter+1) == max_iter:
print("iter = %d\n\tloss = %f, accuracy (cur) = %f, "
"accuracy (avg) = %f, entropy = %f" %
(n_iter, avg_sample_loss_val, accuracy,
avg_accuracy, -entropy_reg_val))
summary = sess.run(log_step, {loss_ph: avg_sample_loss_val,
entropy_ph: -entropy_reg_val,
accuracy_ph: avg_accuracy})
log_writer.add_summary(summary, n_iter)
# Save snapshot
if (n_iter+1) % snapshot_interval == 0 or (n_iter+1) == max_iter:
snapshot_file = os.path.join(snapshot_dir, "%08d" % (n_iter+1))
snapshot_saver.save(sess, snapshot_file, write_meta_graph=False)
print('snapshot saved to ' + snapshot_file)
|
{"hexsha": "e0d0fdcc89fe1b9335d19a415e0b625b6a6d9c5d", "size": 9107, "ext": "py", "lang": "Python", "max_stars_repo_path": "exp_shapes/train_shapes_gt_layout.py", "max_stars_repo_name": "YuJiang01/n2nnmn", "max_stars_repo_head_hexsha": "f0d751313ca756fe40ece1a7bbb0205ab899adf8", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 299, "max_stars_repo_stars_event_min_datetime": "2017-06-18T01:34:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T15:59:09.000Z", "max_issues_repo_path": "exp_shapes/train_shapes_gt_layout.py", "max_issues_repo_name": "YuJiang01/n2nnmn", "max_issues_repo_head_hexsha": "f0d751313ca756fe40ece1a7bbb0205ab899adf8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2017-06-23T16:03:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-31T05:43:24.000Z", "max_forks_repo_path": "exp_shapes/train_shapes_gt_layout.py", "max_forks_repo_name": "YuJiang01/n2nnmn", "max_forks_repo_head_hexsha": "f0d751313ca756fe40ece1a7bbb0205ab899adf8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2017-06-18T10:18:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T23:45:00.000Z", "avg_line_length": 37.4773662551, "max_line_length": 80, "alphanum_fraction": 0.7450312946, "include": true, "reason": "import numpy", "num_tokens": 2212}
|
from edgetpu.detection.engine import DetectionEngine
import numpy as np
from PIL import Image
class face_detection():
MODEL = 'models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite'
def __init__(self, threshold=0.5, num_results=10):
self.engine = DetectionEngine(face_detection.MODEL)
self.objs = None
self.boxes = None
self.scores = None
self.threshold = threshold
self.num_results = num_results
def set_threshold(self, num):
self.threshold = num
def set_max_results(self, num):
self.num_results = num
def detect(self, img):
img = Image.fromarray(img)
self.objs = self.engine.detect_with_image(img,
threshold=self.threshold,
keep_aspect_ratio=True,
relative_coord=False,
top_k=self.num_results)
self.boxes = [obj.bounding_box.flatten().tolist() for obj in self.objs]
self.scores = [obj.score for obj in self.objs]
return self.objs
def get_bounding_boxes(self):
return self.boxes
def get_scores(self):
return self.scores
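# Example usage (sketch; assumes `frame` is an HxWx3 uint8 RGB numpy array,
# e.g. a camera capture, and that an Edge TPU device with the model file is
# available):
#   detector = face_detection(threshold=0.6, num_results=5)
#   objs = detector.detect(frame)
#   boxes = detector.get_bounding_boxes()   # [[x1, y1, x2, y2], ...]
#   scores = detector.get_scores()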
|
{"hexsha": "38be4936bb1d9fb1db75548ed9b1d63e90623475", "size": 1028, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/ctrlengine/ai/face_detection.py", "max_stars_repo_name": "0xJeremy/ctrl.engine", "max_stars_repo_head_hexsha": "19abba70df149a05edc5722cc95ceacc538448e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-18T19:28:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T18:48:08.000Z", "max_issues_repo_path": "build/lib/ctrlengine/ai/face_detection.py", "max_issues_repo_name": "0xJeremy/ctrl.engine", "max_issues_repo_head_hexsha": "19abba70df149a05edc5722cc95ceacc538448e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/ctrlengine/ai/face_detection.py", "max_forks_repo_name": "0xJeremy/ctrl.engine", "max_forks_repo_head_hexsha": "19abba70df149a05edc5722cc95ceacc538448e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7837837838, "max_line_length": 73, "alphanum_fraction": 0.7247081712, "include": true, "reason": "import numpy", "num_tokens": 258}
|
import pytest
import xarray as xr
import numpy as np
import dask.array as da
from xrspatial.utils import has_cuda
from xrspatial.utils import doesnt_have_cuda
from xrspatial.multispectral import arvi
from xrspatial.multispectral import ebbi
from xrspatial.multispectral import evi
from xrspatial.multispectral import nbr
from xrspatial.multispectral import nbr2
from xrspatial.multispectral import ndmi
from xrspatial.multispectral import ndvi
from xrspatial.multispectral import savi
from xrspatial.multispectral import gci
from xrspatial.multispectral import sipi
from xrspatial.multispectral import true_color
from xrspatial.tests.general_checks import general_output_checks
blue_data = np.array([[0, 9167, 9198, 9589.],
[9154, 9122, 9202, 9475.],
[9029, 9052, 9145, 9385.],
[9153, 9134, 9157, 9389.],
[9254, 9178, 9174, 9453.],
[9348, 9222, 9201, 9413.],
[9393, 9278, 9251, 9347.],
[9486, 9293, np.nan, 9317.]])
green_data = np.array([[0, 9929, 10056, 10620.],
[9913, 9904, 10061, 10621.],
[9853, 9874, 10116, 10429.],
[9901, 9970, 10121, 10395.],
[9954, 9945, 10068, 10512.],
[9985, 9942, 10027, 10541.],
[np.nan, 9980, 10062, 10371.],
[10101, 9971, 10044, 10275.]])
red_data = np.array([[0, 10301, 10454, 11325.],
[10353, 10269, 10501, np.nan],
[10184, 10106, 10445, 10961.],
[10349, 10230, 10299, 10844.],
[10430, 10328, 10327, 10979.],
[10479, 10340, 10381, 11076.],
[10498, 10402, 10495, 10877.],
[10603, 10383, 10433, 10751.]])
nir_data = np.array([[0, 15928, 16135, 16411.],
[15588, 15881, 16253, 16651.],
[16175, 16486, 17038, 17084.],
[15671, 16596, 17511, 17525.],
[15522, 15936, 17003, 17549.],
[15317, 15782, 16322, 17133.],
[15168, 15529, 16011, 16600.],
[15072, 15496, 15983, 16477.]])
tir_data = np.array([[0, 10512, 10517, 10527.],
[10511, 10504, 10502, 10504.],
[10522, 10507, 10497, 10491.],
[10543, 10514, 10498, 10486.],
[10566, np.nan, 10509, 10490.],
[10592, 10558, 10527, 10504.],
[10629, 10598, 10567, 10536.],
[10664, 10639, 10612, 10587.]])
swir1_data = np.array([[0, np.nan, 17194, 18163.],
[16974, 16871, 17123, 18304.],
[16680, 16437, 16474, 17519.],
[17004, 16453, 16001, 16800.],
[17230, 16906, 16442, 16840.],
[17237, 16969, 16784, 17461.],
[17417, 17079, 17173, 17679.],
[17621, 17205, 17163, 17362.]])
swir2_data = np.array([[0, 13175, 13558, 14952.],
[13291, 13159, 13516, 15029.],
[12924, 12676, np.nan, 14009.],
[13294, 12728, 12370, 13289.],
[13507, 13163, 12763, 13499.],
[13570, 13219, 13048, 14145.],
[13770, 13393, 13472, 14249.],
[14148, 13489, 13483, 13893.]])
arvi_expected_results = np.array([
[np.nan, 0.09832155, 0.0956943, 0.0688592],
[0.08880479, 0.09804352, 0.09585208, np.nan],
[0.10611779, 0.1164153, 0.11244237, 0.09396376],
[0.0906375, 0.11409396, 0.12842213, 0.10752644],
[0.08580945, 0.09740005, 0.1179347, 0.10302287],
[0.08125288, 0.09465021, 0.1028627, 0.09022958],
[0.07825362, 0.08776391, 0.09236357, 0.08790172],
[0.07324535, 0.08831083, np.nan, 0.09074763]], dtype=np.float32)
evi_expected_results = np.array([
[0., 1.5661007, 1.4382279, 1.0217365],
[1.4458131, 1.544984, 1.4036115, np.nan],
[1.5662745, 1.7274992, 1.4820393, 1.2281862],
[1.4591216, 1.6802154, 1.6963824, 1.3721503],
[1.4635549, 1.5457553, 1.6425549, 1.3112202],
[1.4965355, 1.5713791, 1.5468937, 1.1654801],
[1.5143654, 1.5337442, 1.4365331, 1.2165724],
[1.4805857, 1.5785736, np.nan, 1.2888849]], dtype=np.float32)
nbr_expected_results = np.array([
[np.nan, 0.09459506, 0.08678813, 0.04651979],
[0.07953876, 0.09373278, 0.09194128, 0.0511995],
[0.11172205, 0.13064948, np.nan, 0.09889686],
[0.08206456, 0.1319056, 0.17204913, 0.13746998],
[0.06941334, 0.09529537, 0.1424444, 0.13044319],
[0.06047703, 0.08837626, 0.11147429, 0.09553041],
[0.04831018, 0.07385381, 0.08611742, 0.07620993],
[0.03162218, 0.06924271, 0.08484355, 0.08508396]], dtype=np.float32)
nbr2_expected_results = np.array([
[np.nan, np.nan, 0.11823621, 0.09696512],
[0.12169173, 0.12360972, 0.11772577, 0.09825099],
[0.12687474, 0.12918627, np.nan, 0.11132962],
[0.12245033, 0.12765156, 0.1279828, 0.11668716],
[0.12112438, 0.12448036, 0.12597159, 0.11012229],
[0.11903139, 0.12422155, 0.12523465, 0.10491679],
[0.11693975, 0.12096351, 0.12077011, 0.10742921],
[0.10932041, 0.121066, 0.12008093, 0.11099024]], dtype=np.float32)
ndvi_expected_results = np.array([
[np.nan, 0.21453354, 0.21365978, 0.1833718],
[0.20180409, 0.21460803, 0.21499589, np.nan],
[0.2272848, 0.23992178, 0.23989375, 0.21832769],
[0.20453498, 0.23730709, 0.25933117, 0.23550354],
[0.19620839, 0.21352422, 0.24427369, 0.23030005],
[0.18754846, 0.20833014, 0.22248437, 0.2147187],
[0.18195277, 0.19771701, 0.20810382, 0.20828329],
[0.17406037, 0.19757332, 0.21009994, 0.21029823]], dtype=np.float32)
ndmi_expected_results = np.array([
[np.nan, np.nan, -0.03177413, -0.05067392],
[-0.04256495, -0.03022716, -0.02606663, -0.04728937],
[-0.01537057, 0.00148832, 0.01682979, -0.01257116],
[-0.04079571, 0.00432691, 0.04505849, 0.02112163],
[-0.05214949, -0.02953535, 0.01677381, 0.02061706],
[-0.05897893, -0.03624317, -0.01395517, -0.00948141],
[-0.06901949, -0.04753435, -0.03501688, -0.031477],
[-0.07796776, -0.0522614, -0.03560007, -0.02615326]], dtype=np.float32)
savi_expected_results = np.array([
[0., 0.10726268, 0.10682587, 0.09168259],
[0.10089815, 0.10729991, 0.10749393, np.nan],
[0.11363809, 0.11995638, 0.11994251, 0.10915995],
[0.10226355, 0.11864913, 0.12966092, 0.11774762],
[0.09810041, 0.10675804, 0.12213238, 0.11514599],
[0.09377059, 0.10416108, 0.11123802, 0.10735555],
[0.09097284, 0.0988547, 0.10404798, 0.10413785],
[0.0870268, 0.09878284, 0.105046, 0.10514525]], dtype=np.float32)
gci_expected_results = np.array([
[np.nan, 0.60418975, 0.6045147, 0.5452919],
[0.57248056, 0.6034935, 0.6154458, 0.5677431],
[0.64163196, 0.66963744, 0.6842626, 0.63812447],
[0.5827694, 0.66459376, 0.730165, 0.6859067],
[0.55937314, 0.6024133, 0.6888161, 0.6694254],
[0.534001, 0.58740693, 0.62780493, 0.62536764],
[np.nan, 0.55601203, 0.5912343, 0.6006171],
[0.4921295, 0.5541069, 0.5912983, 0.603601]], dtype=np.float32)
sipi_expected_results = np.array([
[np.nan, 1.2015283, 1.2210878, 1.3413291],
[1.2290354, 1.2043835, 1.2258345, np.nan],
[1.1927892, 1.1652038, 1.1971788, 1.2573901],
[1.2247275, 1.1721647, 1.1583472, 1.2177818],
[1.2309505, 1.2050642, 1.1727082, 1.2322679],
[1.2337743, 1.2054392, 1.1986197, 1.2745583],
[1.2366167, 1.2192315, 1.2255257, 1.2673423],
[1.2499441, 1.2131821, np.nan, 1.2504367]], dtype=np.float32)
ebbi_expected_results = np.array([
[np.nan, np.nan, 4.0488696, 4.0370474],
[3.9937027, 3.9902349, 3.9841716, np.nan],
[3.9386337, 3.8569257, 3.6711047, 3.918455],
[4.0096908, 3.7895138, 3.5027769, 3.6056597],
[4.0786624, np.nan, 3.724852, 3.5452912],
[4.0510664, 3.9954765, 3.8744915, 3.8181543],
[4.131501, 4.013487, 4.009527, 4.049455],
[4.172874, 4.08833, 4.038202, 3.954431]], dtype=np.float32)
def _do_gaussian_array():
_x = np.linspace(0, 50, 101)
_y = _x.copy()
_mean = 25
_sdev = 5
X, Y = np.meshgrid(_x, _y, sparse=True)
x_fac = -np.power(X-_mean, 2)
y_fac = -np.power(Y-_mean, 2)
gaussian = np.exp((x_fac+y_fac)/(2*_sdev**2)) / (2.5*_sdev)
return gaussian
data_gaussian = _do_gaussian_array()
def create_test_arr(arr, backend='numpy'):
y, x = arr.shape
raster = xr.DataArray(arr, dims=['y', 'x'])
if backend == 'numpy':
raster['y'] = np.linspace(0, y, y)
raster['x'] = np.linspace(0, x, x)
return raster
if has_cuda() and 'cupy' in backend:
import cupy
raster.data = cupy.asarray(raster.data)
if 'dask' in backend:
raster.data = da.from_array(raster.data, chunks=(3, 3))
return raster
# NDVI -------------
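# NDVI is defined as (NIR - Red) / (NIR + Red), so values lie in [-1, 1].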
def test_ndvi_numpy_contains_valid_values():
_x = np.mgrid[1:0:21j]
a, b = np.meshgrid(_x, _x)
red_numpy = a*b
nir_numpy = (a*b)[::-1, ::-1]
da_nir = xr.DataArray(nir_numpy, dims=['y', 'x'])
da_red = xr.DataArray(red_numpy, dims=['y', 'x'])
da_ndvi = ndvi(da_nir, da_red)
assert da_ndvi.dims == da_nir.dims
assert da_ndvi.attrs == da_nir.attrs
for coord in da_nir.coords:
assert np.all(da_nir[coord] == da_ndvi[coord])
assert da_ndvi[0, 0] == -1
assert da_ndvi[-1, -1] == 1
assert da_ndvi[5, 10] == da_ndvi[10, 5] == -0.5
assert da_ndvi[15, 10] == da_ndvi[10, 15] == 0.5
def test_ndvi_cpu():
# vanilla numpy version
nir_numpy = create_test_arr(nir_data)
red_numpy = create_test_arr(red_data)
numpy_result = ndvi(nir_numpy, red_numpy)
general_output_checks(nir_numpy, numpy_result, ndvi_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
red_dask = create_test_arr(red_data, backend='dask')
dask_result = ndvi(nir_dask, red_dask)
general_output_checks(nir_dask, dask_result, ndvi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_ndvi_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
red_cupy = create_test_arr(red_data, backend='cupy')
cupy_result = ndvi(nir_cupy, red_cupy)
general_output_checks(nir_cupy, cupy_result, ndvi_expected_results)
# dask + cupy
nir_dask = create_test_arr(nir_data, backend='dask+cupy')
red_dask = create_test_arr(red_data, backend='dask+cupy')
dask_cupy_result = ndvi(nir_dask, red_dask)
general_output_checks(nir_dask, dask_cupy_result, ndvi_expected_results)
# SAVI -------------
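# SAVI is ((NIR - Red) / (NIR + Red + L)) * (1 + L) with soil factor L,
# which reduces to NDVI when L = 0 (checked below).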
def test_savi_cpu():
nir_numpy = create_test_arr(nir_data)
red_numpy = create_test_arr(red_data)
# savi should be same as ndvi at soil_factor=0
result_savi = savi(nir_numpy, red_numpy, soil_factor=0.0)
result_ndvi = ndvi(nir_numpy, red_numpy)
assert np.isclose(result_savi.data, result_ndvi.data, equal_nan=True).all()
# test default savi where soil_factor = 1.0
numpy_result = savi(nir_numpy, red_numpy, soil_factor=1.0)
general_output_checks(nir_numpy, numpy_result, savi_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
red_dask = create_test_arr(red_data, backend='dask')
dask_result = savi(nir_dask, red_dask)
general_output_checks(nir_dask, dask_result, savi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_savi_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
red_cupy = create_test_arr(red_data, backend='cupy')
cupy_result = savi(nir_cupy, red_cupy)
general_output_checks(nir_cupy, cupy_result, savi_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
red_dask_cupy = create_test_arr(red_data, backend='dask+cupy')
dask_cupy_result = savi(nir_dask_cupy, red_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, savi_expected_results)
# arvi -------------
def test_arvi_cpu():
nir_numpy = create_test_arr(nir_data)
red_numpy = create_test_arr(red_data)
blue_numpy = create_test_arr(blue_data)
numpy_result = arvi(nir_numpy, red_numpy, blue_numpy)
general_output_checks(nir_numpy, numpy_result, arvi_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
red_dask = create_test_arr(red_data, backend='dask')
blue_dask = create_test_arr(blue_data, backend='dask')
dask_result = arvi(nir_dask, red_dask, blue_dask)
general_output_checks(nir_dask, dask_result, arvi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_arvi_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
red_cupy = create_test_arr(red_data, backend='cupy')
blue_cupy = create_test_arr(blue_data, backend='cupy')
cupy_result = arvi(nir_cupy, red_cupy, blue_cupy)
general_output_checks(nir_cupy, cupy_result, arvi_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
red_dask_cupy = create_test_arr(red_data, backend='dask+cupy')
blue_dask_cupy = create_test_arr(blue_data, backend='dask+cupy')
dask_cupy_result = arvi(nir_dask_cupy, red_dask_cupy, blue_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, arvi_expected_results
)
# EVI -------------
def test_evi_cpu():
nir_numpy = create_test_arr(nir_data)
red_numpy = create_test_arr(red_data)
blue_numpy = create_test_arr(blue_data)
numpy_result = evi(nir_numpy, red_numpy, blue_numpy)
general_output_checks(nir_numpy, numpy_result, evi_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
red_dask = create_test_arr(red_data, backend='dask')
blue_dask = create_test_arr(blue_data, backend='dask')
dask_result = evi(nir_dask, red_dask, blue_dask)
general_output_checks(nir_dask, dask_result, evi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_evi_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
red_cupy = create_test_arr(red_data, backend='cupy')
blue_cupy = create_test_arr(blue_data, backend='cupy')
cupy_result = evi(nir_cupy, red_cupy, blue_cupy)
general_output_checks(nir_cupy, cupy_result, evi_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
red_dask_cupy = create_test_arr(red_data, backend='dask+cupy')
blue_dask_cupy = create_test_arr(blue_data, backend='dask+cupy')
dask_cupy_result = evi(nir_dask_cupy, red_dask_cupy, blue_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, evi_expected_results)
# GCI -------------
def test_gci_cpu():
# vanilla numpy version
nir_numpy = create_test_arr(nir_data)
green_numpy = create_test_arr(green_data)
numpy_result = gci(nir_numpy, green_numpy)
general_output_checks(nir_numpy, numpy_result, gci_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
green_dask = create_test_arr(green_data, backend='dask')
dask_result = gci(nir_dask, green_dask)
general_output_checks(nir_dask, dask_result, gci_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_gci_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
green_cupy = create_test_arr(green_data, backend='cupy')
cupy_result = gci(nir_cupy, green_cupy)
general_output_checks(nir_cupy, cupy_result, gci_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
green_dask_cupy = create_test_arr(green_data, backend='dask+cupy')
dask_cupy_result = gci(nir_dask_cupy, green_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, gci_expected_results)
# SIPI -------------
def test_sipi_cpu():
nir_numpy = create_test_arr(nir_data)
red_numpy = create_test_arr(red_data)
blue_numpy = create_test_arr(blue_data)
numpy_result = sipi(nir_numpy, red_numpy, blue_numpy)
general_output_checks(nir_numpy, numpy_result, sipi_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
red_dask = create_test_arr(red_data, backend='dask')
blue_dask = create_test_arr(blue_data, backend='dask')
dask_result = sipi(nir_dask, red_dask, blue_dask)
general_output_checks(nir_dask, dask_result, sipi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_sipi_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
red_cupy = create_test_arr(red_data, backend='cupy')
blue_cupy = create_test_arr(blue_data, backend='cupy')
cupy_result = sipi(nir_cupy, red_cupy, blue_cupy)
general_output_checks(nir_cupy, cupy_result, sipi_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
red_dask_cupy = create_test_arr(red_data, backend='dask+cupy')
blue_dask_cupy = create_test_arr(blue_data, backend='dask+cupy')
dask_cupy_result = sipi(nir_dask_cupy, red_dask_cupy, blue_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, sipi_expected_results)
# NBR -------------
def test_nbr_cpu():
nir_numpy = create_test_arr(nir_data)
swir_numpy = create_test_arr(swir2_data)
numpy_result = nbr(nir_numpy, swir_numpy)
general_output_checks(nir_numpy, numpy_result, nbr_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
swir_dask = create_test_arr(swir2_data, backend='dask')
dask_result = nbr(nir_dask, swir_dask)
general_output_checks(nir_dask, dask_result, nbr_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_nbr_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
swir_cupy = create_test_arr(swir2_data, backend='cupy')
cupy_result = nbr(nir_cupy, swir_cupy)
general_output_checks(nir_cupy, cupy_result, nbr_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
swir_dask_cupy = create_test_arr(swir2_data, backend='dask+cupy')
dask_cupy_result = nbr(nir_dask_cupy, swir_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, nbr_expected_results)
# NBR2 -------------
def test_nbr2_cpu():
swir1_numpy = create_test_arr(swir1_data)
swir2_numpy = create_test_arr(swir2_data)
numpy_result = nbr2(swir1_numpy, swir2_numpy)
general_output_checks(swir1_numpy, numpy_result, nbr2_expected_results)
# dask
swir1_dask = create_test_arr(swir1_data, backend='dask')
swir2_dask = create_test_arr(swir2_data, backend='dask')
dask_result = nbr2(swir1_dask, swir2_dask)
general_output_checks(swir1_dask, dask_result, nbr2_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Dnbr2ce not Available")
def test_nbr2_gpu():
# cupy
swir1_cupy = create_test_arr(swir1_data, backend='cupy')
swir2_cupy = create_test_arr(swir2_data, backend='cupy')
cupy_result = nbr2(swir1_cupy, swir2_cupy)
    general_output_checks(swir1_cupy, cupy_result, nbr2_expected_results)
# dask + cupy
swir1_dask_cupy = create_test_arr(swir1_data, backend='dask+cupy')
swir2_dask_cupy = create_test_arr(swir2_data, backend='dask+cupy')
dask_cupy_result = nbr2(swir1_dask_cupy, swir2_dask_cupy)
general_output_checks(
swir1_dask_cupy, dask_cupy_result, nbr2_expected_results)
# NDMI -------------
def test_ndmi_cpu():
nir_numpy = create_test_arr(nir_data)
swir1_numpy = create_test_arr(swir1_data)
numpy_result = ndmi(nir_numpy, swir1_numpy)
general_output_checks(nir_numpy, numpy_result, ndmi_expected_results)
# dask
nir_dask = create_test_arr(nir_data, backend='dask')
swir1_dask = create_test_arr(swir1_data, backend='dask')
dask_result = ndmi(nir_dask, swir1_dask)
general_output_checks(nir_dask, dask_result, ndmi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_ndmi_gpu():
# cupy
nir_cupy = create_test_arr(nir_data, backend='cupy')
swir1_cupy = create_test_arr(swir1_data, backend='cupy')
cupy_result = ndmi(nir_cupy, swir1_cupy)
general_output_checks(nir_cupy, cupy_result, ndmi_expected_results)
# dask + cupy
nir_dask_cupy = create_test_arr(nir_data, backend='dask+cupy')
swir1_dask_cupy = create_test_arr(swir1_data, backend='dask+cupy')
dask_cupy_result = ndmi(nir_dask_cupy, swir1_dask_cupy)
general_output_checks(
nir_dask_cupy, dask_cupy_result, ndmi_expected_results)
# EBBI -------------
def test_ebbi_cpu():
# vanilla numpy version
red_numpy = create_test_arr(red_data)
swir_numpy = create_test_arr(swir1_data)
tir_numpy = create_test_arr(tir_data)
numpy_result = ebbi(red_numpy, swir_numpy, tir_numpy)
general_output_checks(red_numpy, numpy_result, ebbi_expected_results)
# dask
red_dask = create_test_arr(red_data, backend='dask')
swir_dask = create_test_arr(swir1_data, backend='dask')
tir_dask = create_test_arr(tir_data, backend='dask')
dask_result = ebbi(red_dask, swir_dask, tir_dask)
general_output_checks(red_dask, dask_result, ebbi_expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_ebbi_gpu():
# cupy
red_cupy = create_test_arr(red_data, backend='cupy')
swir_cupy = create_test_arr(swir1_data, backend='cupy')
tir_cupy = create_test_arr(tir_data, backend='cupy')
cupy_result = ebbi(red_cupy, swir_cupy, tir_cupy)
general_output_checks(red_cupy, cupy_result, ebbi_expected_results)
# dask + cupy
red_dask_cupy = create_test_arr(red_data, backend='dask+cupy')
swir_dask_cupy = create_test_arr(swir1_data, backend='dask+cupy')
tir_dask_cupy = create_test_arr(tir_data, backend='dask+cupy')
dask_cupy_result = ebbi(red_dask_cupy, swir_dask_cupy, tir_dask_cupy)
general_output_checks(
red_dask_cupy, dask_cupy_result, ebbi_expected_results)
def test_true_color_cpu():
# vanilla numpy version
red_numpy = create_test_arr(red_data)
green_numpy = create_test_arr(green_data)
blue_numpy = create_test_arr(blue_data)
numpy_result = true_color(
red_numpy, green_numpy, blue_numpy, name='np_true_color'
)
assert numpy_result.name == 'np_true_color'
general_output_checks(red_numpy, numpy_result, verify_attrs=False)
# dask
red_dask = create_test_arr(red_data, backend='dask')
green_dask = create_test_arr(green_data, backend='dask')
blue_dask = create_test_arr(blue_data, backend='dask')
dask_result = true_color(
red_dask, green_dask, blue_dask, name='dask_true_color'
)
assert dask_result.name == 'dask_true_color'
    general_output_checks(red_dask, dask_result, verify_attrs=False)
np.testing.assert_allclose(
numpy_result.data, dask_result.compute().data, equal_nan=True
)
|
{"hexsha": "52e4c6e8e4b998273214d9bfb55a78d2848db1cc", "size": 23503, "ext": "py", "lang": "Python", "max_stars_repo_path": "xrspatial/tests/test_multispectral.py", "max_stars_repo_name": "brendancol/xarray-spatial", "max_stars_repo_head_hexsha": "36d53b75086b760cab5100a12fcbda946dd85a25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xrspatial/tests/test_multispectral.py", "max_issues_repo_name": "brendancol/xarray-spatial", "max_issues_repo_head_hexsha": "36d53b75086b760cab5100a12fcbda946dd85a25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xrspatial/tests/test_multispectral.py", "max_forks_repo_name": "brendancol/xarray-spatial", "max_forks_repo_head_hexsha": "36d53b75086b760cab5100a12fcbda946dd85a25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3685092127, "max_line_length": 79, "alphanum_fraction": 0.6802110369, "include": true, "reason": "import numpy,import cupy", "num_tokens": 8067}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 25 18:04:22 2019
@author: wt4452
"""
from time import time
import numpy as np
import numpy.linalg as la
import meshio as mo
reload(mo)
in_abq = False
try:
from abaqus import *
from abaqusConstants import (
NODAL,
INTEGRATION_POINT,
CENTROID,
VECTOR,
SCALAR,
TENSOR_3D_FULL,
TENSOR_3D_PLANAR,
THREE_D,
DEFORMABLE_BODY,
TIME,
MAGNITUDE,
MISES,
TRESCA,
PRESS,
INV3,
MAX_PRINCIPAL,
)
from odbAccess import *
from odbMaterial import *
from odbSection import *
in_abq = True
except ImportError:
raise SystemError("Functions do only work in Abaqus")
def abaqus_to_meshio_type(element_type):
"""Map Abaqus elment type to meshio types.
Parameters
----------
element_type : str
Abaqus element type (e.g C3D8R)
Returns
-------
str
Meshio element type (e.g. hexahedron)
"""
    # trusses
if "T2D2" in element_type or "T3D2" in element_type:
return "line"
if "T2D3" in element_type or "T3D3" in element_type:
return "line3"
# beams
if "B21" in element_type or "B31" in element_type:
return "line"
if "B22" in element_type or "B32" in element_type or "B33" in element_type:
return "line3"
# surfaces
if "S4" in element_type or "R3D4" in element_type:
return "quad"
if "S8" in element_type:
return "quad8"
if "S8" in element_type:
return "quad9"
if "S3" in element_type or "M3D3" in element_type or "R3D3" in element_type:
return "triangle"
if "STRIA6" in element_type:
return "triangle6"
# volumes
if "C3D8" in element_type or "EC3D8" in element_type or "SC8" in element_type:
return "hexahedron"
if "C3D20" in element_type:
return "hexahedron20"
if "C3D4" in element_type:
return "tetra"
if "C3D4H" in element_type:
return "tetra4"
if "C3D10" in element_type:
return "tetra10"
if "C3D6" in element_type:
return "wedge"
meshio_to_abaqus_type = {
"triangle": "S3R",
"quad": "S4R",
"hexahedron": "C3D8R",
"tetra": "C3D4",
"wedge": "C3D6",
}
# error messages
ERROR_NO_ODBObject = (
"{} is no valid ODB object, please pass one of the "
+ "following: <odb>, <ODBAssembly>, <ODBInstance>"
)
ERROR_NO_MDBObject = (
"{} is no valid MDB object, please pass one of the "
+ "following: <Part>, <PartInstance>, <Assembly>, "
+ "<Model>"
)
ERROR_NO_FIELD_DATA = "field output {} has no values in " + "instance {}"
ERROR_ELSET_FIELD = (
"field output {} is not defined on every element of " + "instance {}"
)
ERROR_RESHAPE_CELL_DATA = "{} cannot be reshaped into shape {}"
ERROR_DIFFERENT_ETYPES = (
"different element types ({}) in {}. This feature " + "is not supported yet"
)
def __reshape_TENSOR_3D_FULL(value):
    # Abaqus component order for symmetric 3D tensors: 11, 22, 33, 12, 13, 23
    v = value
    tens = np.array([[v[0], v[3], v[4]], [v[3], v[1], v[5]], [v[4], v[5], v[2]]])
    return tens
def __reshape_TENSOR_3D_PLANAR(value):
v = value
tens = np.array([[v[0], v[3], 0.0], [v[3], v[1], 0.0], [0.0, 0.0, v[2]]])
return tens
# helper function for concatenating cell dictionaries
def __merge_numpy_dicts(dict1, dict2):
new_dict = dict1.copy()
old_keys = dict1.keys()
new_keys = dict2.keys()
for key in new_keys:
if key in old_keys:
new_dict[key] = np.vstack((dict1[key], dict2[key]))
else:
new_dict[key] = dict2[key]
return new_dict
# helper function for concatenating cell_data dictionaries
def __merge_cellData_dicts(dict1, dict2):
new_dict = dict1.copy()
old_keys = dict1.keys()
new_keys = dict2.keys()
for key in new_keys:
if key in old_keys:
fO_1 = dict1[key]
fO_2 = dict2[key]
fO_1.update(fO_2)
else:
new_dict[key] = dict2[key]
return new_dict
def __reshape_fieldOutputs(cell_data_field, allocation):
"""
"""
new_cell_data_dict = {}
new_field_name, field_names_to_reshape = allocation
field_names_to_reshape = np.asarray(field_names_to_reshape)
shape = field_names_to_reshape.shape
field_names_to_reshape = field_names_to_reshape.flatten()
assert shape in [(3, 3), (1, 3), (3, 1), (3,)], ERROR_RESHAPE_CELL_DATA.format(
new_cell_data_dict.values(), shape
)
if not set(field_names_to_reshape).issubset(cell_data_field.keys()):
return {}
fields_to_reshape = np.array([cell_data_field[f] for f in field_names_to_reshape])
fields_to_reshape = np.transpose(fields_to_reshape)
n_values = fields_to_reshape.shape[0]
if np.min(shape) > 1 and len(shape) > 1:
new_field = fields_to_reshape.reshape((n_values, shape[0], shape[1]))
else:
new_field = fields_to_reshape
new_cell_data_dict[new_field_name] = new_field
return new_cell_data_dict
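# Example allocation (sketch; the scalar field names are hypothetical):
# combining nine scalar outputs into one 3x3 tensor field named "F":
#   allocation = ("F", [["F11", "F12", "F13"],
#                       ["F21", "F22", "F23"],
#                       ["F31", "F32", "F33"]])
#   new_cell_data = __reshape_fieldOutputs(cell_data_field, allocation)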
def convertMDBtoMeshio(mdbObject, **kwargs):
"""
convertMDBtoMeshio(mdbObject, **kwargs)
This function converts geometry information stored in Abaqus model database
(mdb) to a meshio compatible representation.
Parameters:
----------
mdbObject : <'Part'> or <'PartInstance'> or <'Assembly'> or <'Model'>
<'Part'> is defined on mdb.parts, whereas <'PartInstance'> is defined
on the assembly level, <'Assembly'> may contain several part instance.
When type <'Model'> is passed, its <'Assembly'> mebber is processed
Returns
-------
    Mesh : meshio Mesh object
        ready-to-write meshio Mesh object
"""
def convertInstance(mdbInstance, idx_shift=0):
inst = mdbInstance
nodes = inst.nodes
elements = inst.elements
n_nodes = len(nodes)
        # get node information and coordinates
node_labels, points = zip(*[(n.label, n.coordinates) for n in nodes])
points = np.array(points)
# create a lookup table to connect node labels and their array index
nodeLU = {
key: value
for (key, value) in zip(node_labels, range(idx_shift, n_nodes + idx_shift))
}
# getting the elements is a bit more complex, since we have to sort by
# type
        # firstly, we create an empty dict for storing the cell information
cells = {}
cell_data = {}
# loop over all elements
for elem in elements:
# get the connectivity
con = [nodeLU[c + 1] for c in elem.connectivity] # consider shift
# get the type of element, convert to meshio representation
etype = abaqus_to_meshio_type(str(elem.type))
if etype in cells.keys():
cells[etype].append(con)
cell_data[etype]["ID"] = np.append(cell_data[etype]["ID"], elem.label)
else:
# create a new key for a new element set
cells[etype] = [con]
cell_data[etype] = {"ID": np.array([elem.label])}
cells.update((key, np.array(cons)) for key, cons in cells.items())
return points, cells, cell_data
    # if a Part or PartInstance is passed, call convertInstance once
if str(type(mdbObject)) in ["<type 'Part'>", "<type 'PartInstance'>"]:
points, cells, cell_data = convertInstance(mdbObject)
# if an Assembly Object or a Model Object is passed, loop over
# all instances
elif str(type(mdbObject)) in ["<type 'Assembly'>", "<type 'Model'>"]:
cells = {}
cell_data = {}
points = np.empty((0, 3))
if str(type(mdbObject)) == "<type 'Model'>":
rA = mdbObject.rootAssembly
else:
rA = mdbObject
idx_shift = 0
for inst_name in rA.instances.keys():
inst = rA.instances[inst_name]
points_, cells_, cell_data_ = convertInstance(inst, idx_shift)
points = np.vstack((points, points_))
cells = __merge_numpy_dicts(cells, cells_)
cell_data = __merge_numpy_dicts(cell_data, cell_data_)
idx_shift += len(points_)
else:
raise TypeError(ERROR_NO_MDBObject.format(mdbObject))
return mo.Mesh(points, cells, cell_data=cell_data)
def convertODBtoMeshio(odbObject, frame, list_of_outputs=[], deformed=True, **kwargs):
"""
convertODBtoMeshio(mdbObject, frame, list_of_outputs=None, **kwargs)
    This function converts geometry and result data stored in an
    Abaqus output database (odb) to a meshio compatible representation.
Parameters:
----------
odbObject : <'OdbInstance'> or <'OdbSet'> or <'OdbAssembly'> or <'odb'>
<'OdbInstance'> is defined on odb.rootAssembly.instances, argument of
type <'OdbSet'> must be an element Set as a member of an
<'OdbInstance'> odject. <'OdbAssembly'> is the entire assembly
containing several odbInstances and <'odb'> is the entire database
frame: <'OdbFrame>'
the frame containing the displacementField of the desire
Returns
-------
    Mesh : meshio Mesh object
        ready-to-write meshio Mesh object
"""
def convertInstance(odbInstance, frame, idx_shift=0, list_of_outputs=None):
def processPointOutput(fO):
# process node data
print("processing " + fO.name)
values = np.array([value.data for value in fO.values])
if fO.type == SCALAR:
point_data[fO.name] = values
elif fO.type == VECTOR:
point_data[fO.name] = values
elif fO.type == TENSOR_3D_FULL:
values_rs = np.array([__reshape_TENSOR_3D_FULL(v) for v in values])
point_data[fO.name] = values_rs
elif fO.type == TENSOR_3D_PLANAR:
values_rs = np.array([__reshape_TENSOR_3D_PLANAR(v) for v in values])
point_data[fO.name] = values_rs
return
def processCellOutput(fO):
# process element data on several integration point
if isElSet:
fO_elem = fO.getSubset(region=eset)
else:
fO_elem = fO
n_el_values = len(fO_elem.values)
# check for availability of field output on each element
assert n_el_values == n_elements, ERROR_ELSET_FIELD.format(
field_name, inst_name
)
# use interpolation to output on centroid, to assert on result per
# element
fO_elem = fO_elem.getSubset(position=CENTROID)
print("processing " + fO.name)
cell_data_labels_ = {}
for value in fO_elem.values:
etype = abaqus_to_meshio_type(value.baseElementType)
cell_data_labels_.setdefault(etype,{})
if fO.type == TENSOR_3D_FULL:
val_data = __reshape_TENSOR_3D_FULL(value.data)
elif fO.type == TENSOR_3D_PLANAR:
val_data = __reshape_TENSOR_3D_PLANAR(value.data)
else:
val_data = value.data
cell_data_labels_[etype][value.elementLabel] = val_data
return cell_data_labels_
def sortCellOutput(fO_name,cell_data_labels_,cell_labels):
cell_data_ = {}
for etype in cell_labels.keys():
cell_data_[etype] = {fO_name: []}
for label in cell_labels[etype]:
try:
cell_data_[etype][fO_name].append(cell_data_labels_[etype][label])
                    except KeyError:
cell_data_[etype][fO_name].append(np.nan)
cell_data_[etype][fO_name] = np.array(cell_data_[etype][fO_name])
return cell_data_
# assign
inst = odbInstance
inst_name = inst.name
nodes = inst.nodes
elements = inst.elements
isElSet = str(type(odbInstance)) == "<type 'OdbSet'>" and hasattr(
odbInstance, "elements"
)
if isElSet:
eset = odbInstance
elements = eset.elements
inst_name = eset.name
# get the instance that contains the element set
# actually, we would have to check if *each* element in the set
# is part of the same instance
inst = eval(".".join(repr(elements[0]).split(".")[:-1]))
nodes = inst.nodes
# in the odb the initial coordinates are saved as a member to each
        # Node object. In order to get the current coordinates, we have to extract the
# displacement field and add the values
dispField = frame.fieldOutputs["U"]
dispField = dispField.getSubset(region=inst)
disp_values = dispField.values
nLU = inst.getNodeFromLabel # built-in function
n_nodes = len(nodes)
n_elements = len(elements)
        # get node information and coordinates
node_labels, disp, x0 = zip(
*[
(value.nodeLabel, value.data, nLU(value.nodeLabel).coordinates)
for value in disp_values
]
)
x0 = np.array(x0)
disp = np.array(disp)
if deformed:
points = disp + x0
print("Export deformed geometry.")
else:
points = x0
print("Export undeformed geometry.")
# create a lookup table to connect node labels and their array index
nodeLU = {
key: value
for (key, value) in zip(node_labels, range(idx_shift, n_nodes + idx_shift))
}
# getting the elements is a bit more complex, since we have to sort by
# type
        # firstly, we create an empty dict for storing the cell information
cells = {}
cell_labels = {}
# loop over all elements
for elem in elements:
# get the connectivity
con = [nodeLU[c] for c in elem.connectivity]
# get the type of element, convert to meshio representation
etype = abaqus_to_meshio_type(str(elem.type))
cells.setdefault(etype,[]).append(con)
cell_labels.setdefault(etype,[]).append(elem.label)
cells.update((key, np.array(cons)) for key, cons in cells.items())
cell_data = {}
point_data = {}
# if field data is requested
if list_of_outputs:
for field_name in list_of_outputs:
if type(field_name) == str and not field_name.lower().startswith(
"fdir"
):
fO = frame.fieldOutputs[field_name]
fO = fO.getSubset(region=inst)
n_values = len(fO.values)
if n_values > 0:
fO_location = fO.values[0].position # NODAL, ELEMENT,
if fO_location == NODAL:
processPointOutput(fO)
elif fO_location in [CENTROID, INTEGRATION_POINT]:
cell_data_labels_ = processCellOutput(fO)
cell_data_ = sortCellOutput(fO.name,cell_data_labels_,cell_labels)
cell_data = __merge_cellData_dicts(cell_data, cell_data_)
else:
print(ERROR_NO_FIELD_DATA.format(field_name, inst_name))
elif type(field_name) in [list, set, tuple] and len(field_name) == 2:
new_field_names = field_name[1]
new_field_names = np.asarray(new_field_names)
new_field_names = new_field_names.flatten()
for fn in set(new_field_names):
fO = frame.fieldOutputs[fn]
fO = fO.getSubset(region=inst)
n_values = len(fO.values)
assert n_values > 0, ERROR_NO_FIELD_DATA.format(
field_name, inst_name
)
fO_location = fO.values[0].position # NODAL, ELEMENT,
if fO_location == NODAL:
processPointOutput(fO)
elif fO_location in [CENTROID, INTEGRATION_POINT]:
cell_data_labels_ = processCellOutput(fO)
                            cell_data_ = sortCellOutput(fO.name, cell_data_labels_, cell_labels)
cell_data = __merge_cellData_dicts(cell_data, cell_data_)
if fO_location == NODAL:
new_point_data = __reshape_fieldOutputs(point_data, field_name)
point_data.update(new_point_data)
for fn in set(new_field_names):
del point_data[fn]
if fO_location in [CENTROID, INTEGRATION_POINT]:
for etype, cd_dict in cell_data.items():
new_cd_dict = __reshape_fieldOutputs(cd_dict, field_name)
cd_dict.update(new_cd_dict)
for fn in set(new_field_names):
del cd_dict[fn]
if "FDIR1" in list_of_outputs or "FDIR2" in list_of_outputs:
# get initial fiber orientation from stress field
stress = frame.fieldOutputs["S"].getSubset(region=eset)
csys = np.asarray(stress.values[0].localCoordSystem)
fdir1_0, fdir2_0 = csys[:2]
def _computeDeformationGradient(con):
"""Compute the deformation gradient of the element."""
assert (len(con)) in [3, 4], ""
# coordinates in the initial configuration
x0_coords = np.array([x0[c] for c in con])
# coordinates in the current configuration
x1_coords = np.array([points[c] for c in con])
# compute the derivative of the iso-coordinates
if len(con) == 3: # linear triangle
B_xii = np.zeros((2, 3))
B_xii[0] = [-1.0, 1.0, 0.0]
B_xii[1] = [-1.0, 0.0, 1.0]
else: # linear quad at midpoint
B_xii = np.zeros((2, 4))
B_xii[0] = [-0.25, 0.25, 0.25, -0.25]
B_xii[1] = [-0.25, -0.25, 0.25, 0.25]
# compute the Jacobians
J_initial = np.dot(B_xii, x0_coords)
J_initial_inv = np.dot(
la.inv(np.dot(J_initial, J_initial.T)), J_initial
)
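            # (J J^T)^-1 J is the Moore-Penrose pseudoinverse of the 3x2
            # matrix J^T = dX/dxi; it approximates dxi/dX for the 2D
            # element embedded in 3D space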
J_current = np.dot(B_xii, x1_coords)
            # compute F as the product of the Jacobians:
            # F maps from the initial to the current configuration via
            # the reference (parametric) configuration.
F = np.dot(J_current.T, J_initial_inv)
return F
for etype, cell_con in cells.items():
def_grad = np.array(
[_computeDeformationGradient(con_idx) for con_idx in cell_con]
)
if "FDIR1" in list_of_outputs:
fdir1 = np.einsum("Ijk,k->Ij", def_grad, fdir1_0)
fdir1 = np.array([f_i / la.norm(f_i) for f_i in fdir1])
try:
cell_data[etype].update({"FDIR1": fdir1})
except KeyError:
cell_data[etype] = {"FDIR1": fdir1}
if "FDIR2" in list_of_outputs:
fdir2 = np.einsum("Ijk,k->Ij", def_grad, fdir2_0)
fdir2 = np.array([f_i / la.norm(f_i) for f_i in fdir2])
try:
cell_data[etype].update({"FDIR2": fdir2})
except KeyError:
cell_data[etype] = {"FDIR2": fdir2}
try:
cell_data[etype].update({"F": def_grad})
except KeyError:
cell_data[etype] = {"F": def_grad}
return points, cells, point_data, cell_data
tic = time()
if str(type(odbObject)) in ["<type 'OdbInstance'>", "<type 'OdbSet'>"]:
odbInstance = odbObject
points, cells, point_data, cell_data = convertInstance(
odbInstance, frame, 0, list_of_outputs
)
elif str(odbObject.__class__) in ["<type 'Odb'>", "<type 'OdbAssembly'>"]:
cells = {}
points = np.empty((0, 3))
point_data = {}
cell_data = {}
if str(type(odbObject)) == "<type 'Odb'>":
rA = odbObject.rootAssembly
else:
rA = odbObject
idx_shift = 0
for inst_name in rA.instances.keys():
inst = rA.instances[inst_name]
if inst_name not in ["Assembly", "ASSEMBLY", "assembly"]:
points_, cells_ = convertInstance(inst, frame, idx_shift)[:2]
points = np.vstack((points, points_))
cells = __merge_numpy_dicts(cells, cells_)
idx_shift += len(points_)
toc = time()
print("took {} seconds".format(toc - tic))
return mo.Mesh(points, cells, point_data, cell_data)
def convertMeshioToMDB(mesh, partname="test", modelname="Model-1", **kwargs):
"""
This function creates a new part in the selected model database from
the geometry information stored in a meshio Mesh object
an OdbInstance object, defined in the 'rootAssembly' section
(odb.rootAssembly.instances)
* odbObject: an Abaqus OdbInstance object (<type 'OdbInstance'>)
an elementSet of an OdbInstance object, defined in its ElementSet section
(odbInstance.elementSets)
* odbObject: an Abaqus OdbSet object (<type 'OdbSet'>)
the entire assmebly of the model, containing several OdbInstances
* odbObject: an Abaqus OdbAssembly Object (<type 'OdbAssembly'>)
the entire Output Database
* odbObject: an Abaqus OutputDataBase (<type 'Odb'>)
Returns
-------
Mesh : meshio Mesh object
ready to write meshio Mesh object
"""
    print(
        "Warning: only the geometry of the mesh can be converted to an "
        + "ABAQUS model database. Information on field data is lost."
    )
assert type(mesh) == mo.mesh.Mesh, "No meshio Mesh instance"
all_points = mesh.points
all_cells = mesh.cells
n_points = len(all_points)
    # convert nodes to an abaqus compatible representation
nodeCoords = zip(all_points[:, 0], all_points[:, 1], all_points[:, 2])
nodeLabels = list(range(1, n_points + 1))
nodeData = [nodeLabels, nodeCoords]
elementData = []
element_label_shift = 1
for etype, els in all_cells.items():
# convert cells to abaqus compatible representation
abq_element_type = meshio_to_abaqus_type[etype]
element_labels = range(element_label_shift, len(els) + element_label_shift)
# abaqus requires list and int data types
# consider shifting node labels in connectivity
element_con = [[int(i + 1) for i in x] for x in els]
elementData.append([abq_element_type, element_labels, element_con])
# continue counting in next iteration
element_label_shift += len(els)
model = mdb.models[modelname]
partnames = model.parts.keys()
i = 0
while partname in partnames:
if i == 0:
            print(
                "Warning: a part named {} is "
                "already in model {}.".format(partname, modelname)
            )
partname += "_{}".format(i)
i += 1
model.PartFromNodesAndElements(
name=partname,
dimensionality=THREE_D,
type=DEFORMABLE_BODY,
nodes=nodeData,
elements=elementData,
)
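# Usage sketch (run inside Abaqus/CAE with an open model database; the names
# below are illustrative, `mesh` is any meshio Mesh object):
#     convertMeshioToMDB(mesh, partname="imported", modelname="Model-1")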
def convertMeshioToODB(mesh, odbname="test", filename="test.odb", **kwargs):
    """
    This function creates a new output database (ODB) from the geometry
    and field data stored in a meshio Mesh object.
    Parameters
    ----------
    mesh : meshio Mesh object
        mesh whose points, cells, point_data and cell_data are converted
    odbname : str
        name of the new ODB, default "test"
    filename : str
        path of the new ODB file, default "test.odb"
    Returns
    -------
    odb : the newly written and reopened Abaqus ODB object
    """
all_points = mesh.points
n_points = len(all_points)
all_elements = mesh.cells
all_node_data = mesh.point_data
all_element_data = mesh.cell_data
    # create a new odb
odb = Odb(
name=odbname,
analysisTitle="ODB created from Meshio Instance",
description="ODB created from Meshio Instance",
path=filename,
)
# add section
sCat = odb.SectionCategory(name="S5", description="Five-Layered Shell")
# create part
odb_part = odb.Part(name="part-1", embeddedSpace=THREE_D, type=DEFORMABLE_BODY)
# get the nodes
node_labels = range(1, 1 + n_points)
node_list = zip(node_labels, *[all_points[:, i] for i in (0, 1, 2)])
# add nodes to odb part
odb_part.addNodes(nodeData=node_list, nodeSetName="nset-1")
# get element data
element_label_shift = 1
element_labels_LU = {}
for etype, els in all_elements.items():
abq_element_type = meshio_to_abaqus_type[etype]
element_labels = range(element_label_shift, len(els) + element_label_shift)
# consider shifting node labels in connectivity
element_con = [[int(i + 1) for i in x] for x in els]
odb_part.addElements(
labels=element_labels,
connectivity=element_con,
type=abq_element_type,
elementSetName="{}".format(etype),
)
# continue counting in next iteration
element_label_shift += len(els)
element_labels_LU[etype] = element_labels
# instance part
odb_inst = odb.rootAssembly.Instance(name="part-1-1", object=odb_part)
# create step and frame
if all_node_data or all_element_data:
dummy_step = odb.Step(
name="step-1",
domain=TIME,
timePeriod=1.0,
description="first analysis step",
)
dummy_frame = dummy_step.Frame(
incrementNumber=0, frameValue=0.0, description="1st frame"
)
# write node data
for nd_name, node_data in all_node_data.items():
shape = node_data.shape
if shape == (n_points,):
field_type = SCALAR
elif shape == (n_points, 3):
field_type = VECTOR
elif shape == (n_points, 3, 3):
field_type = TENSOR_3D_FULL
else:
print("only scalar and vector data is supported atm")
continue
newField = dummy_frame.FieldOutput(
name="{}".format(nd_name), description="{}".format(nd_name), type=field_type
)
if field_type == SCALAR:
newField.addData(
position=NODAL,
instance=odb_inst,
labels=node_labels,
data=[[x] for x in node_data],
)
elif field_type == VECTOR:
newField.setComponentLabels(("1", "2", "3"))
newField.setValidInvariants((MAGNITUDE,))
newField.addData(
position=NODAL, instance=odb_inst, labels=node_labels, data=node_data
)
elif field_type == TENSOR_3D_FULL:
# assume symmetry, keep only the relevant components,
# reshape to abaqus order
node_data = [x.flatten()[[0, 4, 8, 1, 2, 5]] for x in node_data]
newField.setComponentLabels(("11", "22", "33", "12", "13", "23"))
newField.setValidInvariants((MISES, TRESCA, PRESS, INV3, MAX_PRINCIPAL))
newField.addData(
position=NODAL,
instance=odb_inst,
labels=node_labels,
data=[x.tolist() for x in node_data],
)
else:
print("ERROR processing node output {}".format(nd_name))
# write element data
for etype, ed_dict in all_element_data.items():
for ed_name, element_data in ed_dict.items():
n_elements = len(element_data)
shape = element_data.shape
if shape == (n_elements,):
field_type = SCALAR
elif shape == (n_elements, 3):
field_type = VECTOR
elif shape == (n_elements, 3, 3):
field_type = TENSOR_3D_FULL
else:
print("only scalar, vector and full3d data is supported atm")
continue
if ed_name not in dummy_frame.fieldOutputs.keys():
# create new
currentField = dummy_frame.FieldOutput(
name="{}".format(ed_name),
description="{}".format(ed_name),
type=field_type,
)
else:
currentField = dummy_frame.fieldOutputs["{}".format(ed_name)]
# add data to field_output
if field_type == SCALAR:
currentField.addData(
position=CENTROID,
instance=odb_inst,
labels=element_labels_LU[etype],
data=[[x] for x in element_data],
)
elif field_type == VECTOR:
currentField.setComponentLabels(("1", "2", "3"))
currentField.setValidInvariants((MAGNITUDE,))
currentField.addData(
position=CENTROID,
instance=odb_inst,
labels=element_labels_LU[etype],
data=[x.tolist() for x in element_data],
)
elif field_type == TENSOR_3D_FULL:
# assume symmetry, keep only the relevant components,
# reshape to abaqus order
element_data = [x.flatten()[[0, 4, 8, 1, 2, 5]] for x in element_data]
currentField.setComponentLabels(("11", "22", "33", "12", "13", "23"))
currentField.setValidInvariants(
(MISES, TRESCA, PRESS, INV3, MAX_PRINCIPAL)
)
currentField.addData(
position=CENTROID,
instance=odb_inst,
labels=element_labels_LU[etype],
data=[x.tolist() for x in element_data],
)
else:
print("ERROR processing element output {}".format(ed_name))
pathToODB = odb.path
odb.save()
odb.close()
odb = openOdb(pathToODB)
return odb
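# Usage sketch (run with the Abaqus Python interpreter; the names below are
# illustrative, `mesh` is any meshio Mesh object):
#     odb = convertMeshioToODB(mesh, odbname="imported", filename="imported.odb")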
|
{"hexsha": "9f99c9e56b53f8eef0d398ba2819588adf30c7c2", "size": 30308, "ext": "py", "lang": "Python", "max_stars_repo_path": "plugins/abaqus/abq_meshio/abq_meshio_converter.py", "max_stars_repo_name": "siegfriedgalkinkit/meshio", "max_stars_repo_head_hexsha": "8c2ccf62d1841258df92fe6badd424fe845f9ff9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plugins/abaqus/abq_meshio/abq_meshio_converter.py", "max_issues_repo_name": "siegfriedgalkinkit/meshio", "max_issues_repo_head_hexsha": "8c2ccf62d1841258df92fe6badd424fe845f9ff9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plugins/abaqus/abq_meshio/abq_meshio_converter.py", "max_forks_repo_name": "siegfriedgalkinkit/meshio", "max_forks_repo_head_hexsha": "8c2ccf62d1841258df92fe6badd424fe845f9ff9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2102747909, "max_line_length": 94, "alphanum_fraction": 0.5702124852, "include": true, "reason": "import numpy", "num_tokens": 7270}
|
import pytest
from ffai.core.model import D3, D6, D8, BBDie
from ffai.core.table import BBDieResult
import numpy as np
@pytest.mark.parametrize("die", [D3, D6, D8, BBDie])
def test_d_die(die):
results = []
n = 6
if die == D3:
n = 3
elif die == D8:
n = 8
    elif die == BBDie:
n = 5 # Two push results
    rnd = np.random.RandomState(0)
    for i in range(100):
result = die(rnd).value
if die == D3:
assert result in [1, 2, 3]
elif die == D6:
assert result in [1, 2, 3, 4, 5, 6]
elif die == D8:
assert result in [1, 2, 3, 4, 5, 6, 7, 8]
elif die == BBDie:
assert result in [BBDieResult.ATTACKER_DOWN, BBDieResult.BOTH_DOWN, BBDieResult.PUSH, BBDieResult.DEFENDER_STUMBLES, BBDieResult.DEFENDER_DOWN]
        if result not in results:
            results.append(result)
        if len(results) == n:
            break
assert len(results) == n
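# The fixation tests below rely on fix_result() queueing outcomes: each roll
# consumes the oldest fixed value first (FIFO), regardless of the RNG seed.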
def test_d3_fixation():
for seed in range(10):
rnd = np.random.RandomState(seed)
D3.fix_result(1)
D3.fix_result(2)
D3.fix_result(3)
assert D3(rnd).value == 1
assert D3(rnd).value == 2
assert D3(rnd).value == 3
with pytest.raises(ValueError):
D3.fix_result(0)
with pytest.raises(ValueError):
D3.fix_result(4)
def test_d6_fixation():
for seed in range(10):
rnd = np.random.RandomState(seed)
D6.fix_result(1)
D6.fix_result(2)
D6.fix_result(3)
D6.fix_result(4)
D6.fix_result(5)
D6.fix_result(6)
assert D6(rnd).value == 1
assert D6(rnd).value == 2
assert D6(rnd).value == 3
assert D6(rnd).value == 4
assert D6(rnd).value == 5
assert D6(rnd).value == 6
with pytest.raises(ValueError):
D6.fix_result(0)
with pytest.raises(ValueError):
D6.fix_result(7)
def test_d8_fixation():
for seed in range(10):
rnd = np.random.RandomState(seed)
D8.fix_result(1)
D8.fix_result(2)
D8.fix_result(3)
D8.fix_result(4)
D8.fix_result(5)
D8.fix_result(6)
D8.fix_result(7)
D8.fix_result(8)
assert D8(rnd).value == 1
assert D8(rnd).value == 2
assert D8(rnd).value == 3
assert D8(rnd).value == 4
assert D8(rnd).value == 5
assert D8(rnd).value == 6
assert D8(rnd).value == 7
assert D8(rnd).value == 8
with pytest.raises(ValueError):
D8.fix_result(0)
with pytest.raises(ValueError):
D8.fix_result(9)
def test_bb_fixation():
BBDie.clear_fixes()
for seed in range(10):
rnd = np.random.RandomState(seed)
BBDie.fix_result(BBDieResult.ATTACKER_DOWN)
BBDie.fix_result(BBDieResult.BOTH_DOWN)
BBDie.fix_result(BBDieResult.PUSH)
BBDie.fix_result(BBDieResult.DEFENDER_STUMBLES)
BBDie.fix_result(BBDieResult.DEFENDER_DOWN)
assert BBDie(rnd).value == BBDieResult.ATTACKER_DOWN
assert BBDie(rnd).value == BBDieResult.BOTH_DOWN
assert BBDie(rnd).value == BBDieResult.PUSH
assert BBDie(rnd).value == BBDieResult.DEFENDER_STUMBLES
assert BBDie(rnd).value == BBDieResult.DEFENDER_DOWN
with pytest.raises(ValueError):
BBDie.fix_result(1)
|
{"hexsha": "51af5d3ad267bfd4a865544c37f350b0c8f39c19", "size": 3333, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/game/test_dice.py", "max_stars_repo_name": "gsverhoeven/ffai", "max_stars_repo_head_hexsha": "673ff00e1aac905381cdfb1228ccfcfccda97d1f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-03-05T16:43:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-11T14:24:58.000Z", "max_issues_repo_path": "tests/game/test_dice.py", "max_issues_repo_name": "gsverhoeven/ffai", "max_issues_repo_head_hexsha": "673ff00e1aac905381cdfb1228ccfcfccda97d1f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-24T23:04:16.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-24T23:04:16.000Z", "max_forks_repo_path": "tests/game/test_dice.py", "max_forks_repo_name": "gsverhoeven/ffai", "max_forks_repo_head_hexsha": "673ff00e1aac905381cdfb1228ccfcfccda97d1f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.027027027, "max_line_length": 155, "alphanum_fraction": 0.5928592859, "include": true, "reason": "import numpy", "num_tokens": 1026}
|
/*
Copyright (c) 2013, Illumina Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
. Neither the name of the Illumina, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "connection.h"
#include "context.h"
#include <curl/curl.h>
#include "errors.h"
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <tinyxml.h>
#include "scope_guard.h"
#include <boost/algorithm/string.hpp>
using namespace es3;
static std::string escape(const std::string &str)
{
char *res=curl_escape(str.c_str(), str.length());
ON_BLOCK_EXIT(&curl_free,res);
return std::string(res);
}
s3_path es3::parse_path(const std::string &url)
{
s3_path res;
if (url.find("s3://")!=0)
throw std::bad_exception();
std::string bucket_and_path=url.substr(strlen("s3://"));
size_t path_pos = bucket_and_path.find('/');
if (path_pos==0)
err(errFatal) << "Malformed S3 URL - no bucket name: " << url;
	if (path_pos!=std::string::npos)
{
res.bucket_=bucket_and_path.substr(0, path_pos);
res.path_=bucket_and_path.substr(path_pos);
} else
{
res.bucket_=bucket_and_path;
res.path_="/";
}
if (res.path_.find("//")!=std::string::npos)
err(errFatal) << "Malformed S3 URL - invalid '//' combination: " << url;
return res;
}
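// Example (sketch): parse_path("s3://bucket/dir/file") yields
// bucket_=="bucket" and path_=="/dir/file"; a bare "s3://bucket" yields
// path_=="/".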
s3_connection::s3_connection(const context_ptr &conn_data)
: conn_data_(conn_data), header_list_(), num_lists_()
{
}
s3_connection::~s3_connection()
{
if (header_list_)
curl_slist_free_all(header_list_);
}
void s3_connection::checked(curl_ptr_t curl, int curl_code)
{
if (curl_code!=CURLE_OK)
{
char* error_buffer=conn_data_->err_buf_for(curl);
assert(error_buffer);
if (strlen(error_buffer)!=0)
{
assert(strlen(error_buffer)<=CURL_ERROR_SIZE);
err(errWarn) << "curl error: " << error_buffer;
} else
err(errWarn) << "curl error: "
<< curl_easy_strerror((CURLcode)curl_code);
conn_data_->taint(curl);
}
}
void s3_connection::check_for_errors(curl_ptr_t curl,
const std::string &curl_res)
{
long code=400;
checked(curl,
curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &code));
if (code<400)
return;
conn_data_->taint(curl);
code_e err_level=errFatal;
if (code>=500)
err_level=errWarn;
std::string def_error="HTTP code "+int_to_string(code)+" received.";
TiXmlDocument doc;
doc.Parse(curl_res.c_str());
if (!doc.Error())
{
TiXmlHandle docHandle(&doc);
TiXmlNode *s3_err_code=docHandle.FirstChild("Error")
.FirstChild("Code")
.FirstChild()
.ToText();
TiXmlNode *message=docHandle.FirstChild("Error")
.FirstChild("Message")
.FirstChild()
.ToText();
		//Workaround for timeouts: check that both nodes exist before use
		if (s3_err_code && message)
		{
			std::string msg_val = message->Value();
			std::string err_code = s3_err_code->Value();
			if (msg_val.find("Idle connections will be closed")!=std::string::npos)
				err_level=errWarn; //Lower error level
			if (err_code.find("NoSuchUpload")!=std::string::npos)
				err_level=errWarn; //Lower error level
			err(err_level) << err_code << " - " << msg_val;
		} else
			err(err_level) << def_error;
} else
		err(err_level) << def_error;
}
void s3_connection::prepare(curl_ptr_t curl,
const std::string &verb,
const s3_path &path,
const header_map_t &opts)
{
s3_path cur_path=path;
if (cur_path.path_.empty())
cur_path.path_.append("/");
curl_easy_reset(curl.get());
//memset(conn_data_->err_buf_for(curl.get()) , 0, CURL_ERROR_SIZE);
//Set HTTP verb
checked(curl,
curl_easy_setopt(curl.get(), CURLOPT_CUSTOMREQUEST, verb.c_str()));
//Do not do any automatic decompression
checked(curl,
curl_easy_setopt(curl.get(), CURLOPT_ENCODING , 0));
if (header_list_)
{
curl_slist_free_all(header_list_);
header_list_ = 0;
}
//Add custom headers
for(auto iter = opts.begin(), iend=opts.end(); iter!=iend; ++iter)
{
std::string header = iter->first + ": " + iter->second;
header_list_ = curl_slist_append(header_list_, header.c_str());
}
header_list_ = authenticate_req(header_list_, verb, cur_path, opts);
checked(curl,
curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, header_list_));
checked(curl,
curl_easy_setopt(curl.get(), CURLOPT_BUFFERSIZE, 16384*16));
checked(curl,
curl_easy_setopt(curl.get(), CURLOPT_NOSIGNAL, 1));
set_url(curl, path, "");
}
void s3_connection::set_url(curl_ptr_t curl,
const s3_path &path, const std::string &args)
{
s3_path cur_path=path;
if (cur_path.path_.empty())
cur_path.path_.append("/");
std::string url = conn_data_->use_ssl_?"https://" : "http://";
url.append(cur_path.bucket_);
url.append(".").append(cur_path.zone_);
url.append(".amazonaws.com");
url.append(cur_path.path_);
url.append(args);
checked(curl,
curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str()));
}
curl_slist* s3_connection::authenticate_req(struct curl_slist * header_list,
const std::string &verb, const s3_path &path, const header_map_t &opts)
{
header_map_t my_opts = opts;
//Make a 'Date' header
std::string date_header=format_time(time(NULL));
my_opts["x-amz-date"] = date_header;
header_list = curl_slist_append(header_list,
(std::string("x-amz-date: ")+date_header).c_str());
std::string amz_headers;
for(auto iter = my_opts.begin(); iter!=my_opts.end();++iter)
{
std::string lower_hdr = iter->first;
std::transform(lower_hdr.begin(), lower_hdr.end(),
lower_hdr.begin(), ::tolower);
if (lower_hdr.find("x-amz")==0)
amz_headers.append(lower_hdr).append(":")
.append(iter->second).append("\n");
}
//Signature
std::string canonicalizedResource="/"+path.bucket_+path.path_;
std::string stringToSign = verb + "\n" +
try_get(opts, "content-md5") + "\n" +
try_get(opts, "content-type") + "\n" +
/*date_header +*/ "\n" +
amz_headers +
canonicalizedResource;
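	// This is the AWS Signature Version 2 scheme (sketch):
	//   signature = Base64(HMAC-SHA1(secret_key, StringToSign))
	// with StringToSign = VERB "\n" Content-MD5 "\n" Content-Type "\n"
	// Date "\n" CanonicalizedAmzHeaders + CanonicalizedResource; the Date
	// line is left empty because the timestamp is sent via x-amz-date.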
	std::string sign_res=sign(stringToSign);
std::string auth="Authorization: AWS "+conn_data_->api_key_+":"+sign_res;
return curl_slist_append(header_list, auth.c_str());
}
std::string s3_connection::sign(const std::string &str)
{
if (str.length() >= INT_MAX)
throw std::bad_exception();
const std::string &secret_key = conn_data_->secret_key;
char md[EVP_MAX_MD_SIZE+1]={0};
unsigned int md_len=0;
HMAC(EVP_sha1(),
secret_key.c_str(),
secret_key.length(),
(const unsigned char*)str.c_str(), str.length(),
(unsigned char*)md, &md_len);
return base64_encode(md, md_len);
}
static size_t string_appender(const char *ptr,
size_t size, size_t nmemb, void *userdata)
{
std::string *str = reinterpret_cast<std::string*>(userdata);
str->append(ptr, ptr+size*nmemb);
return size*nmemb;
}
std::string s3_connection::read_fully(const std::string &verb,
const s3_path &path,
const std::string &args,
const header_map_t &opts)
{
std::string res;
curl_ptr_t curl=conn_data_->get_curl(path.zone_, path.bucket_);
prepare(curl, verb, path, opts);
if (!args.empty())
set_url(curl, path, args);
checked(curl, curl_easy_setopt(
curl.get(), CURLOPT_WRITEFUNCTION, &string_appender));
checked(curl,curl_easy_setopt(
curl.get(), CURLOPT_WRITEDATA, &res));
checked(curl,curl_easy_perform(curl.get()));
check_for_errors(curl, res);
return res;
}
static std::string extract_leaf(const std::string &path)
{
size_t idx=path.find_last_of('/');
if (idx==std::string::npos || idx==path.size()-1)
return path;
return path.substr(idx+1);
}
static void decrement(int *ptr, mutex_t *mtx, boost::condition_variable *cv)
{
u_guard_t lock(*mtx);
(*ptr)--;
cv->notify_all();
}
s3_directory_ptr s3_connection::list_files_shallow(const s3_path &path,
s3_directory_ptr target, bool try_to_root)
{
{
const int num_reqs = conn_data_->concurrent_list_req_;
u_guard_t lock(parallel_req_mutex_);
while(num_reqs>0 && num_lists_>num_reqs)
num_parallel_reqs_.wait(lock);
num_lists_++;
}
ON_BLOCK_EXIT(&decrement, &num_lists_, ¶llel_req_mutex_, &num_parallel_reqs_);
if (!target)
{
target=s3_directory_ptr(new s3_directory());
target->absolute_name_ = path;
if (try_to_root && *path.path_.rbegin() != '/')
{
size_t pos=path.path_.find_last_of('/');
if (pos==std::string::npos)
target->absolute_name_.path_ = "/";
else
target->absolute_name_.path_ = path.path_.substr(0, pos+1);
}
}
std::string marker;
while(true)
{
std::string args;
assert(!path.path_.empty() && path.path_[0]=='/');
std::string no_leading_slash = path.path_.substr(1);
if (no_leading_slash.empty())
args="?marker="+escape(marker)+"&delimiter=/";
else
args="?prefix="+escape(no_leading_slash)+
"&marker="+escape(marker)+"&delimiter=/";
s3_path root=path;
root.path_="/";
std::string list=read_fully("GET", root, args);
TiXmlDocument doc;
doc.Parse(list.c_str());
if (doc.Error())
err(errWarn) << "Failed to get file listing from /" << path;
TiXmlHandle docHandle(&doc);
TiXmlNode *node=docHandle.FirstChild("ListBucketResult")
.FirstChild("IsTruncated")
.ToNode();
if (!node)
break;
node=node->NextSibling();
if (!node)
break;
while(node)
{
std::string name;
if (strcmp(node->Value(), "Contents")==0)
{
name = node->FirstChild("Key")->
FirstChild()->ToText()->Value();
std::string size = node->FirstChild("Size")->
FirstChild()->ToText()->Value();
std::string mtime = node->FirstChild("LastModified")->
FirstChild()->ToText()->Value();
if (*name.rbegin()!='/')
{
//Yes, Virginia, there are directory-like-files in S3
s3_file_ptr fl(new s3_file());
fl->name_ = extract_leaf(name);
fl->absolute_name_=derive(target->absolute_name_,
fl->name_);
fl->size_ = atoll(size.c_str());
fl->mtime_str_ = mtime;
fl->parent_ = target;
target->files_[fl->name_]=fl;
}
} else if (strcmp(node->Value(), "CommonPrefixes")==0)
{
name = node->FirstChild("Prefix")->
FirstChild()->ToText()->Value();
//Trim trailing '/'
std::string trimmed_name=name.substr(0, name.size()-1);
s3_directory_ptr dir(new s3_directory());
dir->name_ = extract_leaf(trimmed_name);
dir->absolute_name_=derive(target->absolute_name_,
dir->name_+"/");
dir->parent_ = target;
target->subdirs_[dir->name_] = dir;
}
node=node->NextSibling();
if (!node)
marker = name;
}
std::string is_trunc=docHandle.FirstChild("ListBucketResult")
.FirstChild("IsTruncated").FirstChild().Text()->Value();
if (is_trunc=="false")
break;
}
return target;
}
static std::string find_header(void *ptr, size_t size, size_t nmemb,
const std::string &header_name)
{
std::string line(reinterpret_cast<char*>(ptr), size*nmemb);
std::string line_raw(reinterpret_cast<char*>(ptr), size*nmemb);
std::transform(line.begin(), line.end(), line.begin(), ::tolower);
size_t pos=line.find(':');
if(pos!=std::string::npos)
{
std::string name=trim(line.substr(0, pos));
std::string val=trim(line_raw.substr(pos+1));
if (name==header_name)
return val;
}
return "";
}
static size_t find_mtime(void *ptr, size_t size, size_t nmemb, void *userdata)
{
file_desc *info=reinterpret_cast<file_desc*>(userdata);
std::string mtime=find_header(ptr, size, nmemb,
"x-amz-meta-last-modified");
if (!mtime.empty())
info->mtime_=atoll(mtime.c_str());
std::string ln=find_header(ptr, size, nmemb, "x-amz-meta-size");
if (!ln.empty())
info->raw_size_ = atoll(ln.c_str());
std::string ln2=find_header(ptr, size, nmemb, "content-length");
if (!ln2.empty())
info->remote_size_ = atoll(ln2.c_str());
std::string cmpr=find_header(ptr, size, nmemb, "x-amz-meta-compressed");
if (cmpr=="true")
info->compressed_=true;
std::string md=find_header(ptr, size, nmemb, "x-amz-meta-file-mode");
if (!md.empty())
info->mode_ = atoll(md.c_str());
return size*nmemb;
}
static size_t find_etag(void *ptr, size_t size, size_t nmemb, void *userdata)
{
std::string etag=find_header(ptr, size, nmemb, "etag");
if (!etag.empty())
reinterpret_cast<std::string*>(userdata)->assign(etag);
return size*nmemb;
}
file_desc s3_connection::find_mtime_and_size(const s3_path &path)
{
file_desc result={0};
result.compressed_=false;
result.mode_ = 0664;
result.remote_size_=result.raw_size_=0;
curl_ptr_t curl=conn_data_->get_curl(path.zone_, path.bucket_);
prepare(curl, "HEAD", path);
//last-modified
checked(curl, curl_easy_setopt(
curl.get(), CURLOPT_HEADERFUNCTION, &::find_mtime));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &result));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 1));
checked(curl, curl_easy_perform(curl.get()));
long code=404;
checked(curl, curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &code));
result.found_=code!=404;
if (result.raw_size_==0)
result.raw_size_=result.remote_size_;
return result;
}
class buf_data
{
const char *buf_;
size_t total_size_;
size_t written_;
MD5_CTX md5_ctx;
public:
buf_data(const char *buf, size_t total_size)
: buf_(buf), total_size_(total_size), written_()
{
MD5_Init(&md5_ctx);
}
std::string get_md5()
{
unsigned char md[MD5_DIGEST_LENGTH+1]={0};
MD5_Final(md, &md5_ctx);
return tobinhex(md, MD5_DIGEST_LENGTH);
}
static size_t read_func(char *bufptr, size_t size,
size_t nitems, void *userp)
{
return reinterpret_cast<buf_data*>(userp)->simple_read(
bufptr, size*nitems);
}
size_t simple_read(char *bufptr, size_t size)
{
size_t tocopy = std::min(total_size_-written_, size);
if (tocopy!=0)
{
memcpy(bufptr, buf_+written_, tocopy);
MD5_Update(&md5_ctx, bufptr, tocopy);
written_+=tocopy;
}
return tocopy;
}
};
std::string s3_connection::upload_data(const s3_path &path, const std::string &upload_id, int part_num,
const char *data, size_t size, const header_map_t& opts)
{
assert(data);
std::string etag;
buf_data read_data(data, size);
s3_path fin_path=path;
if (!upload_id.empty())
{
assert(part_num>0);
fin_path.path_+=std::string("?partNumber=")+int_to_string(part_num)+"&uploadId="+upload_id;
}
curl_ptr_t curl=conn_data_->get_curl(path.zone_, path.bucket_);
prepare(curl, "PUT", fin_path, opts);
checked(curl, curl_easy_setopt(curl.get(),
CURLOPT_HEADERFUNCTION, &find_etag));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &etag));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_UPLOAD, 1));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_INFILESIZE_LARGE,
uint64_t(size)));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_READFUNCTION,
&buf_data::read_func));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_READDATA, &read_data));
std::string result;
checked(curl, curl_easy_setopt(curl.get(),
CURLOPT_WRITEFUNCTION, &string_appender));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &result));
checked(curl, curl_easy_perform(curl.get()));
check_for_errors(curl, result);
if (!etag.empty() &&
strcasecmp(etag.c_str(), ("\""+read_data.get_md5()+"\"").c_str()))
abort(); //Data corruption. This SHOULD NOT happen!
if (!upload_id.empty())
{
s3_path chk_path=path;
chk_path.path_ += std::string("?uploadId=")+upload_id;
std::string args="&part-number-marker="+int_to_string(part_num-1)+"&max-parts=1";
std::string ans=read_fully("GET", chk_path, args);
if (!check_part(ans, part_num))
err(errWarn) << "Failed to get information about part "<< int_to_string(part_num) << " for upload " << path;
}
return etag;
}
bool s3_connection::check_part(const std::string &ans, int part_num)
{
TiXmlDocument doc;
doc.Parse(ans.c_str());
if (doc.Error())
return false;
TiXmlHandle docHandle(&doc);
TiXmlNode *node=docHandle.FirstChild("ListPartsResult")
.FirstChild("Part")
.ToNode();
if (!node)
return false;
node=node->FirstChild("PartNumber");
if (!node)
return false;
std::string text=node->FirstChild()->ToText()->Value();
if (text!=int_to_string(part_num))
return false;
return true;
}
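// Best-effort lexicographic predecessor of a string: decrement the last
// character; when it underflows past ',', set it to 'z' and borrow from
// the next character to the left.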
std::string lexioprev(const std::string &cur)
{
std::string res=cur;
for(auto iter=res.rbegin(); iter!=res.rend();++iter)
{
char ch=*iter;
ch--;
if (ch>=',')
{
*iter=ch;
break;
} else
*iter='z';
}
return res;
}
std::string s3_connection::initiate_multipart(
const s3_path &path, const header_map_t &opts)
{
s3_path up_path =path;
up_path.path_+="?uploads";
std::string list=read_fully("POST", up_path, "", opts);
TiXmlDocument doc;
doc.Parse(list.c_str());
if (doc.Error())
err(errWarn) << "Failed to initiate multipart to " << path;
TiXmlHandle docHandle(&doc);
TiXmlNode *node=docHandle.FirstChild("InitiateMultipartUploadResult")
.FirstChild("UploadId")
.FirstChild()
.ToText();
if (!node)
err(errWarn) << "Incorrect document format - no upload ID";
std::string uploadId=node->Value();
//Validate that the upload is created
s3_path all_paths=path;
all_paths.path_="/?uploads";
std::string cur_uploads=read_fully("GET", all_paths, "&prefix="+path.path_.substr(1));
TiXmlDocument cur_uploads_xml;
cur_uploads_xml.Parse(cur_uploads.c_str());
if (cur_uploads_xml.Error())
err(errWarn) << "Failed to initiate multipart to " << path;
TiXmlHandle curDocHandle(&cur_uploads_xml);
TiXmlNode *cur_node=curDocHandle.FirstChild("ListMultipartUploadsResult")
.FirstChild("Upload").ToNode();
if (!cur_node)
err(errWarn) << "Incorrect document format - no upload ID";
while(cur_node)
{
TiXmlHandle upHandle(cur_node);
std::string val=upHandle.FirstChild("UploadId").FirstChild().ToText()->ValueStr();
// std::cerr<<"Val "<< val<<std::endl;
if (val==uploadId)
return uploadId;
cur_node=cur_node->NextSibling();
}
err(errWarn) << "Can't find an active upload with id="<<uploadId;
}
std::string s3_connection::complete_multipart(const s3_path &path,
const std::string &upload_id, const std::vector<std::string> &etags)
{
std::string data="<CompleteMultipartUpload>";
for(size_t f=0;f<etags.size();++f)
{
data.append("<Part>\n");
data.append(" <PartNumber>")
.append(int_to_string(f+1))
.append("</PartNumber>\n");
data.append(" <ETag>")
.append(etags.at(f))
.append("</ETag>\n");
data.append("</Part>\n");
}
data.append("</CompleteMultipartUpload>");
s3_path up_path=path;
up_path.path_+="?uploadId="+upload_id;
curl_ptr_t curl=conn_data_->get_curl(path.zone_, path.bucket_);
prepare(curl, "POST", up_path);
buf_data data_params(data.c_str(), data.size());
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_UPLOAD, 1));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_INFILESIZE_LARGE,
uint64_t(data.size())));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_READFUNCTION,
&buf_data::read_func));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_READDATA, &data_params));
std::string read_data;
checked(curl, curl_easy_setopt(
curl.get(), CURLOPT_WRITEFUNCTION, &string_appender));
checked(curl, curl_easy_setopt(
curl.get(), CURLOPT_WRITEDATA, &read_data));
checked(curl, curl_easy_perform(curl.get()));
check_for_errors(curl, read_data);
VLOG(2) << "Completed multipart of " << path;
return read_data;
}
class write_data
{
char *buf_;
size_t total_size_;
size_t written_;
public:
write_data(char *buf, size_t total_size)
: buf_(buf), total_size_(total_size), written_()
{
}
size_t written() const { return written_; }
static size_t write_func(const char *bufptr, size_t size,
size_t nitems, void *userp)
{
return reinterpret_cast<write_data*>(userp)->simple_write(
bufptr, size*nitems);
}
size_t simple_write(const char *bufptr, size_t size)
{
size_t tocopy = std::min(total_size_-written_, size);
if (tocopy!=0)
{
memcpy(buf_+written_, bufptr, tocopy);
written_+=tocopy;
}
return tocopy;
}
};
void s3_connection::download_data(const s3_path &path,
uint64_t offset, char *data, size_t size, const header_map_t& opts)
{
curl_ptr_t curl=conn_data_->get_curl(path.zone_, path.bucket_);
prepare(curl, "GET", path, opts);
checked(curl, curl_easy_setopt(
curl.get(), CURLOPT_INFILESIZE_LARGE, uint64_t(size)));
std::string range=int_to_string(offset)+"-"+
int_to_string(offset+size-1);
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_RANGE, range.c_str()));
write_data wd(data, size);
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION,
&write_data::write_func));
checked(curl, curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &wd));
checked(curl, curl_easy_perform(curl.get()));
check_for_errors(curl, std::string(data,
std::min(wd.written(), size_t(1024))));
if (wd.written()!=size)
err(errWarn) << "Size of a segment at offset " << offset
<< " of "<< path << " is incorrect.";
}
std::string s3_connection::find_region(const std::string &bucket)
{
s3_path path;
path.zone_ = "s3";
path.bucket_ = bucket;
path.path_ = "/?location";
std::string reg_data=read_fully("GET", path);
TiXmlDocument doc;
doc.Parse(reg_data.c_str());
if (doc.Error())
err(errWarn) << "Can't find region, bad document received. "
<< doc.ErrorDesc();
TiXmlHandle docHandle(&doc);
TiXmlNode *n1=docHandle.FirstChild("LocationConstraint").ToNode();
if (!n1)
err(errWarn) << "Incorrect document format - no location id";
TiXmlNode *node=docHandle.FirstChild("LocationConstraint")
.FirstChild()
.ToText();
if (!node)
return "s3"; //Default location
return std::string("s3-")+node->Value();
}
void s3_connection::set_acl(const s3_path &path, const std::string &acl)
{
header_map_t hm;
hm["x-amz-acl"]="public-read";
s3_path p1=path;
p1.path_+="?acl";
std::string res=read_fully("PUT", p1, "", hm);
//Result can be ignored - it's the exit code that is important.
}
|
{"hexsha": "5fcf895b181fbb0c6531bd19ed3efb645e85246c", "size": 23427, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "es3/connection.cpp", "max_stars_repo_name": "Cyberax/extremes3", "max_stars_repo_head_hexsha": "dc95b65a84778defc8fcc6d55554de2670cef6fc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "es3/connection.cpp", "max_issues_repo_name": "Cyberax/extremes3", "max_issues_repo_head_hexsha": "dc95b65a84778defc8fcc6d55554de2670cef6fc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "es3/connection.cpp", "max_forks_repo_name": "Cyberax/extremes3", "max_forks_repo_head_hexsha": "dc95b65a84778defc8fcc6d55554de2670cef6fc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8865598027, "max_line_length": 120, "alphanum_fraction": 0.6844239553, "num_tokens": 6249}
|
# *-* coding: utf-8 *-*
"""Read MPT DAS-1 data files.
TODO:
"""
import re
import pandas as pd
import numpy as np
from reda.tdip.decay_curve import DecayCurveObj
# from reda.importers.utils.decorators import enable_result_transforms
def get_frequencies(filename, header_row):
"""Read the used frequencies in header of DAS-1 SIP data set.
Parameters
----------
filename : str
input filename
header_row : int
row number of header row
Returns
-------
frequencies : list
Contains the measured frequencies
"""
fid = open(filename, 'r')
lines = fid.readlines()
freq_header = lines[header_row + 1].split('Hz')[:-1]
frequencies = [re.sub(r"[^\d\.]", "", part) for part in freq_header]
fid.close()
return frequencies
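# For illustration (hypothetical header line): a frequency header such as
#     "1.0000Hz    10.0000Hz    100.0000Hz"
# yields the list ['1.0000', '10.0000', '100.0000'].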
def import_das1_fd(filename, **kwargs):
"""Reads a frequency domain (single frequency) MPT DAS-1 data file (.Data)
and pre pares information in pandas DataFrame for further processing.
Parameters
----------
filename : str
path to input file
corr_array : list, optional
used to correct the electrode numbers [a, b, m, n], eg. for cable
layouts which separated current and potential cables, hence, 64
electrodes for a measurement profile of 32 electrodes
Returns
-------
data : :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes : :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography : None
No topography information is contained in the text files, so we always
return None
"""
    corr_array = kwargs.get('corr_array', [0, 0, 0, 0])
df = pd.read_csv(filename,
delimiter=' ',
comment='!',
index_col=0)
# derive rows used in data block
data_start = df.index.get_loc('#data_start')
data_end = df.index.get_loc('#data_end')
data = df.iloc[data_start + 1: data_end].dropna(axis=1)
data_new = pd.DataFrame()
# def split_A(strA):
# return int(strA.split(','))
# A, B, M, N
data_new['a'] = [
int(x.split(',')[1]) - corr_array[0] for x in data.iloc[:, 0]]
data_new['b'] = [
int(x.split(',')[1]) - corr_array[1] for x in data.iloc[:, 1]]
data_new['m'] = [
int(x.split(',')[1]) - corr_array[2] for x in data.iloc[:, 2]]
data_new['n'] = [
int(x.split(',')[1]) - corr_array[3] for x in data.iloc[:, 3]]
data_new['r'] = np.array(data.iloc[:, 4]).astype('float') # resistance Ohm
data_new['rpha'] = np.array(data.iloc[:, 5]).astype('float') # phase mrad
data_new['I'] = np.array(data.iloc[:, 12]).astype('float') # current in mA
data_new['dr'] = np.array(
data.iloc[:, 9]
).astype('float') / (data_new['I'] / 1000)
data_new['Zt'] = data_new['r'] * np.exp(data_new['rpha'] * 1j / 1000.0)
datetime_series = pd.to_datetime(data.iloc[:, -7],
format='%Y%m%d_%H%M%S',
errors='ignore')
    data_new['datetime'] = [
        time for index, time in datetime_series.items()
    ]
return data_new, None, None
def import_das1_td(filename, **kwargs):
"""
Reads a time domain MPT DAS-1 data file (.Data) and prepares information
in pandas DataFrame for further processing.
Parameters
----------
filename : str
path to input file
Keyword arguments:
------------------
    corr_array : list
used to correct the electrode numbers [a, b, m, n], eg. for cable
layouts which separated current and potential cables, hence, 64
electrodes for a measurement profile of 32 electrodes
Returns
-------
data : :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes : :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography : None
No topography information is contained in the text files, so we always
return None
"""
    corr_array = kwargs.get('corr_array', [0, 0, 0, 0])
with open(filename, 'r') as fid:
for idx, line in enumerate(fid):
if '#data_start' in line:
d_start = idx
if '#data_end' in line:
d_end = idx
if '!List' in line:
tm_start = idx
if '#elec_start' in line:
tm_end = idx
if '#TIPDly' in line:
mdelay = float(line.split('\t')[1])
# import the data block
data = pd.read_csv(
filename,
delimiter=' ',
index_col=0,
names=range(0, 10**3), # dump the file in huge array
skiprows=d_start + 1,
nrows=d_end - d_start - 3, # skip headers after #data_start
low_memory=False
)
# sometimes there are additional comment lines in the file
indices_comments = data.index == '!'
data = data.iloc[~indices_comments]
# import IPython
# IPython.embed()
header = pd.read_csv(
filename,
delimiter='\t',
# comment='!',
# index_col=0,
skiprows=tm_start - 1,
nrows=tm_end - tm_start - 1
)
ngates = len(header)
    ipw = np.array(header.iloc[:, 0]).astype(float)
data_new = pd.DataFrame()
# def split_A(strA):
# return int(strA.split(',')[1])
# data.iloc[:, 0].apply(split_A)
# A, B, M, N
data_new['a'] = [
int(x.split(',')[1]) - corr_array[0] for x in data.iloc[:, 0]]
data_new['b'] = [
int(x.split(',')[1]) - corr_array[1] for x in data.iloc[:, 1]]
data_new['m'] = [
int(x.split(',')[1]) - corr_array[2] for x in data.iloc[:, 2]]
data_new['n'] = [
int(x.split(',')[1]) - corr_array[3] for x in data.iloc[:, 3]]
data_new['r'] = np.array(data.iloc[:, 4]).astype('float') # resistance
data_new['dr'] = np.array(data.iloc[:, 5]).astype('float') # devR
# voltage in mV
data_new['Vab'] = np.array(data.iloc[:, 6].astype('float') * 1000)
data_new['dVab'] = np.array(
data.iloc[:, 7].astype('float') * 1000) # deviation voltage in mV
# curret in mA
data_new['I'] = np.array(data.iloc[:, 8 + 2 * ngates]).astype('float')
data_new['mdelay'] = mdelay
# use helper DataFrames for Mx, Tm, dMx
data_m = pd.DataFrame(
columns=['M' + str(num) for num in range(1, ngates + 1)],
index=data_new.index
)
    data_m.loc[:, 'M1':'M' + str(ngates)] = np.array(
        data.iloc[:, 8:8 + 2 * ngates:2]).astype(float)  # Mi
data_tm = pd.DataFrame(
columns=['Tm' + str(num) for num in range(1, ngates + 1)],
index=data_new.index
)
data_tm.loc[:, 'Tm1':'Tm' + str(ngates)] = ipw
data_devm = pd.DataFrame(
columns=['devm' + str(num) for num in range(1, ngates + 1)],
index=data_new.index
)
    data_devm.loc[:, 'devm1':'devm' + str(ngates)] = np.array(
        data.iloc[:, 9:9 + 2 * ngates:2]
    ).astype(float)  # devMi
# compute the global chargeability
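    # (gate-width weighted mean: M = sum(M_i * Tm_i) / sum(Tm_i))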
    numerator = np.sum(
        np.array(data_m.loc[:, 'M1': 'M' + str(ngates)]) *
        np.array(data_tm.loc[:, 'Tm1': 'Tm' + str(ngates)]),
        axis=1
    )
    denominator = np.sum(
        np.array(data_tm.loc[:, 'Tm1': 'Tm' + str(ngates)]),
        axis=1
    )
    data_new['chargeability'] = numerator / denominator
datetime_series = pd.to_datetime(
data.iloc[:, 10 + 2 * ngates],
format='%Y%m%d_%H%M%S',
errors='ignore'
)
    data_new['datetime'] = [
        time for index, time in datetime_series.items()]
data_new['decayCurve'] = 0
# construct a sub DataFrame for decay curve properties
for index, meas in data_m.iterrows():
decaycurve = pd.DataFrame(
index=range(len(ipw)), columns=['Mx', 'T[ms]', 'dMx']
)
decaycurve['Mx'] = meas.values
# use the gate ending as plotting point
decaycurve['T[ms]'] = mdelay + np.cumsum(ipw)
decaycurve['dMx'] = data_devm.iloc[index, :].values
decaycurve = decaycurve.set_index('T[ms]')
data_new.at[index, 'decayCurve'] = DecayCurveObj(decaycurve)
return data_new, None, None
def import_das1_sip(filename, **kwargs):
"""Reads a spectral induced polarization MPT DAS-1 data file (.Data) and
prepares information in pandas DataFrame for further processing.
Parameters
----------
filename : string
path to input file
Keyword arguments:
------------------
    corr_array : list
used to correct the electrode numbers [a, b, m, n], eg. for cable
layouts which separated current and potential cables, hence, 64
electrodes for a measurement profile of 32 electrodes
Returns
-------
data : :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes : :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography : None
No topography information is contained in the text files, so we always
return None
"""
    corr_array = kwargs.get('corr_array', [0, 0, 0, 0])
d_start, d_end = 0, 0
# deduce the data block here
# look for lines with #data_start/#data_end
with open(filename, 'r') as fid:
for idx, line in enumerate(fid):
if '#data_start' in line:
d_start = idx
if '#data_end' in line:
d_end = idx
# import the data block
data = pd.read_csv(
filename,
delimiter=' ',
index_col=0,
names=range(0, 10 ** 3), # dump the file in huge array
skiprows=d_start + 3,
# skip headers after #data_start
nrows=d_end - d_start - 4,
low_memory=False
)
# sometimes there are additional comment lines in the file
indices_comments = data.index == '!'
data = data.iloc[~indices_comments]
frequency_list = get_frequencies(filename, d_start)
frequencies = np.array(frequency_list).astype(float)
num_freqs = len(frequencies)
num_meas = d_end - d_start - 4
    # number of NaN and unused columns present when a quadrupole has the
    # '<< * * TX Resist. out of range * * >>' error
    tx_out_skip = 22
    nan_index = None
    # per-quadrupole counter of how often the above-mentioned error occurred
    fskip_count = np.zeros(num_meas).astype(int)
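    # e.g. a quadrupole that produced the TX error in one earlier frequency
    # block has fskip_count == 1, so all of its later column indices are
    # shifted by tx_out_skip (22) columns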
data_new = pd.DataFrame()
data_new['a'] = [
int(x.split(',')[1]) - corr_array[0] for x in data.iloc[:, 0]]
data_new['b'] = [
int(x.split(',')[1]) - corr_array[1] for x in data.iloc[:, 1]]
data_new['m'] = [
int(x.split(',')[1]) - corr_array[2] for x in data.iloc[:, 2]]
data_new['n'] = [
int(x.split(',')[1]) - corr_array[3] for x in data.iloc[:, 3]]
data_fin = pd.DataFrame(
columns=[
'a', 'b', 'm', 'n', 'frequency', 'Zt', 'r', 'dr', 'rpha', 'drpha',
'I', 'datetime'
]
)
# array to check for error
dout_r = np.zeros((num_meas, len(frequencies)))
for idx, freq in enumerate(frequencies):
print('Processing frequency: %s Hz' % str(freq))
if nan_index is not None:
# iterate over quadrupoles and skip columns based on fskip_count
for row_idx in range(len(data)):
data_new.loc[row_idx, 'frequency'] = freq
# resistance
data_new.loc[row_idx, 'r'] = float(
data.iloc[
row_idx,
idx * 6 + 4 + tx_out_skip * fskip_count[row_idx]
]
)
# phase
data_new.loc[row_idx, 'rpha'] = float(
data.iloc[
row_idx, idx * 6 + 6 + tx_out_skip * fskip_count[
row_idx]])
# current in mA
data_new.loc[row_idx, 'I'] = float(
data.iloc[
row_idx, idx * 6 + 8 +
tx_out_skip * fskip_count[row_idx]])
# devR
data_new.loc[row_idx, 'dr'] = float(
data.iloc[
row_idx, idx * 6 + 5 + tx_out_skip * fskip_count[
row_idx]])
# devPhi
data_new.loc[row_idx, 'drpha'] = float(
data.iloc[
row_idx, idx * 6 + 7 + tx_out_skip * fskip_count[
row_idx]])
# array to check for error
dout_r[row_idx, idx] = float(
data.iloc[
row_idx, idx * 6 + 4 + tx_out_skip *
fskip_count[row_idx]])
else:
dout_r[:, idx] = np.array(data.iloc[:, idx * 6 + 4])
data_new['frequency'] = freq
# resistance
data_new['r'] = np.array(data.iloc[:, idx * 6 + 4]).astype('float')
# phase
data_new['rpha'] = np.array(
data.iloc[:, idx * 6 + 6]).astype('float')
# current in mA
data_new['I'] = np.array(data.iloc[:, idx * 6 + 8]).astype('float')
# devR
data_new['dr'] = np.array(data.iloc[:, idx * 6 + 5]).astype(
'float')
# dev Phi
data_new['drpha'] = np.array(
data.iloc[:, idx * 6 + 7]).astype('float')
# check for quadrupoles containing nans (because of error)
nan_index = np.where(np.isnan(dout_r[:, idx]) == 1)[0]
        # increase the skip count for erroneous quadrupoles
        fskip_count[nan_index] = fskip_count[nan_index] + 1
        data_fin = pd.concat([data_fin, data_new], ignore_index=True, sort=False)
# compute Zt
data_fin['Zt'] = data_fin['r'] * np.exp(data_fin['rpha'] * 1j / 1000.0)
    # extract the measurement timestamps (from the last frequency block)
    start = len(frequencies) - 1
for row_idx in range(len(data)):
data_new.at[row_idx, 'datetime'] = data.iloc[
row_idx, start * 6 + 11 + tx_out_skip * fskip_count[row_idx]]
datetime_series = pd.to_datetime(
data_new['datetime'], format='%Y%m%d_%H%M%S', errors='ignore')
    datetime_stack = pd.concat([datetime_series] * num_freqs)
datetime_stack_reindex = datetime_stack.reset_index()
data_fin['datetime'] = datetime_stack_reindex['datetime']
data_fin[['a', 'b', 'm', 'n']] = data_fin[['a', 'b', 'm', 'n']].astype(
int
)
return data_fin, None, None
def get_measurement_type(filename):
"""
Given an MPT DAS-1 result file, try to determine the type of measurement.
Currently supported/detected types:
* TDIP (i.e., time-domain measurement)
* FD (i.e., complex measurement)
    * SIP (i.e., sEIT measurements)
Parameters
----------
filename : str
Path to data file
Returns
-------
measurement_type : str
        The type of measurement: 'tdip', 'cr' or 'sip'
"""
with open(filename, 'r') as fid:
for line in fid:
if '!TDIP' in line:
return 'tdip'
if '!Spectral' in line:
return 'sip'
if '!FDIP' in line:
return 'cr'
raise Exception(
'Data type (FD/TD/SIP) of '
'{0} cannot be determined. Check the file!'.format(filename)
)
def import_das1(filename, **kwargs):
"""
    Reads any MPT DAS-1 data file (.Data), e.g. TD/FD/SIP, and prepares
information in pandas DataFrame for further processing.
Parameters
----------
filename : string
path to input file
Keyword arguments:
------------------
    corr_array : list
used to correct the electrode numbers [a, b, m, n], eg. for cable
layouts which separated current and potential cables, hence, 64
electrodes for a measurement profile of 32 electrodes
Returns
-------
data : :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes : :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography : None
No topography information is contained in the text files, so we always
return None
"""
measurement_type = get_measurement_type(filename)
if measurement_type == 'tdip':
data, electrodes, topography = import_das1_td(
filename, **kwargs)
return data, electrodes, topography
elif measurement_type == 'sip':
data, electrodes, topography = import_das1_sip(
filename, **kwargs)
return data, electrodes, topography
elif measurement_type == 'cr':
data, electrodes, topography = import_das1_fd(
filename, **kwargs)
return data, electrodes, topography
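# Minimal usage sketch (the filename below is illustrative):
#     data, electrodes, topography = import_das1('measurement.Data')
#     print(data[['a', 'b', 'm', 'n', 'r']].head())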
|
{"hexsha": "ebe42df0f92d8a76e12e9bba56f1aca6d03c392c", "size": 17057, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/reda/importers/mpt_das1.py", "max_stars_repo_name": "j-hase/reda", "max_stars_repo_head_hexsha": "b6419c39842cfbdd9380a27a5c6e9a04ccaeb294", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-12-11T08:32:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T05:41:57.000Z", "max_issues_repo_path": "lib/reda/importers/mpt_das1.py", "max_issues_repo_name": "j-hase/reda", "max_issues_repo_head_hexsha": "b6419c39842cfbdd9380a27a5c6e9a04ccaeb294", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2017-11-12T11:10:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-11T13:52:44.000Z", "max_forks_repo_path": "lib/reda/importers/mpt_das1.py", "max_forks_repo_name": "geophysics-ubonn/REDA", "max_forks_repo_head_hexsha": "8f0399031121f5a937171231a25f9ab03a3c8873", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-11-12T12:02:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T06:54:04.000Z", "avg_line_length": 31.8822429907, "max_line_length": 79, "alphanum_fraction": 0.5583045084, "include": true, "reason": "import numpy", "num_tokens": 4415}
|
#!/usr/bin/env python
######################
## written by Wojciech Dudek
######################
__author__ = "Wojciech Dudek"
from nav_msgs.msg import Odometry
from tf import transformations
import tf
import rospy
import sys
import signal
from sensor_msgs.msg import Imu
from rapp_ros_naoqi_wrappings.srv import GetRobotPose,GetRobotPoseResponse
from rapp_ros_naoqi_wrappings.srv import SetGlobalPose, SetGlobalPoseResponse
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, PoseStamped
from naoqi import ALModule
from naoqi import ALBroker
import numpy as np
# Constants
class Constants:
NAO_IP = "nao.local"
PORT = 9559
class NaoEstimator(ALModule):
def __init__(self,name):
ALModule.__init__(self, name)
rospy.init_node('acore_estimator')
self.moduleName = name
# self.connectNaoQi()
self.setVariables()
self.dataNamesList = ["DCM/Time",
"Device/SubDeviceList/InertialSensor/AngleX/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AngleY/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AngleZ/Sensor/Value",
"Device/SubDeviceList/InertialSensor/GyroscopeX/Sensor/Value",
"Device/SubDeviceList/InertialSensor/GyroscopeY/Sensor/Value",
"Device/SubDeviceList/InertialSensor/GyroscopeZ/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AccelerometerX/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AccelerometerY/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AccelerometerZ/Sensor/Value"]
self.FSRdataList = ["Device/SubDeviceList/LFoot/FSR/FrontLeft/Sensor/Value",
"Device/SubDeviceList/LFoot/FSR/FrontRight/Sensor/Value",
"Device/SubDeviceList/LFoot/FSR/RearLeft/Sensor/Value",
"Device/SubDeviceList/LFoot/FSR/RearRight/Sensor/Value",
"Device/SubDeviceList/RFoot/FSR/FrontLeft/Sensor/Value",
"Device/SubDeviceList/RFoot/FSR/FrontRight/Sensor/Value",
"Device/SubDeviceList/RFoot/FSR/RearLeft/Sensor/Value",
"Device/SubDeviceList/RFoot/FSR/RearRight/Sensor/Value"]
self.MsgsInit()
self.startSubscribers()
self.openServices()
def setVariables(self):
self.markers=["Wall","Door","Wardrobe","Stable object","Microwave","Fridge"]
self.tl = tf.TransformListener(True, rospy.Duration(20.0))
###
        # constPose - definition of the object used in SubCall; it is used to operate on the obtained data
###
self.constPose = PoseWithCovarianceStamped()
self.constPose.pose.pose.position.x=0
self.constPose.pose.pose.position.y=0
self.constPose.pose.pose.orientation.z=0
self.constPose.pose.pose.orientation.w=1
###
# odom_transformation - definition of object used in SubCall. Contains data that will be published.
###
self.odom_transformation = Pose()
self.odom_transformation.position = [0,0,0]
self.odom_transformation.orientation = [0,0,0,1]
def handle_getRobotPose(self,req):
actual_pose = PoseStamped()
try:
if self.tl.canTransform("map","base_link",rospy.Time()):
ekf_pose = self.tl.lookupTransform("map","base_link",rospy.Time())
actual_pose.pose.position.x = ekf_pose[0][0]
actual_pose.pose.position.y = ekf_pose[0][1]
actual_pose.pose.position.z = ekf_pose[0][2]
actual_pose.pose.orientation.x = ekf_pose[1][0]
actual_pose.pose.orientation.y = ekf_pose[1][1]
actual_pose.pose.orientation.z = ekf_pose[1][2]
actual_pose.pose.orientation.w = ekf_pose[1][3]
actual_pose.header.seq = 1
actual_pose.header.stamp= rospy.Time.now()
actual_pose.header.frame_id = "map"
                status = True
            else:
                status = False
except Exception, ex:
print "[Estimator server] - Exception %s" % str(ex)
status = False
print "STATUS = ", status
return GetRobotPoseResponse(actual_pose)
def handle_setGlobalPose(self,req):
try:
self.SubCall(req)
#
            # check whether the actual state equals the request and set the status accordingly
#
rospy.sleep(4)
status = True
except Exception, ex:
print "[Estimator server] - Exception %s" % str(ex)
status = False
return SetGlobalPoseResponse(status)
def startSubscribers(self):
rospy.Subscriber("/initialpose", PoseWithCovarianceStamped, self.SubCall)
#rospy.Subscriber("/odometry/filtered", Odometry, self.publishEKFframe)
def SubCall(self,data):
self.constPose.pose.pose.position = data.pose.pose.position
self.constPose.pose.pose.orientation = data.pose.pose.orientation
#
        # handle the initial pose
#
self.euler_initial = tf.transformations.euler_from_quaternion((0,0,self.constPose.pose.pose.orientation.z,self.constPose.pose.pose.orientation.w))
#
        # find the transformation from odom to Nao_T_odom
#
if self.tl.canTransform("odom","base_link",rospy.Time()):
transform_Nao_odom = self.tl.lookupTransform("base_link","odom", rospy.Time())
euler_transform_Nao_odom = tf.transformations.euler_from_quaternion(transform_Nao_odom[1])
#
            # calculate the new odom position so that Nao_T_odom ends up at the requested pose
#
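            # 2D rigid-body composition (sketch): with the requested map pose
            # (x_r, y_r, theta_r) and the base_link -> odom transform
            # (x_o, y_o, theta_o), the new odom origin is
            #   x = x_r + cos(theta_r)*x_o - sin(theta_r)*y_o
            #   y = y_r + sin(theta_r)*x_o + cos(theta_r)*y_o
            #   theta = theta_r + theta_o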
matrix_Nao_odom= np.linalg.pinv(np.array([[np.cos(euler_transform_Nao_odom[2]),-np.sin(euler_transform_Nao_odom[2]),0,transform_Nao_odom[0][0]],
[np.sin(euler_transform_Nao_odom[2]),np.cos(euler_transform_Nao_odom[2]),0,transform_Nao_odom[0][1]],
[0,0,0,transform_Nao_odom[0][2]],
[0,0,0,1]]))
self.odom_transformation.position = [self.constPose.pose.pose.position.x+np.cos(self.euler_initial[2])*transform_Nao_odom[0][0]-np.sin(self.euler_initial[2])*transform_Nao_odom[0][1],
self.constPose.pose.pose.position.y+np.sin(self.euler_initial[2])*transform_Nao_odom[0][0]+np.cos(self.euler_initial[2])*transform_Nao_odom[0][1],
0]
self.odom_transformation.orientation = tf.transformations.quaternion_from_euler(0,0,euler_transform_Nao_odom[2]+self.euler_initial[2])
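#
# The block above re-anchors the odom frame in 2D: the new map->odom
# translation is p_init + R(theta_init) * t, where t is the base_link->odom
# translation and theta_init the requested yaw, and the new yaw is
# theta_odom + theta_init. A minimal standalone sketch of the same planar
# composition (plain numpy, no tf; all values purely illustrative):
#
#   theta_init = np.pi / 2                    # requested yaw of base_link in map
#   t = np.array([1.0, 0.0])                  # base_link -> odom translation
#   p_init = np.array([2.0, 3.0])             # requested base_link position
#   R = np.array([[np.cos(theta_init), -np.sin(theta_init)],
#                 [np.sin(theta_init),  np.cos(theta_init)]])
#   odom_position = p_init + R.dot(t)         # -> [2.0, 4.0]
#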
def MsgsInit(self):
# init. messages:
self.torsoOdom = Odometry()
self.torsoOdom.header.frame_id = "odom"
self.torsoOdom.child_frame_id = "Nao_T_odom"
self.torsoOdomPub = rospy.Publisher("odom", Odometry, queue_size=10)
self.torsoIMU = Imu()
self.torsoIMU.header.frame_id = "base_link"
self.torsoIMUPub = rospy.Publisher("imu_data", Imu, queue_size=10)
self.torsoIMU_EKF = Imu()
self.torsoIMU_EKF.header.frame_id = "Nao_T_odom"
self.torsoIMU_EKF_Pub = rospy.Publisher("imu_data_EKF", Imu, queue_size=10)
self.tf_br = tf.TransformBroadcaster()
self.ODOM_POSE_COVARIANCE = [1e-3, 0, 0, 0, 0, 0,
0, 1e-3, 0, 0, 0, 0,
0, 0, 1e6, 0, 0, 0,
0, 0, 0, 1e6, 0, 0,
0, 0, 0, 0, 1e6, 0,
0, 0, 0, 0, 0, 1e3]
self.ODOM_POSE_COVARIANCE2 = [1e-9, 0, 0, 0, 0, 0,
0, 1e-3, 1e-9, 0, 0, 0,
0, 0, 1e6, 0, 0, 0,
0, 0, 0, 1e6, 0, 0,
0, 0, 0, 0, 1e6, 0,
0, 0, 0, 0, 0, 1e-9]
self.ODOM_TWIST_COVARIANCE = [1e-3, 0, 0, 0, 0, 0,
0, 1e-3, 0, 0, 0, 0,
0, 0, 1e6, 0, 0, 0,
0, 0, 0, 1e6, 0, 0,
0, 0, 0, 0, 1e6, 0,
0, 0, 0, 0, 0, 1e3]
self.ODOM_TWIST_COVARIANCE2 = [1e-9, 0, 0, 0, 0, 0,
0, 1e-3, 1e-9, 0, 0, 0,
0, 0, 1e6, 0, 0, 0,
0, 0, 0, 1e6, 0, 0,
0, 0, 0, 0, 1e6, 0,
0, 0, 0, 0, 0, 1e-9]
def openServices(self):
try:
print "[Estimator server] - service - [rapp_setGlobalPose]"
self.service_set = rospy.Service('rapp_setGlobalPose', SetGlobalPose, self.handle_setGlobalPose)
except Exception, ex:
print "[Estimator server] - Exception %s" % str(ex)
try:
print "[Estimator server] - service - [rapp_getRobotPose]"
self.service_get = rospy.Service('rapp_getRobotPose', GetRobotPose, self.handle_getRobotPose)
except Exception, ex:
print "[Estimator server] - Exception %s" % str(ex)
def rapp_move_vel_interface(self,v_x,v_y,v_theta):
move = rospy.ServiceProxy('rapp_moveVel', MoveVel)
speeds = MoveVelRequest()
speeds.velocity_x = v_x
speeds.velocity_y = v_y
speeds.velocity_theta = v_theta
resp1 = move(speeds)
# def handle_rapp_VisOdom(self,req):
# #transform = self.locateMarkers()
# self.rapp_take_predefined_pose_interface("Stand")
# decomposed_robot_in_QR = self.estimateNaoPosition()
# self.moveOdom(decomposed_robot_in_QR)
# feedback = True
# return VisOdomResponse(feedback)
# def locateMarkers(self):
# # Find closest marker to "base_link" frame
# min_dist = 1000
# can_locate = False
# while can_locate == False:
# if self.tl.canTransform("base_link","Wall",rospy.Time()):
# i=0
# can_locate = True
# for i in range(len(self.markers)):
# i+=1
# transform = self.tl.lookupTransform("base_link", self.markers[i-1], rospy.Time())
# if reachable
# distance= np.sqrt(transform[0][0]*transform[0][0]+transform[0][1]*transform[0][1])
# if min_dist > distance:
# min_dist = distance
# marker_id = i-1
# marker_transformation = transform
# elif min_dist == 1000:
# i=0
# print "min_dist == 1000 "
# print "Closest marker : %s" %(self.markers[i-1])
# return transform
def turnNaoHead(self,head_yaw,head_pitch):
moveNaoHead = rospy.ServiceProxy('rapp_moveHead', MoveHead)
resp1 = moveNaoHead(head_yaw,head_pitch)
# def estimateNaoPosition(self):
# # get image
# head_yaw = 0
# head_pitch = 0
# head_yaw_max = 2.08
# gotQRcode = False
# resetHeadPosition = False
# while gotQRcode!=True:
# self.turnNaoHead(head_yaw,head_pitch)
# getImage = rospy.ServiceProxy('rapp_capture_image', GetImage)
# image_response= getImage("top - adaptive auto exposure 2",3)
# frame = image_response.frame
# if (frame.height == 0 or frame.width == 0): # //frame is empty
# print "Camera frame is empty"
# else:
# print "QRcode detection ..."
# detectQRcodes = rospy.ServiceProxy('rapp_detect_qrcodes',DetectQRcodes)
# QRcodeDetectionStruct = detectQRcodes(frame)
# print "number of QRcodes: \n",QRcodeDetectionStruct.numberOfQRcodes
# # Is Qrcode found
# if QRcodeDetectionStruct.numberOfQRcodes > 0:
# gotQRcode = True
# #get transform camera -> torso
# robot_camera_transform = self.motionProxy.getTransform("CameraTop", 0, True)
# matrix_camera_torso = (np.array([[robot_camera_transform[0],robot_camera_transform[1],robot_camera_transform[2],robot_camera_transform[3]],
# [robot_camera_transform[4],robot_camera_transform[5],robot_camera_transform[6],robot_camera_transform[7]],
# [robot_camera_transform[5],robot_camera_transform[6],robot_camera_transform[7],robot_camera_transform[8]],
# [robot_camera_transform[9],robot_camera_transform[10],robot_camera_transform[11],robot_camera_transform[12]]]))
# print "matrix camera->torso: \n",matrix_camera_torso
# #compute robot position in QRcode frame
# self.VO_frame_ID = QRcodeDetectionStruct.message[0]
# print "Localising via : [",self.VO_frame_ID ,"] position"
# matrix_R_in_C_Rz = np.array([[np.cos((3.14/2)-head_yaw),-np.sin((3.14/2)-head_yaw),0,0],
# [np.sin((3.14/2)-head_yaw), np.cos((3.14/2)-head_yaw),0,0],
# [0 ,0 ,1,0],
# [0 ,0 ,0,1]])
# matrix_R_in_C_Rx = np.array([[1,0,0,0],
# [0,np.cos((3.14/2)+head_pitch),-np.sin((3.14/2)+head_pitch),0],
# [0,np.sin((3.14/2)+head_pitch),np.cos((3.14/2)+head_pitch),0],
# [0,0,0,1]])
# matrix_R_in_C_translation = np.array([[1,0,0,robot_camera_transform[7]],
# [0,1,0,-robot_camera_transform[3]],
# [0,0,1,-0.2],
# [0,0,0,1]])
# matrix_camera_in_QR = np.linalg.pinv(np.array([[QRcodeDetectionStruct.cameraToQRcode.r11[0],QRcodeDetectionStruct.cameraToQRcode.r12[0],QRcodeDetectionStruct.cameraToQRcode.r13[0],QRcodeDetectionStruct.cameraToQRcode.r14[0]],
# [QRcodeDetectionStruct.cameraToQRcode.r21[0],QRcodeDetectionStruct.cameraToQRcode.r22[0],QRcodeDetectionStruct.cameraToQRcode.r23[0],QRcodeDetectionStruct.cameraToQRcode.r24[0]],
# [QRcodeDetectionStruct.cameraToQRcode.r31[0],QRcodeDetectionStruct.cameraToQRcode.r32[0],QRcodeDetectionStruct.cameraToQRcode.r33[0],QRcodeDetectionStruct.cameraToQRcode.r34[0]],
# [QRcodeDetectionStruct.cameraToQRcode.r41[0],QRcodeDetectionStruct.cameraToQRcode.r42[0],QRcodeDetectionStruct.cameraToQRcode.r43[0],QRcodeDetectionStruct.cameraToQRcode.r44[0]]]))
# print "Rotation matrix: camera in QRcode: \" %s \" : \n"%(QRcodeDetectionStruct.message[0]), matrix_camera_in_QR
# matrix_robot_in_QR = (np.dot(matrix_camera_in_QR,matrix_R_in_C_Rx))
# matrix_robot_in_QR = (np.dot(matrix_robot_in_QR,matrix_R_in_C_translation))
# matrix_robot_in_QR = (np.dot(matrix_robot_in_QR,matrix_R_in_C_Rz))
# print "Rotation matrix: Torso in QRcode: \" %s \" : \n"%(QRcodeDetectionStruct.message[0]), matrix_robot_in_QR
# # decompose matrix
# decomposed_robot_in_QR=self.decompose_matrix(matrix_robot_in_QR)
# decomposed_camera_in_QR=self.decompose_matrix(matrix_camera_in_QR)
# # send transform Camera in QRcode position
# self.tf_br.sendTransform((decomposed_camera_in_QR[0][0],decomposed_camera_in_QR[0][1],decomposed_camera_in_QR[0][2]),
# tf.transformations.quaternion_from_euler(decomposed_camera_in_QR[1][0],
# decomposed_camera_in_QR[1][1],
# decomposed_camera_in_QR[1][2]),
# rospy.Time.now(), "QR_camera", QRcodeDetectionStruct.message[0])
# # send transform Robot in QRcode position
# self.tf_br.sendTransform((decomposed_robot_in_QR[0][0],decomposed_robot_in_QR[0][1],decomposed_robot_in_QR[0][2]),
# tf.transformations.quaternion_from_euler(decomposed_robot_in_QR[1][0],
# decomposed_robot_in_QR[1][1],
# decomposed_robot_in_QR[1][2]),
# rospy.Time.now(), "QR_Torso", QRcodeDetectionStruct.message[0])
# self.marker_id = QRcodeDetectionStruct.message[0]
# return decomposed_robot_in_QR
# else:
# gotQRcode = False
# head_yaw += 3.14/5 # turn head + 0.628 rad | + 36 deg
# if head_yaw > head_yaw_max:
# head_yaw_max_set = 0
# if resetHeadPosition == True: # if no QRcode available in robot position, move robot +180 deg
# #moveNao = rospy.ServiceProxy('rapp_moveVel', MoveVel)
# self.rapp_move_vel_interface(0,0,0.4)
# # moveVel_resp = moveNao(0,0,0.4)
# rospy.sleep(3.14/0.4)
# self.rapp_move_vel_interface(0,0,0)
# self.rapp_take_predefined_pose_interface("Stand")
# #print "rapp_moveVel response: \n",moveVel_resp
# head_yaw_max_set = 2.08
# resetHeadPosition = True
# head_yaw = -2.08
# head_yaw_max = head_yaw_max_set
def moveOdom(self,matrix):
time = rospy.Time.now()
self.tf_br.sendTransform((matrix[0][0],matrix[0][1],matrix[0][2]),
tf.transformations.quaternion_from_euler(matrix[1][0],
matrix[1][1],
matrix[1][2]),
time, "QR_Torso", self.marker_id )
# self.tl.waitForTransform("map", "QR_Torso", rospy.Time(0), rospy.Duration(5.0))
# if self.tl.canTransform("QR_Torso",self.marker_id ,rospy.Time(0)):
rospy.sleep(3)
decomposed_matrix = matrix
decomposed_matrix_quaternion = tf.transformations.quaternion_from_euler(matrix[1][0],
matrix[1][1],
matrix[1][2])
poseMSG = PoseStamped()
poseMSG.header.seq = 0
poseMSG.header.stamp = rospy.Time.now()
poseMSG.header.frame_id = self.marker_id
poseMSG.pose.position.x = decomposed_matrix[0][0]
poseMSG.pose.position.y = decomposed_matrix[0][1]
poseMSG.pose.position.z = decomposed_matrix[0][2]
poseMSG.pose.orientation.x = decomposed_matrix_quaternion[0]
poseMSG.pose.orientation.y = decomposed_matrix_quaternion[1]
poseMSG.pose.orientation.z = decomposed_matrix_quaternion[2]
poseMSG.pose.orientation.w = decomposed_matrix_quaternion[3]
new_robot_matrix_in_map = self.tl.transformPose("map",poseMSG)
convert_to_ROS_transform = [[new_robot_matrix_in_map.pose.position.x,new_robot_matrix_in_map.pose.position.y,new_robot_matrix_in_map.pose.position.z]
,[new_robot_matrix_in_map.pose.orientation.x,new_robot_matrix_in_map.pose.orientation.y,
new_robot_matrix_in_map.pose.orientation.z,new_robot_matrix_in_map.pose.orientation.w]]
#transform_QR_map = self.tl.lookupTransform("map","QR_Torso", rospy.Time(0))
euler_transform_QR_map = tf.transformations.euler_from_quaternion(convert_to_ROS_transform[1])
# if self.tl.canTransform("odom","base_link",rospy.Time()):
transform_Nao_odom = self.tl.lookupTransform("base_link","odom", rospy.Time())
euler_transform_Nao_odom = tf.transformations.euler_from_quaternion(transform_Nao_odom[1])
#
# calculate new odom position, so Nao_T_odom will be in pointed position
#
matrix_Nao_odom= np.linalg.pinv(np.array([[np.cos(euler_transform_Nao_odom[2]),-np.sin(euler_transform_Nao_odom[2]),0,transform_Nao_odom[0][0]],
[np.sin(euler_transform_Nao_odom[2]),np.cos(euler_transform_Nao_odom[2]),0,transform_Nao_odom[0][1]],
[0,0,0,transform_Nao_odom[0][2]],
[0,0,0,1]]))
self.odom_transformation.position = [convert_to_ROS_transform[0][0]+np.cos(euler_transform_QR_map[2])*transform_Nao_odom[0][0]-np.sin(euler_transform_QR_map[2])*transform_Nao_odom[0][1],
convert_to_ROS_transform[0][1]+np.sin(euler_transform_QR_map[2])*transform_Nao_odom[0][0]+np.cos(euler_transform_QR_map[2])*transform_Nao_odom[0][1],
0]
self.odom_transformation.orientation = tf.transformations.quaternion_from_euler(0,0,euler_transform_Nao_odom[2]+euler_transform_QR_map[2])
# if self.tl.canTransform("odom","base_link",rospy.Time()):
# transform_Nao_odom_combined = self.tl.lookupTransform("base_link","odom_combined", rospy.Time())
# euler_transform_Nao_odom_combined = tf.transformations.euler_from_quaternion(transform_Nao_odom_combined[1])
# #
# # calculate new odom position, so Nao_T_odom will be in pointed position
# #
# matrix_Nao_odom= np.linalg.pinv(np.array([[np.cos(euler_transform_Nao_odom_combined[2]),-np.sin(euler_transform_Nao_odom_combined[2]),0,transform_Nao_odom_combined[0][0]],
# [np.sin(euler_transform_Nao_odom_combined[2]),np.cos(euler_transform_Nao_odom_combined[2]),0,transform_Nao_odom_combined[0][1]],
# [0,0,0,transform_Nao_odom_combined[0][2]],
# [0,0,0,1]]))
# self.odom_transformation.position = [transform_QR_map[0][0]+np.cos(euler_transform_QR_map[2])*transform_Nao_odom_combined[0][0]-np.sin(euler_transform_QR_map[2])*transform_Nao_odom_combined[0][1],
# transform_QR_map[0][1]+np.sin(euler_transform_QR_map[2])*transform_Nao_odom_combined[0][0]+np.cos(euler_transform_QR_map[2])*transform_Nao_odom_combined[0][1],
# 0]
# self.odom_transformation.orientation = tf.transformations.quaternion_from_euler(0,0,euler_transform_Nao_odom_combined[2]+euler_transform_QR_map[2])#matrix_Nao_odom[0][1]/matrix_Nao_odom[0][0])#+self.euler_initial[2])
# def decompose_matrix(self,matrix):
# euler_x = np.arctan2(matrix[2][1],matrix[2][2])
# euler_y = np.arctan2(-matrix[2][0],np.sqrt(matrix[2][1]*matrix[2][1]+matrix[2][2]*matrix[2][2]))
# euler_z = np.arctan2(matrix[1][0],matrix[0][0])
# position_x = matrix[0][3]
# position_y = matrix[1][3]
# position_z = matrix[2][3]
# decomposed_matrix = np.array([[position_x,position_y,position_z],[euler_x,euler_y,euler_z]])
# return (decomposed_matrix)
def publishOdom(self):
self.tf_br.sendTransform(self.odom_transformation.position, self.odom_transformation.orientation,
rospy.Time.now(), "odom", "map")
def signal_handler(signal, frame):
print "[Estimator server] - signal SIGINT caught"
print "[Estimator server] - system exits"
sys.exit(0)
if __name__ == '__main__':
try:
signal.signal(signal.SIGINT, signal_handler)
print "[Estimator server] - Press Ctrl + C to exit system correctly"
myBroker = ALBroker("myBroker", "0.0.0.0", 0, Constants.NAO_IP,Constants.PORT)
global estimator
estimator = NaoEstimator("estimator")
TfRate = rospy.Rate(10)
while not rospy.is_shutdown():
estimator.publishOdom()
TfRate.sleep()
except (KeyboardInterrupt, SystemExit):
print "[Estimator server] - SystemExit Exception caught"
myBroker.shutdown()
sys.exit(0)
except Exception, ex:
print "[Estimator server] - Exception caught %s" % str(ex)
myBroker.shutdown()
sys.exit(0)
|
{"hexsha": "9bd355fe52abfc1da48af0219701738858737282", "size": 21111, "ext": "py", "lang": "Python", "max_stars_repo_path": "rapp_ros_naoqi_wrappings/nodes/rapp_navigation/acore_estimator_server.py", "max_stars_repo_name": "rapp-project/rapp-robot-nao", "max_stars_repo_head_hexsha": "588061c630b1a8f69791eb603ff52db1e3e07058", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-04-27T03:58:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-01T23:34:21.000Z", "max_issues_repo_path": "rapp_ros_naoqi_wrappings/nodes/rapp_navigation/acore_estimator_server.py", "max_issues_repo_name": "rapp-project/rapp-robot-nao", "max_issues_repo_head_hexsha": "588061c630b1a8f69791eb603ff52db1e3e07058", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2016-12-26T18:03:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-22T09:44:36.000Z", "max_forks_repo_path": "rapp_ros_naoqi_wrappings/nodes/rapp_navigation/acore_estimator_server.py", "max_forks_repo_name": "rapp-project/rapp-robot-nao", "max_forks_repo_head_hexsha": "588061c630b1a8f69791eb603ff52db1e3e07058", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-09T12:07:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-09T12:07:13.000Z", "avg_line_length": 46.8093126386, "max_line_length": 233, "alphanum_fraction": 0.6844772867, "include": true, "reason": "import numpy", "num_tokens": 6616}
|
import math
def RP(x,y,z):
s = 1;
while(x>0):
if(x%2==1):
s=(s*y)%z;
x=x//2;
y=(y*y)%z;
return int(s);
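# Quick sanity check for RP (square-and-multiply modular exponentiation,
# RP(x, y, z) = y**x mod z): RP(10, 3, 7) = 3**10 mod 7 = 59049 mod 7 = 4.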
def egcd(a, b):
u1=1;
v1=0;
u2=0;
v2=1;
while(b>0):
r=a%b
q=a//b
a=b;
b=r;
u1prime = u1
v1prime = v1
u2prime = u2
v2prime = v2
u1=u2;
v1=v2;
u2=(u1prime - (u2prime * q));
v2= (v1prime - (v2prime * q));
return u1, v1;
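# egcd(a, b) returns Bezout coefficients (u, v) with u*a + v*b = gcd(a, b),
# e.g. egcd(3, 26) == (9, -1) since 9*3 - 26 = 1; hence 9 is the inverse of
# 3 modulo 26, which is how silverPohligHellman derives aInv below.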
def isPrime(y):
h = 2;
while h <= math.ceil(y/2):
if y % h == 0:
return 0;
h += 1;
return 1
def computeM (p):
m = math.ceil(math.sqrt(p-1));
return m;
def factors(F):
X = [];
while(F != []):
X.append((F[0],F.count(F[0])));
F=F[F.count(F[0]):len(F)];
return X;
def silverPohligHellman(P, a, b, n):
A = [];
u = 0;
(aInv, _) = egcd(a, n);
aInv = aInv % n;
c = n-1;
r = b;
for i in range(0, len(P)):
x = RP((c/(P[i][0])), a, n);
for j in range(0, (P[i][1])):
y=RP((c/((P[i][0])**(j+1))), b, n);
for k in range(0, (P[i][0])):
if pow(x, k, n) == y:
u += k*(P[i][0]**j);
break;
b=(b*RP((P[i][0]**j)*k,aInv,n)) % n;
A.append(u);
u = 0;
b = r;
return A;
def compute(X, P):
x = 0;
N = 1;
Y = [];
for k in range(0, len(P)):
Y.append(P[k][0]**P[k][1]);
for j in range(0, len(Y)):
N *= Y[j];
for i in range(0, len(Y)):
(s, _) = egcd(int(N/Y[i]), Y[i]);
e = s*(int(N/Y[i]));
x += X[i]*e;
return x;
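# compute() recombines the per-prime-power residues via the Chinese Remainder
# Theorem. Worked example: with P = [(2, 1), (3, 2)] the moduli are [2, 9]
# and N = 18; for X = [1, 5] (x = 1 mod 2, x = 5 mod 9) it returns
# 1*9 + 5*(-8) = -31, which is 5 modulo 18.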
def factor(x, primeFactors, i):
j = 2;
if x == 1:
return;
else:
while j<math.ceil(x/2):
if(x%j)==0:
if isPrime(j):
primeFactors.append(j);
if isPrime(x/j):
primeFactors.append(x/j);
return;
else:
i+=1;
factor((x/primeFactors[(i-1)]),primeFactors,i);
return;
j += 1;
return
F = [];
print "Solving DLP for a^x=b mod n"
print "Enter a: ";
a = int(raw_input());
print "Enter b: ";
b = int(raw_input());
print "Enter n(n-1 should have small prime factors):";
n = int(raw_input());
factor(n-1, F, 0);
F = factors(F); #power of the prime factors
Z = silverPohligHellman(F, a, b, n);
x = compute(Z, F);
if(x<0):
x=x+(n-1)
print "x = " + str(x);
|
{"hexsha": "b31e5c03ee58998b825a666af1837105a2efffd4", "size": 2851, "ext": "py", "lang": "Python", "max_stars_repo_path": "Modules/15IT152_15IT119_M2/15IT152_15It119_M2_attacks/silver-pohlig-hellman.py", "max_stars_repo_name": "HimadriP/RSA_attacks", "max_stars_repo_head_hexsha": "db58478a33f0dffe492b9a626bf0a4a055682690", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Modules/15IT152_15IT119_M2/15IT152_15It119_M2_attacks/silver-pohlig-hellman.py", "max_issues_repo_name": "HimadriP/RSA_attacks", "max_issues_repo_head_hexsha": "db58478a33f0dffe492b9a626bf0a4a055682690", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Modules/15IT152_15IT119_M2/15IT152_15It119_M2_attacks/silver-pohlig-hellman.py", "max_forks_repo_name": "HimadriP/RSA_attacks", "max_forks_repo_head_hexsha": "db58478a33f0dffe492b9a626bf0a4a055682690", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7633587786, "max_line_length": 73, "alphanum_fraction": 0.3605752368, "include": true, "reason": "from sage", "num_tokens": 910}
|
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn.svm import LinearSVC
from neuraxle.base import ExecutionContext
from neuraxle.data_container import DataContainer
from neuraxle.hyperparams.distributions import FixedHyperparameter, RandInt
from neuraxle.hyperparams.space import HyperparameterSpace
from neuraxle.metaopt.auto_ml import InMemoryHyperparamsRepository, AutoML, RandomSearchHyperparameterSelectionStrategy, \
HyperparamsJSONRepository, \
ValidationSplitter, KFoldCrossValidationSplitter, Trainer
from neuraxle.metaopt.callbacks import MetricCallback, ScoringCallback, EarlyStoppingCallback
from neuraxle.metaopt.trial import Trial
from neuraxle.pipeline import Pipeline
from neuraxle.steps.misc import FitTransformCallbackStep
from neuraxle.steps.numpy import MultiplyByN, NumpyReshape
from neuraxle.steps.sklearn import SKLearnWrapper
def test_automl_early_stopping_callback(tmpdir):
# Given
hp_repository = InMemoryHyperparamsRepository(cache_folder=str(tmpdir))
n_epochs = 10
max_epochs_without_improvement=3
auto_ml = AutoML(
pipeline=Pipeline([
MultiplyByN(2).set_hyperparams_space(HyperparameterSpace({
'multiply_by': FixedHyperparameter(2)
})),
NumpyReshape(new_shape=(-1, 1)),
]),
hyperparams_optimizer=RandomSearchHyperparameterSelectionStrategy(),
validation_splitter=ValidationSplitter(0.20),
scoring_callback=ScoringCallback(mean_squared_error, higher_score_is_better=False),
callbacks=[
MetricCallback('mse', metric_function=mean_squared_error, higher_score_is_better=False),
EarlyStoppingCallback(max_epochs_without_improvement)
],
n_trials=1,
refit_trial=True,
epochs=n_epochs,
hyperparams_repository=hp_repository,
continue_loop_on_error=False
)
# When
data_inputs = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
expected_outputs = data_inputs * 2
auto_ml.fit(data_inputs=data_inputs, expected_outputs=expected_outputs)
# Then
trial = hp_repository.trials[0]
assert len(trial.validation_splits) == 1
validation_scores = trial.validation_splits[0].get_validation_scores()
nepochs_executed = len(validation_scores)
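# The pipeline's only hyperparameter is fixed, so the validation score is
# identical at every epoch; nothing ever improves on epoch 1, and early
# stopping therefore halts after max_epochs_without_improvement further
# epochs (hence the +1 below).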
assert nepochs_executed == max_epochs_without_improvement + 1
def test_automl_with_kfold(tmpdir):
# Given
hp_repository = HyperparamsJSONRepository(cache_folder='caching')
auto_ml = AutoML(
pipeline=Pipeline([
MultiplyByN(2).set_hyperparams_space(HyperparameterSpace({
'multiply_by': FixedHyperparameter(2)
})),
NumpyReshape(new_shape=(-1, 1)),
linear_model.LinearRegression()
]),
validation_splitter=ValidationSplitter(0.20),
hyperparams_optimizer=RandomSearchHyperparameterSelectionStrategy(),
scoring_callback=ScoringCallback(mean_squared_error, higher_score_is_better=False),
callbacks=[
MetricCallback('mse', metric_function=mean_squared_error,
higher_score_is_better=False),
],
n_trials=1,
epochs=10,
refit_trial=True,
print_func=print,
hyperparams_repository=hp_repository,
continue_loop_on_error=False
)
data_inputs = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
expected_outputs = data_inputs * 4
# When
auto_ml.fit(data_inputs=data_inputs, expected_outputs=expected_outputs)
# Then
p = auto_ml.get_best_model()
outputs = p.transform(data_inputs)
mse = mean_squared_error(expected_outputs, outputs)
assert mse < 1000
def test_validation_splitter_should_split_data_properly():
# Given
data_inputs = np.random.random((4, 2, 2048, 6)).astype(np.float32)
expected_outputs = np.random.random((4, 2, 2048, 1)).astype(np.float32)
splitter = ValidationSplitter(test_size=0.2)
# When
validation_splits = splitter.split_data_container(
data_container=DataContainer(data_inputs=data_inputs, expected_outputs=expected_outputs),
context=ExecutionContext()
)
train_di, train_eo, validation_di, validation_eo = extract_validation_split_data(validation_splits)
train_di = train_di[0]
train_eo = train_eo[0]
validation_di = validation_di[0]
validation_eo = validation_eo[0]
# Then
assert len(train_di) == 3
assert np.array_equal(np.array(train_di), data_inputs[0:3])
assert len(train_eo) == 3
assert np.array_equal(np.array(train_eo), expected_outputs[0:3])
assert len(validation_di) == 1
assert np.array_equal(validation_di[0], data_inputs[-1])
assert len(validation_eo) == 1
assert np.array_equal(validation_eo[0], expected_outputs[-1])
def test_kfold_cross_validation_should_split_data_properly():
# Given
data_inputs = np.random.random((4, 2, 2048, 6)).astype(np.float32)
expected_outputs = np.random.random((4, 2, 2048, 1)).astype(np.float32)
splitter = KFoldCrossValidationSplitter(k_fold=4)
# When
validation_splits = splitter.split_data_container(
data_container=DataContainer(data_inputs=data_inputs, expected_outputs=expected_outputs),
context=ExecutionContext()
)
train_di, train_eo, validation_di, validation_eo = extract_validation_split_data(validation_splits)
# Then
assert len(train_di[0]) == 3
assert np.array_equal(np.array(train_di[0]), data_inputs[1:])
assert len(train_eo[0]) == 3
assert np.array_equal(np.array(train_eo[0]), expected_outputs[1:])
assert len(train_di[1]) == 3
assert np.array_equal(np.array(train_di[1]),
np.concatenate((np.expand_dims(data_inputs[0], axis=0), data_inputs[2:]), axis=0))
assert len(train_eo[1]) == 3
assert np.array_equal(np.array(train_eo[1]),
np.concatenate((np.expand_dims(expected_outputs[0], axis=0), expected_outputs[2:]), axis=0))
assert len(train_di[2]) == 3
assert np.array_equal(np.array(train_di[2]),
np.concatenate((data_inputs[0:2], np.expand_dims(data_inputs[3], axis=0)), axis=0))
assert len(train_eo[2]) == 3
assert np.array_equal(np.array(train_eo[2]),
np.concatenate((expected_outputs[0:2], np.expand_dims(expected_outputs[3], axis=0)), axis=0))
assert len(train_di[3]) == 3
assert np.array_equal(np.array(train_di[3]), data_inputs[0:3])
assert len(train_eo[3]) == 3
assert np.array_equal(np.array(train_eo[3]), expected_outputs[0:3])
assert len(validation_di[0]) == 1
assert np.array_equal(validation_di[0][0], data_inputs[0])
assert len(validation_eo[0]) == 1
assert np.array_equal(validation_eo[0][0], expected_outputs[0])
assert len(validation_di[1]) == 1
assert np.array_equal(validation_di[1][0], data_inputs[1])
assert len(validation_eo[1]) == 1
assert np.array_equal(validation_eo[1][0], expected_outputs[1])
assert len(validation_di[2]) == 1
assert np.array_equal(validation_di[2][0], data_inputs[2])
assert len(validation_eo[2]) == 1
assert np.array_equal(validation_eo[2][0], expected_outputs[2])
assert len(validation_di[3]) == 1
assert np.array_equal(validation_di[3][0], data_inputs[3])
assert len(validation_eo[3]) == 1
assert np.array_equal(validation_eo[3][0], expected_outputs[3])
def test_kfold_cross_validation_should_split_data_properly_bug():
data_inputs = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
expected_outputs = np.array([0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40])
data_container = DataContainer(
data_inputs=data_inputs,
expected_outputs=expected_outputs
)
splitter = KFoldCrossValidationSplitter(k_fold=2)
# When
validation_splits = splitter.split_data_container(data_container, ExecutionContext())
train_di, train_eo, validation_di, validation_eo = extract_validation_split_data(validation_splits)
# Then
assert len(train_di[0]) == 6
assert np.array_equal(np.array(train_di[0]), data_inputs[5:])
assert len(train_eo[0]) == 6
assert np.array_equal(np.array(train_eo[0]), expected_outputs[5:])
assert len(train_di[1]) == 5
assert np.array_equal(
np.array(train_di[1]),
data_inputs[:5]
)
assert len(train_eo[1]) == 5
assert np.array_equal(
np.array(train_eo[1]),
expected_outputs[:5]
)
assert len(validation_di[0]) == 5
assert np.array_equal(np.array(validation_di[0]), data_inputs[:5])
assert len(validation_eo[0]) == 5
assert np.array_equal(np.array(validation_eo[0]), expected_outputs[:5])
assert len(validation_di[1]) == 6
assert np.array_equal(np.array(validation_di[1]), data_inputs[5:])
assert len(validation_eo[1]) == 6
assert np.array_equal(validation_eo[1], expected_outputs[5:])
def extract_validation_split_data(validation_splits):
train_di = []
train_eo = []
validation_di = []
validation_eo = []
for train_dc, validation_dc in validation_splits:
train_di.append(train_dc.data_inputs)
train_eo.append(train_dc.expected_outputs)
validation_di.append(validation_dc.data_inputs)
validation_eo.append(validation_dc.expected_outputs)
return train_di, train_eo, validation_di, validation_eo
def test_automl_should_shallow_copy_data_before_each_epoch():
# see issue #332 https://github.com/Neuraxio/Neuraxle/issues/332
data_inputs = np.random.randint(0, 100, (100, 3))
expected_outputs = np.random.randint(0, 3, 100)
from sklearn.preprocessing import StandardScaler
p = Pipeline([
SKLearnWrapper(StandardScaler()),
SKLearnWrapper(LinearSVC(), HyperparameterSpace({'C': RandInt(0, 10000)})),
])
auto_ml = AutoML(
p,
validation_splitter=ValidationSplitter(0.20),
refit_trial=True,
n_trials=10,
epochs=10,
cache_folder_when_no_handle='cache',
scoring_callback=ScoringCallback(mean_squared_error, higher_score_is_better=False),
callbacks=[MetricCallback('mse', metric_function=mean_squared_error, higher_score_is_better=False)],
hyperparams_repository=InMemoryHyperparamsRepository(
cache_folder='cache'),
continue_loop_on_error=False
)
random_search = auto_ml.fit(data_inputs, expected_outputs)
best_model = random_search.get_best_model()
assert isinstance(best_model, Pipeline)
def test_trainer_train():
data_inputs = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
expected_outputs = data_inputs * 4
p = Pipeline([
MultiplyByN(2).set_hyperparams_space(HyperparameterSpace({
'multiply_by': FixedHyperparameter(2)
})),
NumpyReshape(new_shape=(-1, 1)),
linear_model.LinearRegression()
])
trainer: Trainer = Trainer(
epochs=10,
scoring_callback=ScoringCallback(mean_squared_error, higher_score_is_better=False),
validation_splitter=ValidationSplitter(test_size=0.20)
)
repo_trial: Trial = trainer.train(pipeline=p, data_inputs=data_inputs, expected_outputs=expected_outputs)
trained_pipeline = repo_trial.get_trained_pipeline(split_number=0)
outputs = trained_pipeline.transform(data_inputs)
mse = mean_squared_error(expected_outputs, outputs)
assert mse < 1
|
{"hexsha": "a29d1e69dd9e073eec78e4d6662e3a53246348df", "size": 11526, "ext": "py", "lang": "Python", "max_stars_repo_path": "testing/metaopt/test_automl.py", "max_stars_repo_name": "vincent-antaki/Neuraxle", "max_stars_repo_head_hexsha": "cef1284a261010c655f8ef02b4fca5b8bb45850c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "testing/metaopt/test_automl.py", "max_issues_repo_name": "vincent-antaki/Neuraxle", "max_issues_repo_head_hexsha": "cef1284a261010c655f8ef02b4fca5b8bb45850c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testing/metaopt/test_automl.py", "max_forks_repo_name": "vincent-antaki/Neuraxle", "max_forks_repo_head_hexsha": "cef1284a261010c655f8ef02b4fca5b8bb45850c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7901639344, "max_line_length": 122, "alphanum_fraction": 0.7058823529, "include": true, "reason": "import numpy", "num_tokens": 2879}
|
"""Data utility functions."""
import os
import numpy as np
import scipy.io
import torch
import torch.utils.data as data
import h5py
class ImdbData(data.Dataset):
def __init__(self, X, y, w):
self.X = X
self.y = y
self.w = w
def __getitem__(self, index):
img = self.X[index]
label = self.y[index]
weight = self.w[index]
img = torch.from_numpy(img)
label = torch.from_numpy(label)
weight = torch.from_numpy(weight)
return img, label, weight
def __len__(self):
return len(self.y)
def get_imdb_data():
# TODO: Need to change later
NumClass = 9
# Load DATA
# Data = h5py.File('C:/Users/krive/PycharmProjects/relaynet_pytorch/datasets/Data.h5', 'r')
# a_group_key = list(Data.keys())[0]
# Data = list(Data[a_group_key])
# Data = np.squeeze(np.asarray(Data))
# Label = h5py.File('C:/Users/krive/PycharmProjects/relaynet_pytorch/datasets/label.h5', 'r')
# a_group_key = list(Label.keys())[0]
# Label = list(Label[a_group_key])
# Label = np.squeeze(np.asarray(Label))
# set = h5py.File('C:/Users/krive/PycharmProjects/relaynet_pytorch/datasets/set.h5', 'r')
# a_group_key = list(set.keys())[0]
# set = list(set[a_group_key])
# set = np.squeeze(np.asarray(set))
# sz = Data.shape
# print(sz)
# Data = Data.reshape([sz[0], 1, sz[1], sz[2]])
# Data = Data[:, :, 61:573, :]
# weights = Label[:, 1, 61:573, :]
# Label = Label[:, 0, 61:573, :]
# sz = Label.shape
# Label = Label.reshape([sz[0], 1, sz[1], sz[2]])
# weights = weights.reshape([sz[0], 1, sz[1], sz[2]])
# train_id = set == 1
# test_id = set == 3
#
# Tr_Dat = Data[train_id, :, :, :]
# Tr_Label = np.squeeze(Label[train_id, :, :, :]) - 1 # Index from [0-(NumClass-1)]
# Tr_weights = weights[train_id, :, :, :]
# Tr_weights = np.tile(Tr_weights, [1, NumClass, 1, 1])
#
# Te_Dat = Data[test_id, :, :, :]
# Te_Label = np.squeeze(Label[test_id, :, :, :]) - 1
# Te_weights = weights[test_id, :, :, :]
# Te_weights = np.tile(Te_weights, [1, NumClass, 1, 1])
input = scipy.io.loadmat('C:/Users/MASTER/PycharmProjects/inputParser/img.mat')
Data = input['img']
sz = Data.shape
Data = Data.reshape(sz[0], 1, sz[1], sz[2])
testData = Data[100:110, :, 128:640, 120:376]
trainData = Data[0:100, :, 128:640, 120:376]
lbl = scipy.io.loadmat('C:/Users/MASTER/PycharmProjects/inputParser/label.mat')
lbl = lbl['label']
sz = lbl.shape
lbl = lbl.reshape(sz[0], sz[1], sz[2])
trainlbl = lbl[0:100, 128:640, 120:376]
testlbl = lbl[100:110, 128:640, 120:376]
weights = scipy.io.loadmat('C:/Users/MASTER/PycharmProjects/inputParser/weights.mat')
weights = weights['weights']
weights = weights.reshape(sz[0], sz[1], sz[2])
trainweights = weights[0:100, 128:640, 120:376]
testweights = weights[100:110, 128:640, 120:376]
trainData = trainData.astype(np.float32)
testData = testData.astype(np.float32)
trainlbl = trainlbl.astype(np.float32)
testlbl = testlbl.astype(np.float32)
trainweights = trainweights.astype(np.float32)
testweights = testweights.astype(np.float32)
return (ImdbData(trainData, trainlbl, trainweights),
ImdbData(testData, testlbl, testweights))
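# Minimal usage sketch (the .mat paths above are machine-specific and must
# exist; the batch size is illustrative):
#   train_set, test_set = get_imdb_data()
#   loader = data.DataLoader(train_set, batch_size=4, shuffle=True)
#   img, label, weight = train_set[0]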
|
{"hexsha": "f6416ecd1ab0a3fe4e43792d677eec16a0e74dbd", "size": 3350, "ext": "py", "lang": "Python", "max_stars_repo_path": "networks/data_utils.py", "max_stars_repo_name": "Nikolay1998/relaynet_pytorch", "max_stars_repo_head_hexsha": "b4eecc28020b2a7f7a8cf9618558f968788b14c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-22T14:29:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-06T16:28:51.000Z", "max_issues_repo_path": "networks/data_utils.py", "max_issues_repo_name": "Nikolay1998/relaynet_pytorch", "max_issues_repo_head_hexsha": "b4eecc28020b2a7f7a8cf9618558f968788b14c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "networks/data_utils.py", "max_forks_repo_name": "Nikolay1998/relaynet_pytorch", "max_forks_repo_head_hexsha": "b4eecc28020b2a7f7a8cf9618558f968788b14c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2631578947, "max_line_length": 97, "alphanum_fraction": 0.612238806, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1037}
|
r"""Diffusion of an acoustic wave in 1-d (5 minutes)
Propagation of acoustic wave
particles have properties according
to the following distribuion
.. math::
\rho = \rho_0 + \Delta\rho sin(kx)
p = p_0 + c_0^2\Delta\rho sin(kx)
u = c_0\rho_0^{-1}\Delta\rho sin(kx)
with :math:`\Delta\rho = 1e-6` and :math:`k = 2\pi/\lambda`
where :math:`\lambda` is the domain length.
.. math::
\rho_0 = \gamma = 1.4 and p_0 = 1.0
"""
# standard library and numpy imports
import numpy
# pysph imports
from pysph.base.utils import get_particle_array as gpa
from pysph.base.nnps import DomainManager
from pysph.solver.application import Application
from pysph.sph.scheme import \
GSPHScheme, ADKEScheme, GasDScheme, SchemeChooser
from pysph.sph.wc.crksph import CRKSPHScheme
class AcousticWave(Application):
def initialize(self):
self.xmin = 0.
self.xmax = 1.
self.gamma = 1.4
self.rho_0 = self.gamma
self.p_0 = 1.
self.c_0 = 1.
self.delta_rho = 1e-6
self.n_particles = 8
self.domain_length = self.xmax - self.xmin
self.k = -2 * numpy.pi / self.domain_length
self.cfl = 0.1
self.hdx = 1.0
self.dt = 1e-3
self.tf = 5
self.dim = 1
def create_domain(self):
return DomainManager(
xmin=0, xmax=1, periodic_in_x=True
)
def add_user_options(self, group):
group.add_argument(
"--nparticles", action="store", type=int, dest="nprt", default=256,
help="Number of particles in domain"
)
def consume_user_options(self):
self.n_particles = self.options.nprt
self.dx = self.domain_length / (self.n_particles)
self.dt = self.cfl * self.dx / self.c_0
def create_particles(self):
x = numpy.arange(
self.xmin + self.dx*0.5, self.xmax, self.dx
)
rho = self.rho_0 + self.delta_rho *\
numpy.sin(self.k * x)
p = self.p_0 + self.c_0**2 *\
self.delta_rho * numpy.sin(self.k * x)
u = self.c_0 * self.delta_rho * numpy.sin(self.k * x) /\
self.rho_0
cs = numpy.sqrt(
self.gamma * p / rho
)
h = numpy.ones_like(x) * self.dx * self.hdx
m = numpy.ones_like(x) * self.dx * rho
e = p / ((self.gamma - 1) * rho)
fluid = gpa(
name='fluid', x=x, p=p, rho=rho, u=u, h=h, m=m, e=e, cs=cs,
h0=h.copy()
)
self.scheme.setup_properties([fluid])
return [fluid, ]
def create_scheme(self):
gsph = GSPHScheme(
fluids=['fluid'], solids=[], dim=self.dim,
gamma=self.gamma, kernel_factor=1.0,
g1=0., g2=0., rsolver=7, interpolation=1, monotonicity=1,
interface_zero=True, hybrid=False, blend_alpha=5.0,
niter=40, tol=1e-6, has_ghosts=True
)
mpm = GasDScheme(
fluids=['fluid'], solids=[], dim=self.dim, gamma=self.gamma,
kernel_factor=1.2, alpha1=0, alpha2=0,
beta=2.0, update_alpha1=False, update_alpha2=False,
has_ghosts=True
)
crksph = CRKSPHScheme(
fluids=['fluid'], dim=self.dim, rho0=0, c0=0, nu=0, h0=0, p0=0,
gamma=self.gamma, cl=2, has_ghosts=True
)
adke = ADKEScheme(
fluids=['fluid'], solids=[], dim=self.dim, gamma=self.gamma,
alpha=0, beta=0.0, k=1.5, eps=0.0, g1=0.0, g2=0.0,
has_ghosts=True)
s = SchemeChooser(
default='gsph', gsph=gsph, mpm=mpm, crksph=crksph, adke=adke
)
return s
def configure_scheme(self):
s = self.scheme
if self.options.scheme == 'gsph':
s.configure_solver(
dt=self.dt, tf=self.tf,
adaptive_timestep=True, pfreq=50
)
if self.options.scheme == 'mpm':
s.configure(kernel_factor=1.2)
s.configure_solver(dt=self.dt, tf=self.tf,
adaptive_timestep=False, pfreq=50)
if self.options.scheme == 'crksph':
s.configure_solver(dt=self.dt, tf=self.tf,
adaptive_timestep=False, pfreq=50)
if self.options.scheme == 'adke':
s.configure_solver(dt=self.dt, tf=self.tf,
adaptive_timestep=False, pfreq=50)
def post_process(self):
from pysph.solver.utils import load
if len(self.output_files) < 1:
return
outfile = self.output_files[-1]
data = load(outfile)
pa = data['arrays']['fluid']
x_c = pa.x
u = self.c_0 * self.delta_rho * numpy.sin(self.k * x_c) /\
self.rho_0
u_c = pa.u
l_inf = numpy.max(
numpy.abs(u_c - u)
)
l_1 = (numpy.sum(
numpy.abs(u_c - u)
) / self.n_particles)
print("L_inf norm of velocity for the problem: %s" % (l_inf))
print("L_1 norm of velocity for the problem: %s" % (l_1))
rho = self.rho_0 + self.delta_rho *\
numpy.sin(self.k * x_c)
rho_c = pa.rho
l1 = numpy.sum(
numpy.abs(rho - rho_c)
)
l1 = l1 / self.n_particles
print("l_1 norm of density for the problem: %s" % (l1))
if __name__ == "__main__":
app = AcousticWave()
app.run()
app.post_process()
|
{"hexsha": "517b4b735ff29ae4d1227aadc7c588ccd834345e", "size": 5467, "ext": "py", "lang": "Python", "max_stars_repo_path": "pysph/examples/gas_dynamics/acoustic_wave.py", "max_stars_repo_name": "nauaneed/pysph", "max_stars_repo_head_hexsha": "9cb9a859934939307c65a25cbf73e4ecc83fea4a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 293, "max_stars_repo_stars_event_min_datetime": "2017-05-26T14:41:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T09:56:16.000Z", "max_issues_repo_path": "pysph/examples/gas_dynamics/acoustic_wave.py", "max_issues_repo_name": "nauaneed/pysph", "max_issues_repo_head_hexsha": "9cb9a859934939307c65a25cbf73e4ecc83fea4a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 217, "max_issues_repo_issues_event_min_datetime": "2017-05-29T15:48:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T16:16:55.000Z", "max_forks_repo_path": "pysph/examples/gas_dynamics/acoustic_wave.py", "max_forks_repo_name": "nauaneed/pysph", "max_forks_repo_head_hexsha": "9cb9a859934939307c65a25cbf73e4ecc83fea4a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 126, "max_forks_repo_forks_event_min_datetime": "2017-05-25T19:17:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:23:24.000Z", "avg_line_length": 30.7134831461, "max_line_length": 79, "alphanum_fraction": 0.5487470276, "include": true, "reason": "import numpy", "num_tokens": 1534}
|
#!/usr/bin/python
"""
Test to compare PPF loop calculations against Maryam's MATLAB code
"""
import numpy as np
from riglib.bmi import ppfdecoder, state_space_models as ssm
from scipy.io import loadmat, savemat
from riglib.bmi.sim_neurons import PointProcessEnsemble
import matplotlib.pyplot as plt
from riglib.bmi import state_space_models
import imp
imp.reload(ppfdecoder)
data = loadmat('sample_spikes_and_kinematics_10000.mat')
hand_vel = data['hand_vel']
beta = data['beta']
beta = np.vstack([beta[1:, :], beta[0,:]])
spike_counts = data['spike_counts']
hand_vel = data['hand_vel']
n_iter = 10000
T_loop = 0.005
Delta_KF = 0.1
a_kf = 0.8
w_kf = 0.0007
A_kf = np.diag([a_kf, a_kf, 1])
W_kf = np.diag([w_kf, w_kf, 0])
A, W = ssm.resample_ssm(A_kf, W_kf, Delta_old=Delta_KF, Delta_new=T_loop)
a_ppf = 9.889048329050316e-01
w_ppf = 4.290850 * 1e-05
# Instantiate the PPF
##ppf = ppfdecoder.PointProcessFilter(A, W, beta, T_loop)
##ppf._init_state()
##point_proc = PointProcessEnsemble(beta, T_loop)
##decoded_output = np.zeros([3, n_iter])
##for idx in range(1, n_iter):
## # TODO generate spike counts from point process simulator, not file!
## y_t = point_proc(hand_vel[:,idx])
## ppf(y_t)
## decoded_output[:,idx] = ppf.get_mean()
##
##plt.figure()
##plt.hold(True)
##plt.plot(decoded_output[0,:])
##plt.plot(hand_vel[0,:n_iter])
##plt.show()
# Compare against MATLAB data
states = ['hand_px', 'hand_py', 'hand_pz', 'hand_vx', 'hand_vy', 'hand_vz', 'offset']
decoding_states = ['hand_vx', 'hand_vz', 'offset']
# TODO transpose beta matrix
truedata = loadmat('/Users/sgowda/bmi/workspace/adaptive_ppf/ppf_test_case_matlab_output.mat')
a_ppf = truedata['A'][0,0]
w_ppf = truedata['W'][0,0]
A = np.mat(np.diag([a_ppf, a_ppf, 1]))
W = np.mat(np.diag([w_ppf, w_ppf, 0]))
# Instantiate PPF
ppf = ppfdecoder.PointProcessFilter(A, W, beta.T, T_loop)
ppf._init_state()
decoded_output = np.zeros([3, n_iter])
for idx in range(1, n_iter):
ppf(spike_counts[idx, :])
decoded_output[:,idx] = ppf.get_mean()
x_est = truedata['x_est']
plt.figure()
# axes hold by default in Matplotlib >= 2.0 (plt.hold no longer exists)
plt.plot(x_est[0,:n_iter], label='matlab')
plt.plot(hand_vel[0,:n_iter], label='handvel')
plt.plot(decoded_output[0,:], label='pyth')
plt.legend()
plt.show()
print(np.max(np.abs(x_est[0,:n_iter] - decoded_output[0,:])))
# TODO expand A, W, C to same dimensions as for KF
def inflate(A, current_states, full_state_ls):
nS = len(full_state_ls)#A.shape[0]
A_new = np.zeros([nS, A.shape[1]])
new_inds = [full_state_ls.index(x) for x in current_states]
A_new[new_inds, :] = A
return A_new
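# What inflate() does here: given the `states` list above, hand_vx, hand_vz
# and offset sit at indices 3, 5 and 6, so inflate(beta, decoding_states,
# states) maps the (3, n_neurons) beta array to a (7, n_neurons) array whose
# rows 3, 5 and 6 hold the original coefficients and whose other rows are zero.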
dt = 0.005
A, W = state_space_models.linear_kinarm_kf(update_rate=dt, units_mult=1)
n_neurons = beta.shape[1]
beta = inflate(beta, decoding_states, states)
#W = inflate(W, decoding_states, states)
ppf = ppfdecoder.PointProcessFilter(A, W, beta.T, T_loop)
ppf._init_state()
decoded_output_new = np.zeros([7, n_iter])
for idx in range(1, n_iter):
ppf(spike_counts[idx, :])
decoded_output_new[:,idx] = ppf.get_mean()
x_est = truedata['x_est']
plt.figure()
# axes hold by default in Matplotlib >= 2.0 (plt.hold no longer exists)
plt.plot(decoded_output_new[3,:n_iter], label='pyth')
plt.plot(x_est[0,:n_iter], label='matlab')
plt.plot(hand_vel[0,:n_iter], label='handvel')
plt.legend()
plt.show()
print(np.max(np.abs(x_est[0,:n_iter] - decoded_output_new[3,:])))
|
{"hexsha": "1e6813b230f1f2efcc13864bd6d235d1dcdb54b2", "size": 3321, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ppf/test_point_proc_decoding.py", "max_stars_repo_name": "DerekYJC/bmi_python", "max_stars_repo_head_hexsha": "7b9cf3f294a33688db24b0863c1035e9cc6999ea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/ppf/test_point_proc_decoding.py", "max_issues_repo_name": "DerekYJC/bmi_python", "max_issues_repo_head_hexsha": "7b9cf3f294a33688db24b0863c1035e9cc6999ea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-07-31T18:58:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T14:36:00.000Z", "max_forks_repo_path": "tests/ppf/test_point_proc_decoding.py", "max_forks_repo_name": "DerekYJC/bmi_python", "max_forks_repo_head_hexsha": "7b9cf3f294a33688db24b0863c1035e9cc6999ea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-03-06T15:39:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T17:03:21.000Z", "avg_line_length": 27.0, "max_line_length": 94, "alphanum_fraction": 0.7058115026, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1074}
|
# -*- coding:utf-8 -*-
# &Author AnFany
# Convert the MNIST or the Fashion-MNIST dataset into image files.
# The two datasets share exactly the same binary format, so this script works for both.
import struct
from PIL import Image
import numpy as np
import os
Path = r'C:\Users\GWT9\Desktop'  # directory that holds the four files below
os.chdir(Path)  # use it as the current working directory
# Training image file
train_images = 'train-images-idx3-ubyte'  # note: MNIST names this with a dot ('images.idx3'), Fashion-MNIST with a dash ('images-idx3')
# Training label file
train_labels = 'train-labels-idx1-ubyte'
# Test image file
test_images = 't10k-images-idx3-ubyte'
# Test label file
test_labels = 't10k-labels-idx1-ubyte'
# Read the image data
def get_image(image_file):
# Read the binary data
bin_data = open(image_file, 'rb').read()
# Parse the header: magic number, number of images, image height, image width
offset = 0
fmt_header = '>iiii'
magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
# Parse the image records
image_size = num_rows * num_cols
offset += struct.calcsize(fmt_header)
fmt_image = '>' + str(image_size) + 'B'
images = np.empty((num_images, num_rows, num_cols))
for i in range(num_images):
images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
offset += struct.calcsize(fmt_image)
return images
# Read the label data
def get_label(label_file):
# Read the binary data
bin_data = open(label_file, 'rb').read()
# Parse the header: magic number and number of labels
offset = 0
fmt_header = '>ii'
magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
# Parse the label records
offset += struct.calcsize(fmt_header)
fmt_image = '>B'
labels = np.empty(num_images)
for i in range(num_images):
labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
offset += struct.calcsize(fmt_image)
return labels
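# For reference, the IDX files identify themselves via the magic number
# parsed above: 2051 (0x00000803) for image files and 2049 (0x00000801) for
# label files; a mismatch usually means the wrong file or a still-gzipped
# download.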
# Convert image matrices into image files, named by their labels
def matrix_to_fig(matrix_data, fig_title, file_name):
"""
Convert image matrices into image files, named by their labels
:param matrix_data: matrix data of the images
:param fig_title: labels of the corresponding images
:param file_name: name of the folder used to store the images
:return: folder containing the stored images
"""
sign_dict = {}
for image, label in zip(matrix_data, fig_title):
# Running index for images of the same label, starting from 1
if label not in sign_dict:
cc = 1
else:
cc = sign_dict[label] + 1
sign_dict[label] = cc
# Build the image
get_image = Image.fromarray(np.uint8(image))  # convert to uint8
# Save the image
get_image.save(r".\%s\%s_%d.png" % (file_name, int(label), cc))
print(sign_dict)  # show how many images each label has
return print('Conversion complete')
# Main entry point
if __name__ == "__main__":
# Image matrices and labels of the training set
train_fig_data = get_image(train_images)
train_fig_label = get_label(train_labels)
# Image matrices and labels of the test set
test_fig_data = get_image(test_images)
test_fig_label = get_label(test_labels)
# Name of the folder that will hold the converted images
New_File_Name = 'MNIST_DATA'
# Create the folder if it does not exist yet
if not os.path.isdir('%s' % New_File_Name):
os.mkdir('%s' % New_File_Name)
# Use this folder as the working directory
os.chdir(r'%s\%s' % (Path, New_File_Name))
# Create two subfolders, train and test, for the training and test images
if not os.path.isdir('train'):
os.mkdir('train')
if not os.path.isdir('test'):
os.mkdir('test')
# Convert the training data
matrix_to_fig(train_fig_data, train_fig_label, 'train')
# Convert the test data
matrix_to_fig(test_fig_data, test_fig_label, 'test')
|
{"hexsha": "352c378b9caa509d78cf7ea46be5d16a7799ca40", "size": 3341, "ext": "py", "lang": "Python", "max_stars_repo_path": "CNN/mnist_to_fig.py", "max_stars_repo_name": "Jojoxiao/Machine-Learning-for-Beginner-by-Python3", "max_stars_repo_head_hexsha": "71b91c9cba5803bd78d4d31be6dabb1d3989e968", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 397, "max_stars_repo_stars_event_min_datetime": "2018-05-28T02:07:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:53:37.000Z", "max_issues_repo_path": "CNN/mnist_to_fig.py", "max_issues_repo_name": "976634681/Machine-Learning-for-Beginner-by-Python3", "max_issues_repo_head_hexsha": "d9effcbb1b390dc608a0f4c0a28f0ad03892047a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-01-14T16:41:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-11T13:23:06.000Z", "max_forks_repo_path": "CNN/mnist_to_fig.py", "max_forks_repo_name": "976634681/Machine-Learning-for-Beginner-by-Python3", "max_forks_repo_head_hexsha": "d9effcbb1b390dc608a0f4c0a28f0ad03892047a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 235, "max_forks_repo_forks_event_min_datetime": "2018-06-28T05:31:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T03:20:07.000Z", "avg_line_length": 28.313559322, "max_line_length": 108, "alphanum_fraction": 0.6429212811, "include": true, "reason": "import numpy", "num_tokens": 1212}
|
from rdkit import Chem
from rdkit.Chem import rdchem, Descriptors
import numpy
periodicTable = rdchem.GetPeriodicTable()
def getChinp(mol,NumPath=2):
"""
#################################################################
Calculation of molecular connectivity chi index for path order n
#################################################################
"""
accum=0.0
deltas=[x.GetDegree() for x in mol.GetAtoms()]
for path in Chem.FindAllPathsOfLengthN(mol,NumPath+1,useBonds=0):
cAccum=1.0
for idx in path:
cAccum *= deltas[idx]
if cAccum:
accum += 1./numpy.sqrt(cAccum)
return accum
def getChinch(mol, NumCycle=3):
"""
#################################################################
Calculation of molecular connectivity chi index for cycles of n
#################################################################
"""
accum = 0.0
deltas = [x.GetDegree() for x in mol.GetAtoms()]
for tup in mol.GetRingInfo().AtomRings():
cAccum = 1.0
if len(tup) == NumCycle:
for idx in tup:
cAccum *= deltas[idx]
if cAccum:
accum += 1. / numpy.sqrt(cAccum)
return accum
def getHKDeltas(mol, skipHs=1):
"""
#################################################################
Calculation of modified delta value for a molecule
#################################################################
"""
global periodicTable
res = []
for atom in mol.GetAtoms():
n = atom.GetAtomicNum()
if n > 1:
nV = periodicTable.GetNOuterElecs(n)
nHs = atom.GetTotalNumHs()
if n < 10:
res.append(float(nV - nHs))
else:
res.append(float(nV - nHs) / float(n - nV - 1))
elif not skipHs:
res.append(0.0)
return res
def getAtomHKDeltas(atom,skipHs=0):
"""
#################################################################
*Internal Use Only*
Calculation of modified delta value for a single atom
#################################################################
"""
global periodicTable
res=[]
n=atom.GetAtomicNum()
if n > 1:
nV=periodicTable.GetNOuterElecs(n)
nHs=atom.GetTotalNumHs()
if n<10:
res.append(float(nV-nHs))
else:
res.append(float(nV-nHs)/float(n-nV-1))
elif not skipHs:
res.append(0.0)
return res
def getChivnp(mol, NumPath=1):
"""#################################################################
Calculation of valence molecular connectivity chi index for path order 1
#################################################################
"""
accum = 0.0
deltas = getHKDeltas(mol, skipHs=0)
for path in Chem.FindAllPathsOfLengthN(mol, NumPath + 1, useBonds=0):
cAccum = 1.0
for idx in path:
cAccum *= deltas[idx]
if cAccum:
accum += 1. / numpy.sqrt(cAccum)
return accum
def getChivnch(mol, NumCyc=3):
"""
#################################################################
Calculation of valence molecular connectivity chi index for cycles of n
#################################################################
"""
accum=0.0
deltas=getHKDeltas(mol,skipHs=0)
for tup in mol.GetRingInfo().AtomRings():
cAccum=1.0
if len(tup)==NumCyc:
for idx in tup:
cAccum *= deltas[idx]
if cAccum:
accum += 1./numpy.sqrt(cAccum)
return accum
################################################################
def getChi0(mol):
return Descriptors.Chi0(mol)
def getChi1(mol):
return Descriptors.Chi1(mol)
def getmChi1(mol):
"""
#################################################################
Calculation of mean chi1 (Randic) connectivity index.
---->mchi1
#################################################################
"""
cc = [x.GetBeginAtom().GetDegree()*x.GetEndAtom().GetDegree() for x in mol.GetBonds()]
while 0 in cc:
cc.remove(0)
cc = numpy.array(cc,'d')
res = numpy.mean(numpy.sqrt(1./cc))
return res
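# Worked example: for propane ('CCC') the two C-C bonds each join atoms of
# heavy-atom degree 1 and 2, so mchi1 = mean(1/sqrt(2), 1/sqrt(2)) ~ 0.7071.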
def getChi2(mol):
return getChinp(mol,NumPath=2)
def getChi3(mol):
return getChinp(mol,NumPath=3)
def getChi4(mol):
return getChinp(mol,NumPath=4)
def getChi5(mol):
return getChinp(mol,NumPath=5)
def getChi6(mol):
return getChinp(mol,NumPath=6)
def getChi7(mol):
return getChinp(mol,NumPath=7)
def getChi8(mol):
return getChinp(mol,NumPath=8)
def getChi9(mol):
return getChinp(mol,NumPath=9)
def getChi10(mol):
return getChinp(mol,NumPath=10)
def getChi3c(mol):
accum=0.0
deltas=[x.GetDegree() for x in mol.GetAtoms()]
patt=Chem.MolFromSmarts('*~*(~*)~*')
HPatt=mol.GetSubstructMatches(patt)
for cluster in HPatt:
deltas=[mol.GetAtomWithIdx(x).GetDegree() for x in cluster]
while 0 in deltas:
deltas.remove(0)
if deltas!=[]:
deltas1=numpy.array(deltas,float)  # numpy.float was removed in NumPy 1.24; plain float is equivalent
accum=accum+1./numpy.sqrt(deltas1.prod())
return accum
def getChi4c(mol):
accum=0.0
patt=Chem.MolFromSmarts('*~*(~*)(~*)~*')
HPatt=mol.GetSubstructMatches(patt)
for cluster in HPatt:
deltas=[mol.GetAtomWithIdx(x).GetDegree() for x in cluster]
while 0 in deltas:
deltas.remove(0)
if deltas!=[]:
deltas1=numpy.array(deltas,float)
accum=accum + 1./numpy.sqrt(deltas1.prod())
return accum
def getChi4pc(mol):
accum=0.0
patt=Chem.MolFromSmarts('*~*(~*)~*~*')
HPatt=mol.GetSubstructMatches(patt)
for cluster in HPatt:
deltas=[mol.GetAtomWithIdx(x).GetDegree() for x in cluster]
while 0 in deltas:
deltas.remove(0)
if deltas!=[]:
deltas1=numpy.array(deltas,float)
accum=accum+1./numpy.sqrt(deltas1.prod())
return accum
def getChi3ch(mol):
return getChinch(mol,NumCycle=3)
def getChi4ch(mol):
return getChinch(mol,NumCycle=4)
def getChi5ch(mol):
return getChinch(mol,NumCycle=5)
def getChi6ch(mol):
return getChinch(mol,NumCycle=6)
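# Worked example for the ring indices: in benzene every ring atom has
# heavy-atom degree 2, so the single 6-cycle contributes 1/sqrt(2**6) and
# getChi6ch(Chem.MolFromSmiles('c1ccccc1')) == 0.125.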
def getChiv0(mol):
deltas=getHKDeltas(mol,skipHs=0)
while 0 in deltas:
deltas.remove(0)
deltas=numpy.array(deltas,'d')
res=sum(numpy.sqrt(1./deltas))
return res
def getChiv1(mol):
return getChivnp(mol,NumPath=1)
def getChiv2(mol):
return getChivnp(mol,NumPath=2)
def getChiv3(mol):
return getChivnp(mol, NumPath=3)
def getChiv4(mol):
return getChivnp(mol, NumPath=4)
def getChiv5(mol):
return getChivnp(mol, NumPath=5)
def getChiv6(mol):
return getChivnp(mol, NumPath=6)
def getChiv7(mol):
return getChivnp(mol, NumPath=7)
def getChiv8(mol):
return getChivnp(mol, NumPath=8)
def getChiv9(mol):
return getChivnp(mol, NumPath=9)
def getChiv10(mol):
return getChivnp(mol, NumPath=10)
def getdchi0(mol):
"""
#################################################################
Calculation of the difference between chi0v and chi0
#################################################################
"""
return abs(getChiv0(mol) - getChi0(mol))
def getdchi1(mol):
return abs(getChiv1(mol)-getChi1(mol))
def getdchi2(mol):
return abs(getChiv2(mol)-getChi2(mol))
def getdchi3(mol):
return abs(getChiv3(mol)-getChi3(mol))
def getdchi4(mol):
return abs(getChiv4(mol)-getChi4(mol))
def getChiv3c(mol):
accum=0.0
patt=Chem.MolFromSmarts('*~*(~*)~*')
HPatt=mol.GetSubstructMatches(patt)
for cluster in HPatt:
deltas=[getAtomHKDeltas(mol.GetAtomWithIdx(x)) for x in cluster]
while 0 in deltas:
deltas.remove(0)
if deltas!=[]:
deltas1=numpy.array(deltas,float)
den = numpy.sqrt(deltas1.prod())
if den != 0.0:
accum = accum + 1./den
return accum
def getChiv4c(mol):
accum=0.0
patt=Chem.MolFromSmarts('*~*(~*)(~*)~*')
HPatt=mol.GetSubstructMatches(patt)
#print HPatt
for cluster in HPatt:
deltas=[getAtomHKDeltas(mol.GetAtomWithIdx(x)) for x in cluster]
while 0 in deltas:
deltas.remove(0)
if deltas!=[]:
deltas1=numpy.array(deltas,float)
den = numpy.sqrt(deltas1.prod())
if den != 0.0:
accum = accum+1./den
return accum
def getChiv4pc(mol):
"""
#################################################################
Calculation of valence molecular connectivity chi index for path/cluster of order 4
#################################################################
"""
accum=0.0
patt=Chem.MolFromSmarts('*~*(~*)~*~*')
HPatt=mol.GetSubstructMatches(patt)
#print HPatt
for cluster in HPatt:
deltas=[getAtomHKDeltas(mol.GetAtomWithIdx(x)) for x in cluster]
while 0 in deltas:
deltas.remove(0)
if deltas!=[]:
deltas1=numpy.array(deltas,float)
den = numpy.sqrt(deltas1.prod())
if den != 0.0:
accum = accum+1./den
return accum
def getChiv3ch(mol):
    """
    Chiv3ch related to ring 3
    """
    return getChivnch(mol, 3)
def getChiv4ch(mol):
    """
    Chiv4ch related to ring 4
    """
    return getChivnch(mol, 4)
def getChiv5ch(mol):
    """
    Chiv5ch related to ring 5
    """
    return getChivnch(mol, 5)
def getChiv6ch(mol):
    """
    Chiv6ch related to ring 6
    """
    return getChivnch(mol, 6)
def getknotp(mol):
    """
    #################################################################
    Calculation of the difference between chi3c and chi4pc
    #################################################################
    """
    return abs(getChi3c(mol) - getChi4pc(mol))
def getknotpv(mol):
    """
    #################################################################
    Calculation of the difference between chiv3c and chiv4pc
    ---->knotpv
    #################################################################
    """
    chiv3c = getChiv3c(mol)
    chiv4pc = getChiv4pc(mol)
    return abs(chiv3c - chiv4pc)
# Thin wrappers delegating to RDKit's GraphDescriptors implementations
def getChi0n(mol):
    return Chem.GraphDescriptors.Chi0n(mol)
def getChi1n(mol):
    return Chem.GraphDescriptors.Chi1n(mol)
def getChi2n(mol):
    return Chem.GraphDescriptors.Chi2n(mol)
def getChi3n(mol):
    return Chem.GraphDescriptors.Chi3n(mol)
def getChi4n(mol):
    return Chem.GraphDescriptors.Chi4n(mol)
_connectivity={"Chi0": getChi0,
"Chi1": getChi1,
"mChi1": getmChi1,
"Chi2": getChi2,
"Chi3": getChi3,
"Chi4": getChi4,
"Chi5": getChi5,
"Chi6": getChi6,
"Chi7": getChi7,
"Chi8": getChi8,
"Chi9": getChi9,
"Chi10": getChi10,
"Chi3c": getChi3c,
"Chi4c": getChi4c,
"Chi4pc": getChi4pc,
"Chi3ch": getChi3ch,
"Chi4ch": getChi4ch,
"Chi5ch": getChi5ch,
"Chi6ch": getChi6ch,
"Chiv0": getChiv0,
"Chiv1": getChiv1,
"Chiv2": getChiv2,
"Chiv3": getChiv3,
"Chiv4": getChiv4,
"Chiv5": getChiv5,
"Chiv6": getChiv6,
"Chiv7": getChiv7,
"Chiv8": getChiv8,
"Chiv9": getChiv9,
"Chiv10": getChiv10,
"dchi0": getdchi0,
"dchi1": getdchi1,
"dchi2": getdchi2,
"dchi3": getdchi3,
"dchi4": getdchi4,
"Chiv3c": getChiv3c,
"Chiv4c": getChiv4c,
"Chiv4pc": getChiv4pc,
"Chiv3ch": getChiv3ch,
"Chiv4ch": getChiv4ch,
"Chiv5ch": getChiv5ch,
"Chiv6ch": getChiv6ch,
"knotp": getknotp,
"knotpv": getknotpv,
"Chi1n": getChi1n,
"Chi2n": getChi2n,
"Chi3n": getChi3n,
"Chi4n": getChi4n}
def GetConnectivity(mol):
    """
    #################################################################
    Get the dictionary of connectivity descriptors for a given molecule mol
    #################################################################
    """
    dresult = {}
    for DesLabel in _connectivity.keys():
        dresult[DesLabel] = round(_connectivity[DesLabel](mol), 3)
    return dresult
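# A minimal usage sketch (assumptions: RDKit is installed, this module's own
# imports are in scope, and "CCO" (ethanol) is an arbitrary example molecule,
# not one taken from this repository):
if __name__ == "__main__":
    from rdkit import Chem
    example_mol = Chem.MolFromSmiles("CCO")
    # GetConnectivity returns a dict mapping descriptor names (e.g. "Chi0",
    # "knotp") to values rounded to 3 decimal places.
    print(GetConnectivity(example_mol))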
|
{"hexsha": "c391b043939fe06f6e69d9ac5a2d53eacdac6ccb", "size": 12681, "ext": "py", "lang": "Python", "max_stars_repo_path": "Desc1D2D/connectivity.py", "max_stars_repo_name": "ABorrel/molecular-descriptors", "max_stars_repo_head_hexsha": "cdc08c7242e929ecf4dcb362331c7226127c3589", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-19T15:17:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-19T15:17:47.000Z", "max_issues_repo_path": "Desc1D2D/connectivity.py", "max_issues_repo_name": "ABorrel/CompDESC", "max_issues_repo_head_hexsha": "cdc08c7242e929ecf4dcb362331c7226127c3589", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Desc1D2D/connectivity.py", "max_forks_repo_name": "ABorrel/CompDESC", "max_forks_repo_head_hexsha": "cdc08c7242e929ecf4dcb362331c7226127c3589", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1175166297, "max_line_length": 90, "alphanum_fraction": 0.5007491523, "include": true, "reason": "import numpy", "num_tokens": 3474}
|
cd(@__DIR__); include("setups/grid23x22.jl")
gr(dpi = 200)
##
frame = sgwt_frame(W; nf = 6)
x = 242
for j = 1:6
    plt = heatmap(reshape(frame[:, x, j], (Nx, Ny))', c = :viridis, ratio = 1,
                  frame = :none, xlim = [1, Nx], size = (500, 400))
    savefig(plt, "../figs/Grid$(Nx)x$(Ny)_SGWT_frame_j$(j-1)_x$(x).png")
end
|
{"hexsha": "8a573bd89bd84febd3404fd89c3f58734dc4052c", "size": 331, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/dissertations/htli/scripts/Figure10.5.jl", "max_stars_repo_name": "BoundaryValueProblems/MTSG.jl", "max_stars_repo_head_hexsha": "8cf8e2b3035876b5ceda45109b0847a60b581a7c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-02T18:39:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-19T15:45:17.000Z", "max_issues_repo_path": "test/dissertations/htli/scripts/Figure10.5.jl", "max_issues_repo_name": "haotian127/MultiscaleGraphSignalTransforms.jl", "max_issues_repo_head_hexsha": "85ba99e505283491ac69e979737bbb712b698a6e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-04-27T23:00:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-03T11:03:17.000Z", "max_forks_repo_path": "test/dissertations/htli/scripts/Figure10.5.jl", "max_forks_repo_name": "haotian127/MultiscaleGraphSignalTransforms.jl", "max_forks_repo_head_hexsha": "85ba99e505283491ac69e979737bbb712b698a6e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-24T21:46:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T04:32:31.000Z", "avg_line_length": 27.5833333333, "max_line_length": 78, "alphanum_fraction": 0.5589123867, "num_tokens": 132}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 14:38:00 2021

@author: 14488
"""
import pyrealsense2 as rs
import numpy as np
import cv2


class IntelRealSense():
    def __init__(self,
                 RGB_resolution=(320, 240),
                 Depth_resolution=(640, 480)):
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.depth, Depth_resolution[0], Depth_resolution[1], rs.format.z16, 30)
        self.config.enable_stream(rs.stream.color, RGB_resolution[0], RGB_resolution[1], rs.format.bgr8, 30)
        # Start streaming
        self.pipeline.start(self.config)
        print('IntelRealSense is connected')

    def get_rgb(self,
                resize=False,
                visualization=False,
                crop=False):
        frames = self.pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        if crop:
            color_image = self.crop(crop, color_image)
        if resize:
            color_image = cv2.resize(color_image, resize)
        if visualization:
            self.visualization(color_image)
        return color_image

    def visualization(self,
                      image,
                      Window_name='RealSense',
                      Window_size=(640, 480)):
        cv2.namedWindow(Window_name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(Window_name, Window_size[0], Window_size[1])
        print("Press esc or 'q' to close the image window")
        while True:
            cv2.imshow(Window_name, image)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

    def crop(self,
             size,
             image):
        # size = (row_start, row_end, col_start, col_end) in pixel coordinates
        image_cropped = image[size[0]:size[1], size[2]:size[3]]
        return image_cropped
|
{"hexsha": "1d5b7e7c8496fc4ae5eeb5c27176d091c70325f8", "size": 2091, "ext": "py", "lang": "Python", "max_stars_repo_path": "Real UR5e/IntelRealSense.py", "max_stars_repo_name": "wq13552463699/UCD_UR5E", "max_stars_repo_head_hexsha": "513acb7e235ab940fd03c3038208678e285690f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-02T10:48:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T12:32:51.000Z", "max_issues_repo_path": "Real UR5e/IntelRealSense.py", "max_issues_repo_name": "wq13552463699/UR5E_robot_gym_env_Real_and_Sim", "max_issues_repo_head_hexsha": "513acb7e235ab940fd03c3038208678e285690f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Real UR5e/IntelRealSense.py", "max_forks_repo_name": "wq13552463699/UR5E_robot_gym_env_Real_and_Sim", "max_forks_repo_head_hexsha": "513acb7e235ab940fd03c3038208678e285690f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6818181818, "max_line_length": 111, "alphanum_fraction": 0.5279770445, "include": true, "reason": "import numpy", "num_tokens": 446}
|