| text (stringlengths 0–1.25M) | meta (stringlengths 47–1.89k) |
|---|---|
function x = inv_digamma(y,niter)
% INV_DIGAMMA Inverse of the digamma function.
%
% inv_digamma(y) returns x such that digamma(x) = y.
% A different algorithm is provided by Paul Fackler:
% http://www.american.edu/academic.depts/cas/econ/gaussres/pdf/loggamma.src
% Newton iteration to solve digamma(x)-y = 0
x = exp(y)+1/2;
i = find(y <= -2.22);
x(i) = -1./(y(i) - digamma(1));
% never need more than 5 iterations
if nargin < 2
niter = 5;
end
for iter = 1:niter
x = x - (digamma(x)-y)./trigamma(x);
end
return
% test
y = -3:0.01:0.1;
x = digamma(inv_digamma(y));
max(abs(x-y))
max(abs(x-y)./inv_digamma(y))
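For readers without MATLAB, the following is a minimal NumPy/SciPy sketch of the same Newton inversion. It is an illustrative port, not part of the original toolkit; scipy.special.polygamma(1, x) plays the role of trigamma, and the function name inv_digamma_py is made up here.

import numpy as np
from scipy.special import digamma, polygamma

def inv_digamma_py(y, niter=5):
    y = np.asarray(y, dtype=float)
    x = np.exp(y) + 0.5                                      # starting guess for large y
    x = np.where(y <= -2.22, -1.0 / (y - digamma(1.0)), x)   # starting guess for very negative y
    for _ in range(niter):
        x = x - (digamma(x) - y) / polygamma(1, x)           # Newton step for digamma(x) - y = 0
    return x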
|
{"author": "FuzhenZhuang", "repo": "Transfer-Learning-Toolkit", "sha": "24b5323b354aee844b8b7df9fcad17fdfb191dc4", "save_path": "github-repos/MATLAB/FuzhenZhuang-Transfer-Learning-Toolkit", "path": "github-repos/MATLAB/FuzhenZhuang-Transfer-Learning-Toolkit/Transfer-Learning-Toolkit-24b5323b354aee844b8b7df9fcad17fdfb191dc4/utilities/TLLibrary64/LR/fastfit/inv_digamma.m"}
|
import mmcv
import os
import sys
import numpy as np
def write_submission(outputs):
import pandas as pd
import numpy as np
from scipy.special import softmax
from mmdet.datasets.kaggle_pku_utils import quaternion_to_euler_angle
submission = 'Nov20-18-24-45-epoch_50.csv'
predictions = {}
PATH = '/data/Kaggle/pku-autonomous-driving/'
ImageId =[i.strip() for i in open(PATH + 'validation.txt').readlines()]
# ImageId = [x.replace('.jpg', '') for x in os.listdir(PATH + 'test_images')]
for idx, output in enumerate(outputs):
conf = np.max(softmax(output[2]['car_cls_score_pred'], axis=1), axis=1)
euler_angle = np.array([quaternion_to_euler_angle(x) for x in output[2]['quaternion_pred']])
translation = output[2]['trans_pred_world']
coords = np.hstack((euler_angle, translation, conf[:, None]))
coords_str = coords2str(coords)
try:
predictions[ImageId[idx]] = coords_str
        except IndexError:
            # more outputs than entries in ImageId; skip the extras
            continue
pred_dict = {'ImageId':[],'PredictionString':[]}
for k,v in predictions.items():
pred_dict['ImageId'].append(k)
pred_dict['PredictionString'].append(v)
df = pd.DataFrame(data=pred_dict)
print('df',df)
# test = pd.read_csv(PATH + 'sample_submission.csv')
# for im_id in test['ImageId']:
# test.loc[test['ImageId'] == im_id, ['PredictionString']] = [predictions[im_id]]
df.to_csv(submission, index=False)
def coords2str(coords):
s = []
for c in coords:
for l in c:
s.append('%.5f'%l)
return ' '.join(s)
if __name__ == '__main__':
outputs = mmcv.load('/data/Kaggle/wudi_data/work_dirs/Nov20-18-24-45-epoch_50.pkl')
write_submission(outputs)
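# A hypothetical usage note (values are made up, not from the original run): each detection
# contributes seven space-separated numbers, i.e. the three Euler angles, the three translation
# components, and the confidence, in the order they are stacked above.
#   >>> coords2str(np.array([[0.1, -0.2, 3.0, -5.0, 4.5, 20.0, 0.9]]))
#   '0.10000 -0.20000 3.00000 -5.00000 4.50000 20.00000 0.90000'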
|
{"hexsha": "6cab9bd2e0f4b76af21c4b36145c19df52ef072a", "size": 1749, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/pkl2csv.py", "max_stars_repo_name": "tyunist/Kaggle_PKU_Baidu", "max_stars_repo_head_hexsha": "48651d8a0fc8a7beda0822a2db794861feada7c6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2020-02-05T05:41:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T08:04:11.000Z", "max_issues_repo_path": "tools/pkl2csv.py", "max_issues_repo_name": "tyunist/Kaggle_PKU_Baidu", "max_issues_repo_head_hexsha": "48651d8a0fc8a7beda0822a2db794861feada7c6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-03-11T11:15:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-30T06:09:01.000Z", "max_forks_repo_path": "tools/pkl2csv.py", "max_forks_repo_name": "tyunist/Kaggle_PKU_Baidu", "max_forks_repo_head_hexsha": "48651d8a0fc8a7beda0822a2db794861feada7c6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-02-26T01:46:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T14:05:48.000Z", "avg_line_length": 32.3888888889, "max_line_length": 100, "alphanum_fraction": 0.6426529445, "include": true, "reason": "import numpy,from scipy", "num_tokens": 456}
|
#' Count Letters, Words, and Lines of a File
#'
#' Counts the number of characters, words, and lines of a text file.
#'
#' @details
#' \code{wc_l()} is a shorthand for counting only lines, similar to \code{wc -l}
#' in the terminal. Likewise \code{wc_w()} is analogous to \code{wc -w} for
#' words.
#'
#' @param file
#' Location of the file (as a string) from which the counts will be generated.
#' @param chars,words,lines
#' Should char/word/line counts be shown? At least one of the three must be
#' \code{TRUE}.
#'
#' @return
#' A list containing the requested counts.
#'
#' @examples
#' library(filesampler)
#' file = system.file("rawdata/small.csv", package="filesampler")
#' data = wc(file=file)
#'
#' @name wc
#' @rdname wc
NULL
#' @useDynLib filesampler R_fs_wc
#' @rdname wc
#' @export
wc = function(file, chars=TRUE, words=TRUE, lines=TRUE)
{
check.is.string(file)
check.is.flag(chars)
check.is.flag(words)
check.is.flag(lines)
if (!chars && !words && !lines)
stop("at least one of the arguments 'chars', 'words', or 'lines' must be TRUE")
file = abspath(file)
ret = .Call(R_fs_wc, file, chars, words, lines)
counts = list(chars=ret[1L], words=ret[2L], lines=ret[3L])
class(counts) = "wc"
attr(counts, "file") = file
counts
}
#' @rdname wc
#' @export
wc_w = function(file)
{
wc(file=file, chars=FALSE, words=TRUE, lines=FALSE)
}
#' @rdname wc
#' @export
wc_l = function(file)
{
wc(file=file, chars=FALSE, words=FALSE, lines=TRUE)
}
#' @title Print \code{wc} objects
#' @description Printing for \code{wc()}
#' @param x \code{wc} object
#' @param ... unused
#' @name print-wc
#' @rdname print-wc
#' @method print wc
#' @export
print.wc = function(x, ...)
{
cat("file: ", attr(x, "file"), "\n")
x = x[which(x != -1)]
maxlen = max(sapply(names(x), nchar))
names = gsub(names(x), pattern="_", replacement=" ")
names = title_case(x=names)
spacenames = simplify2array(lapply(names, function(str) paste0(str, ":", paste0(rep(" ", maxlen-nchar(str)), collapse=""))))
cat(paste(spacenames, x, sep=" ", collapse="\n"), "\n")
invisible()
}
|
{"hexsha": "2f0efafe17901fd7b73dfeea81f2a8058e738c2f", "size": 2070, "ext": "r", "lang": "R", "max_stars_repo_path": "R/wc.r", "max_stars_repo_name": "wrathematics/lineSampler", "max_stars_repo_head_hexsha": "b3683ea15888b0da6e1f983233c395d23cf9e2b6", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2015-02-25T23:05:19.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-27T14:27:22.000Z", "max_issues_repo_path": "R/wc.r", "max_issues_repo_name": "wrathematics/filesampler", "max_issues_repo_head_hexsha": "b3683ea15888b0da6e1f983233c395d23cf9e2b6", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/wc.r", "max_forks_repo_name": "wrathematics/filesampler", "max_forks_repo_head_hexsha": "b3683ea15888b0da6e1f983233c395d23cf9e2b6", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-02-14T18:26:40.000Z", "max_forks_repo_forks_event_max_datetime": "2017-02-14T18:26:40.000Z", "avg_line_length": 21.7894736842, "max_line_length": 126, "alphanum_fraction": 0.6338164251, "num_tokens": 635}
|
[STATEMENT]
lemma ln_upper_11_neg:
assumes "0 < x" and x1: "x \<le> 1" shows "ln(x) \<le> ln_upper_11 x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ln x \<le> ln_upper_11 x
[PROOF STEP]
apply (rule gen_upper_bound_decreasing [OF x1 d_delta_ln_upper_11])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>y. \<lbrakk>x \<le> y; y \<le> 1\<rbrakk> \<Longrightarrow> 0 < y
2. \<And>y. \<lbrakk>x \<le> y; y \<le> 1\<rbrakk> \<Longrightarrow> diff_delta_ln_upper_11 y \<le> 0
3. ln_upper_11 1 = ln 1
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
0 < x
x \<le> 1
goal (3 subgoals):
1. \<And>y. \<lbrakk>x \<le> y; y \<le> 1\<rbrakk> \<Longrightarrow> 0 < y
2. \<And>y. \<lbrakk>x \<le> y; y \<le> 1\<rbrakk> \<Longrightarrow> diff_delta_ln_upper_11 y \<le> 0
3. ln_upper_11 1 = ln 1
[PROOF STEP]
apply (auto simp: diff_delta_ln_upper_11_def divide_simps ln_upper_11_def mult_less_0_iff)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 489, "file": "Special_Function_Bounds_Log_CF_Bounds", "length": 4}
|
/*****************************************************************************
*
* This file is part of Mapnik (c++ mapping toolkit)
*
* Copyright (C) 2015 Artem Pavlenko
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
#ifndef FORMATTING_TEXT_HPP
#define FORMATTING_TEXT_HPP
// mapnik
#include <mapnik/text/formatting/base.hpp>
// boost
#include <boost/property_tree/ptree_fwd.hpp>
namespace mapnik {
class feature_impl;
namespace formatting {
class MAPNIK_DECL text_node: public node {
public:
text_node(expression_ptr text): node(), text_(text) {}
text_node(std::string text): node(), text_(parse_expression(text)) {}
void to_xml(boost::property_tree::ptree &xml) const;
static node_ptr from_xml(xml_node const& xml, fontset_map const& fontsets);
virtual void apply(evaluated_format_properties_ptr const& p, feature_impl const& feature, attributes const& vars, text_layout &output) const;
virtual void add_expressions(expression_set &output) const;
void set_text(expression_ptr text);
expression_ptr get_text() const;
private:
expression_ptr text_;
};
} //ns formatting
} //ns mapnik
#endif // FORMATTING_TEXT_HPP
|
{"hexsha": "012288507bf7e4f8efbba2e0d3fa16d5d19381ba", "size": 1938, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "external/mapnik/include/mapnik/text/formatting/text.hpp", "max_stars_repo_name": "baiyicanggou/mapnik_mvt", "max_stars_repo_head_hexsha": "9bde52fa9958d81361c015c816858534ec0931bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "external/mapnik/include/mapnik/text/formatting/text.hpp", "max_issues_repo_name": "baiyicanggou/mapnik_mvt", "max_issues_repo_head_hexsha": "9bde52fa9958d81361c015c816858534ec0931bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "external/mapnik/include/mapnik/text/formatting/text.hpp", "max_forks_repo_name": "baiyicanggou/mapnik_mvt", "max_forks_repo_head_hexsha": "9bde52fa9958d81361c015c816858534ec0931bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8888888889, "max_line_length": 145, "alphanum_fraction": 0.689370485, "num_tokens": 422}
|
module calc_kine_temp_module
implicit none
private
public :: calc_Ndof, get_Ndof
public :: calc_kine
public :: calc_temp
real(8),parameter :: TOJOUL=4.35975d-18 ! (J/hartree)
real(8),parameter :: kB_J=1.380658d-23 ! (J/K)
real(8),parameter :: kB=kB_J/TOJOUL ! (hartree/K)
integer :: N_degree_of_freedom=0
contains
subroutine calc_Ndof ! Degree of freedom
use atom_module, only: md_atom
use force_module, only: tim
implicit none
real(8) :: v(3),u(3)
integer :: i,m,natom,Ndof
call random_number( v )
natom=size(md_atom)
Ndof=0
do i=1,natom
m=md_atom(i)
u(:) = matmul( tim(:,:,m), v(:) )
Ndof = Ndof + count( u /= 0.0d0 )
end do
Ndof = Ndof - 3 ! Subtract center-of-mass dof
if ( Ndof <= 0 ) Ndof=Ndof+3
N_degree_of_freedom=Ndof
end subroutine calc_Ndof
subroutine get_Ndof( Ndof_out )
implicit none
integer,intent(out) :: Ndof_out
if ( N_degree_of_freedom == 0 ) call calc_Ndof
Ndof_out = N_degree_of_freedom
end subroutine get_Ndof
subroutine calc_kine( Velocity, kine, temp )
use atom_module, only: zn_atom, ki_atom
use cpmd_variables, only: pmass, AMU
implicit none
real(8),intent(in) :: Velocity(:,:)
real(8),intent(out) :: kine
real(8),optional,intent(out) :: temp
integer :: i,natom,Ndof
real(8) :: pm
natom=size(Velocity,2)
kine=0.0d0
do i=1,natom
pm = pmass( zn_atom(ki_atom(i)) )
kine = kine + sum( Velocity(:,i)**2 )*pm
end do
kine=kine*0.5d0*AMU
if ( present(temp) ) then
call get_Ndof( Ndof )
temp=kine/( 0.5d0*Ndof*kB )
end if
end subroutine calc_kine
subroutine calc_temp( Velocity, temp )
implicit none
real(8),intent(in) :: Velocity(:,:)
real(8),intent(out) :: temp
real(8) :: kine
integer :: Ndof
call calc_kine( Velocity, kine )
call get_Ndof( Ndof )
temp = kine/( 0.5d0*Ndof*kB )
end subroutine calc_temp
end module calc_kine_temp_module
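As a worked summary of what calc_kine and calc_temp compute above (pmass and AMU supply the particle masses in the code's own units, and kB is Boltzmann's constant in hartree/K):

\[
E_{\mathrm{kin}} = \frac{1}{2}\sum_{i} m_i\,\lvert \mathbf{v}_i \rvert^{2},
\qquad
T = \frac{E_{\mathrm{kin}}}{\tfrac{1}{2}\,N_{\mathrm{dof}}\,k_B}
  = \frac{2\,E_{\mathrm{kin}}}{N_{\mathrm{dof}}\,k_B},
\]

where N_dof is the number of degrees of freedom determined by calc_Ndof.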
|
{"hexsha": "85494231f481f63c21906e9f5225d00eb6d25a4a", "size": 2014, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/mdsource/calc_kine_temp_module.f90", "max_stars_repo_name": "j-iwata/RSDFT_DEVELOP", "max_stars_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-02T05:03:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T05:03:05.000Z", "max_issues_repo_path": "src/mdsource/calc_kine_temp_module.f90", "max_issues_repo_name": "j-iwata/RSDFT_DEVELOP", "max_issues_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mdsource/calc_kine_temp_module.f90", "max_forks_repo_name": "j-iwata/RSDFT_DEVELOP", "max_forks_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-22T02:44:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-22T02:44:58.000Z", "avg_line_length": 24.5609756098, "max_line_length": 55, "alphanum_fraction": 0.6320754717, "num_tokens": 702}
|
function cmatchregret(I::Int64, r::Vector{Vector{Float64}}, gs::GameSet)
ni_stage, na_stage = gs.ni_stage, gs.na_stage
na = numactions(I, ni_stage, na_stage)
σ = Vector{Float64}(undef, na)
for a in 1:na
denom = sum(max(r[I][b], 0.0) for b in 1:na)
if denom > 0.0
σ[a] = max(r[I][a], 0.0) / denom
else
σ[a] = 1.0 / na
end
end
return σ
end
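For reference, cmatchregret is the standard regret-matching rule: writing r⁺(a) = max(r[I][a], 0), the strategy at infoset I is

\[
\sigma(a) =
\begin{cases}
\dfrac{r^{+}(a)}{\sum_{b} r^{+}(b)} & \text{if } \sum_{b} r^{+}(b) > 0,\\[1ex]
\dfrac{1}{|A(I)|} & \text{otherwise,}
\end{cases}
\]

which is exactly what the loop above computes (the denominator could be hoisted out of the loop, but the result is the same).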
function cupdateutility!(player::Int64, I::Int64, h::SVector, σ::Vector{Float64},
u::Float64, πi::Float64, πo::Float64,
r::Vector{Vector{Float64}}, s::Vector{Vector{Float64}},
g::AirDefenseGame, gs::GameSet, depth::Int64,
α::Number, β::Number, γ::Number, iter::Int, siter::Vector{Int}, discounted::Bool,
λl::Vector{Vector{Float64}}, λu::Vector{Vector{Float64}})
A, ni_stage, na_stage = gs.A, gs.ni_stage, gs.na_stage
na = numactions(I, ni_stage, na_stage)
m = Vector{Float64}(undef, na)
nextactions = actions(depth, A)
# @show na[I]
for a in 1:na
ha = setindex(h, nextactions[a], depth)
if getplayer(depth) == player
πip = σ[a] * πi
up = cupdatetree!(ha, player, πip, πo, r, s, g, gs, depth + 1, α, β, γ, iter, siter, discounted, λl, λu)
if getplayer(depth) == 2 # constrained player
                lagrange = λl[I][a] * -1 + λu[I][a] * 1 # assumes independent constraints
else
lagrange = 0.0
end
m[a] = up - lagrange
u = u + σ[a] * up
else
πop = σ[a] * πo
up = cupdatetree!(ha, player, πi, πop, r, s, g, gs, depth + 1, α, β, γ, iter, siter, discounted, λl, λu)
u = u + up # possibly need a lagrange adjustment here? but I don't think so
end
end
return m, u
end
function cupdateregret!(player::Int64, I::Int64, h::SVector, σ::Vector{Float64},
u::Float64, m::Vector{Float64}, πi::Float64,
r::Vector{Vector{Float64}}, s::Vector{Vector{Float64}}, gs::GameSet, depth ::Int64,
α::Number, β::Number, γ::Number, iter::Int, siter::Vector{Int}, discounted::Bool)
ni_stage, na_stage = gs.ni_stage, gs.na_stage
if getplayer(depth) == player
siter[I] += 1
for a in 1:numactions(I, ni_stage, na_stage)
rnew = r[I][a] + m[a] - u
if discounted
# r[I][a] = rnew >= 0 ? rnew * (iter^α / (iter^α + 1)) : rnew * (iter^β / (iter^β + 1))
# s[I][a] = (s[I][a] + πi * σ[a]) * (iter / (iter + 1))^γ
if rnew >= 0
r[I][a] = rnew * (siter[I]^α / (siter[I]^α + 1))
else
r[I][a] = rnew * (float(siter[I])^β / (float(siter[I])^β + 1))
end
s[I][a] = (s[I][a] + πi * σ[a]) * (siter[I] / (siter[I] + 1))^γ
else
r[I][a] = rnew
s[I][a] = s[I][a] + πi * σ[a]
end
end
end
end
function cupdatetree!(h::SVector, player::Int64, πi::Float64, πo::Float64,
r::Vector{Vector{Float64}}, s::Vector{Vector{Float64}}, g::AirDefenseGame,
gs::GameSet, depth::Int64, α::Number, β::Number, γ::Number, iter::Int, siter::Vector{Int}, discounted::Bool,
λl::Vector{Vector{Float64}}, λu::Vector{Vector{Float64}})
# πo is short for \vec{π}_{-i}
A = gs.A
(terminal(depth)) && (return leafutility(h, player, πi, πo, g, gs))
if getplayer(depth) == 3
as = actions(depth, A)
chance_weights = pweights([chance(depth, a, g, gs) for a in as])
a = as[sample(chance_weights)]
ha = setindex(h, a, depth)
return cupdatetree!(ha, player, πi, πo, r, s, g, gs, depth + 1, α, β, γ, iter, siter, discounted, λl, λu)
end
I = infoset(h, depth, g, gs)
u = 0.0
σ = cmatchregret(I, r, gs)
m, u = cupdateutility!(player, I, h, σ, u, πi, πo, r, s, g, gs, depth, α, β, γ, iter, siter, discounted, λl, λu)
cupdateregret!(player, I, h, σ, u, m, πi, r, s, gs, depth, α, β, γ, iter, siter, discounted)
return u
end
function strat_ccfr(s::Vector{Vector{Float64}})
σ = [similar(si) for si in s]
for i in eachindex(s)
denom = sum(s[i])
if denom == 0
len = length(s[i])
σ[i] .= fill(1 / len, len)
else
σ[i] .= s[i] ./ denom
end
end
return σ
end
function ccfr_full(g::AirDefenseGame, gs::GameSet, rp2lb, rp2ub;
iterlimit::Int = 100_000,
timelimit::Number = 600, tol::Float64 = 5e-5,
α::Number = 1.5, β::Number = 0.5, γ::Number = 2, discounted::Bool = false,
λmax::Number = 1_000, λscale::Number = 4)
ni, ni_stage, na_stage, players = gs.ni, gs.ni_stage, gs.na_stage, gs.players
A, _, na_stage = build_action(g)
ni, ni1, ni2, ni_stage = build_info(g)
ns1, ns2, ns1_stage, ns2_stage = build_nseq(g, na_stage, ni_stage)
_, _, (seqI1, seqI2), (nextseqI1, nextseqI2) = build_Iset(g, A, na_stage, ns1_stage, ns2_stage, ni1, ni2, ns1, ns2)
seqvals = (ns1, ns2, seqI1, seqI2, nextseqI1, nextseqI2)
r = [zeros(numactions(i, ni_stage, na_stage)) for i in 1:ni] # regret
s = [zeros(numactions(i, ni_stage, na_stage)) for i in 1:ni] # cumulative strategies
λl = [zeros(numactions(i, ni_stage, na_stage)) for i in 1:ni]
λu = [zeros(numactions(i, ni_stage, na_stage)) for i in 1:ni]
λmean = zeros(iterlimit ÷ 10)
siter = zeros(Int64, ni) # infoset visit counts
u = 0.0
n_hist = 10 # number of intermediate strategies to record (for best responses)
Tx = round(Int64, timelimit * 60 / n_hist) # frequency of recording history
tx = 1 # index for sigma calculations
iter = 1 # index for number of iter
lind = 1 # index for lambda tracking
u1deriv = 0
u1prev = 0
converged = false
u1 = Float64[]
σ1 = Vector{Vector{Vector{Float64}}}(undef, n_hist)
println("CCFR: start $timelimit min or $iterlimit iter at $(now())...")
tstart, t_hist = now(), now()
# lamcheck = []
while !converged && tominute(now() - tstart) < timelimit && iter < iterlimit
ξ = (λmax ^ λscale) / (1 * sqrt(iter)) # λ learning rate β / (G √T)
Ψ = real_strat_struct_combined(strat_ccfr(s), gs, seqvals) # could be faster if we only update for player 2
for I in 1:size(rp2lb, 1)
for a in 1:length(rp2lb[I])
λl[I][a] = clamp(λl[I][a] + ξ * (-Ψ[I][a] + rp2lb[I][a]), 0.0, λmax)
λu[I][a] = clamp(λu[I][a] + ξ * (Ψ[I][a] - rp2ub[I][a]), 0.0, λmax)
end
end
# push!(lamcheck, deepcopy(λl))
# iter % 100 == 0 && @show λl[1], λu[1]
for player in players
h0 = SVector(0, 0, 0, 0, 0, 0)
u = cupdatetree!(h0, player, 1.0, 1.0, r, s, g, gs, 1, α, β, γ, iter, siter, discounted, λl, λu)
(player == 1) && push!(u1, u)
end
if tosecond(now() - t_hist) > Tx # calculate sigma every Tx seconds
σ1[tx] = strat_ccfr(s)
tx += 1
t_hist = now()
end
u1deriv = u1deriv * (1 - 1 / iter) + (u1[iter] - u1prev) * (1 / iter)
converged = abs(u1deriv) < tol
u1prev = u1[iter]
# if iter % 10 == 0
# lammean = mean(mean.(λu))
# lamcount = sum(sum(x .> 0) for x in λu)
# λmean[lind] = lammean
# print("\rIter: $iter, λ mean: $(round(lammean, digits = 6)), λ count: $lamcount")
# lind += 1
# end
iter % 10 == 0 && print("\rIter: $iter")
iter += 1
end
println("")
σ = strat_ccfr(s)
# return u1, r, s, σ, σ1, converged, iter, λmean
return u1, r, s, σ, σ1, converged, iter
end
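The multiplier update at the top of the while loop in ccfr_full is a projected (sub)gradient ascent step on the Lagrange multipliers of the lower/upper bound constraints. With step size ξ = λmax^λscale / √iter,

\[
\lambda^{l}_{I,a} \leftarrow \operatorname{clip}\!\big(\lambda^{l}_{I,a} + \xi\,(\mathrm{lb}_{I,a} - \Psi_{I,a}),\, 0,\, \lambda_{\max}\big),
\qquad
\lambda^{u}_{I,a} \leftarrow \operatorname{clip}\!\big(\lambda^{u}_{I,a} + \xi\,(\Psi_{I,a} - \mathrm{ub}_{I,a}),\, 0,\, \lambda_{\max}\big),
\]

where lb/ub are rp2lb/rp2ub and Ψ is the quantity returned by real_strat_struct_combined for the current average strategy (interpreting it as a realization-plan value is an inference, not something stated in this file).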
|
{"hexsha": "8cd0c6a06c068b98465bace1eb567a70bda57a2f", "size": 7876, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ccfrops_solve.jl", "max_stars_repo_name": "ajkeith/Cyber-Air-Defense", "max_stars_repo_head_hexsha": "856b833e7c6cbfe389b2ec1b21edb5d6e6ee97e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-25T16:52:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T08:21:35.000Z", "max_issues_repo_path": "src/ccfrops_solve.jl", "max_issues_repo_name": "ajkeith/Cyber-Air-Defense", "max_issues_repo_head_hexsha": "856b833e7c6cbfe389b2ec1b21edb5d6e6ee97e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-08T14:21:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-27T00:42:16.000Z", "max_forks_repo_path": "src/ccfrops_solve.jl", "max_forks_repo_name": "ajkeith/CFR_ICADS", "max_forks_repo_head_hexsha": "856b833e7c6cbfe389b2ec1b21edb5d6e6ee97e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-25T16:52:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-25T16:52:02.000Z", "avg_line_length": 43.7555555556, "max_line_length": 120, "alphanum_fraction": 0.5280599289, "num_tokens": 2721}
|
import numpy as np
import pickle,os
from Common.Module import Module
class FakeRateWeighter(Module):
def analyze(self,data,dataset,cfg):
if dataset.name == "ZX":
cfg.collector.event_weight = np.ones(data["genWeight"].shape) * cfg.collector.selection_weight
idx_3p1f = data["nFailedLeptonsZ2"] == 1
idx_2p2f = data["nFailedLeptonsZ2"] == 2
cfg.collector.event_weight[idx_3p1f] *= data["FRWeightProd"][idx_3p1f]
cfg.collector.event_weight[idx_2p2f] *= -1.*data["FRWeightProd"][idx_2p2f]
if "Fake" in dataset.name:
#ptbins = [5,10,20,30,45,100]
ptbins = [5,10,20,30,45,60,80,100]
#fakerate_b = [0.47941238, 0.25981017, 0.14687288, 0.14682184, 0.24102997] # Fewer bins
#fakerate_e = [0.59418372, 0.37545705, 0.2626037, 0.26221264, 0.43978349] # Fewer bins
#fakerate_b = [0.47941238, 0.25981017, 0.14687288, 0.14682184, 0.20095694, 0.29208633, 0.35238095] # More bins
#fakerate_e = [0.59418372, 0.37545705, 0.2626037, 0.26221264, 0.36705882, 0.52118644, 0.58974359] # More bins
fakerate_b = [0.47755192, 0.25319881, 0.14168969, 0.14253394, 0.19903912, 0.28695652, 0.3492823 ] # Upsilon and JPsi veto
fakerate_e = [0.59588139, 0.37110256, 0.25400458, 0.25421245, 0.35799523, 0.51082251, 0.58974359] # Upsilon and JPsi veto
fakeratio_b = [0] * len(fakerate_b)
fakeratio_e = [0] * len(fakerate_e)
for i in range(len(fakerate_b)):
fakeratio_b[i] = fakerate_b[i]/(1-fakerate_b[i])
fakeratio_e[i] = fakerate_e[i]/(1-fakerate_e[i])
pTbin = [0] * len(fakerate_b)
for i in range(len(pTbin)):
pTbin[i] = (data["pTL3"] >= ptbins[i]) & (data["pTL3"] < ptbins[i+1])
barrel = np.abs(data["etaL3"]) <= 1.4
endcap = np.abs(data["etaL3"]) >= 1.4
fake_weight_final_b = 0
fake_weight_final_e = 0
for i in range(len(fakeratio_b)):
fake_weight_final_b += pTbin[i]*fakeratio_b[i]
fake_weight_final_e += pTbin[i]*fakeratio_e[i]
fake_weight_final_b = fake_weight_final_b*barrel
fake_weight_final_e = fake_weight_final_e*endcap
fake_weight_final = fake_weight_final_b + fake_weight_final_e
cfg.collector.event_weight *= fake_weight_final
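# A short summary of the weights computed above (the interpretation of FRWeightProd is an
# assumption, not stated in this file): each failed lepton in pT bin k contributes a transfer factor
#
#   w_k = f_k / (1 - f_k)
#
# with f_k the measured fake rate for the barrel or endcap, while in the ZX branch 3P1F events
# enter with weight +FRWeightProd and 2P2F events with weight -FRWeightProd.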
|
{"hexsha": "d66f0391a36c1505c7a41708cf8e0ef5efb2fe44", "size": 2125, "ext": "py", "lang": "Python", "max_stars_repo_path": "Wto3l/Weighter/FakeRateWeighter.py", "max_stars_repo_name": "Nik-Menendez/PyCudaAnalyzer", "max_stars_repo_head_hexsha": "4b43d2915caac04da9ba688c2743e9c76eacdd5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Wto3l/Weighter/FakeRateWeighter.py", "max_issues_repo_name": "Nik-Menendez/PyCudaAnalyzer", "max_issues_repo_head_hexsha": "4b43d2915caac04da9ba688c2743e9c76eacdd5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Wto3l/Weighter/FakeRateWeighter.py", "max_forks_repo_name": "Nik-Menendez/PyCudaAnalyzer", "max_forks_repo_head_hexsha": "4b43d2915caac04da9ba688c2743e9c76eacdd5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2708333333, "max_line_length": 124, "alphanum_fraction": 0.7002352941, "include": true, "reason": "import numpy", "num_tokens": 858}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script for comparing the generated results.
"""
import numpy as np
import rasterio as rio
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
ds_30062020 = rio.open("sits/30-06-2020/classification-results/Sinop_probs_class_bayesian_2013_9_2014_8_v1.tif")
ds_02122020 = rio.open("sits/02-12-2020/classification-results/Sinop_probs_class_bayesian_2013_9_2014_8_v1.tif")
arr_30062020 = ds_30062020.read()
arr_02122020 = ds_02122020.read()
diff = arr_30062020 - arr_02122020
diff[diff != 0] = 2
diff[diff == 0] = 1
diff[diff == 2] = 0
diff = diff.astype(int)
#
# Original figure (a)
#
plt.figure(dpi = 300)
plt.imshow(arr_30062020[0, :, :], cmap = 'Paired', interpolation = 'none')
plt.axis('off')
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
plt.savefig('verification/results/map_30062020.pdf', dpi=600, bbox_inches='tight', pad_inches=0.0)
#
# Original figure (b)
#
plt.figure(dpi = 300)
plt.imshow(arr_02122020[0, :, :], cmap = 'Paired', interpolation = 'none')
plt.axis('off')
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
plt.savefig('verification/results/map_02122020.pdf', dpi=600, bbox_inches='tight', pad_inches=0.0)
#
# Difference Figure
#
plt.figure(dpi = 300)
plt.imshow(diff[0, :, :], cmap = 'tab20c', interpolation='none')
plt.axis('off')
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
# legend
cmap = plt.cm.tab20c
custom_lines = [Line2D([0], [0], color=cmap(20), lw=2),
Line2D([0], [0], color=cmap(1), lw=2)]
plt.legend(custom_lines, ['Valor Igual', 'Valor Diferente'])
# save!
plt.savefig('verification/results/difference_map.pdf', dpi=600, bbox_inches='tight', pad_inches=0.0)
#
# metrics
#
# pip install sewar
from sewar.full_ref import mse
from sewar.full_ref import rmse
# MSE
mse_val = mse(arr_30062020, arr_02122020)
# RMSE
rmse_val = rmse(arr_30062020, arr_02122020)
# count pixels
print(np.count_nonzero(diff == 0))
print(np.count_nonzero(diff == 1))
|
{"hexsha": "cfb308e31d0e890ae9dfce4aafe5871b9b1e7ac2", "size": 2118, "ext": "py", "lang": "Python", "max_stars_repo_path": "verification/difference_plot.py", "max_stars_repo_name": "M3nin0/experiment-software-lulc-versions", "max_stars_repo_head_hexsha": "734e8e6acc369d6bdf5dd8d694d3e3d61740ce44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-22T19:34:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T03:01:13.000Z", "max_issues_repo_path": "verification/difference_plot.py", "max_issues_repo_name": "M3nin0/experiment-software-lulc-versions", "max_issues_repo_head_hexsha": "734e8e6acc369d6bdf5dd8d694d3e3d61740ce44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "verification/difference_plot.py", "max_forks_repo_name": "M3nin0/experiment-software-lulc-versions", "max_forks_repo_head_hexsha": "734e8e6acc369d6bdf5dd8d694d3e3d61740ce44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.797752809, "max_line_length": 112, "alphanum_fraction": 0.7190745987, "include": true, "reason": "import numpy", "num_tokens": 664}
|
[STATEMENT]
lemma lt_plus_distinct_eq_max:
assumes "lt p \<noteq> lt q"
shows "lt (p + q) = ord_term_lin.max (lt p) (lt q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
proof (rule ord_term_lin.linorder_cases)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. ?x \<prec>\<^sub>t ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. ?x = ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
3. ?y \<prec>\<^sub>t ?x \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
assume a: "lt p \<prec>\<^sub>t lt q"
[PROOF STATE]
proof (state)
this:
lt p \<prec>\<^sub>t lt q
goal (3 subgoals):
1. ?x \<prec>\<^sub>t ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. ?x = ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
3. ?y \<prec>\<^sub>t ?x \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
hence "lt (p + q) = lt q"
[PROOF STATE]
proof (prove)
using this:
lt p \<prec>\<^sub>t lt q
goal (1 subgoal):
1. lt (p + q) = lt q
[PROOF STEP]
by (rule lt_plus_eqI)
[PROOF STATE]
proof (state)
this:
lt (p + q) = lt q
goal (3 subgoals):
1. ?x \<prec>\<^sub>t ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. ?x = ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
3. ?y \<prec>\<^sub>t ?x \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
lt (p + q) = lt q
goal (3 subgoals):
1. ?x \<prec>\<^sub>t ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. ?x = ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
3. ?y \<prec>\<^sub>t ?x \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
from a
[PROOF STATE]
proof (chain)
picking this:
lt p \<prec>\<^sub>t lt q
[PROOF STEP]
have "... = ord_term_lin.max (lt p) (lt q)"
[PROOF STATE]
proof (prove)
using this:
lt p \<prec>\<^sub>t lt q
goal (1 subgoal):
1. lt q = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
by (simp add: ord_term_lin.max.absorb2)
[PROOF STATE]
proof (state)
this:
lt q = ord_term_lin.max (lt p) (lt q)
goal (3 subgoals):
1. ?x \<prec>\<^sub>t ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. ?x = ?y \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
3. ?y \<prec>\<^sub>t ?x \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
goal (1 subgoal):
1. lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
goal (2 subgoals):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. lt q \<prec>\<^sub>t lt p \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. lt q \<prec>\<^sub>t lt p \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
assume a: "lt q \<prec>\<^sub>t lt p"
[PROOF STATE]
proof (state)
this:
lt q \<prec>\<^sub>t lt p
goal (2 subgoals):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. lt q \<prec>\<^sub>t lt p \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
hence "lt (p + q) = lt p"
[PROOF STATE]
proof (prove)
using this:
lt q \<prec>\<^sub>t lt p
goal (1 subgoal):
1. lt (p + q) = lt p
[PROOF STEP]
by (rule lt_plus_eqI_2)
[PROOF STATE]
proof (state)
this:
lt (p + q) = lt p
goal (2 subgoals):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. lt q \<prec>\<^sub>t lt p \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
lt (p + q) = lt p
goal (2 subgoals):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. lt q \<prec>\<^sub>t lt p \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
from a
[PROOF STATE]
proof (chain)
picking this:
lt q \<prec>\<^sub>t lt p
[PROOF STEP]
have "... = ord_term_lin.max (lt p) (lt q)"
[PROOF STATE]
proof (prove)
using this:
lt q \<prec>\<^sub>t lt p
goal (1 subgoal):
1. lt p = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
by (simp add: ord_term_lin.max.absorb1)
[PROOF STATE]
proof (state)
this:
lt p = ord_term_lin.max (lt p) (lt q)
goal (2 subgoals):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
2. lt q \<prec>\<^sub>t lt p \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
goal (1 subgoal):
1. lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
goal (1 subgoal):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
assume "lt p = lt q"
[PROOF STATE]
proof (state)
this:
lt p = lt q
goal (1 subgoal):
1. lt p = lt q \<Longrightarrow> lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
lt p \<noteq> lt q
lt p = lt q
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
lt p \<noteq> lt q
lt p = lt q
goal (1 subgoal):
1. lt (p + q) = ord_term_lin.max (lt p) (lt q)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
lt (p + q) = ord_term_lin.max (lt p) (lt q)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2869, "file": "Polynomials_MPoly_Type_Class_Ordered", "length": 28}
|
import pandas as pd
import numpy as np
from random import sample
import random
import torch
import torch.nn as nn
# # reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
# class QLearningTable:
# def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
# self.actions = actions # a list
# self.lr = learning_rate
# self.gamma = reward_decay
# self.epsilon = e_greedy
#         # a NumPy array is often used here instead of a DataFrame
# self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
#
# def choose_action(self, observation):
# self.check_state_exist(observation)
#
# if np.random.uniform() < self.epsilon:
# # choose best action
# # state_action = self.q_table.ix[observation, :]
# state_action = self.q_table.loc[observation, :]
#
# # some actions have the same value
#             # permutation -> shuffle the index order so that ties are broken randomly
# state_action = state_action.reindex(np.random.permutation(state_action.index))
#
# action = state_action.idxmax()
# else:
# # choose random action
# action = np.random.choice(self.actions)
#
# return action
#
# def learn(self, s, a, r, s_):
# self.check_state_exist(s_)
# self.check_state_exist(s)
#
# # q_predict = self.q_table.ix[s, a]
# q_predict = self.q_table.loc[s, a]
# # q_target = r + self.gamma * self.q_table.ix[s_, :].max()
# q_target = r + self.gamma * self.q_table.loc[s_, :].max()
#
# # update
# # self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
# self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
#
# def check_state_exist(self, state):
# if state not in self.q_table.index:
# # append new state to q table
# self.q_table = self.q_table.append(
# pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
self.disallowed_actions = {}
def choose_action(self, observation, excluded_actions=[]):
self.check_state_exist(observation)
self.disallowed_actions[observation] = excluded_actions
#state_action = self.q_table.ix[observation, :]
#state_action = self.q_table.loc[observation, self.q_table.columns[:]]
state_action = self.q_table.loc[observation, :]
for excluded_action in excluded_actions:
del state_action[excluded_action]
if np.random.uniform() < self.epsilon:
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(state_action.index)
return action
def learn(self, s, a, r, s_):
if s == s_:
return
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
#s_rewards = self.q_table.ix[s_, :]
#s_rewards = self.q_table.loc[s_, self.q_table.columns[:]]
s_rewards = self.q_table.loc[s_, :]
if s_ in self.disallowed_actions:
for excluded_action in self.disallowed_actions[s_]:
del s_rewards[excluded_action]
if s_ != 'terminal':
q_target = r + self.gamma * s_rewards.max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
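# The learn method above is the tabular Q-learning update: with learning rate alpha (= self.lr)
# and discount gamma (= self.gamma),
#
#   Q(s, a) <- Q(s, a) + alpha * ( r + gamma * max_{a'} Q(s', a') - Q(s, a) )
#
# where the max runs only over the actions allowed in s' and the target reduces to r alone when
# s' is terminal, exactly as in the code.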
class ExperienceReplayMemory:
def __init__(self, max_size):
# deque object that we've used for 'episodic_memory' is not suitable for random sampling
# here, we instead use a fix-size array to implement 'buffer'
self.buffer = [None] * max_size
self.max_size = max_size
self.index = 0
self.size = 0
def push(self, obj):
self.buffer[self.index] = obj
self.size = min(self.size + 1, self.max_size)
self.index = (self.index + 1) % self.max_size
def sample(self, batch_size):
indices = sample(range(self.size), batch_size)
return [self.buffer[index] for index in indices]
def __len__(self):
return self.size
class NaiveMultiLayerPerceptron(nn.Module):
def __init__(self,
input_dim: int,
output_dim: int,
num_neurons: list = [64, 32],
hidden_act_func: str = 'ReLU',
out_act_func: str = 'Identity'):
super(NaiveMultiLayerPerceptron, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_neurons = num_neurons
self.hidden_act_func = getattr(nn, hidden_act_func)()
self.out_act_func = getattr(nn, out_act_func)()
input_dims = [input_dim] + num_neurons
output_dims = num_neurons + [output_dim]
self.layers = nn.ModuleList()
for i, (in_dim, out_dim) in enumerate(zip(input_dims, output_dims)):
is_last = True if i == len(input_dims) - 1 else False
self.layers.append(nn.Linear(in_dim, out_dim))
if is_last:
self.layers.append(self.out_act_func)
else:
self.layers.append(self.hidden_act_func)
def forward(self, xs):
for layer in self.layers:
xs = layer(xs)
return xs
class DQN(nn.Module):
def __init__(self,
state_dim: int,
action_dim: int,
qnet: nn.Module,
qnet_target: nn.Module,
lr: float,
gamma: float,
epsilon: float):
"""
:param state_dim: input state dimension
:param action_dim: action dimension
:param qnet: main q network
:param qnet_target: target q network
:param lr: learning rate
:param gamma: discount factor of MDP
:param epsilon: E-greedy factor
"""
super(DQN, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.qnet = qnet
self.lr = lr
self.gamma = gamma
self.opt = torch.optim.Adam(params=self.qnet.parameters(), lr=lr)
self.register_buffer('epsilon', torch.ones(1) * epsilon)
self.cum_loss = 0.0
# target network related
qnet_target.load_state_dict(qnet.state_dict())
self.qnet_target = qnet_target
self.criteria = nn.SmoothL1Loss()
self.count_action_random = 0
self.count_action_select = 0
def choose_action(self, state):
qs = self.qnet(state)
#prob = np.random.uniform(0.0, 1.0, 1)
#if torch.from_numpy(prob).float() <= self.epsilon: # random
if random.random() <= self.epsilon: # random
action = np.random.choice(range(self.action_dim))
self.count_action_random += 1
else: # greedy
action = qs.argmax(dim=-1)
self.count_action_select += 1
return int(action)
def learn(self, state, action, reward, next_state, done):
s, a, r, ns = state, action, reward, next_state
# compute Q-Learning target with 'target network'
with torch.no_grad():
q_max, _ = self.qnet_target(ns).max(dim=-1, keepdim=True)
q_target = r + self.gamma * q_max * (1 - done)
q_val = self.qnet(s).gather(1, a)
loss = self.criteria(q_val, q_target)
self.opt.zero_grad()
loss.backward()
self.opt.step()
self.loss = loss.item()
def _get_loss_(self):
return self.loss
def _get_action_ratio(self):
selected_ratio = self.count_action_select/(self.count_action_select+self.count_action_random)
return selected_ratio
def prepare_training_inputs(sampled_exps, device='cpu'):
states = []
actions = []
rewards = []
next_states = []
dones = []
for sampled_exp in sampled_exps:
states.append(sampled_exp[0])
actions.append(sampled_exp[1])
rewards.append(sampled_exp[2])
next_states.append(sampled_exp[3])
dones.append(sampled_exp[4])
states = torch.cat(states, dim=0).float().to(device)
actions = torch.cat(actions, dim=0).to(device)
rewards = torch.cat(rewards, dim=0).float().to(device)
next_states = torch.cat(next_states, dim=0).float().to(device)
dones = torch.cat(dones, dim=0).float().to(device)
return states, actions, rewards, next_states, dones
# if __name__ == '__main__':
# net = NaiveMultiLayerPerceptron(10, 1, [20, 12], 'ReLU', 'Identity')
# print(net)
#
# xs = torch.randn(size=(12, 10))
# ys = net(xs)
# print(ys)
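# A minimal, hypothetical end-to-end sketch of how the classes above fit together. Random tensors
# stand in for a real environment, and the network sizes, batch size, sync period, and
# hyperparameters are illustrative choices, not values from the original project.
if __name__ == '__main__':
    state_dim, action_dim = 4, 2
    qnet = NaiveMultiLayerPerceptron(state_dim, action_dim, [64, 32])
    qnet_target = NaiveMultiLayerPerceptron(state_dim, action_dim, [64, 32])
    agent = DQN(state_dim, action_dim, qnet, qnet_target, lr=1e-3, gamma=0.99, epsilon=0.1)
    memory = ExperienceReplayMemory(max_size=1000)
    for step in range(200):
        s = torch.randn(1, state_dim)                    # dummy state
        a = agent.choose_action(s)                       # epsilon-greedy action index (int)
        r = torch.randn(1, 1)                            # dummy reward
        ns = torch.randn(1, state_dim)                   # dummy next state
        done = torch.zeros(1, 1)                         # dummy "episode not finished" flag
        memory.push((s, torch.tensor([[a]]), r, ns, done))
        if len(memory) >= 32:
            batch = memory.sample(32)
            states, actions, rewards, next_states, dones = prepare_training_inputs(batch)
            agent.learn(states, actions, rewards, next_states, dones)
        if step % 50 == 0:
            # DQN.learn never syncs the target network itself, so copy the weights periodically
            agent.qnet_target.load_state_dict(agent.qnet.state_dict())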
|
{"hexsha": "3cd4382fefb246c6375ea96343f268b2e5a39029", "size": 9552, "ext": "py", "lang": "Python", "max_stars_repo_path": "s09287/racepack/utils.py", "max_stars_repo_name": "parksurk/skcc-drl-sc2-course-2020_1st", "max_stars_repo_head_hexsha": "951d09424b93c76093bab51ed6aaa75eb545152e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "s09287/racepack/utils.py", "max_issues_repo_name": "parksurk/skcc-drl-sc2-course-2020_1st", "max_issues_repo_head_hexsha": "951d09424b93c76093bab51ed6aaa75eb545152e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "s09287/racepack/utils.py", "max_forks_repo_name": "parksurk/skcc-drl-sc2-course-2020_1st", "max_forks_repo_head_hexsha": "951d09424b93c76093bab51ed6aaa75eb545152e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1142857143, "max_line_length": 122, "alphanum_fraction": 0.6022822446, "include": true, "reason": "import numpy", "num_tokens": 2327}
|
#!/usr/bin/env python3
import json
from pathlib import Path
import numpy as np
import tokenizers as tk
import torch
from theissues import training, utils
from theissues.model import TrainArgs, TransformerModel
def main(
path_tokenizer: Path,
dir_model: Path,
):
tokenizer = tk.Tokenizer.from_file(str(path_tokenizer))
special_tokens = utils.SpecialTokens(tokenizer)
with (dir_model / "args.json").open("r") as fio:
train_args = TrainArgs(**json.load(fio))
nvocab = tokenizer.get_vocab_size()
model = TransformerModel(
nvocab=nvocab,
seq_len=train_args.seq_len,
ndims_embed=train_args.ndims,
ndims_forward=train_args.ndims,
nheads=train_args.nheads,
nlayers=train_args.nlayers,
dropout=train_args.dropout,
tied=train_args.tied_weights,
)
with (dir_model / "state.pt").open("rb") as fio:
model.load_state_dict(torch.load(fio))
rng = np.random.default_rng()
generate_ctx = training.GeneratorContext(
model=model,
tokenizer=tokenizer,
special_tokens=special_tokens,
max_tokens=train_args.seq_len,
rng=rng,
)
generate_seed_source = (
(None, "[POL_567]"), # Trudeau
(None, "[POL_9243]"), # O'Toole
(None, "[POL_10636]"), # Singh
)
for seed, source in generate_seed_source:
sequence = training.generate_seq(generate_ctx, seed, source)
print(f"> {sequence}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("path_tokenizer", type=Path)
parser.add_argument("dir_model", type=Path)
main(**vars(parser.parse_args()))
|
{"hexsha": "12135916bdd686410843e30a9fbcc07d8f155a6c", "size": 1715, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/generate.py", "max_stars_repo_name": "gmcgoldr/theissues", "max_stars_repo_head_hexsha": "4e4c9eb66c543cdbcda4f1b96a7d2b163450368c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-07T11:23:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-07T11:23:32.000Z", "max_issues_repo_path": "scripts/generate.py", "max_issues_repo_name": "gmcgoldr/theissues", "max_issues_repo_head_hexsha": "4e4c9eb66c543cdbcda4f1b96a7d2b163450368c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/generate.py", "max_forks_repo_name": "gmcgoldr/theissues", "max_forks_repo_head_hexsha": "4e4c9eb66c543cdbcda4f1b96a7d2b163450368c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2205882353, "max_line_length": 68, "alphanum_fraction": 0.6635568513, "include": true, "reason": "import numpy", "num_tokens": 411}
|
import pyplume
import numpy as np
# Mechanism management
cti = 'test.cti'
pyplume.mech.mechFileAdd(cti) #Add mechanism file
pyplume.mech.mechFileDelete(cti) #Delete mechanism file
pyplume.mech.mechFileRestore() #Restore mechanism files
pyplume.mech.mechFileList() #list mechanism files
pyplume.tests.testMechs.runTests() #Run tests for mech management
# Model Use
pm = pyplume.model.PlumeModel.gridModel()
print(pm.connects)
# pm.buildNetwork()
# for t in np.arange(0.1,1.1,0.1):
# pm(t)
# pm.steadyState()
#
# pyplume.tests.testModel.runTests()
|
{"hexsha": "3a347070f2528f430522e977d062fa721cba87e7", "size": 557, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/scratch.py", "max_stars_repo_name": "awa1k3r/plume-generation-and-analysis", "max_stars_repo_head_hexsha": "926f2b09fa1011515310167f0d2b34a051539db1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/scratch.py", "max_issues_repo_name": "awa1k3r/plume-generation-and-analysis", "max_issues_repo_head_hexsha": "926f2b09fa1011515310167f0d2b34a051539db1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-02T09:51:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-02T09:51:36.000Z", "max_forks_repo_path": "examples/scratch.py", "max_forks_repo_name": "SoftwareDevEngResearch/pyplume", "max_forks_repo_head_hexsha": "f7d92b71896edc702d9ef769c510f53f118fcecf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-16T19:15:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-16T19:15:52.000Z", "avg_line_length": 21.4230769231, "max_line_length": 65, "alphanum_fraction": 0.7576301616, "include": true, "reason": "import numpy", "num_tokens": 172}
|
#include <boost_python_exception/util.hpp>
#include <boost/python/import.hpp>
using namespace boost::python;
namespace boost_python_exception {
object builtins()
{
#if PY_MAJOR_VERSION == 2
return import("__builtin__");
#elif PY_MAJOR_VERSION == 3
return import("builtins");
#endif
}
}
|
{"hexsha": "9eca11c507a8c5291bb26c6eb51d012d5e9523c0", "size": 299, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/boost_python_exception/util.cpp", "max_stars_repo_name": "abingham/boost_python_exception", "max_stars_repo_head_hexsha": "7882d5e8df051494498a58c06e046cb52421620b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-03-28T08:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2015-03-28T08:28:56.000Z", "max_issues_repo_path": "src/boost_python_exception/util.cpp", "max_issues_repo_name": "abingham/boost_python_exception", "max_issues_repo_head_hexsha": "7882d5e8df051494498a58c06e046cb52421620b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2015-01-08T08:10:55.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-08T10:20:42.000Z", "max_forks_repo_path": "src/boost_python_exception/util.cpp", "max_forks_repo_name": "abingham/boost_python_exception", "max_forks_repo_head_hexsha": "7882d5e8df051494498a58c06e046cb52421620b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-11-13T07:42:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-10T22:43:31.000Z", "avg_line_length": 15.7368421053, "max_line_length": 42, "alphanum_fraction": 0.7391304348, "num_tokens": 65}
|
from skimage import data, filters
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt
smooth_mean=[ [1/9,1/9,1/9],
[1/9,1/9,1/9],
[1/9,1/9,1/9]]
############################
edge1 = [[-1, -1, -1],
[0, 0, 0],
[1, 1, 1]]
edge2 = [[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]]
laplacian=[ [0.5,1,0.5],
[1,-6,1],
[0.5,1,0.5]]
############################
image = data.camera()
edgeIMG1=scipy.ndimage.convolve(image, edge1)
edgeIMG1 += 127
edgeIMG2=scipy.ndimage.convolve(image, edge2)
edgeIMG2 += 127
smoothIMG=scipy.ndimage.convolve(image, smooth_mean)
laplacian=scipy.ndimage.convolve(smoothIMG, laplacian)
laplacian += 127
fig, ax = plt.subplots(2, 4, figsize=(12, 8))
ax[0,0].imshow(image, cmap='gray')
ax[0,0].set_title("self made filters >>>\n\nOriginal")
ax[0,1].imshow(edgeIMG1, cmap='gray')
ax[0,1].set_title("edge x axis")
ax[0,2].imshow(edgeIMG2, cmap='gray')
ax[0,2].set_title("edge y axis")
ax[0,3].imshow(laplacian, cmap='gray')
ax[0,3].set_title("laplacian")
#################################
ax[1,0].imshow(image, cmap='gray')
ax[1,0].set_title("standard filters >>>\n\nOriginal")
a = filters.roberts_neg_diag(image)
b = filters.roberts_pos_diag(image)
ax[1,1].imshow((a+b), cmap='gray')
ax[1,1].set_title("roberts")
a = filters.prewitt_h(image)
b = filters.prewitt_v(image)
ax[1,2].imshow((a+b), cmap='gray')
ax[1,2].set_title("prewitt")
a = filters.farid_h(image)
b = filters.farid_v(image)
ax[1,3].imshow((a+b), cmap='gray')
ax[1,3].set_title("farid")
'''
What I notice:
~~~~~~~~~~~~~~~~~~~
The standard filters all (as far as I have tested) apply some kind of noise reduction.
You can see this clearly with my own first and second filters, which apply no noise reduction:
their results show visible noise, something you do not see in the standard filters.
I also notice that the prewitt and farid filters differ in how intense the outlines are: prewitt is sharper than farid.
I do not know what causes this, since I cannot find which mask farid uses.
Finally, the roberts filter cannot detect vertical lines. This is of course because the masks
[[-1, 0], [0, 1]] and [[0, -1], [1, 0]] are used,
so only diagonal changes are detected, and vertical lines therefore go undetected.
'''
for a in ax:
for b in a:
b.axis('off')
plt.tight_layout()
plt.show()
|
{"hexsha": "081a3048222dfc58e5ab016024feafcb8e910675", "size": 2784, "ext": "py", "lang": "Python", "max_stars_repo_path": "Modules/module3/opdracht2.py", "max_stars_repo_name": "Pink-Shadow/VISN", "max_stars_repo_head_hexsha": "4a484610cd86a170a9612a65c81e082394cc08f0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Modules/module3/opdracht2.py", "max_issues_repo_name": "Pink-Shadow/VISN", "max_issues_repo_head_hexsha": "4a484610cd86a170a9612a65c81e082394cc08f0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Modules/module3/opdracht2.py", "max_forks_repo_name": "Pink-Shadow/VISN", "max_forks_repo_head_hexsha": "4a484610cd86a170a9612a65c81e082394cc08f0", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7010309278, "max_line_length": 158, "alphanum_fraction": 0.6170977011, "include": true, "reason": "import scipy,from scipy", "num_tokens": 876}
|
using GitHubActionsUtils
using Test
using Luxor
@testset "GitHubActionsUtils.jl" begin
# move up from the `test` folder to the main repo
cd("..")
@show GitHubActionsUtils.is_github_actions()
@show GitHubActionsUtils.event_name()
@show GitHubActionsUtils.is_push()
@show GitHubActionsUtils.is_pull_request()
@show GitHubActionsUtils.head_ref()
@show GitHubActionsUtils.github_ref()
@show GitHubActionsUtils.is_branch()
@show GitHubActionsUtils.branch_name()
@show GitHubActionsUtils.is_tag()
@show GitHubActionsUtils.tag_name()
@show GitHubActionsUtils.pull_request_number()
@show GitHubActionsUtils.pull_request_source()
@show GitHubActionsUtils.repository()
if GitHubActionsUtils.is_pull_request()
pr_number = GitHubActionsUtils.pull_request_number()
image_branch_name = "pr$(pr_number)-test-images"
GitHubActionsUtils.set_github_actions_bot_as_git_user()
GitHubActionsUtils.switch_to_or_create_branch(image_branch_name; orphan = true)
image_path = "image.png"
@png juliacircles() 400 400 image_path
run(`git add -A`)
run(`git commit -m "create testimages"`)
GitHubActionsUtils.push_git_branch(image_branch_name)
commit_hash = chomp(read(`git rev-parse HEAD`, String))
image_url = string(
"https://raw.githubusercontent.com/",
GitHubActionsUtils.repository(),
"/",
commit_hash,
"/",
image_path
)
GitHubActionsUtils.comment_on_pr(
pr_number,
"""
This comment is auto-generated from a CI run with version $VERSION.
Here's an image:

"""
)
end
end
|
{"hexsha": "01b1b21a70bc70f296f64a3e9aeda24c58d925fc", "size": 1807, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "jkrumbiegel/GitHubActionsUtils.jl", "max_stars_repo_head_hexsha": "077a54df983b4362715148197427a191c80fe4f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "jkrumbiegel/GitHubActionsUtils.jl", "max_issues_repo_head_hexsha": "077a54df983b4362715148197427a191c80fe4f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-25T15:31:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-30T11:33:42.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "jkrumbiegel/GitHubActionsUtils.jl", "max_forks_repo_head_hexsha": "077a54df983b4362715148197427a191c80fe4f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6825396825, "max_line_length": 87, "alphanum_fraction": 0.6574432761, "num_tokens": 385}
|
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Iterable, Union, Dict, List, Callable
from data import *
from config import *
from torch import nn
import numpy as np
@dataclass
class ModelOutput:
    loss: Union[torch.Tensor, np.ndarray]
@dataclass
class ClassifierOutput(ModelOutput):
    predictions: Union[torch.Tensor, np.ndarray, None]
    attention: Union[torch.Tensor, None] = None
@dataclass
class SimilarityOutput(ModelOutput):
    embeddings: Union[torch.Tensor, np.ndarray, None]
    scores: Union[torch.Tensor, np.ndarray, List[float], None]
class LearningStrategy(nn.Module):
"""
Base class for tensor combining strategies
"""
def __init__(self):
super(LearningStrategy, self).__init__()
def forward(self):
raise NotImplementedError()
class PoolingStrategy(LearningStrategy):
"""
Base class for classes that provide
a pooling strategy for a tensor, usually
an embedding
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, embeddings: torch.Tensor):
raise NotImplementedError()
class WordPoolingStrategy(PoolingStrategy):
"""
The representation is pooled by extracting
all the tokens that are part of a word in the sentence
    and taking their average
"""
def __init__(self, params, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params = params
def forward(self, embeddings: torch.Tensor, features: WordFeatures, **kwargs):
word_embeddings = []
for sen_idx, w_idxs in enumerate(features.indexes):
curr_w_vectors = embeddings[sen_idx][w_idxs]
vectors_avg = torch.mean(curr_w_vectors, dim=0)
word_embeddings.append(vectors_avg)
return torch.stack(word_embeddings, dim=0)
class SequencePoolingStrategy(WordPoolingStrategy):
"""
The representation is pooled by extracting
all the tokens that are part of a word in the sentence
and taking their avarage
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, embeddings: torch.Tensor, features: WordFeatures, **kwargs):
word_embeddings = []
longest_dim = embeddings.shape[1]
for sen_idx in range(embeddings.shape[0]):
new_sequence = []
tokens_indexes = features.indexes[sen_idx]
for idx in tokens_indexes:
curr_w_vectors = embeddings[sen_idx][idx]
curr_w_vectors = torch.mean(curr_w_vectors, dim=0).to(self.params.device)
new_sequence.append(curr_w_vectors)
pad_n = longest_dim - len(new_sequence)
padding = [torch.zeros(curr_w_vectors.shape[-1]).to(self.params.device)] * pad_n
new_sequence += padding
new_sequence = torch.stack(new_sequence, dim=0).to(self.params.device)
word_embeddings.append(new_sequence)
stacked = torch.stack(word_embeddings, dim=0).to(self.params.device)
print(f"Pooled embedding dim: {stacked.shape}")
return stacked
class AvgPoolingStrategy(PoolingStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, embeddings: torch.Tensor, features: EmbeddingsFeatures):
assert len(embeddings.shape) == 3 #batch, seq_len, embed_size
mask = features.attention_mask
#we expand the mask to include the embed_size dimension
mask = mask.unsqueeze(-1).expand(embeddings.size()).float()
#we zero out the weights corresponding to the zero positions
# of the mask and we sum over the seq_len dimension
sum_embeddings = torch.sum(embeddings * mask, 1)
#we sum the values of the mask on the seq_len dimension
# obtaining the number of tokens in the sequence
sum_mask = torch.clamp(mask.sum(1), min=1e-9)
#we take the average
embeddings = sum_embeddings/sum_mask
return embeddings
class CLSPoolingStrategy(PoolingStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, embeddings: torch.Tensor, features: EmbeddingsFeatures):
assert len(embeddings.shape) == 3 #batch, seq_len, embed_size
#the CLS token corresponds to the first token in the seq_len dimension
        return embeddings[:, 0, :]
class MergingStrategy(LearningStrategy):
"""
Base class for classes that offer functionalities for
merging pretrained and contextualized embeddings
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self):
raise NotImplementedError()
class EmbeddingsSimilarityCombineStrategy(MergingStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, features: DataLoaderFeatures, embed_1: torch.Tensor, embed_2: torch.Tensor):
out = torch.stack([embed_1, embed_2], dim=0)
return out
class SentenceEncodingCombineStrategy(MergingStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, features: DataLoaderFeatures, pooled: torch.Tensor):
return pooled
class SentenceBertCombineStrategy(MergingStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, features, embeddings_1, embeddings_2):
diff = torch.abs(embeddings_1 - embeddings_2)
out = torch.cat((embeddings_1, embeddings_2, diff), dim=-1)
return out
class Pooler(nn.Module):
"""Module that pools the output of another module according to different strategies """
def __init__(
self,
pooling_strategy: PoolingStrategy,
normalize=False
):
super(Pooler, self).__init__()
self.pooling_strategy = pooling_strategy
self.normalize = normalize
def forward(self):
raise NotImplementedError()
class Loss(nn.Module):
def __init__(self, params: Configuration):
super(Loss, self).__init__()
self.params = params
def forward(self, hidden_state: torch.Tensor, features: EmbeddingsFeatures) -> ModelOutput:
raise NotImplementedError()
class SoftmaxLoss(Loss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.classifier = nn.Linear(self.params.model_parameters.hidden_size, self.params.model_parameters.num_classes)
self.loss_function = nn.CrossEntropyLoss()
def forward(self, hidden_state, features):
labels = features.labels
logits = self.classifier(hidden_state)
loss = self.loss_function(
logits.view(-1, self.params.model_parameters.num_classes),
labels.view(-1)
)
return ClassifierOutput(
loss = loss,
predictions = logits
)
class SimilarityLoss(Loss):
def __init__(self, *args, margin=0.5, **kwargs):
super().__init__(*args, **kwargs)
self.margin = margin
def forward(self, embeddings, features):
raise NotImplementedError()
class ContrastiveSimilarityLoss(SimilarityLoss):
"""Ranking loss based on the measure of cosine similarity """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, embeddings, features):
assert embeddings.shape[0] == 2
distances = 1 - F.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
loss = 0.5 * (features.labels.float() * distances.pow(2) + (1 - features.labels).float() * F.relu(self.margin - distances).pow(2))
return ClassifierOutput(
loss = loss,
predictions = embeddings
)
class OnlineContrastiveSimilarityLoss(SimilarityLoss):
"""Online contrastive loss as defined in SBERT """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, embeddings, features):
assert embeddings.shape[0] == 2
distance_matrix = 1-F.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
negs = distance_matrix[features.labels == 0]
poss = distance_matrix[features.labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return ClassifierOutput(
loss = loss,
predictions = embeddings,
)
class CosineSimilarityLoss(Loss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.similarity = nn.CosineSimilarity(dim=-1)
self.loss_function = nn.MSELoss()
def forward(self, embeddings, features):
scores = self.similarity(embeddings[0], embeddings[1])
if isinstance(features, dict):
labels = features["labels"]
else:
labels = features.labels
loss = self.loss_function(scores, labels.view(-1))
return ClassifierOutput(
loss = loss,
predictions = embeddings
)
class SimpleDistillationLoss(Loss):
"""
Distillation loss based on a simple MSE loss
between the teacher and student embeddings
"""
def __init__(self, teacher_model, *args, **kwargs):
super().__init__(*args, **kwargs)
self.teacher_model = teacher_model
self.loss = nn.MSELoss()
def forward(self, student_embeddings, features):
teacher_embeddings = features.generate_labels(self.teacher_model)
loss = self.loss(student_embeddings, teacher_embeddings)
return ClassifierOutput(
loss=loss,
predictions=torch.stack([student_embeddings, teacher_embeddings], dim=0)
)
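if __name__ == "__main__":
    # Illustrative usage sketch (added; not part of the original module).
    # It shows the masked mean pooling done by AvgPoolingStrategy and the
    # (u, v, |u - v|) feature construction of SentenceBertCombineStrategy
    # on dummy tensors. SimpleNamespace stands in for the feature objects
    # normally built in data.py; only the attributes the strategies read
    # are provided, and all shapes here are assumptions for the demo.
    from types import SimpleNamespace
    batch, seq_len, hidden = 2, 4, 8
    token_embeddings = torch.randn(batch, seq_len, hidden)
    # mask out the last two positions of the second sequence
    attention_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]])
    dummy_features = SimpleNamespace(attention_mask=attention_mask)
    pooled = AvgPoolingStrategy()(token_embeddings, dummy_features)
    print(pooled.shape)  # one hidden-size vector per sentence: (2, 8)
    # SBERT-style merging of two sentence embeddings for a pair classifier
    u, v = torch.randn(batch, hidden), torch.randn(batch, hidden)
    merged = SentenceBertCombineStrategy()(None, u, v)
    print(merged.shape)  # concatenation of u, v and |u - v|: (2, 24)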
|
{"hexsha": "d92c40c921280c40f388e63e7af7c42faa30773e", "size": 10079, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules.py", "max_stars_repo_name": "cr1m5onk1ng/semantic-search-api", "max_stars_repo_head_hexsha": "25ecdde4509943bb6420a5a678e4aaa0b1f5a866", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules.py", "max_issues_repo_name": "cr1m5onk1ng/semantic-search-api", "max_issues_repo_head_hexsha": "25ecdde4509943bb6420a5a678e4aaa0b1f5a866", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules.py", "max_forks_repo_name": "cr1m5onk1ng/semantic-search-api", "max_forks_repo_head_hexsha": "25ecdde4509943bb6420a5a678e4aaa0b1f5a866", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6357388316, "max_line_length": 138, "alphanum_fraction": 0.6572080564, "include": true, "reason": "import numpy", "num_tokens": 2233}
|
#!/usr/bin/env python3
import os
import numpy as np
from katsdpsigproc.accel import Operation, IOSlot, create_some_context, build
class SumTemplate:
def __init__(self, context):
self.wgs = 128
self.program = build(context, 'sum.mako', {'wgs': self.wgs},
extra_dirs=[os.path.dirname(__file__)])
def instantiate(self, command_queue, size):
return Sum(self, command_queue, size)
class Sum(Operation):
def __init__(self, template, command_queue, size):
if size % template.wgs:
raise ValueError(f'size must be a multiple of {template.wgs}')
super().__init__(command_queue)
self.template = template
self.slots['src'] = IOSlot((size,), np.int32)
self.slots['dest'] = IOSlot((size // template.wgs,), np.int32)
self.kernel = template.program.get_kernel('reduce')
def _run(self):
src = self.buffer('src')
dest = self.buffer('dest')
self.command_queue.enqueue_kernel(
self.kernel,
[src.buffer, dest.buffer],
global_size=src.shape,
local_size=(self.template.wgs,)
)
def main():
ctx = create_some_context()
queue = ctx.create_command_queue()
op = SumTemplate(ctx).instantiate(queue, 1024)
op.ensure_all_bound()
src = np.random.randint(1, 100, size=op.buffer('src').shape).astype(np.int32)
op.buffer('src').set(queue, src)
op()
dest = op.buffer('dest').get(queue)
wgs = op.template.wgs
expected = src.reshape(-1, wgs).sum(axis=1)
np.testing.assert_equal(dest, expected)
print(dest)
if __name__ == '__main__':
main()
|
{"hexsha": "1f6457f48bbc9a393f54d5c0adea117040679140", "size": 1676, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/user/examples/sum.py", "max_stars_repo_name": "ska-sa/katsdpsigproc", "max_stars_repo_head_hexsha": "d471d05a3c340ff217db4fd85de0599fe9dfad80", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-16T13:38:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T13:38:29.000Z", "max_issues_repo_path": "doc/user/examples/sum.py", "max_issues_repo_name": "ska-sa/katsdpsigproc", "max_issues_repo_head_hexsha": "d471d05a3c340ff217db4fd85de0599fe9dfad80", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-08-29T11:52:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-25T13:12:19.000Z", "max_forks_repo_path": "doc/user/examples/sum.py", "max_forks_repo_name": "ska-sa/katsdpsigproc", "max_forks_repo_head_hexsha": "d471d05a3c340ff217db4fd85de0599fe9dfad80", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-08T12:54:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T16:49:26.000Z", "avg_line_length": 28.8965517241, "max_line_length": 81, "alphanum_fraction": 0.6217183771, "include": true, "reason": "import numpy", "num_tokens": 410}
|
Require Import HoTT.
Require Import Auxiliary.Family.
Require Import Auxiliary.WellFounded.
Require Import Syntax.ScopeSystem.
Require Import Auxiliary.Coproduct.
Require Import Auxiliary.Closure.
Require Import Syntax.All.
Require Import Typing.Context.
Require Import Typing.Judgement.
Require Import Typing.RawTypeTheory.
Require Import Presented.PresentedRawRule.
Require Import Presented.CongruenceRule.
(** Main definition in this file: [presented_raw_type_theory], the data one gives to specify a type theory (but before typechecking it) *)
Section TypeTheory.
Context {σ : scope_system}.
Record presented_raw_type_theory
:= {
(* The family of _rules_, with their object-premise arities and conclusion forms specified *)
tt_rule_data :> family (Judgement.form * arity σ)
(* the judgement form of the conclusion of each rule *)
; tt_rule_form : tt_rule_data -> Judgement.form
:= fun i => fst (tt_rule_data i)
(* the arity of the arguments (i.e. the *object* premises only) of each rule *)
; tt_rule_arity : tt_rule_data -> arity _
:= fun i => snd (tt_rule_data i)
(* the ordering on rules *)
; tt_lt : well_founded_order tt_rule_data
(* the signature over which each rule can be written *)
; tt_rule_signature : tt_rule_data -> signature σ
:= fun i => Family.fmap
(fun (ja : Judgement.form * arity σ)
=> (class_of (fst ja), snd ja))
(Family.subfamily tt_rule_data
(fun j => Judgement.is_object (tt_rule_form j) * tt_lt j i))
(* the actual rule specification of each rule *)
; tt_rule
: forall i : tt_rule_data,
rule
(tt_rule_signature i)
(tt_rule_arity i)
(tt_rule_form i)
}.
Local Definition signature (T : presented_raw_type_theory)
: signature σ.
Proof.
(* symbols are given by the object-judgement rules of T *)
exists {r : T & Judgement.is_object (tt_rule_form _ r)}.
intros r_H. set (r := pr1 r_H).
split.
- exact (class_of (tt_rule_form _ r)).
- exact (tt_rule_arity _ r).
Defined.
(* NOTE: it is tempting to case-analyse here and say
“when r is an object rule, use [(class_of …, tt_rule_arity …)];
in case r is an equality rule, use reductio ad absurdum with Hr.”
But we get stronger reduction behaviour by just taking [(class_of …, tt_rule_arity …)] without case-analysing first. (And up to equality, we get the same result.) *)
(* TODO: consider making this a coercion? *)
Local Definition include_rule_signature
{T : presented_raw_type_theory} (r : T)
: Signature.map (tt_rule_signature _ r)
(signature T).
Proof.
simple refine (_;_).
- intros s_isob_lt. cbn in s_isob_lt.
exact (pr1 s_isob_lt ; fst (pr2 (s_isob_lt))).
(* TODO: introduce access functions for the signature components above? *)
- intros s. apply idpath.
Defined.
(* NOTE: could easily be generalised to give the sub-type-theory on any down-closed subset of the rules, if that’s ever needed. *)
Local Definition initial_segment (T : presented_raw_type_theory) (i : T)
: presented_raw_type_theory.
Proof.
simple refine (Build_presented_raw_type_theory _ _ _ ).
- refine (Family.subfamily (tt_rule_data T) _).
intros j. exact (tt_lt _ j i).
- refine (WellFounded.pullback _ (tt_lt T)).
exact (projT1).
- cbn. intros [j lt_j_i].
refine (PresentedRawRule.fmap _ (tt_rule _ j)).
apply Family.map_fmap.
simple refine (_;_).
+ intros [k [k_obj lt_k_j]].
simple refine (_;_).
* exists k. apply (transitive _ _ j); assumption.
* cbn. split; assumption.
+ intros ?; apply idpath.
Defined.
(* NOTE: in fact, this map should be an isomorphism *)
Local Definition initial_segment_signature_to_rule_signature
(T : presented_raw_type_theory) (i : T)
: Signature.map
(TypeTheory.signature (initial_segment T i))
(tt_rule_signature _ i).
Proof.
simple refine (_;_).
- intros [[j lt_j_i] j_obj]. exists j. split; assumption.
- intros ?; apply idpath.
Defined.
Local Definition include_initial_segment_signature
(T : presented_raw_type_theory) (i : T)
: Signature.map
(TypeTheory.signature (initial_segment T i))
(TypeTheory.signature T).
Proof.
eapply Signature.compose.
- apply include_rule_signature.
- apply initial_segment_signature_to_rule_signature.
Defined.
End TypeTheory.
Arguments presented_raw_type_theory _ : clear implicits.
Arguments tt_rule_data {_} _.
Arguments tt_rule_form {_ _} _.
Arguments tt_rule_arity {_ _} _.
Arguments tt_lt {_ _}.
Arguments tt_rule_signature {_ _} _.
Arguments tt_rule {_ _} _.
Section Flattening.
Context {σ : scope_system}.
Local Definition flatten (T : presented_raw_type_theory σ)
: raw_type_theory (signature T).
Proof.
refine (_ + _)%family.
(* First: the explicitly-given logical rules *)
- exists (tt_rule_data T).
intros r.
refine (PresentedRawRule.flatten _ _).
+ (* translate rules up to the full signature *)
refine (PresentedRawRule.fmap _ (tt_rule r)).
apply include_rule_signature.
+ (* pick their symbol in the full signature, if applicable *)
intros r_obj.
exists (r; r_obj).
split; apply idpath.
(* Second: associated congruence rules for the object-judgement logical rules. *)
- exists { r : T & Judgement.is_object (tt_rule_form r) }.
intros [r Hr].
refine (PresentedRawRule.flatten _ _).
+ simple refine
(congruence_rule (PresentedRawRule.fmap _ (tt_rule r)) _ _ _ _).
* apply include_rule_signature.
* exact Hr.
* exact (r;Hr). (* head symbol of original rule *)
* apply idpath.
* apply idpath.
+ intros []. (* no head symbol, since congs are equality rules *)
Defined.
(* Probably should go via a notion of “simple map” of type theories,
in which [flatten] is functorial,
based on simple maps of algebraic extensions. *)
Local Lemma flatten_initial_segment
(T : presented_raw_type_theory σ) (r : T)
: Family.map_over
(RawRule.fmap
(include_initial_segment_signature T r))
(flatten (initial_segment T r))
(flatten T).
Proof.
apply Family.Build_map'; intros [ [i lt_i_r] | [ [ i lt_i_r] i_is_ob] ].
- (* main rule *)
admit.
- (* congruence rule *)
admit.
Admitted. (* [flatten_initial_segment]: large and significant, possible complicated dependency structure *)
End Flattening.
Local Definition derivation {σ : scope_system} (T : presented_raw_type_theory σ) H
: judgement (signature T) -> Type
:= RawTypeTheory.derivation (flatten T) H.
|
{"author": "peterlefanulumsdaine", "repo": "general-type-theories", "sha": "596f032e5d59fa017c2f2595136448b24b810f1d", "save_path": "github-repos/coq/peterlefanulumsdaine-general-type-theories", "path": "github-repos/coq/peterlefanulumsdaine-general-type-theories/general-type-theories-596f032e5d59fa017c2f2595136448b24b810f1d/Presented/PresentedRawTypeTheory.v"}
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
NewRulesFor(TTensorInd, rec(
# base cases
# I x A
dsA_base_vec_push := rec(
info := "IxA base",
forTransposition := false,
applicable := nt -> IsParPar(nt.params) and nt.isTag(1, AVecReg),
children := nt -> let(_krnl := nt.params[1].withTags(nt.getTags()), krnl := When(_krnl.isReal(), _krnl.setWrap(VWrapId), _krnl.setWrap(VWrapTRC(nt.firstTag().isa))),
[[ krnl, InfoNt(nt.params[2]) ]]),
apply := (nt, c, cnt) -> IDirSum(cnt[2].params[1], c[1])
),
# A x Iv
L_dsA_L_base_vec := rec(
info := "AxI base",
forTransposition := false,
applicable := nt -> IsVecVec(nt.params) and nt.isTag(1, AVecReg) and nt.params[2].range = nt.getTags()[1].v,
children := nt -> let(jv := Ind(nt.getTags()[1].v),
[[ SubstVars(Copy(nt.params[1]), rec((nt.params[2].id) := jv)).setWrap(VWrap(nt.firstTag().isa)).withTags(Drop(nt.getTags(), 1)),
InfoNt(jv) ]]),
apply := (nt, c, cnt) -> let(v := nt.getTags()[1].v, jv := cnt[2].params[1],
VTensorInd(c[1], jv))
),
# A x In
L_dsA_L_vec := rec(
info := "AxI base",
forTransposition := false,
applicable := nt -> IsVecVec(nt.params) and nt.isTag(1, AVecReg) and nt.params[2].range > nt.getTags()[1].v,
children := nt -> let(v := nt.getTags()[1].v, jv := Ind(nt.getTags()[1].v), j := Ind(nt.params[2].range/nt.getTags()[1].v),
[[ TTensorInd(
SubstVars(Copy(nt.params[1]), rec((nt.params[2].id) := j*V(v)+jv)),
j, AVec, AVec).setWrap(VWrap(nt.firstTag().isa)).withTags(Drop(nt.getTags(), 1)), InfoNt(jv) ]]),
apply := (nt, c, cnt) -> let(jv := cnt[2].params[1],
VTensorInd(c[1], jv))
)
));
|
{"hexsha": "d372251dd158386fef612b9c7eb6db1dbb073609", "size": 1854, "ext": "gi", "lang": "GAP", "max_stars_repo_path": "namespaces/spiral/paradigms/vector/breakdown/ttensorind.gi", "max_stars_repo_name": "sr7cb/spiral-software", "max_stars_repo_head_hexsha": "349d9e0abe75bf4b9a4690f2dbee631700f8361a", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2019-09-01T19:29:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T12:26:12.000Z", "max_issues_repo_path": "namespaces/spiral/paradigms/vector/breakdown/ttensorind.gi", "max_issues_repo_name": "sr7cb/spiral-software", "max_issues_repo_head_hexsha": "349d9e0abe75bf4b9a4690f2dbee631700f8361a", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-11-20T16:15:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-07T21:17:28.000Z", "max_forks_repo_path": "namespaces/spiral/paradigms/vector/breakdown/ttensorind.gi", "max_forks_repo_name": "sr7cb/spiral-software", "max_forks_repo_head_hexsha": "349d9e0abe75bf4b9a4690f2dbee631700f8361a", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-08-20T19:27:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-01T22:11:18.000Z", "avg_line_length": 46.35, "max_line_length": 173, "alphanum_fraction": 0.5474649407, "num_tokens": 604}
|
from nose import SkipTest
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestLaplacian(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
global assert_equal
global assert_almost_equal
try:
import numpy
from numpy.testing import assert_equal,assert_almost_equal
except ImportError:
raise SkipTest('NumPy not available.')
def setUp(self):
deg=[3,2,2,1,0]
self.G=havel_hakimi_graph(deg)
self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
for (u,v) in self.G.edges_iter() )
self.WG.add_node(4)
self.MG=nx.MultiGraph(self.G)
def test_laplacian(self):
"Graph Laplacian"
NL=numpy.array([[ 3, -1, -1, -1, 0],
[-1, 2, -1, 0, 0],
[-1, -1, 2, 0, 0],
[-1, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0]])
WL=0.5*NL
OL=0.3*NL
assert_equal(nx.laplacian(self.G),NL)
assert_equal(nx.laplacian(self.MG),NL)
assert_equal(nx.laplacian(self.G,nodelist=[0,1]),
numpy.array([[ 1, -1],[-1, 1]]))
assert_equal(nx.laplacian(self.WG),WL)
assert_equal(nx.laplacian(self.WG,weight=None),NL)
assert_equal(nx.laplacian(self.WG,weight='other'),OL)
def test_generalized_laplacian(self):
"Generalized Graph Laplacian"
GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
[-0.408, 1.00, -0.50, 0.00 , 0.00],
[-0.408, -0.50, 1.00, 0.00, 0.00],
[-0.577, 0.00, 0.00, 1.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00]])
assert_almost_equal(nx.generalized_laplacian(self.G),GL,decimal=3)
def test_normalized_laplacian(self):
"Generalized Graph Laplacian"
GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
[-0.408, 1.00, -0.50, 0.00 , 0.00],
[-0.408, -0.50, 1.00, 0.00, 0.00],
[-0.577, 0.00, 0.00, 1.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00]])
assert_almost_equal(nx.normalized_laplacian(self.G),GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian(self.MG),GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian(self.WG),GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian(self.WG,weight='other'),GL,decimal=3)
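    def test_laplacian_equals_degree_minus_adjacency(self):
        "Added illustrative check: L = D - A on the unweighted graph"
        # Hedged example (not part of the original suite): the combinatorial
        # Laplacian equals the diagonal degree matrix minus the adjacency
        # matrix, using the same default node ordering for both calls.
        A = numpy.asarray(nx.to_numpy_matrix(self.G))
        D = numpy.diag(A.sum(axis=1))
        assert_equal(nx.laplacian(self.G), D - A)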
|
{"hexsha": "7174e924cadb948595015aa9d49d7ee3f6b82980", "size": 2686, "ext": "py", "lang": "Python", "max_stars_repo_path": "networkx/linalg/tests/test_laplaican.py", "max_stars_repo_name": "rafguns/networkx", "max_stars_repo_head_hexsha": "ce5e7394e56c3ee92f3f40a392b7344ce1f7e366", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-03-25T20:20:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T19:44:56.000Z", "max_issues_repo_path": "networkx/linalg/tests/test_laplaican.py", "max_issues_repo_name": "rafguns/networkx", "max_issues_repo_head_hexsha": "ce5e7394e56c3ee92f3f40a392b7344ce1f7e366", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 71, "max_issues_repo_issues_event_min_datetime": "2015-01-05T16:50:55.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T19:17:47.000Z", "max_forks_repo_path": "networkx/linalg/tests/test_laplaican.py", "max_forks_repo_name": "rafguns/networkx", "max_forks_repo_head_hexsha": "ce5e7394e56c3ee92f3f40a392b7344ce1f7e366", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-02-15T22:19:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-30T18:54:54.000Z", "avg_line_length": 40.696969697, "max_line_length": 89, "alphanum_fraction": 0.5260610573, "include": true, "reason": "import numpy,from numpy,import networkx,from networkx", "num_tokens": 892}
|
theory Flatten_Iter_Spec
imports
Basic_Assn
"Separation_Logic_Imperative_HOL.Imp_List_Spec"
"HOL-Real_Asymp.Inst_Existentials"
begin
text "This locale takes an iterator that refines a list of elements that themselves
can be iterated and defines an iterator over the flattened list of lower level elements"
locale flatten_iter =
inner_list: imp_list_iterate is_inner_list inner_is_it inner_it_init inner_it_has_next inner_it_next +
outer_list: imp_list_iterate is_outer_list outer_is_it outer_it_init outer_it_has_next outer_it_next
for is_outer_list :: "'l list \<Rightarrow> 'm \<Rightarrow> assn"
and outer_is_it :: "'l list \<Rightarrow> 'm \<Rightarrow> 'l list \<Rightarrow> 'oit \<Rightarrow> assn"
and outer_it_init :: "'m \<Rightarrow> ('oit) Heap"
and outer_it_has_next :: "'oit \<Rightarrow> bool Heap"
and outer_it_next :: "'oit \<Rightarrow> ('l\<times>'oit) Heap"
and is_inner_list :: "'a list \<Rightarrow> 'l \<Rightarrow> assn"
and inner_is_it :: "'a list \<Rightarrow> 'l \<Rightarrow> 'a list \<Rightarrow> 'iit \<Rightarrow> assn"
and inner_it_init :: "'l \<Rightarrow> ('iit) Heap"
and inner_it_has_next :: "'iit \<Rightarrow> bool Heap"
and inner_it_next :: "'iit \<Rightarrow> ('a\<times>'iit) Heap"
begin
fun is_flatten_list :: "'a list list \<Rightarrow> 'a list \<Rightarrow> 'm \<Rightarrow> assn" where
"is_flatten_list ls' ls lsi = (\<exists>\<^sub>A lsi'.
is_outer_list lsi' lsi * list_assn is_inner_list ls' lsi' * \<up>(ls = concat ls')
)"
lemma flatten_prec:
"precise (is_flatten_list ls)"
apply (intro preciseI)
apply (auto)
done
(*type_synonym flatten_it = "'iit \<times> 'oit"*)
fun is_flatten_it :: "'a list list \<Rightarrow> 'a list \<Rightarrow> 'm \<Rightarrow> 'a list \<Rightarrow> ('oit \<times> 'iit option) \<Rightarrow> assn"
where
"is_flatten_it lsi'' ls lsi [] (oit, None) =
(\<exists>\<^sub>A lsi'.
list_assn is_inner_list lsi'' lsi' *
\<up>(ls = (concat lsi'')) *
outer_is_it lsi' lsi [] oit
)" |
"is_flatten_it lsi'' ls lsi ls2 (oit, Some iit) =
(\<exists>\<^sub>A lsi' ls2' ls1' lsi1 lsi2 lsim ls2m lsm ls1m.
list_assn is_inner_list ls1' lsi1 *
list_assn is_inner_list ls2' lsi2 *
\<up>(ls2m \<noteq> [] \<and> ls2 = ls2m@(concat ls2') \<and> ls = (concat (ls1'@lsm#ls2')) \<and> lsi'' = (ls1'@lsm#ls2')) *
outer_is_it lsi' lsi lsi2 oit *
\<up>(lsm = ls1m@ls2m \<and> lsi'=(lsi1@lsim#lsi2)) *
inner_is_it lsm lsim ls2m iit
)
" |
"is_flatten_it _ _ _ _ _ = false"
partial_function (heap) flatten_it_adjust:: "'oit \<Rightarrow> 'iit \<Rightarrow> ('oit \<times> 'iit option) Heap" where
"flatten_it_adjust oit iit = do {
ihasnext \<leftarrow> inner_it_has_next iit;
if ihasnext then
return (oit, Some iit)
else do {
ohasnext \<leftarrow> outer_it_has_next oit;
if \<not>ohasnext then
return (oit, None)
else do {
(next, oit) \<leftarrow> outer_it_next oit;
nextit \<leftarrow> inner_it_init next;
flatten_it_adjust oit nextit
}
}
}
"
declare flatten_it_adjust.simps[code]
lemma flatten_it_adjust_rule:
" <list_assn is_inner_list ls1' ls1 * list_assn is_inner_list ls2' ls2 *
outer_is_it (ls1@lsim#ls2) lsi ls2 oit * inner_is_it (lsm1@lsm2) lsim lsm2 iit>
flatten_it_adjust oit iit
<is_flatten_it (ls1'@(lsm1@lsm2)#ls2') (concat (ls1'@(lsm1@lsm2)#ls2')) lsi (concat (lsm2#ls2'))>\<^sub>t"
proof (induction ls2 arbitrary: ls1' ls1 ls2' lsim lsm1 lsm2 oit iit)
case Nil
then show ?case
apply(subst flatten_it_adjust.simps)
apply (sep_auto eintros del: exI heap add: inner_list.it_has_next_rule)
apply(inst_existentials "(ls1 @ lsim # [])" ls2' ls1' ls1 "[]::'l list" lsim lsm2 "lsm1@lsm2")
subgoal by auto
subgoal by (sep_auto)
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
subgoal
apply (vcg (ss))
apply (sep_auto eintros del: exI)
apply(inst_existentials "(ls1 @ [lsim])" "ls1'@[lsm1]")
subgoal
apply(auto simp add: list_assn_app_one)
using inner_list.quit_iteration
by (smt (z3) assn_aci(9) assn_times_comm ent_true_drop(1) fr_refl)
done
done
next
case (Cons a ls2)
show ?case
apply(subst flatten_it_adjust.simps)
apply (sep_auto eintros del: exI heap add: inner_list.it_has_next_rule)
apply(inst_existentials "(ls1 @ lsim # a # ls2)" ls2' ls1' ls1 "a #ls2" lsim lsm2 "lsm1@lsm2")
subgoal by auto
subgoal by (sep_auto)
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
subgoal by simp
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (case_tac ls2')
apply simp_all
apply (sep_auto eintros del: exI heap add: inner_list.it_init_rule)
subgoal for x oit aa list xa
supply R = "Cons.IH"[of "ls1'@[lsm1]" "ls1@[lsim]" list a oit "[]::'a list" aa xa, simplified]
thm R
find_theorems "_ \<Longrightarrow>\<^sub>A _" "<_>_<_>"
supply Q = Hoare_Triple.cons_pre_rule[of
"inner_is_it aa a aa xa * outer_is_it (ls1 @ lsim # a # ls2) lsi ls2 oit *
inner_is_it lsm1 lsim [] iit *
list_assn is_inner_list ls1' ls1 *
list_assn is_inner_list list ls2 *
true"
"list_assn is_inner_list ls1' ls1 * is_inner_list lsm1 lsim * list_assn is_inner_list list ls2 *
outer_is_it (ls1 @ lsim # a # ls2) lsi ls2 oit *
inner_is_it aa a aa
xa * true"
]
thm Q
apply(rule Q)
prefer 2
subgoal by (sep_auto heap add: R intro: inner_list.quit_iteration)
subgoal using inner_list.quit_iteration
by (smt (z3) assn_aci(10) assn_times_comm ent_refl_true ent_star_mono_true)
done
done
qed
definition flatten_it_init :: "'m \<Rightarrow> _ Heap"
where "flatten_it_init l = do {
oit \<leftarrow> outer_it_init l;
ohasnext \<leftarrow> outer_it_has_next oit;
if ohasnext then do {
(next, oit) \<leftarrow> outer_it_next oit;
nextit \<leftarrow> inner_it_init next;
flatten_it_adjust oit nextit
} else return (oit, None)
}"
lemma flatten_it_init_rule[sep_heap_rules]:
"<is_flatten_list l' l p> flatten_it_init p <is_flatten_it l' l p l>\<^sub>t"
unfolding flatten_it_init_def
apply simp
apply(rule norm_pre_ex_rule)+
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
subgoal for ls' x xa
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply (vcg (ss))
apply(case_tac ls'; case_tac l')
apply simp+
apply(rule impI)
thm inner_list.it_init_rule
apply (vcg heap add: inner_list.it_init_rule)
subgoal for _ nxt oit a list aa lista xaa
supply R = flatten_it_adjust_rule[of "[]" "[]" lista list a p oit "[]" aa xaa, simplified]
thm R
apply (sep_auto heap add: R)
done
done
apply (sep_auto)
done
definition flatten_it_next where
"flatten_it_next \<equiv> \<lambda>(oit,iit). do {
(x, iit) \<leftarrow> inner_it_next (the iit);
(oit, iit) \<leftarrow> flatten_it_adjust oit iit;
return (x, (oit,iit))
}"
lemma flatten_it_next_rule:
" l' \<noteq> [] \<Longrightarrow>
<is_flatten_it lsi'' l p l' it>
flatten_it_next it
<\<lambda>(a,it'). is_flatten_it lsi'' l p (tl l') it' * \<up>(a=hd l')>\<^sub>t"
apply(subst flatten_it_next_def)
thm inner_list.it_next_rule
apply (vcg (ss))
apply (vcg (ss))
apply(case_tac iit; case_tac l')
apply simp_all
apply(rule norm_pre_ex_rule)+
subgoal for oit iit a aa list lsi' ls2' ls1' lsi1 lsi2 lsim ls2m lsm ls1m
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(vcg (ss))
apply(case_tac ls2m)
apply simp_all
subgoal for _ _ iita lista
supply R = flatten_it_adjust_rule[of ls1' lsi1 ls2' lsi2 lsim p oit "ls1m@[aa]" "lista" iita, simplified]
thm R
apply (sep_auto heap add: R)
done
done
done
definition flatten_it_has_next where
"flatten_it_has_next \<equiv> \<lambda>(oit, iit). do {
return (iit \<noteq> None)
}"
lemma flatten_it_has_next_rule[sep_heap_rules]:
"<is_flatten_it lsi'' l p l' it>
flatten_it_has_next it
<\<lambda>r. is_flatten_it lsi'' l p l' it * \<up>(r\<longleftrightarrow>l'\<noteq>[])>\<^sub>t"
apply(subst flatten_it_has_next_def)
apply(sep_auto)
apply(case_tac iit, case_tac l')
apply simp_all
apply sep_auto
done
declare mult.left_assoc[simp add]
lemma flatten_quit_iteration:
"is_flatten_it lsi'' l p l' it \<Longrightarrow>\<^sub>A is_flatten_list lsi'' l p * true"
apply(cases it)
subgoal for oit iit
apply(cases iit; cases l')
proof (goal_cases)
case 1
then show ?case
apply (sep_auto eintros del: exI)
subgoal for lsi'
apply(inst_existentials lsi')
subgoal by (metis (no_types, lifting) assn_aci(10) assn_times_comm fr_refl outer_list.quit_iteration)
done
done
next
case (2 lsim ll')
then show ?case
by (sep_auto eintros del: exI)
next
case (3 iit)
then show ?case
by (sep_auto eintros del: exI)
next
case (4 iit lsim ll')
then show ?case
apply (sep_auto eintros del: exI)
subgoal for ls2' ls1' lsi1 lsi2 lsima ls2m ls1m
apply(inst_existentials "(lsi1 @ lsima # lsi2)")
apply(rule entails_preI)
apply(sep_auto dest!: mod_starD list_assn_len)
subgoal
apply(simp add:
mult.commute[where ?b="outer_is_it (lsi1 @ lsima # lsi2) p lsi2 oit"]
mult.commute[where ?b="is_outer_list (lsi1 @ lsima # lsi2) p"]
mult.left_assoc )?
apply(rule rem_true)
supply R = ent_star_mono_true[of
"outer_is_it (lsi1 @ lsima # lsi2) p lsi2 oit"
"is_outer_list (lsi1 @ lsima # lsi2) p"
"list_assn is_inner_list ls1' lsi1 *
list_assn is_inner_list ls2' lsi2 *
inner_is_it (ls1m @ ls2m) lsima ls2m iit"
" list_assn is_inner_list ls1' lsi1 *
is_inner_list (ls1m @ ls2m) lsima *
list_assn is_inner_list ls2' lsi2"
,simplified]
thm R
apply(rule R)
subgoal by (rule outer_list.quit_iteration)
apply(simp add:
mult.commute[where ?b="inner_is_it (ls1m @ ls2m) lsima ls2m iit"]
mult.commute[where ?b="is_inner_list (ls1m @ ls2m) lsima"]
mult.left_assoc)
apply(rule rem_true)
supply R = ent_star_mono_true[of
"inner_is_it (ls1m @ ls2m) lsima ls2m iit"
"is_inner_list (ls1m @ ls2m) lsima"
"list_assn is_inner_list ls1' lsi1 *
list_assn is_inner_list ls2' lsi2"
" list_assn is_inner_list ls1' lsi1 *
list_assn is_inner_list ls2' lsi2"
,simplified]
thm R
apply(rule R)
subgoal by (rule inner_list.quit_iteration)
subgoal by sep_auto
done
done
done
qed
done
declare mult.left_assoc[simp del]
interpretation flatten_it: imp_list_iterate "is_flatten_list lsi''" "is_flatten_it lsi''" flatten_it_init flatten_it_has_next flatten_it_next
apply(unfold_locales)
subgoal
by (rule flatten_prec)
subgoal for l p
by (rule flatten_it_init_rule[of lsi'' l p])
subgoal for l' l p it
by (rule flatten_it_next_rule[of l' lsi'' l p it]) simp
subgoal for l p l' it
by (rule flatten_it_has_next_rule[of lsi'' l p l' it])
subgoal for l p l' it
by (rule flatten_quit_iteration[of lsi'' l p l' it])
done
end
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/BTree/Flatten_Iter_Spec.thy"}
|
SUBROUTINE ZACAI(ZR, ZI, FNU, KODE, MR, N, YR, YI, NZ, RL, TOL,
* ELIM, ALIM)
C***BEGIN PROLOGUE ZACAI
C***REFER TO ZAIRY
C
C ZACAI APPLIES THE ANALYTIC CONTINUATION FORMULA
C
C K(FNU,ZN*EXP(MP))=K(FNU,ZN)*EXP(-MP*FNU) - MP*I(FNU,ZN)
C MP=PI*MR*CMPLX(0.0,1.0)
C
C TO CONTINUE THE K FUNCTION FROM THE RIGHT HALF TO THE LEFT
C HALF Z PLANE FOR USE WITH ZAIRY WHERE FNU=1/3 OR 2/3 AND N=1.
C ZACAI IS THE SAME AS ZACON WITH THE PARTS FOR LARGER ORDERS AND
C RECURRENCE REMOVED. A RECURSIVE CALL TO ZACON CAN RESULT IF ZACON
C IS CALLED FROM ZAIRY.
C
C***ROUTINES CALLED ZASYI,ZBKNU,ZMLRI,ZSERI,ZS1S2,D1MACH,ZABS
C***END PROLOGUE ZACAI
C COMPLEX CSGN,CSPN,C1,C2,Y,Z,ZN,CY
DOUBLE PRECISION ALIM, ARG, ASCLE, AZ, CSGNR, CSGNI, CSPNR,
* CSPNI, C1R, C1I, C2R, C2I, CYR, CYI, DFNU, ELIM, FMR, FNU, PI,
* RL, SGN, TOL, YY, YR, YI, ZR, ZI, ZNR, ZNI, D1MACH, ZABS
INTEGER INU, IUF, KODE, MR, N, NN, NW, NZ
DIMENSION YR(N), YI(N), CYR(2), CYI(2)
DATA PI / 3.14159265358979324D0 /
NZ = 0
ZNR = -ZR
ZNI = -ZI
AZ = ZABS(CMPLX(ZR,ZI,kind=KIND(1.0D0)))
NN = N
DFNU = FNU + DBLE(FLOAT(N-1))
IF (AZ.LE.2.0D0) GO TO 10
IF (AZ*AZ*0.25D0.GT.DFNU+1.0D0) GO TO 20
10 CONTINUE
C-----------------------------------------------------------------------
C POWER SERIES FOR THE I FUNCTION
C-----------------------------------------------------------------------
CALL ZSERI(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, TOL, ELIM, ALIM)
GO TO 40
20 CONTINUE
IF (AZ.LT.RL) GO TO 30
C-----------------------------------------------------------------------
C ASYMPTOTIC EXPANSION FOR LARGE Z FOR THE I FUNCTION
C-----------------------------------------------------------------------
CALL ZASYI(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, RL, TOL, ELIM,
* ALIM)
IF (NW.LT.0) GO TO 80
GO TO 40
30 CONTINUE
C-----------------------------------------------------------------------
C MILLER ALGORITHM NORMALIZED BY THE SERIES FOR THE I FUNCTION
C-----------------------------------------------------------------------
CALL ZMLRI(ZNR, ZNI, FNU, KODE, NN, YR, YI, NW, TOL)
IF(NW.LT.0) GO TO 80
40 CONTINUE
C-----------------------------------------------------------------------
C ANALYTIC CONTINUATION TO THE LEFT HALF PLANE FOR THE K FUNCTION
C-----------------------------------------------------------------------
CALL ZBKNU(ZNR, ZNI, FNU, KODE, 1, CYR, CYI, NW, TOL, ELIM, ALIM)
IF (NW.NE.0) GO TO 80
FMR = DBLE(FLOAT(MR))
SGN = -DSIGN(PI,FMR)
CSGNR = 0.0D0
CSGNI = SGN
IF (KODE.EQ.1) GO TO 50
YY = -ZNI
CSGNR = -CSGNI*DSIN(YY)
CSGNI = CSGNI*DCOS(YY)
50 CONTINUE
C-----------------------------------------------------------------------
C CALCULATE CSPN=EXP(FNU*PI*I) TO MINIMIZE LOSSES OF SIGNIFICANCE
C WHEN FNU IS LARGE
C-----------------------------------------------------------------------
INU = INT(SNGL(FNU))
ARG = (FNU-DBLE(FLOAT(INU)))*SGN
CSPNR = DCOS(ARG)
CSPNI = DSIN(ARG)
IF (MOD(INU,2).EQ.0) GO TO 60
CSPNR = -CSPNR
CSPNI = -CSPNI
60 CONTINUE
C1R = CYR(1)
C1I = CYI(1)
C2R = YR(1)
C2I = YI(1)
IF (KODE.EQ.1) GO TO 70
IUF = 0
ASCLE = 1.0D+3*D1MACH(1)/TOL
CALL ZS1S2(ZNR, ZNI, C1R, C1I, C2R, C2I, NW, ASCLE, ALIM, IUF)
NZ = NZ + NW
70 CONTINUE
YR(1) = CSPNR*C1R - CSPNI*C1I + CSGNR*C2R - CSGNI*C2I
YI(1) = CSPNR*C1I + CSPNI*C1R + CSGNR*C2I + CSGNI*C2R
RETURN
80 CONTINUE
NZ = -1
IF(NW.EQ.(-2)) NZ=-2
RETURN
END
|
{"hexsha": "aa05a5c7b6c6446198f397fac714d5a02594c4fa", "size": 3719, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mathext/internal/amos/amoslib/zacai.f", "max_stars_repo_name": "blackrez/gonum", "max_stars_repo_head_hexsha": "aad36a059009dc681b68a7d9fbdcadd09c9db798", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 313, "max_stars_repo_stars_event_min_datetime": "2018-01-13T22:09:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T21:50:51.000Z", "max_issues_repo_path": "mathext/internal/amos/amoslib/zacai.f", "max_issues_repo_name": "mingrammer/gonum", "max_issues_repo_head_hexsha": "b555074219fc5ed0bf39429799953c12f1a78810", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2018-01-13T19:50:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-09T19:00:05.000Z", "max_forks_repo_path": "mathext/internal/amos/amoslib/zacai.f", "max_forks_repo_name": "mingrammer/gonum", "max_forks_repo_head_hexsha": "b555074219fc5ed0bf39429799953c12f1a78810", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2019-05-06T08:48:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T23:47:23.000Z", "avg_line_length": 37.19, "max_line_length": 72, "alphanum_fraction": 0.4689432643, "num_tokens": 1369}
|
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
# import open3d as o3d
import numpy as np
import random
import paddle.fluid as fluid
import argparse
from shapenet_part_loader import PartDataset
import utils
from utils import distance_squre, PointLoss
import copy
from model_PFNet import PFNetG
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='dataset/train', help='path to dataset')
parser.add_argument('--workers', type=int, default=2, help='number of data loading workers')
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--pnum', type=int, default=2048, help='the point number of a sample')
parser.add_argument('--crop_point_num', type=int, default=512, help='0 means do not use else use with this weight')
parser.add_argument('--nc', type=int, default=3)
parser.add_argument('--niter', type=int, default=201, help='number of epochs to train for')
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument('--learning_rate', default=0.0002, type=float, help='learning rate in training')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--cuda', type=bool, default=False, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=2, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--drop', type=float, default=0.2)
parser.add_argument('--num_scales', type=int, default=3, help='number of scales')
parser.add_argument('--point_scales_list', type=list, default=[2048, 1024, 512], help='number of points in each scales')
parser.add_argument('--each_scales_size', type=int, default=1, help='each scales size')
parser.add_argument('--wtl2', type=float, default=0.95, help='0 means do not use else use with this weight')
parser.add_argument('--cropmethod', default='random_center', help='random|center|random_center')
opt = parser.parse_args()
dset = PartDataset(
root='/home/arclab/PF-Net-Point-Fractal-Network/dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/',
classification=True, class_choice=None, num_point=opt.pnum, mode='test')
crop_choice = [np.array([1, 0, 0]), np.array([0, 0, 1]), np.array([1, 0, 1]), np.array([-1, 0, 0]), np.array([-1, 1, 0])]
place = fluid.CUDAPlace(0)  # or fluid.CUDAPlace(0)
with fluid.dygraph.guard(place):
netG = PFNetG(opt.num_scales, opt.each_scales_size, opt.point_scales_list, opt.crop_point_num)
# netG_scheduler = fluid.dygraph.StepDecay(0.0001, step_size=40, decay_rate=0.2)
netG_optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.0001, epsilon=1e-05,
parameter_list=netG.parameters(),
regularization=fluid.regularizer.L2Decay(regularization_coeff=
opt.weight_decay))
para, netG_opt = fluid.load_dygraph('Checkpoints/netG_pretrained.pdparams')
netG.load_dict(para)
netG.eval()
criterion_G = PointLoss()
train_loader = fluid.io.DataLoader.from_generator(capacity=10, iterable=True)
train_loader.set_sample_list_generator(dset.get_reader(opt.batchSize), places=place)
alpha1 = 0.01
alpha2 = 0.02
for data in train_loader():
points, label = data
batch_size = points.shape[0]
real_point = points.numpy()
real_center = np.zeros((batch_size, opt.crop_point_num, 3)).astype('float32')
cropped_point = copy.deepcopy(real_point)
for m in range(batch_size):
index = random.sample(crop_choice, 1)
distance_list = []
p_center = index[0]
for n in range(opt.pnum):
distance_list.append(distance_squre(real_point[m, n], p_center))
distance_order = sorted(enumerate(distance_list), key=lambda x: x[1])
for sp in range(opt.crop_point_num):
cropped_point[m, distance_order[sp][0]] = np.array([0, 0, 0])
real_center[m, sp] = real_point[m, distance_order[sp][0]]
cropped_point1_idx = utils.farthest_point_sample_numpy(cropped_point, opt.point_scales_list[1], RAN=True)
cropped_point1 = utils.index_points_numpy(cropped_point, cropped_point1_idx)
cropped_point2_idx = utils.farthest_point_sample_numpy(cropped_point, opt.point_scales_list[2], RAN=False)
cropped_point2 = utils.index_points_numpy(cropped_point, cropped_point2_idx)
cropped_point = fluid.dygraph.to_variable(cropped_point)
cropped_point1 = fluid.dygraph.to_variable(cropped_point1)
cropped_point2 = fluid.dygraph.to_variable(cropped_point2)
real_center1_idx = utils.farthest_point_sample_numpy(real_center, 64, RAN=False)
real_center1 = utils.index_points_numpy(real_center, real_center1_idx)
real_center2_idx = utils.farthest_point_sample_numpy(real_center, 128, RAN=True)
real_center2 = utils.index_points_numpy(real_center, real_center2_idx)
real_center = fluid.dygraph.to_variable(real_center)
real_center1 = fluid.dygraph.to_variable(real_center1)
real_center2 = fluid.dygraph.to_variable(real_center2)
# cropped_point = np.load('cmp/input_cropped1.npy')
# cropped_point1 = np.load('cmp/input_cropped2.npy')
# cropped_point2 = np.load('cmp/input_cropped3.npy')
#
# cropped_point = fluid.dygraph.to_variable(cropped_point)
# cropped_point1 = fluid.dygraph.to_variable(cropped_point1)
# cropped_point2 = fluid.dygraph.to_variable(cropped_point2)
cropped_input = [cropped_point, cropped_point1, cropped_point2]
fake_center1, fake_center2, fake = netG(cropped_input)
# fake_torch = np.squeeze(np.load('cmp/fake.npy'), 1)
# fake_center1_torch = np.load('cmp/fake_center1.npy')
# fake_center2_torch = np.load('cmp/fake_center2.npy')
#
# real_center = np.load('cmp/real_center.npy')
# real_center1 = np.load('cmp/real_center_key1.npy')
# real_center2 = np.load('cmp/real_center_key2.npy')
#
# real_center = fluid.dygraph.to_variable(real_center)
# real_center1 = fluid.dygraph.to_variable(real_center1)
# real_center2 = fluid.dygraph.to_variable(real_center2)
cd_loss = criterion_G(fake, real_center)
# CD_LOSS_torch = np.load('cmp/CD_LOSS.npy')
# print(CD_LOSS_torch)
# print(cd_loss)
G_loss_l2 = criterion_G(fake, real_center) + alpha1*criterion_G(fake_center1, real_center1) + \
alpha2*criterion_G(fake_center2, real_center2)
print('G_loss: ', G_loss_l2.numpy())
# real_pc = o3d.geometry.PointCloud()
# real_pc.points = o3d.utility.Vector3dVector(points[0].numpy())
# cropped_pc = o3d.geometry.PointCloud()
# cropped_pc.points = o3d.utility.Vector3dVector(cropped_point[0].numpy())
# cropped_pc.paint_uniform_color([1, 0.706, 0])
# fake_pc = o3d.geometry.PointCloud()
# fake_pc.points = o3d.utility.Vector3dVector(real_center[0].numpy())
# fake_pc.paint_uniform_color([1, 0.203, 0])
# o3d.visualization.draw_geometries([cropped_pc, fake_pc])
|
{"hexsha": "21c5c21e8b4792ede02634c4d99f512ab8491aa1", "size": 7499, "ext": "py", "lang": "Python", "max_stars_repo_path": "pf-net/Test_PFNet.py", "max_stars_repo_name": "63445538/Contrib", "max_stars_repo_head_hexsha": "8860692e341020bb4332ff9f59b17a0c8cd9c748", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pf-net/Test_PFNet.py", "max_issues_repo_name": "63445538/Contrib", "max_issues_repo_head_hexsha": "8860692e341020bb4332ff9f59b17a0c8cd9c748", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pf-net/Test_PFNet.py", "max_forks_repo_name": "63445538/Contrib", "max_forks_repo_head_hexsha": "8860692e341020bb4332ff9f59b17a0c8cd9c748", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.9933333333, "max_line_length": 124, "alphanum_fraction": 0.6922256301, "include": true, "reason": "import numpy", "num_tokens": 1875}
|
#include <boost/geometry/algorithms/centroid.hpp>
|
{"hexsha": "2c5991633ba677a84d872db945e3ddcb42972b18", "size": 50, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_geometry_algorithms_centroid.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_geometry_algorithms_centroid.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_geometry_algorithms_centroid.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 25.0, "max_line_length": 49, "alphanum_fraction": 0.82, "num_tokens": 11}
|
[STATEMENT]
lemma shrK_notin_image_publicKey [simp]: "shrK x \<notin> publicKey b ` AA"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. shrK x \<notin> publicKey b ` AA
[PROOF STEP]
by auto
|
{"llama_tokens": 78, "file": "Inductive_Confidentiality_DolevYao_Public", "length": 1}
|
## philvals.py
## This is my implementation of phivals.m
## Computation of scaling function and wavelet by recursion
## using Python libraries numpy, scipy
##
## The main reference that I'll use is
## Gilbert Strang, and Kevin Amaratunga. 18.327 Wavelets, Filter Banks and Applications, Spring 2003. (Massachusetts Institute of Technology: MIT OpenCourseWare), http://ocw.mit.edu (Accessed 19 Jun, 2015). License: Creative Commons BY-NC-SA
##
##
##
#####################################################################################
## Copyleft 2015, Ernest Yeung <ernestyalumni@gmail.com>
##
## 20150621
##
## This program, along with all its code, is free software;
## you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## If you like what I'm doing and would like to help and contribute support,
## please take a look at my crowdfunding campaign at ernestyalumni.tilt.com
## and subscription-based Patreon
## read my mission statement and give your financial support,
## no matter how small or large,
## if you can
## and to keep checking my ernestyalumni.wordpress.com blog and
## various social media channels
## for updates as I try to keep putting out great stuff.
##
## Fund Science! Help my physics education outreach and research efforts at
## Open/Tilt or subscription Patreon - Ernest Yeung
##
## ernestyalumni.tilt.com
##
## Facebook : ernestyalumni
## gmail : ernestyalumni
## google : ernestyalumni
## linkedin : ernestyalumni
## Patreon : ernestyalumni
## Tilt/Open : ernestyalumni
## tumblr : ernestyalumni
## twitter : ernestyalumni
## youtube : ernestyalumni
## wordpress : ernestyalumni
##
##
################################################################################
##
##
##
##
## EY : 20150621 on the MIT OCW website for 18.327, and in the download, phivals.m isn't even a text file; it's in html for some reason. However, on the web.mit.edu website, the formating is correct, although it's still a html file.
import numpy as np
import scipy
from scipy.linalg import toeplitz
def phivals(h,i):
"""
phivals = phivals(h,i)
Generate a scaling function and its associated wavelet
using the given filter coefficients
Matlab original version (but it was a html file!) by Kevin Amaratunga 5 March 1993
INPUTS:
h = filter coefficients (sum(h)=2)
i = discretization parameter. The number of points per integer
step is 2^i. Thus, setting i = 0 gives the scaling function
and wavelet values at integer points
OUTPUTS: x,phi,psi
"""
assert i>=0, "phivals: i must be non-negative"
m,n = h.shape
assert n==1, "input h is not a column vector"
g = np.multiply( np.array( [(-1)**i for i in range(0,m)]), h[::-1,0] )
# The Haar filter produces a singular matrix, but since we know the solution
# already we treat this as a special case.
    if m == 2 and np.array_equal(h, np.vstack( np.array([1,1]) )):
phi = np.vstack( np.append(np.ones(2**i), np.zeros(1) ) )
if i > 0:
psi = np.vstack( np.append( np.append(np.ones(2**(i-1)),-np.ones(2**(i-1))), np.zeros(1) ))
elif i==0:
psi = np.vstack( np.array([1,0]) )
else:
ch = np.vstack( (h,np.zeros((m,1)) ))
rh = np.append( np.array([h[0,0]]), np.zeros(m-1))
tmp = toeplitz(ch,rh)
M = tmp.flatten('F')[0:-1:2].reshape((m,m)).T - np.identity(m)
M[-1,:] = np.ones(m)
tmp = np.vstack( np.append(np.zeros(m-1),np.identity(1)) )
phi = np.linalg.solve( M,tmp)
# Integer values of phi
if i > 0:
for k in range(0,i):
p = 2**(k+1)*(m-1)+1 # No of rows in toeplitz matrix
q = 2**k *(m-1)+1 # No of columns toeplitz matrix
if k==0:
ch0 = np.vstack( np.append( h, np.zeros(p-1-m)) )
ch = np.vstack( ( ch0, np.zeros(1) ))
cg0 = np.vstack( np.append(g, np.zeros(p-1-m)))
else:
ch = np.vstack( np.append( np.identity(1), np.zeros(2**k-1))).dot(ch0.T)
ch = np.vstack( np.append( ch.flatten('F'), np.zeros(1) ) )
rh = np.append( ch[0], np.zeros(q-1) )
Th = toeplitz(ch,rh)
if k == i-1:
cg = (np.vstack(np.append(np.identity(1),np.zeros(2**k-1)))).dot( cg0.T)
cg = cg.flatten('F') # flatten
cg = np.vstack( np.append(cg,np.zeros(1)))
rg = np.append( cg[0], np.zeros(q-1) )
Tg = toeplitz(cg,rg)
psi = Tg.dot(phi)
phi = Th.dot(phi)
elif i==0:
cg0 = np.vstack( np.append( g, np.zeros(m-1) ) )
cg = np.vstack((cg0, np.zeros(1) ) )
rg = np.append(cg[0],np.zeros(m-1))
Tg = toeplitz(cg,rg)
psi = Tg.dot(phi)
psi=psi[::2]
a,b = phi.shape
x = np.vstack( np.arange(0,a)/2.**i )
return x,phi,psi
#################
## test values
#################
h_test = np.random.rand(3,1)
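#################
## hedged usage example (added): Daubechies D4 scaling function / wavelet
#################
## The D4 filter coefficients below sum to 2, as phivals requires; with
## i = 3 the scaling function and wavelet are sampled every 1/8 step.
h_D4 = np.vstack( np.array([1.+np.sqrt(3.), 3.+np.sqrt(3.), 3.-np.sqrt(3.), 1.-np.sqrt(3.)])/4. )
x_D4, phi_D4, psi_D4 = phivals(h_D4, 3)
print( x_D4.shape, phi_D4.shape, psi_D4.shape )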
|
{"hexsha": "cb840dd7bc47cda9382799aacf13049eb034becf", "size": 7549, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/phivals.py", "max_stars_repo_name": "ernestyalumni/18-327-wavelets-filter-banks", "max_stars_repo_head_hexsha": "eeb3fd65b42808cf907aa716110417515dbbfd82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2015-07-18T16:13:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T12:04:01.000Z", "max_issues_repo_path": "tools/phivals.py", "max_issues_repo_name": "ernestyalumni/18-327-wavelets-filter-banks", "max_issues_repo_head_hexsha": "eeb3fd65b42808cf907aa716110417515dbbfd82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/phivals.py", "max_forks_repo_name": "ernestyalumni/18-327-wavelets-filter-banks", "max_forks_repo_head_hexsha": "eeb3fd65b42808cf907aa716110417515dbbfd82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-07-30T20:05:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-29T01:11:08.000Z", "avg_line_length": 46.8881987578, "max_line_length": 241, "alphanum_fraction": 0.4637700358, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1638}
|
from copy import deepcopy
from functools import wraps
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize as sp_minimize
from sklearn.base import is_regressor
from sklearn.ensemble import GradientBoostingRegressor
from joblib import dump as dump_
from joblib import load as load_
from collections import OrderedDict
from .learning import ExtraTreesRegressor
from .learning import GaussianProcessRegressor
from .learning import GradientBoostingQuantileRegressor
from .learning import RandomForestRegressor
from .learning.gaussian_process.kernels import ConstantKernel
from .learning.gaussian_process.kernels import HammingKernel
from .learning.gaussian_process.kernels import Matern
from .space import Space, Categorical, Integer, Real, Dimension
__all__ = (
"load",
"dump",
)
def create_result(Xi, yi, space=None, rng=None, specs=None, models=None):
"""
Initialize an `OptimizeResult` object.
Parameters
----------
Xi : list of lists, shape (n_iters, n_features)
Location of the minimum at every iteration.
yi : array-like, shape (n_iters,)
Minimum value obtained at every iteration.
space : Space instance, optional
Search space.
rng : RandomState instance, optional
State of the random state.
specs : dict, optional
Call specifications.
models : list, optional
List of fit surrogate models.
Returns
-------
res : `OptimizeResult`, scipy object
OptimizeResult instance with the required information.
"""
res = OptimizeResult()
yi = np.asarray(yi)
if np.ndim(yi) == 2:
res.log_time = np.ravel(yi[:, 1])
yi = np.ravel(yi[:, 0])
best = np.argmin(yi)
res.x = Xi[best]
res.fun = yi[best]
res.func_vals = yi
res.x_iters = Xi
res.models = models
res.space = space
res.random_state = rng
res.specs = specs
return res
def eval_callbacks(callbacks, result):
"""Evaluate list of callbacks on result.
The return values of the `callbacks` are ORed together to give the
overall decision on whether or not the optimization procedure should
continue.
Parameters
----------
callbacks : list of callables
Callbacks to evaluate.
result : `OptimizeResult`, scipy object
Optimization result object to be stored.
Returns
-------
decision : bool
Decision of the callbacks whether or not to keep optimizing
"""
stop = False
if callbacks:
for c in callbacks:
decision = c(result)
if decision is not None:
stop = stop or decision
return stop
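def _example_eval_callbacks():
    """Hedged usage sketch (not part of the original module): callbacks are
    plain callables taking the current `OptimizeResult`; returning a truthy
    value requests early stopping."""
    result = create_result(Xi=[[0.0], [1.0]], yi=[3.0, 1.0])
    def stop_below_two(res):
        return res.fun < 2.0
    return eval_callbacks([stop_below_two], result)  # -> True here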
def dump(res, filename, store_objective=True, **kwargs):
"""
Store an skopt optimization result into a file.
Parameters
----------
res : `OptimizeResult`, scipy object
Optimization result object to be stored.
filename : string or `pathlib.Path`
The path of the file in which it is to be stored. The compression
method corresponding to one of the supported filename extensions ('.z',
'.gz', '.bz2', '.xz' or '.lzma') will be used automatically.
store_objective : boolean, default=True
Whether the objective function should be stored. Set `store_objective`
to `False` if your objective function (`.specs['args']['func']`) is
unserializable (i.e. if an exception is raised when trying to serialize
the optimization result).
Notice that if `store_objective` is set to `False`, a deep copy of the
optimization result is created, potentially leading to performance
problems if `res` is very large. If the objective function is not
critical, one can delete it before calling `skopt.dump()` and thus
avoid deep copying of `res`.
**kwargs : other keyword arguments
All other keyword arguments will be passed to `joblib.dump`.
"""
if store_objective:
dump_(res, filename, **kwargs)
elif 'func' in res.specs['args']:
# If the user does not want to store the objective and it is indeed
# present in the provided object, then create a deep copy of it and
# remove the objective function before dumping it with joblib.dump.
res_without_func = deepcopy(res)
del res_without_func.specs['args']['func']
dump_(res_without_func, filename, **kwargs)
else:
# If the user does not want to store the objective and it is already
# missing in the provided object, dump it without copying.
dump_(res, filename, **kwargs)
def load(filename, **kwargs):
"""
Reconstruct a skopt optimization result from a file
persisted with skopt.dump.
.. note::
Notice that the loaded optimization result can be missing
the objective function (`.specs['args']['func']`) if `skopt.dump`
was called with `store_objective=False`.
Parameters
----------
filename : string or `pathlib.Path`
The path of the file from which to load the optimization result.
**kwargs : other keyword arguments
All other keyword arguments will be passed to `joblib.load`.
Returns
-------
res : `OptimizeResult`, scipy object
Reconstructed OptimizeResult instance.
"""
return load_(filename, **kwargs)
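def _example_dump_load_roundtrip(res):
    """Hedged usage sketch (not part of the original module): persist a
    result without its objective function and read it back. The file name
    is a placeholder."""
    dump(res, "result.gz", store_objective=False)
    return load("result.gz")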
def is_listlike(x):
return isinstance(x, (list, tuple))
def is_2Dlistlike(x):
return np.all([is_listlike(xi) for xi in x])
def check_x_in_space(x, space):
if is_2Dlistlike(x):
if not np.all([p in space for p in x]):
raise ValueError("Not all points are within the bounds of"
" the space.")
if any([len(p) != len(space.dimensions) for p in x]):
raise ValueError("Not all points have the same dimensions as"
" the space.")
elif is_listlike(x):
if x not in space:
raise ValueError("Point (%s) is not within the bounds of"
" the space (%s)."
% (x, space.bounds))
if len(x) != len(space.dimensions):
raise ValueError("Dimensions of point (%s) and space (%s) do not match"
% (x, space.bounds))
def expected_minimum(res, n_random_starts=20, random_state=None):
"""
Compute the minimum over the predictions of the last surrogate model.
Uses `expected_minimum_random_sampling` with `n_random_starts=100000`
when the space contains any categorical values.
.. note::
The returned minimum may not necessarily be an accurate
prediction of the minimum of the true objective function.
Parameters
----------
res : `OptimizeResult`, scipy object
The optimization result returned by a `skopt` minimizer.
n_random_starts : int, default=20
The number of random starts for the minimization of the surrogate
model.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
x : list
location of the minimum.
fun : float
the surrogate function value at the minimum.
"""
if res.space.is_partly_categorical:
return expected_minimum_random_sampling(res, n_random_starts=100000,
random_state=random_state)
def func(x):
reg = res.models[-1]
x = res.space.transform(x.reshape(1, -1))
return reg.predict(x.reshape(1, -1))[0]
xs = [res.x]
if n_random_starts > 0:
xs.extend(res.space.rvs(n_random_starts, random_state=random_state))
best_x = None
best_fun = np.inf
for x0 in xs:
r = sp_minimize(func, x0=x0, bounds=res.space.bounds)
if r.fun < best_fun:
best_x = r.x
best_fun = r.fun
return [v for v in best_x], best_fun
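def _example_expected_minimum(res):
    """Hedged usage sketch (not part of the original module): `res` is any
    result returned by a skopt minimizer, e.g. `gp_minimize`. More random
    starts trade runtime for a better chance of locating the surrogate's
    global minimum."""
    x, fun = expected_minimum(res, n_random_starts=50, random_state=0)
    return x, fun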
def expected_minimum_random_sampling(res, n_random_starts=100000,
random_state=None):
"""Minimum search by doing naive random sampling, Returns the parameters
that gave the minimum function value. Can be used when the space
contains any categorical values.
.. note::
The returned minimum may not necessarily be an accurate
prediction of the minimum of the true objective function.
Parameters
----------
res : `OptimizeResult`, scipy object
The optimization result returned by a `skopt` minimizer.
n_random_starts : int, default=100000
The number of random starts for the minimization of the surrogate
model.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
x : list
location of the minimum.
fun : float
the surrogate function value at the minimum.
"""
# sample points from search space
random_samples = res.space.rvs(n_random_starts, random_state=random_state)
# make estimations with surrogate
model = res.models[-1]
y_random = model.predict(res.space.transform(random_samples))
index_best_objective = np.argmin(y_random)
min_x = random_samples[index_best_objective]
return min_x, y_random[index_best_objective]
def has_gradients(estimator):
"""
Check if an estimator's ``predict`` method provides gradients.
Parameters
----------
estimator :
sklearn BaseEstimator instance.
"""
tree_estimators = (
ExtraTreesRegressor, RandomForestRegressor,
GradientBoostingQuantileRegressor
)
# cook_estimator() returns None for "dummy minimize" aka random values only
if estimator is None:
return False
if isinstance(estimator, tree_estimators):
return False
categorical_gp = False
if hasattr(estimator, "kernel"):
params = estimator.get_params()
categorical_gp = (
isinstance(estimator.kernel, HammingKernel) or
any([isinstance(params[p], HammingKernel) for p in params])
)
return not categorical_gp
def cook_estimator(base_estimator, space=None, **kwargs):
"""
Cook a default estimator.
For the special base_estimator called "DUMMY" the return value is None.
This corresponds to sampling points at random, hence there is no need
for an estimator.
Parameters
----------
base_estimator : "GP", "RF", "ET", "GBRT", "DUMMY"
or sklearn regressor, default="GP"
Should inherit from `sklearn.base.RegressorMixin`.
In addition the `predict` method should have an optional `return_std`
argument, which returns `std(Y | x)` along with `E[Y | x]`.
If base_estimator is one of ["GP", "RF", "ET", "GBRT", "DUMMY"], a
surrogate model corresponding to the relevant `X_minimize` function
is created.
space : Space instance
Has to be provided if the base_estimator is a gaussian process.
Ignored otherwise.
kwargs : dict
Extra parameters provided to the base_estimator at init time.
"""
if isinstance(base_estimator, str):
base_estimator = base_estimator.upper()
if base_estimator not in ["GP", "ET", "RF", "GBRT", "DUMMY"]:
raise ValueError("Valid strings for the base_estimator parameter "
" are: 'RF', 'ET', 'GP', 'GBRT' or 'DUMMY' not "
"%s." % base_estimator)
elif not is_regressor(base_estimator):
raise ValueError("base_estimator has to be a regressor.")
if base_estimator == "GP":
if space is not None:
space = Space(space)
space = Space(normalize_dimensions(space.dimensions))
n_dims = space.transformed_n_dims
is_cat = space.is_categorical
else:
raise ValueError("Expected a Space instance, not None.")
cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
# only special if *all* dimensions are categorical
if is_cat:
other_kernel = HammingKernel(length_scale=np.ones(n_dims))
else:
other_kernel = Matern(
length_scale=np.ones(n_dims),
length_scale_bounds=[(0.01, 100)] * n_dims, nu=2.5)
base_estimator = GaussianProcessRegressor(
kernel=cov_amplitude * other_kernel,
normalize_y=True, noise="gaussian",
n_restarts_optimizer=2)
elif base_estimator == "RF":
base_estimator = RandomForestRegressor(n_estimators=100,
min_samples_leaf=3)
elif base_estimator == "ET":
base_estimator = ExtraTreesRegressor(n_estimators=100,
min_samples_leaf=3)
elif base_estimator == "GBRT":
gbrt = GradientBoostingRegressor(n_estimators=30, loss="quantile")
base_estimator = GradientBoostingQuantileRegressor(base_estimator=gbrt)
elif base_estimator == "DUMMY":
return None
base_estimator.set_params(**kwargs)
return base_estimator
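def _example_cook_estimator():
    """Hedged usage sketch (not part of the original module): a GP surrogate
    needs the search space so the kernel can be sized; extra kwargs such as
    `random_state` are forwarded to the estimator."""
    space = [Real(-5.0, 5.0), Integer(0, 10)]
    return cook_estimator("GP", space=space, random_state=0)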
def dimensions_aslist(search_space):
"""Convert a dict representation of a search space into a list of
dimensions, ordered by sorted(search_space.keys()).
Parameters
----------
search_space : dict
Represents search space. The keys are dimension names (strings)
and values are instances of classes that inherit from the class
:class:`skopt.space.Dimension` (Real, Integer or Categorical)
Returns
-------
params_space_list: list
list of skopt.space.Dimension instances.
Examples
--------
>>> from skopt.space.space import Real, Integer
>>> from skopt.utils import dimensions_aslist
>>> search_space = {'name1': Real(0,1),
... 'name2': Integer(2,4), 'name3': Real(-1,1)}
>>> dimensions_aslist(search_space)[0]
Real(low=0, high=1, prior='uniform', transform='identity')
>>> dimensions_aslist(search_space)[1]
Integer(low=2, high=4, prior='uniform', transform='identity')
>>> dimensions_aslist(search_space)[2]
Real(low=-1, high=1, prior='uniform', transform='identity')
"""
params_space_list = [
search_space[k] for k in sorted(search_space.keys())
]
return params_space_list
def point_asdict(search_space, point_as_list):
"""Convert the list representation of a point from a search space
to the dictionary representation, where keys are dimension names
and values are corresponding to the values of dimensions in the list.
.. seealso:: :class:`skopt.utils.point_aslist`
Parameters
----------
search_space : dict
Represents search space. The keys are dimension names (strings)
and values are instances of classes that inherit from the class
:class:`skopt.space.Dimension` (Real, Integer or Categorical)
point_as_list : list
list with parameter values.The order of parameters in the list
is given by sorted(params_space.keys()).
Returns
-------
params_dict : OrderedDict
dictionary with parameter names as keys to which
corresponding parameter values are assigned.
Examples
--------
>>> from skopt.space.space import Real, Integer
>>> from skopt.utils import point_asdict
>>> search_space = {'name1': Real(0,1),
... 'name2': Integer(2,4), 'name3': Real(-1,1)}
>>> point_as_list = [0.66, 3, -0.15]
>>> point_asdict(search_space, point_as_list)
OrderedDict([('name1', 0.66), ('name2', 3), ('name3', -0.15)])
"""
params_dict = OrderedDict()
for k, v in zip(sorted(search_space.keys()), point_as_list):
params_dict[k] = v
return params_dict
def point_aslist(search_space, point_as_dict):
"""Convert a dictionary representation of a point from a search space to
the list representation. The list of values is created from the values of
the dictionary, sorted by the names of dimensions used as keys.
.. seealso:: :class:`skopt.utils.point_asdict`
Parameters
----------
search_space : dict
Represents search space. The keys are dimension names (strings)
and values are instances of classes that inherit from the class
:class:`skopt.space.Dimension` (Real, Integer or Categorical)
point_as_dict : dict
dict with parameter names as keys to which corresponding
parameter values are assigned.
Returns
-------
point_as_list : list
list with point values.The order of
parameters in the list is given by sorted(params_space.keys()).
Examples
--------
>>> from skopt.space.space import Real, Integer
>>> from skopt.utils import point_aslist
>>> search_space = {'name1': Real(0,1),
... 'name2': Integer(2,4), 'name3': Real(-1,1)}
>>> point_as_dict = {'name1': 0.66, 'name2': 3, 'name3': -0.15}
>>> point_aslist(search_space, point_as_dict)
[0.66, 3, -0.15]
"""
point_as_list = [
point_as_dict[k] for k in sorted(search_space.keys())
]
return point_as_list
def normalize_dimensions(dimensions):
"""Create a ``Space`` where all dimensions are normalized to unit range.
This is particularly useful for Gaussian process based regressors and is
used internally by ``gp_minimize``.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
NOTE: The upper and lower bounds are inclusive for `Integer`
dimensions.
"""
space = Space(dimensions)
transformed_dimensions = []
if space.is_categorical:
# recreate the space and explicitly set transform to "string"
# this is a special case for GP based regressors
for dimension in space:
transformed_dimensions.append(Categorical(dimension.categories,
dimension.prior,
name=dimension.name,
transform="string"))
else:
for dimension in space.dimensions:
if isinstance(dimension, Categorical):
transformed_dimensions.append(dimension)
# To make sure that GP operates in the [0, 1] space
elif isinstance(dimension, Real):
transformed_dimensions.append(
Real(dimension.low, dimension.high, dimension.prior,
name=dimension.name,
transform="normalize",
dtype=dimension.dtype)
)
elif isinstance(dimension, Integer):
transformed_dimensions.append(
Integer(dimension.low, dimension.high,
name=dimension.name,
transform="normalize",
dtype=dimension.dtype)
)
else:
raise RuntimeError("Unknown dimension type "
"(%s)" % type(dimension))
return Space(transformed_dimensions)
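def _example_normalize_dimensions():
    """Hedged usage sketch (not part of the original module): tuples are
    promoted to `Real`/`Integer` dimensions and rescaled so a GP surrogate
    sees every coordinate in [0, 1]."""
    space = normalize_dimensions([(-5.0, 5.0), (0, 10)])
    return space  # each dimension now uses the "normalize" transform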
def check_list_types(x, types):
"""
Check whether all elements of a list `x` are of the correct type(s)
and raise a ValueError if they are not.
Note that `types` can be either a single object-type or a tuple
of object-types.
Raises `ValueError`, If one or more element in the list `x` is
not of the correct type(s).
Parameters
----------
x : list
List of objects.
types : object or list(object)
Either a single object-type or a tuple of object-types.
"""
# List of the elements in the list that are incorrectly typed.
err = list(filter(lambda a: not isinstance(a, types), x))
# If the list is non-empty then raise an exception.
if len(err) > 0:
msg = "All elements in list must be instances of {}, but found: {}"
msg = msg.format(types, err)
raise ValueError(msg)
def check_dimension_names(dimensions):
"""
Check whether all dimensions have names. Raises `ValueError`,
if one or more dimensions are unnamed.
Parameters
----------
dimensions : list(Dimension)
List of Dimension-objects.
"""
# List of the dimensions that have no names.
err_dims = list(filter(lambda dim: dim.name is None, dimensions))
# If the list is non-empty then raise an exception.
if len(err_dims) > 0:
msg = "All dimensions must have names, but found: {}"
msg = msg.format(err_dims)
raise ValueError(msg)
def use_named_args(dimensions):
"""
Wrapper / decorator for an objective function that uses named arguments
to make it compatible with optimizers that use a single list of parameters.
Your objective function can be defined as being callable using named
arguments: `func(foo=123, bar=3.0, baz='hello')` for a search-space
with dimensions named `['foo', 'bar', 'baz']`. But the optimizer
will only pass a single list `x` of unnamed arguments when calling
the objective function: `func(x=[123, 3.0, 'hello'])`. This wrapper
converts your objective function with named arguments into one that
accepts a list as argument, while doing the conversion automatically.
The advantage of this is that you don't have to unpack the list of
arguments `x` yourself, which makes the code easier to read and
also reduces the risk of bugs if you change the number of dimensions
or their order in the search-space.
Examples
--------
>>> # Define the search-space dimensions. They must all have names!
>>> from skopt.space import Real
>>> from skopt import forest_minimize
>>> from skopt.utils import use_named_args
>>> dim1 = Real(name='foo', low=0.0, high=1.0)
>>> dim2 = Real(name='bar', low=0.0, high=1.0)
>>> dim3 = Real(name='baz', low=0.0, high=1.0)
>>>
>>> # Gather the search-space dimensions in a list.
>>> dimensions = [dim1, dim2, dim3]
>>>
>>> # Define the objective function with named arguments
>>> # and use this function-decorator to specify the
>>> # search-space dimensions.
>>> @use_named_args(dimensions=dimensions)
... def my_objective_function(foo, bar, baz):
... return foo ** 2 + bar ** 4 + baz ** 8
>>>
>>> # Now the function is callable from the outside as
>>> # `my_objective_function(x)` where `x` is a list of unnamed arguments,
>>> # which then wraps your objective function that is callable as
>>> # `my_objective_function(foo, bar, baz)`.
>>> # The conversion from a list `x` to named parameters `foo`,
>>> # `bar`, `baz`
>>> # is done automatically.
>>>
>>> # Run the optimizer on the wrapped objective function which is called
>>> # as `my_objective_function(x)` as expected by `forest_minimize()`.
>>> result = forest_minimize(func=my_objective_function,
... dimensions=dimensions,
... n_calls=20, base_estimator="ET",
... random_state=4)
>>>
>>> # Print the best-found results.
>>> print("Best fitness:", result.fun)
Best fitness: 0.1948080835239698
>>> print("Best parameters:", result.x)
Best parameters: [0.44134853091052617, 0.06570954323368307, 0.17586123323419825]
Parameters
----------
dimensions : list(Dimension)
List of `Dimension`-objects for the search-space dimensions.
Returns
-------
wrapped_func : callable
Wrapped objective function.
"""
def decorator(func):
"""
This uses more advanced Python features to wrap `func` using a
function-decorator, which are not explained so well in the
official Python documentation.
A good video tutorial explaining how this works is found here:
https://www.youtube.com/watch?v=KlBPCzcQNU8
Parameters
----------
func : callable
Function to minimize. Should take *named arguments*
and return the objective value.
"""
# Ensure all dimensions are correctly typed.
check_list_types(dimensions, Dimension)
# Ensure all dimensions have names.
check_dimension_names(dimensions)
@wraps(func)
def wrapper(x):
"""
This is the code that will be executed every time the
wrapped / decorated `func` is being called.
It takes `x` as a single list of parameters and
converts them to named arguments and calls `func` with them.
Parameters
----------
x : list
A single list of parameters e.g. `[123, 3.0, 'linear']`
which will be converted to named arguments and passed
to `func`.
Returns
-------
objective_value
The objective value returned by `func`.
"""
# Ensure the number of dimensions match
# the number of parameters in the list x.
if len(x) != len(dimensions):
msg = "Mismatch in number of search-space dimensions. " \
"len(dimensions)=={} and len(x)=={}"
msg = msg.format(len(dimensions), len(x))
raise ValueError(msg)
# Create a dict where the keys are the names of the dimensions
# and the values are taken from the list of parameters x.
arg_dict = {dim.name: value for dim, value in zip(dimensions, x)}
# Call the wrapped objective function with the named arguments.
objective_value = func(**arg_dict)
return objective_value
return wrapper
return decorator
|
{"hexsha": "83d5e27abc0c077adef7a176f39a897b559a22b0", "size": 26526, "ext": "py", "lang": "Python", "max_stars_repo_path": "skopt/utils.py", "max_stars_repo_name": "sqbl/scikit-optimize", "max_stars_repo_head_hexsha": "c1866d5a9ad67efe93ac99736bfc2dc659b561d4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skopt/utils.py", "max_issues_repo_name": "sqbl/scikit-optimize", "max_issues_repo_head_hexsha": "c1866d5a9ad67efe93ac99736bfc2dc659b561d4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skopt/utils.py", "max_forks_repo_name": "sqbl/scikit-optimize", "max_forks_repo_head_hexsha": "c1866d5a9ad67efe93ac99736bfc2dc659b561d4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4046692607, "max_line_length": 84, "alphanum_fraction": 0.6251225213, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5771}
|
@testset "EigenAngles.jl" begin
@info "Testing EigenAngles"
@test isbits(EigenAngle(deg2rad(complex(85.0, -1.0))))
@test EigenAngle(deg2rad(80-0.5im)) > EigenAngle(deg2rad(75-0.3im))
@test_logs (:warn, "θ > 2π. Make sure θ is in radians.") EigenAngle(complex(85.0, 0.31))
end
|
{"hexsha": "11e1bfa3441c6abb2595236399693d97717de3c9", "size": 294, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/EigenAngles.jl", "max_stars_repo_name": "fgasdia/LongwaveModePropagator.jl", "max_stars_repo_head_hexsha": "d99750b7e248f93c36beb9a291e6481da08bb8c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-03T14:29:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-03T14:29:46.000Z", "max_issues_repo_path": "test/EigenAngles.jl", "max_issues_repo_name": "fgasdia/LongwaveModePropagator.jl", "max_issues_repo_head_hexsha": "d99750b7e248f93c36beb9a291e6481da08bb8c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2021-01-02T23:45:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T05:11:38.000Z", "max_forks_repo_path": "test/EigenAngles.jl", "max_forks_repo_name": "fgasdia/LongwaveModePropagator.jl", "max_forks_repo_head_hexsha": "d99750b7e248f93c36beb9a291e6481da08bb8c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-29T17:37:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T17:37:44.000Z", "avg_line_length": 32.6666666667, "max_line_length": 92, "alphanum_fraction": 0.6734693878, "num_tokens": 109}
|
import os
import pickle
from tune.api.factory import TUNE_OBJECT_FACTORY
from typing import Any, Optional, Tuple
from uuid import uuid4
import numpy as np
import pandas as pd
from sklearn.metrics import get_scorer
from sklearn.model_selection import cross_val_score
from triad import FileSystem
from tune import NonIterativeObjectiveFunc, Trial, TrialReport
from tune.constants import (
SPACE_MODEL_NAME,
TUNE_DATASET_DF_DEFAULT_NAME,
TUNE_DATASET_VALIDATION_DF_DEFAULT_NAME,
)
from tune_sklearn.utils import to_sk_model, to_sk_model_expr
class SKObjective(NonIterativeObjectiveFunc):
def __init__(
self,
scoring: Any,
feature_prefix: str = "",
label_col: str = "label",
checkpoint_path: Optional[str] = None,
) -> None:
super().__init__()
self._last_id = ""
self._model_type: Any = None
self._model_expr: str = ""
self._scoring = scoring
self._feature_prefix = feature_prefix
self._label_col = label_col
if checkpoint_path is None:
self._checkpoint_path = checkpoint_path
else:
self._checkpoint_path = TUNE_OBJECT_FACTORY.get_path_or_temp(
checkpoint_path
)
def generate_sort_metric(self, value: float) -> float:
return -value
def run(self, trial: Trial) -> TrialReport:
params = dict(trial.params.simple_value)
if trial.trial_id != self._last_id:
self._model_type = to_sk_model(params.pop(SPACE_MODEL_NAME))
self._model_expr = to_sk_model_expr(self._model_type)
self._train_x, self._train_y = self._reset_xy(
trial.dfs[TUNE_DATASET_DF_DEFAULT_NAME]
)
self._test_x, self._test_y = self._reset_xy(
trial.dfs[TUNE_DATASET_VALIDATION_DF_DEFAULT_NAME]
)
self._last_id = trial.trial_id
else:
params.pop(SPACE_MODEL_NAME)
model = self._model_type(**params).fit(self._train_x, self._train_y)
metric = get_scorer(self._scoring)(model, self._test_x, self._test_y)
metadata = dict(model=self._model_expr)
if self._checkpoint_path is not None:
fp = os.path.join(self._checkpoint_path, str(uuid4()) + ".pkl")
with FileSystem().openbin(fp, mode="wb") as f:
pickle.dump(model, f)
metadata["checkpoint_path"] = fp
return TrialReport(
trial,
metric=metric,
metadata=metadata,
sort_metric=self.generate_sort_metric(metric),
)
def _reset_xy(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
train_df = df.sample(frac=1, random_state=0).reset_index(drop=True)
train_x = train_df.drop([self._label_col], axis=1)
cols = [x for x in train_x.columns if x.startswith(self._feature_prefix)]
return train_x[cols], train_df[self._label_col]
class SKCVObjective(SKObjective):
def __init__(
self,
scoring: Any,
cv: int = 5,
feature_prefix: str = "",
label_col: str = "label",
checkpoint_path: Optional[str] = None,
) -> None:
super().__init__(
scoring=scoring,
feature_prefix=feature_prefix,
label_col=label_col,
checkpoint_path=checkpoint_path,
)
self._cv = cv
def run(self, trial: Trial) -> TrialReport:
params = dict(trial.params.simple_value)
if trial.trial_id != self._last_id:
self._model_type = to_sk_model(params.pop(SPACE_MODEL_NAME))
self._model_expr = to_sk_model_expr(self._model_type)
self._train_x, self._train_y = self._reset_xy(
trial.dfs[TUNE_DATASET_DF_DEFAULT_NAME]
)
self._last_id = trial.trial_id
else:
params.pop(SPACE_MODEL_NAME)
model = self._model_type(**params)
s = cross_val_score(
model, self._train_x, self._train_y, cv=self._cv, scoring=self._scoring
)
metadata = dict(model=self._model_expr, cv_scores=[float(x) for x in s])
if self._checkpoint_path is not None:
model.fit(self._train_x, self._train_y)
fp = os.path.join(self._checkpoint_path, str(uuid4()) + ".pkl")
with FileSystem().openbin(fp, mode="wb") as f:
pickle.dump(model, f)
metadata["checkpoint_path"] = fp
metric = float(np.mean(s))
return TrialReport(
trial,
metric=metric,
metadata=metadata,
sort_metric=self.generate_sort_metric(metric),
)
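# Hedged usage sketch (not part of the original module): construct a
# cross-validated objective; scoring strings follow sklearn's get_scorer
# names, and the parameter values here are placeholders.
def _example_objective():
    return SKCVObjective(
        scoring="neg_mean_squared_error",
        cv=3,
        label_col="label",
        checkpoint_path=None,
    )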
|
{"hexsha": "cacf9391b22f36c181d310d851e3c1395daf5f9d", "size": 4703, "ext": "py", "lang": "Python", "max_stars_repo_path": "tune_sklearn/objective.py", "max_stars_repo_name": "fugue-project/tune", "max_stars_repo_head_hexsha": "bf2288ddcb29c8345d996a9b22c0910da9002da1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-03-03T20:02:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T20:32:22.000Z", "max_issues_repo_path": "tune_sklearn/objective.py", "max_issues_repo_name": "fugue-project/tune", "max_issues_repo_head_hexsha": "bf2288ddcb29c8345d996a9b22c0910da9002da1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2021-04-30T19:56:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T04:40:00.000Z", "max_forks_repo_path": "tune_sklearn/objective.py", "max_forks_repo_name": "fugue-project/tune", "max_forks_repo_head_hexsha": "bf2288ddcb29c8345d996a9b22c0910da9002da1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-30T03:12:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T12:13:37.000Z", "avg_line_length": 35.3609022556, "max_line_length": 83, "alphanum_fraction": 0.6249202637, "include": true, "reason": "import numpy", "num_tokens": 1073}
|
import time
import numpy as np
import torch
import torch.optim
import torch.utils.data
import torch.nn.functional as F
import models
import train_img_pairs
from inverse_warp import compensate_pose, pose_vec2mat, inverse_rotate
from logger import AverageMeter
train_img_pairs.parser.add_argument('-d', '--target-mean-depth', type=float,
help='equivalent depth to aim at when adjusting shifts, regarding DepthNet output',
metavar='D', default=40)
train_img_pairs.parser.add_argument('-r', '--recompute-frequency', type=int,
help='Will recompute optimal shifts every R epochs',
metavar='R', default=5)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def main():
env = train_img_pairs.prepare_environment()
env['adjust_loader'] = torch.utils.data.DataLoader(
env['train_set'], batch_size=env['args'].batch_size, shuffle=False,
num_workers=0, pin_memory=True) # workers is set to 0 to avoid multiple instances being modified at the same time
launch_training_flexible_shifts(**env)
def launch_training_flexible_shifts(scheduler, **env):
logger = env['logger']
args = env["args"]
train_set = env["train_set"]
env['best_error'] = -1
env['epoch'] = 0
env['n_iter'] = 0
if args.pretrained_depth or args.evaluate:
train_img_pairs.validate(**env)
for epoch in range(1, args.epochs + 1):
env['epoch'] = epoch
scheduler.step()
logger.epoch_bar.update(epoch)
# train for one epoch
train_loss, env['n_iter'] = train_img_pairs.train_one_epoch(**env)
logger.train_writer.write(' * Avg Loss : {:.3f}'.format(train_loss))
if epoch % args.recompute_frequency == 0:
train_set.adjust = True
average_shifts = adjust_shifts(**env)
shifts_string = ' '.join(['{:.3f}'.format(s) for s in average_shifts])
logger.train_writer.write(' * adjusted shifts, average shifts are now : {}'.format(shifts_string))
train_set.adjust = False
# evaluate on validation set
error = train_img_pairs.validate(**env)
env['best_error'] = train_img_pairs.finish_epoch(train_loss, error, **env)
logger.epoch_bar.finish()
@torch.no_grad()
def adjust_shifts(args, train_set, adjust_loader, depth_net, pose_net, epoch, logger, training_writer, **env):
batch_time = AverageMeter()
data_time = AverageMeter()
new_shifts = AverageMeter(args.sequence_length-1, precision=2)
pose_net.eval()
depth_net.eval()
upsample_depth_net = models.UpSampleNet(depth_net, args.network_input_size)
end = time.time()
mid_index = (args.sequence_length - 1)//2
# we constrain the mean value of the depth net output for each pair relative to frame mid_index
target_values = np.arange(-mid_index, mid_index + 1) / (args.target_mean_depth * mid_index)
target_values = 1/np.abs(np.concatenate([target_values[:mid_index], target_values[mid_index + 1:]]))
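# Worked example (illustrative): with sequence_length = 5, mid_index = 2 and
# target mean depth D, np.arange(-2, 3) / (2 * D) gives
# [-1/D, -1/(2D), 0, 1/(2D), 1/D]; dropping the middle entry and taking
# 1/abs yields target depths [D, 2D, 2D, D], i.e. pairs closer to the
# middle frame are expected to produce larger mean depths.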
logger.reset_train_bar(len(adjust_loader))
for i, sample in enumerate(adjust_loader):
index = sample['index']
# measure data loading time
data_time.update(time.time() - end)
imgs = torch.stack(sample['imgs'], dim=1).to(device)
intrinsics = sample['intrinsics'].to(device)
intrinsics_inv = sample['intrinsics_inv'].to(device)
# compute output
batch_size, seq = imgs.size()[:2]
if args.network_input_size is not None:
h,w = args.network_input_size
downsample_imgs = F.interpolate(imgs,
(3, h, w),
mode='area')
poses = pose_net(downsample_imgs) # [B, seq, 6]
else:
poses = pose_net(imgs)
pose_matrices = pose_vec2mat(poses, args.rotation_mode) # [B, seq, 3, 4]
tgt_imgs = imgs[:, mid_index] # [B, 3, H, W]
tgt_poses = pose_matrices[:, mid_index] # [B, 3, 4]
compensated_poses = compensate_pose(pose_matrices, tgt_poses) # [B, seq, 3, 4] tgt_poses are now neutral pose
ref_indices = list(range(args.sequence_length))
ref_indices.remove(mid_index)
mean_depth_batch = []
for ref_index in ref_indices:
prior_imgs = imgs[:, ref_index]
prior_poses = compensated_poses[:, ref_index] # [B, 3, 4]
prior_imgs_compensated = inverse_rotate(prior_imgs, prior_poses[:,:,:3], intrinsics, intrinsics_inv)
input_pair = torch.cat([prior_imgs_compensated, tgt_imgs], dim=1) # [B, 6, W, H]
depth = upsample_depth_net(input_pair) # [B, 1, H, W]
mean_depth = depth.view(batch_size, -1).mean(-1).cpu().numpy() # B
mean_depth_batch.append(mean_depth)
for j, mean_values in zip(index, np.stack(mean_depth_batch, axis=-1)):
ratio = mean_values / target_values # if mean value is too high, raise the shift, lower otherwise
train_set.reset_shifts(j, ratio[:mid_index], ratio[mid_index:])
new_shifts.update(train_set.get_shifts(j))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
logger.train_bar.update(i)
if i % args.print_freq == 0:
logger.train_writer.write('Adjustment: '
'Time {} Data {} shifts {}'.format(batch_time, data_time, new_shifts))
for i, shift in enumerate(new_shifts.avg):
training_writer.add_scalar('shifts{}'.format(i), shift, epoch)
return new_shifts.avg
if __name__ == '__main__':
main()
|
{"hexsha": "1f01e68910227dbcfd95c03adc7cadcd550e106a", "size": 5773, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_flexible_shifts.py", "max_stars_repo_name": "ClementPinard/unsupervised-depthnet", "max_stars_repo_head_hexsha": "71bc54afd8a22d5c99e1db88618119c33956b8c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 52, "max_stars_repo_stars_event_min_datetime": "2018-09-11T21:10:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T20:28:20.000Z", "max_issues_repo_path": "train_flexible_shifts.py", "max_issues_repo_name": "ClementPinard/unsupervised-depthnet", "max_issues_repo_head_hexsha": "71bc54afd8a22d5c99e1db88618119c33956b8c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-09-24T23:30:14.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-06T02:02:33.000Z", "max_forks_repo_path": "train_flexible_shifts.py", "max_forks_repo_name": "ClementPinard/unsupervised-depthnet", "max_forks_repo_head_hexsha": "71bc54afd8a22d5c99e1db88618119c33956b8c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-09-17T15:49:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-17T08:59:42.000Z", "avg_line_length": 38.744966443, "max_line_length": 122, "alphanum_fraction": 0.6305213927, "include": true, "reason": "import numpy", "num_tokens": 1359}
|
module SimplePackage
using Boot
include_folder(SimplePackage, @__FILE__)
end
|
{"hexsha": "e18bf887ebf0377b3b8c55da38800a5d97503cd8", "size": 84, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/packages/SimplePackage/src/SimplePackage.jl", "max_stars_repo_name": "djsegal/Boot.jl", "max_stars_repo_head_hexsha": "25aefa8ffc7467ece2951f4df0ae44c1c5897f25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/packages/SimplePackage/src/SimplePackage.jl", "max_issues_repo_name": "djsegal/Boot.jl", "max_issues_repo_head_hexsha": "25aefa8ffc7467ece2951f4df0ae44c1c5897f25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-12-10T10:34:27.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-04T06:41:39.000Z", "max_forks_repo_path": "test/packages/SimplePackage/src/SimplePackage.jl", "max_forks_repo_name": "djsegal/Boot.jl", "max_forks_repo_head_hexsha": "25aefa8ffc7467ece2951f4df0ae44c1c5897f25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.5, "max_line_length": 42, "alphanum_fraction": 0.7857142857, "num_tokens": 21}
|
import sys
import re
import pandas as pd
import numpy as np
import linecache
def main():
args = sys.argv
"""args[1] : threshold number of word [2]:cancer type"""
K = 96
threshold = int(args[1])
pre_data_file = 'data/data1/Pre_data1_o' + args[1] + '.txt'
pre_data = pd.read_csv(pre_data_file, delimiter='\t')
cancer_type = args[2]
pre_data = select_cancer_type(pre_data, threshold, cancer_type)
pre_data.reset_index(drop=True, inplace=True)
output_file = 'data/data1_o' + args[1] + '_' + args[2] + '.txt'
output = open(output_file, 'w')
name_list = pre_data['Sample name']
last_document = 'xxxxxxxxxxxx'
number_of_document = 0
for name in name_list:
if(name != str(last_document)):
last_document = name
number_of_document += 1
last_document = name_list[0]
type_list = list()
type_list.append(pre_data['Primary site'][0])
data_mat = np.zeros([number_of_document, K])
index = 0
error = 0
document = 0
for name in name_list:
if(name != str(last_document)):
last_document = name
document += 1
type_list.append(pre_data['Primary site'][index])
selected = calc_word(pre_data['Mutation CDS'][index],
pre_data['Mutation genome position'][index],
pre_data['Mutation strand'][index])
if(selected == -1):
error += 1
else:
data_mat[document, selected] += 1
index += 1
print(index)
""" check """
drop_list = list()
for i in range(number_of_document):
sum_words = 0
for j in range(K):
sum_words += data_mat[i, j]
if(sum_words == 0):
drop_list.append(i)
number_of_document -= len(drop_list)
data_mat = np.delete(data_mat, drop_list, 0)
type_list = np.delete(type_list, drop_list, 0)
print(str(len(drop_list)))
output.write(str(number_of_document) + ' 96\n')
for i in range(number_of_document):
for k in range(K):
if(k == K-1):
output.write(str(int(data_mat[i, k])) + '\n')
else:
output.write(str(int(data_mat[i, k])) + ' ')
def select_cancer_type(pre_data, threshold, cancer_type):
pre_data = pre_data.loc[pre_data['Primary site'] == cancer_type]
print(pre_data)
pre_data.sort_values(by='Sample name', inplace=True)
pre_data.reset_index(drop=True, inplace=True)
name_list = pre_data['Sample name']
last_document = name_list[0]
drop_list = list()
sum_of_words = 0
temp_index = 0
index_list = list()
for name in name_list:
if(name != last_document):
if(sum_of_words < threshold):
drop_list.extend(index_list)
sum_of_words = 0
last_document = name
index_list = list()
sum_of_words += 1
index_list.append(temp_index)
temp_index += 1
if(sum_of_words < threshold):
drop_list.extend(index_list)
pre_data.drop(drop_list, inplace=True)
return pre_data
def calc_word(mutation, position, strand):
before = mutation[len(mutation)-3]
after = mutation[len(mutation)-1]
position_list = re.split(r'[:-]', position)
if(int(position_list[0]) == 23):
chromosome = 'X'
elif(int(position_list[0]) == 24):
chromosome = 'Y'
elif(int(position_list[0]) == 25):
chromosome = 'M'
else:
chromosome = int(position_list[0])
start = int(position_list[1])
num = int(position_list[2]) - int(position_list[1]) + 1
GRCh_file = 'raw_data/chr' + str(chromosome) + '.fa'
quotient = start // 50
surplus = start % 50
if(surplus != 0):
target_index = int(surplus) - 1
else:
quotient -= 1
target_index = 49
targetline = linecache.getline(GRCh_file, int(quotient)+1)
if(((targetline[target_index] != before) and (strand == '+')) or
((targetline[target_index] != swap(before)) and (strand == '-'))):
print('error: ' + mutation)
print('target: ' + targetline[target_index])
print('strand: ' + strand)
strand = swap(strand)
if(((targetline[target_index] != before) and (strand == '+')) or
((targetline[target_index] != swap(before)) and (strand == '-'))):
print('still error')
return -1
if((target_index >= 1) and (target_index <= 48)):
pattern = 1
elif(target_index == 0):
pattern = 2
elif(target_index == 49):
pattern = 3
if(pattern == 1):
forward = targetline[target_index - 1]
backward = targetline[target_index + 1]
elif(pattern == 2):
pre_line = linecache.getline(GRCh_file, int(quotient))
forward = pre_line[49]
backward = targetline[target_index + 1]
elif(pattern == 3):
post_line = linecache.getline(GRCh_file, int(quotient)+2)
forward = targetline[target_index - 1]
backward = post_line[0]
if(((strand == '+') and (before in ['A', 'G'])) or ((strand == '-') and
(before in ['C', 'T']))):
buf_f = swap(forward)
forward = swap(backward)
backward = buf_f
if(before in ['A', 'G']):
before = swap(before)
after = swap(after)
if(forward == 'A'):
first = 0
elif(forward == 'C'):
first = 1
elif(forward == 'G'):
first = 2
else:
first = 3
if(before == 'C'):
if(after == 'A'):
second = 0
elif(after == 'G'):
second = 1
else:
second = 2
elif(before == 'T'):
if(after == 'A'):
second = 3
elif(after == 'C'):
second = 4
else:
second = 5
if(backward == 'A'):
third = 0
elif(backward == 'C'):
third = 1
elif(backward == 'G'):
third = 2
else:
third = 3
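# Encode the trinucleotide context as one of 96 = 4 * 6 * 4 classes:
# 4 choices of 5' base (first), 6 pyrimidine substitution types (second),
# and 4 choices of 3' base (third).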
answer = 24*first + 4*second + third
return(answer)
def swap(base):
if(base == 'A'):
return('T')
elif(base == 'C'):
return('G')
elif(base == 'G'):
return('C')
elif(base == 'T'):
return('A')
elif(base == '+'):
return('-')
elif(base == '-'):
return('+')
else:
return(base)
if __name__ == '__main__':
main()
|
{"hexsha": "8d8335a3cf4dce196c355cbdcc26e79dcf8f9b52", "size": 6412, "ext": "py", "lang": "Python", "max_stars_repo_path": "Preprocessing/get_M1.py", "max_stars_repo_name": "qkirikigaku/MS_LDA", "max_stars_repo_head_hexsha": "7eea53759e21c95cd6cb3afd2937388a6b222c5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Preprocessing/get_M1.py", "max_issues_repo_name": "qkirikigaku/MS_LDA", "max_issues_repo_head_hexsha": "7eea53759e21c95cd6cb3afd2937388a6b222c5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Preprocessing/get_M1.py", "max_forks_repo_name": "qkirikigaku/MS_LDA", "max_forks_repo_head_hexsha": "7eea53759e21c95cd6cb3afd2937388a6b222c5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7533632287, "max_line_length": 80, "alphanum_fraction": 0.550062383, "include": true, "reason": "import numpy", "num_tokens": 1671}
|
"""Methods for drawing a bounding box on an image."""
import cv2
import numpy as np
import selfsupmotion.data.objectron.dataset.box as Box
_LINE_TICKNESS = 10
_POINT_RADIUS = 10
_COLORS = [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(128, 128, 0),
(128, 0, 128),
(0, 128, 128),
(255, 255, 255),
(0, 0, 0),
(255, 0, 255),
]
def draw_annotation_on_image(image,
object_annotations,
num_keypoints):
"""Draw annotation on the image."""
# The object annotation is a list of 3x1 keypoints for all the annotated
# objects. The objects can have a varying number of keypoints. First we split
# the list according to the number of keypoints for each object. This
# also leaves an empty array at the end of the list.
keypoints = np.split(object_annotations, np.array(np.cumsum(num_keypoints)))
keypoints = [points.reshape(-1, 3) for points in keypoints]
h, w, _ = image.shape
num_objects = len(num_keypoints)
# The keypoints are [x, y, d] where `x` and `y` are normalized (`uv`-system)
# and `d` is the metric distance from the center of the camera. Convert the
# keypoints' `xy` values to pixels.
keypoints = [
np.multiply(keypoint, np.asarray([w, h, 1.], np.float32)).astype(int)
for keypoint in keypoints
]
def draw_face(object_id, face, color):
start = keypoints[object_id][face[0], :]
end = keypoints[object_id][face[2], :]
cv2.line(image, (start[0], start[1]), (end[0], end[1]), color,
_LINE_TICKNESS)
start = keypoints[object_id][face[1], :]
end = keypoints[object_id][face[3], :]
cv2.line(image, (start[0], start[1]), (end[0], end[1]), color,
_LINE_TICKNESS)
for object_id in range(num_objects):
num_keypoint = num_keypoints[object_id]
edges = Box.EDGES
hidden = [False] * Box.NUM_KEYPOINTS
draw_face(object_id, Box.FACES[Box.FRONT_FACE_ID], _COLORS[7])
draw_face(object_id, Box.FACES[Box.TOP_FACE_ID], _COLORS[8])
for kp_id in range(num_keypoint):
kp_pixel = keypoints[object_id][kp_id, :]
# If a keypoint is hidden (e.g. a subset of a larger skeleton family) do
# not visualize it.
if not hidden[kp_id]:
cv2.circle(image, (kp_pixel[0], kp_pixel[1]), _POINT_RADIUS,
_COLORS[object_id % len(_COLORS)], -1)
for edge in edges:
# This if statement is for backward compatibility, where we might later
# add more edges/keypoints to the skeletons.
if edge[0] < num_keypoint and edge[1] < num_keypoint:
start_kp = keypoints[object_id][edge[0], :]
end_kp = keypoints[object_id][edge[1], :]
if not hidden[edge[0]] and not hidden[edge[1]]:
cv2.line(image, (start_kp[0], start_kp[1]), (end_kp[0], end_kp[1]),
_COLORS[object_id % len(_COLORS)], _LINE_TICKNESS)
return image
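def _example_draw():
    """Hedged usage sketch (not part of the original module): draw one
    hypothetical object with 9 objectron-style keypoints (center + 8 box
    corners) on a blank image. Keypoint values are random placeholders in
    the normalized [x, y, depth] format described above."""
    image = np.zeros((480, 640, 3), np.uint8)
    annotations = np.random.rand(9, 3).astype(np.float32)
    return draw_annotation_on_image(image, annotations, [9])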
|
{"hexsha": "a27197e1f25e079ba12699d3ef5b02cb29b44afe", "size": 2901, "ext": "py", "lang": "Python", "max_stars_repo_path": "selfsupmotion/data/objectron/dataset/graphics.py", "max_stars_repo_name": "sbrodeur/selfsupmotion", "max_stars_repo_head_hexsha": "32ba34a090e7e575b43a6a6f14c52c0a5f363d40", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "selfsupmotion/data/objectron/dataset/graphics.py", "max_issues_repo_name": "sbrodeur/selfsupmotion", "max_issues_repo_head_hexsha": "32ba34a090e7e575b43a6a6f14c52c0a5f363d40", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-12T02:51:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-10T20:16:54.000Z", "max_forks_repo_path": "selfsupmotion/data/objectron/dataset/graphics.py", "max_forks_repo_name": "sbrodeur/selfsupmotion", "max_forks_repo_head_hexsha": "32ba34a090e7e575b43a6a6f14c52c0a5f363d40", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-06T15:52:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-10T18:16:28.000Z", "avg_line_length": 38.1710526316, "max_line_length": 79, "alphanum_fraction": 0.6363322992, "include": true, "reason": "import numpy", "num_tokens": 837}
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from functools import partial
from typing import Any, Optional, Callable, Iterable, Union, Tuple, List
import numpy as np
import jax
from jax import numpy as jnp
from jax import tree_map
from jax.util import as_hashable_function
import flax
from flax import linen as nn
from flax import serialization
import netket
from netket import jax as nkjax
from netket import utils
from netket.hilbert import AbstractHilbert
from netket.sampler import Sampler, SamplerState, ExactSampler
from netket.stats import Stats, statistics, mean, sum_inplace
from netket.utils import flax as flax_utils, maybe_wrap_module
from netket.utils.types import DType, Array, PyTree, PRNGKeyT, Shape, NNInitFunc
from netket.optimizer import SR
from netket.operator import (
AbstractOperator,
define_local_cost_function,
local_cost_function,
local_value_cost,
local_value_op_op_cost,
)
from .base import VariationalState, VariationalMixedState
from .mc_state import MCState
AFunType = Callable[[nn.Module, PyTree, jnp.ndarray], jnp.ndarray]
ATrainFunType = Callable[
[nn.Module, PyTree, jnp.ndarray, Union[bool, PyTree]], jnp.ndarray
]
def apply_diagonal(bare_afun, w, x, *args, **kwargs):
x = jnp.hstack((x, x))
return bare_afun(w, x, *args, **kwargs)
class MCMixedState(VariationalMixedState, MCState):
"""Variational State for a Mixed Variational Neural Quantum State.
The state is sampled according to the provided sampler, and its diagonal is sampled
according to another sampler.
"""
def __init__(
self,
sampler,
model=None,
*,
sampler_diag: Sampler = None,
n_samples_diag: int = 1000,
n_discard_diag: Optional[int] = None,
seed=nkjax.PRNGKey(),
sampler_seed: Optional[int] = None,
variables=None,
**kwargs,
):
"""
Constructs the MCMixedState.
Arguments are the same as :class:`MCState`.
Arguments:
sampler: The sampler
model: (Optional) The model. If not provided, you must provide init_fun and apply_fun.
Keyword Arguments:
n_samples: the total number of samples across chains and processes when sampling (default=1000).
n_discard: number of discarded samples at the beginning of each monte-carlo chain (default=n_samples/10).
parameters: Optional PyTree of weights from which to start.
seed: rng seed used to generate a set of parameters (only if parameters is not passed). Defaults to a random one.
sampler_seed: rng seed used to initialise the sampler. Defaults to a random one.
mutable: Dict specifying mutable arguments. Use it to specify if the model has a state that can change
during evaluation, but that should not be optimised. See also flax.linen.module.apply documentation
(default=False)
init_fun: Function of the signature f(model, shape, rng_key, dtype) -> Optional_state, parameters used to
initialise the parameters. Defaults to the standard flax initialiser. Only specify if your network has
a non-standard init method.
apply_fun: Function of the signature f(model, variables, σ) that should evaluate the model. Defaults to
`model.apply(variables, σ)`. Specify only if your network has a non-standard apply method.
training_kwargs: a dict containing the optional keyword arguments to be passed to the apply_fun during training.
Useful for example when you have a batchnorm layer that constructs the average/mean only during training.
"""
seed, seed_diag = jax.random.split(nkjax.PRNGKey(seed))
if sampler_seed is None:
sampler_seed_diag = None
else:
sampler_seed, sampler_seed_diag = jax.random.split(
nkjax.PRNGKey(sampler_seed)
)
self._diagonal = None
hilbert_physical = sampler.hilbert.physical
super().__init__(
sampler.hilbert.physical,
sampler,
model,
**kwargs,
seed=seed,
sampler_seed=sampler_seed,
variables=variables,
)
if sampler_diag is None:
sampler_diag = sampler.replace(hilbert=hilbert_physical)
sampler_diag = sampler_diag.replace(machine_pow=1)
diagonal_apply_fun = nkjax.HashablePartial(apply_diagonal, self._apply_fun)
for kw in ["n_samples", "n_discard"]:
if kw in kwargs:
kwargs.pop(kw)
self._diagonal = MCState(
sampler_diag,
apply_fun=diagonal_apply_fun,
n_samples=n_samples_diag,
n_discard=n_discard_diag,
variables=self.variables,
seed=seed_diag,
sampler_seed=sampler_seed_diag,
**kwargs,
)
# def init(self, *args, **kwargs):
# super().init(*args, **kwargs)
@property
def diagonal(self):
return self._diagonal
@property
def sampler_diag(self) -> Sampler:
"""The Monte Carlo sampler used by this Monte Carlo variational state to
sample the diagonal."""
return self.diagonal.sampler
@sampler_diag.setter
def sampler_diag(self, sampler):
self.diagonal.sampler = sampler
@property
def n_samples_diag(self) -> int:
"""The total number of samples generated at every sampling step
when sampling the diagonal of this mixed state.
"""
return self.diagonal.n_samples
@n_samples_diag.setter
def n_samples_diag(self, n_samples):
self.diagonal.n_samples = n_samples
@property
def chain_length_diag(self) -> int:
"""
Length of the markov chain used for sampling the diagonal configurations.
If running under MPI, the total samples will be n_nodes * chain_length * n_batches.
"""
return self.diagonal.chain_length
@chain_length_diag.setter
def chain_length_diag(self, length: int):
self.diagonal.chain_length = length
@property
def n_discard_diag(self) -> int:
"""Number of discarded samples at the beginning of the markov chain used to
sample the diagonal of this mixed state.
"""
return self.diagonal.n_discard
@n_discard_diag.setter
def n_discard_diag(self, n_discard: Optional[int]):
self.diagonal.n_discard = n_discard
@MCState.parameters.setter
def parameters(self, pars: PyTree):
MCState.parameters.fset(self, pars)
if self.diagonal is not None:
self.diagonal.parameters = pars
@MCState.model_state.setter
def model_state(self, state: PyTree):
MCState.model_state.fset(self, state)
if self.diagonal is not None:
self.diagonal.model_state = state
def reset(self):
super().reset()
if self.diagonal is not None:
self.diagonal.reset()
def expect_operator(self, Ô: AbstractOperator) -> Stats:
σ = self.diagonal.samples
σ_shape = σ.shape
σ = σ.reshape((-1, σ.shape[-1]))
σ_np = np.asarray(σ)
σp, mels = Ô.get_conn_padded(σ_np)
# now we have to concatenate the two
O_loc = local_cost_function(
local_value_op_op_cost,
self._apply_fun,
self.variables,
σp,
mels,
σ,
).reshape(σ_shape[:-1])
# notice that loc.T is passed to statistics, since that function assumes
# that the first index is the batch index.
return statistics(O_loc.T)
def expect_and_grad_operator(
self, Ô: AbstractOperator, is_hermitian=None
) -> Tuple[Stats, PyTree]:
raise NotImplementedError
def to_matrix(self, normalize: bool = True) -> jnp.ndarray:
return netket.nn.to_matrix(
self.hilbert, self._apply_fun, self.variables, normalize=normalize
)
def __repr__(self):
return (
"MCMixedState("
+ "\n hilbert = {},".format(self.hilbert)
+ "\n sampler = {},".format(self.sampler)
+ "\n n_samples = {},".format(self.n_samples)
+ "\n n_discard = {},".format(self.n_discard)
+ "\n sampler_state = {},".format(self.sampler_state)
+ "\n sampler_diag = {},".format(self.sampler_diag)
+ "\n n_samples_diag = {},".format(self.n_samples_diag)
+ "\n n_discard_diag = {},".format(self.n_discard_diag)
+ "\n sampler_state_diag = {},".format(self.diagonal.sampler_state)
+ "\n n_parameters = {})".format(self.n_parameters)
)
def __str__(self):
return (
"MCMixedState("
+ "hilbert = {}, ".format(self.hilbert)
+ "sampler = {}, ".format(self.sampler)
+ "n_samples = {})".format(self.n_samples)
)
# serialization
def serialize_MCMixedState(vstate):
state_dict = {
"variables": serialization.to_state_dict(vstate.variables),
"sampler_state": serialization.to_state_dict(vstate.sampler_state),
"diagonal": serialization.to_state_dict(vstate.diagonal),
"n_samples": vstate.n_samples,
"n_discard": vstate.n_discard,
}
return state_dict
def deserialize_MCMixedState(vstate, state_dict):
import copy
new_vstate = copy.copy(vstate)
new_vstate.reset()
# restore the diagonal first so we can relink the samples
new_vstate._diagonal = serialization.from_state_dict(
vstate._diagonal, state_dict["diagonal"]
)
new_vstate.variables = serialization.from_state_dict(
vstate.variables, state_dict["variables"]
)
new_vstate.sampler_state = serialization.from_state_dict(
vstate.sampler_state, state_dict["sampler_state"]
)
new_vstate.n_samples = state_dict["n_samples"]
new_vstate.n_discard = state_dict["n_discard"]
return new_vstate
serialization.register_serialization_state(
MCMixedState,
serialize_MCMixedState,
deserialize_MCMixedState,
)
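# Illustrative sketch (not part of the original module): with the serializers
# registered above, an existing MCMixedState can be round-tripped through the
# generic flax serialization helpers, e.g.
#
#   from flax import serialization
#   data = serialization.to_bytes(vstate)             # vstate: an MCMixedState built elsewhere
#   vstate = serialization.from_bytes(vstate, data)   # restores variables, sampler states and n_samples
#
# `to_bytes`/`from_bytes` dispatch to serialize_MCMixedState/deserialize_MCMixedState
# through the registration call above. Constructing `vstate` itself requires a
# Hilbert space, a sampler and a model, which are outside the scope of this sketch.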
|
{"hexsha": "53453f36e1d8b361f56b89731f4a019a25d1bf11", "size": 10831, "ext": "py", "lang": "Python", "max_stars_repo_path": "netket/variational/mc_mixed_state.py", "max_stars_repo_name": "inailuig/netket", "max_stars_repo_head_hexsha": "ab57a6fb019edb9ac298969950724781f2ae2b22", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "netket/variational/mc_mixed_state.py", "max_issues_repo_name": "inailuig/netket", "max_issues_repo_head_hexsha": "ab57a6fb019edb9ac298969950724781f2ae2b22", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-16T10:57:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T10:57:10.000Z", "max_forks_repo_path": "netket/variational/mc_mixed_state.py", "max_forks_repo_name": "inailuig/netket", "max_forks_repo_head_hexsha": "ab57a6fb019edb9ac298969950724781f2ae2b22", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7414330218, "max_line_length": 125, "alphanum_fraction": 0.6544178746, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 2473}
|
#include <utility>
// You must include this before including boost headers.
#include "resource-types-fwd.h"
#include <boost/python/class.hpp>
#include <boost/python/def.hpp>
#include <kj/io.h>
#include <capnp/any.h>
#include <capnp/dynamic.h>
#include <capnp/message.h>
#include <capnp/schema.h>
#include <capnp/serialize-packed.h>
#include <capnp/serialize.h>
#include "resource-types.h"
//
// Notes to implementer:
//
// * For now, we focus on exposing memory-backed message reader/builder,
// and do not expose I/O stream backed ones.
//
// * I feel that capnp's packed interface is somewhat inconsistent with the
//   non-packed one, so I define my own interface.
//
// * Do not expose segment-based interface, which requires exposing
// kj::ArrayPtr<const kj::ArrayPtr<const word>> first.
//
// * Do not expose virtual member function for now (because Boost.Python
// requires a wrapper class when exposing virtual member function).
//
// * Do not expose adopt/orphan interface for now.
//
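//
// * Rough intended flow (illustrative only; the Python-side module name comes
//   from the enclosing BOOST_PYTHON_MODULE definition, which is not part of
//   this file): build a message with MallocMessageBuilder, serialize it with
//   messageToFlatArray or messageToPackedArray, and read it back with
//   FlatArrayMessageReader or makePackedMessageReader.
//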
namespace capnp_python {
namespace {
// Return a copy rather than a const reference to reader's state.
capnp::ReaderOptions messageReaderGetOptions(capnp::MessageReader& reader) {
return reader.getOptions();
}
ResourceSharedPtr<capnp::PackedMessageReader> makePackedMessageReader(
kj::ArrayPtr<const kj::byte> array //
) {
kj::ArrayInputStream inputStream(array);
return ResourceSharedPtr<capnp::PackedMessageReader>(new capnp::PackedMessageReader(inputStream));
}
// We need this wrapper because Boost doesn't seem to support rvalue
// references.
void messageBuilderSetRoot(capnp::MessageBuilder& builder, capnp::DynamicStruct::Reader& value) {
builder.setRoot(value);
}
kj::ArrayPtr<const capnp::word> initMessageBuilderFromFlatArrayCopy_2(
kj::ArrayPtr<const capnp::word> array,
capnp::MessageBuilder& target //
) {
return capnp::initMessageBuilderFromFlatArrayCopy(array, target);
}
void initMessageBuilderFromPackedArrayCopy(
kj::ArrayPtr<const capnp::word> array,
capnp::MessageBuilder& target,
capnp::ReaderOptions options //
) {
kj::ArrayInputStream inputStream(array.asBytes());
capnp::PackedMessageReader reader(inputStream, options);
target.setRoot(reader.getRoot<capnp::AnyPointer>());
}
void initMessageBuilderFromPackedArrayCopy_2(
kj::ArrayPtr<const capnp::word> array,
capnp::MessageBuilder& target //
) {
return initMessageBuilderFromPackedArrayCopy(array, target, capnp::ReaderOptions());
}
ResourceSharedPtr<kj::Array<capnp::word>> messageToFlatArray(capnp::MessageBuilder& builder) {
kj::Array<capnp::word> array = capnp::messageToFlatArray(builder);
return ResourceSharedPtr<kj::Array<capnp::word>>(new kj::Array<capnp::word>(std::move(array)));
}
ResourceSharedPtr<kj::Array<kj::byte>> messageToPackedArray(capnp::MessageBuilder& builder) {
kj::VectorOutputStream outputStream;
capnp::writePackedMessage(outputStream, builder);
kj::Array<kj::byte> array = kj::heapArray(outputStream.getArray());
return ResourceSharedPtr<kj::Array<kj::byte>>(new kj::Array<kj::byte>(std::move(array)));
}
} // namespace
void defineMessageTypes(void) {
boost::python::class_<capnp::ReaderOptions>("ReaderOptions", boost::python::init<>())
.def_readwrite("traversalLimitInWords", &capnp::ReaderOptions::traversalLimitInWords)
.def_readwrite("nestingLimit", &capnp::ReaderOptions::nestingLimit);
#include "resource-types-def-macros.h"
// Virtual base classes.
{
using Type = capnp::MessageReader;
boost::python::class_<Type, boost::noncopyable>("MessageReader", boost::python::no_init)
// TODO: Do not expose virtual member function for now.
// .DEF(getSegment)
.def("getOptions", &messageReaderGetOptions)
.def("getRoot", &Type::getRoot<capnp::schema::CodeGeneratorRequest>)
.def("getRoot", &Type::getRoot<capnp::DynamicStruct, capnp::StructSchema>)
.DEF(isCanonical);
}
{
using Type = capnp::MessageBuilder;
boost::python::class_<Type, boost::noncopyable>("MessageBuilder", boost::python::no_init)
// TODO: Do not expose virtual member function for now.
// .DEF(allocateSegment)
.def("setRoot", &messageBuilderSetRoot)
.def("getRoot", &Type::getRoot<capnp::DynamicStruct, capnp::StructSchema>)
.def("initRoot", &Type::initRoot<capnp::DynamicStruct, capnp::StructSchema>)
// TODO: Expose kj::ArrayPtr<const kj::ArrayPtr<const word>>.
// .DEF(getSegmentsForOutput)
.DEF(isCanonical);
}
// Concrete classes.
{
DERIVED_RESOURCE_CLASS_(
capnp::FlatArrayMessageReader,
capnp::MessageReader,
"FlatArrayMessageReader",
boost::python::init<
kj::ArrayPtr<const capnp::word>,
boost::python::optional<capnp::ReaderOptions>>() //
);
}
{
DERIVED_RESOURCE_CLASS_(
capnp::PackedMessageReader,
capnp::MessageReader,
"PackedMessageReader",
boost::python::no_init // For now, expose no constructor.
);
}
{
DERIVED_RESOURCE_CLASS_(
capnp::MallocMessageBuilder,
capnp::MessageBuilder,
"MallocMessageBuilder",
boost::python::init<>() // For now, use default arg values.
);
}
#include "resource-types-undef-macros.h"
// Helper functions.
boost::python::def("makePackedMessageReader", makePackedMessageReader);
boost::python::def(
"initMessageBuilderFromFlatArrayCopy", capnp::initMessageBuilderFromFlatArrayCopy);
boost::python::def("initMessageBuilderFromFlatArrayCopy", initMessageBuilderFromFlatArrayCopy_2);
boost::python::def("messageToFlatArray", messageToFlatArray);
boost::python::def(
"computeSerializedSizeInWords",
static_cast<size_t (*)(capnp::MessageBuilder&)>(capnp::computeSerializedSizeInWords) //
);
boost::python::def(
"initMessageBuilderFromPackedArrayCopy", initMessageBuilderFromPackedArrayCopy);
boost::python::def(
"initMessageBuilderFromPackedArrayCopy", initMessageBuilderFromPackedArrayCopy_2);
boost::python::def("messageToPackedArray", messageToPackedArray);
boost::python::def("computeUnpackedSizeInWords", capnp::computeUnpackedSizeInWords);
}
} // namespace capnp_python
|
{"hexsha": "9cc15762bb4f9147b6cc1501e987964993235ac9", "size": 6248, "ext": "cc", "lang": "C++", "max_stars_repo_path": "py/g1/third-party/capnp/src/message.cc", "max_stars_repo_name": "clchiou/garage", "max_stars_repo_head_hexsha": "446ff34f86cdbd114b09b643da44988cf5d027a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-01-04T06:28:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-20T13:18:40.000Z", "max_issues_repo_path": "py/g1/third-party/capnp/src/message.cc", "max_issues_repo_name": "clchiou/garage", "max_issues_repo_head_hexsha": "446ff34f86cdbd114b09b643da44988cf5d027a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/g1/third-party/capnp/src/message.cc", "max_forks_repo_name": "clchiou/garage", "max_forks_repo_head_hexsha": "446ff34f86cdbd114b09b643da44988cf5d027a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7120418848, "max_line_length": 100, "alphanum_fraction": 0.7170294494, "num_tokens": 1567}
|
import sys
import pickle as pkl
# import libraries
import nltk
from nltk.corpus import stopwords
nltk.download(['punkt', 'wordnet', 'stopwords'])
st = set(stopwords.words('english'))
import re
import time
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report, accuracy_score
from sklearn.svm import LinearSVC
def load_data(database_filepath):
"""
Loads data from database
Input - database_filepath: filepath to sqlite database
Output - X, y: Pandas DataFrames with Data and labels for training.
"""
# get table name from filepath
table = database_filepath.split('/')[-1].split('.')[0]
engine = create_engine(f'sqlite:///{database_filepath}')
df = pd.read_sql_table(table, engine)
X = df['message']
y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
return X, y
def tokenize(text):
"""
Tokenize with NLTK and removes URLs
Input - text - Single string object with english message
Output - list of lowercase, lemmatized word tokens
"""
# Regex string to match URLs
    url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# get list of all urls using regex
detected_urls = re.findall(url_regex, text)
# replace each url in text string with placeholder
for url in detected_urls:
text = text.replace(url, 'urlplaceholder')
    # Remove punctuation and other non-alphanumeric characters
text = re.sub('[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text) # Tokenize block of text
lemmatizer = WordNetLemmatizer() # Initialize Lemmatizer
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
#remove stopwords
clean_tokens = [x for x in clean_tokens if x not in list(st)]
clean_tokens = [x for x in clean_tokens if x not in ['said', 'wa', 'ha', 'u', '000']]
return clean_tokens
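# Illustrative expectation (not executed here): with the rules above,
# tokenize("Water needed at http://example.com!") should yield something like
# ['water', 'needed', 'urlplaceholder']: the URL is replaced by a placeholder,
# punctuation is stripped, tokens are lemmatized and lowercased, and stopwords
# such as 'at' are removed.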
def build_model():
"""
    Builds a Sklearn Pipeline with a CountVectorizer, a TF-IDF Transformer and a Linear Support Vector Classifier.
Input - None
    Output - Grid Search Cross Validation object with 4 cross-validation folds.
"""
pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer(use_idf=False)),
('clf', MultiOutputClassifier(LinearSVC()))])
parameters = {
'vect__max_df': (.45, .5, .65),
'vect__ngram_range': ((1, 1), (1, 2)),
'clf__estimator__C': [.45, .5, .65]
}
model = GridSearchCV(pipeline, param_grid=parameters, verbose=3, cv=4)
return model
def evaluate_model(model, X_test, y_test):
"""
Function to gather basic results for printing to standard out.
Input - Model : trained model object
X_test : Unseen Input features to evaluate model
y_test : Unseen labels to evaluate model
Output - Pandas dataframe with 'precision', 'recall',
'f1-score', 'support', and 'accuracy' for each class
"""
y_pred = model.predict(X_test)
results_df = pd.DataFrame(columns=['precision', 'recall', 'f1-score', 'support', 'accuracy'])
for index, column in enumerate(y_test.columns):
cr_dict = classification_report(y_test[column],
y_pred[:, index],
output_dict=True,
labels=np.unique(y_pred[:, index]))
cr_dict['weighted avg']['accuracy'] = accuracy_score(y_test[column], y_pred[:, index])
results_df = results_df.append(pd.DataFrame(index=[column], data=cr_dict['weighted avg']))
return results_df
def save_model(model, model_filepath):
"""
Saves model as pickle object.
Input - model : model object
model_filepath : filepath destination for output
Output - None, file stored
"""
pkl.dump(model, open(model_filepath, 'wb'))
pass
def build_word_freq(X, y):
"""
Builds a csv table with top 20 most frequent words for each target.
To be used in visualization to demonstrate NLP functionality
Input - X message feature associated with the model
y label features associated with the model
Output - keywords.csv stored in 'data' directory
"""
dff = pd.concat([X, y], axis=1)
corpus = []
corpus_names = []
    for _ in dff.columns[1:]:  # skip only the 'message' column so every target label gets a keyword list
corpus.append(dff.loc[dff[_] == 1]['message'].str.cat(sep=' '))
corpus_names.append(_)
vectorizer = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer())])
vectors = vectorizer.fit_transform(corpus)
names = vectorizer.named_steps['vect'].get_feature_names()
data = vectors.todense().tolist()
# Create a dataframe with the results
keywords_ = pd.DataFrame(data, columns=names)
key_dict = {}
N = 20
for i, v in enumerate(keywords_.iterrows()):
key_dict[corpus_names[i]] = v[1].sort_values(ascending=False)[:N].to_dict()
pd.DataFrame(key_dict).to_csv('./data/keywords.csv')
return True
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, y = load_data(database_filepath)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, y_train)
print('Evaluating model...')
results_df = evaluate_model(model, X_test, y_test)
print("Model results: \n", results_df.mean())
print("Model Best Parameters: \n", model.best_params_)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
print('Building a Word Frequency table for landing page.')
build_word_freq(X, y)
print('Saved word frequency table.')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
{"hexsha": "95cc29f65c69dfba6312cc3768849ee788d6cc91", "size": 6924, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/train_classifier.py", "max_stars_repo_name": "lewi0332/disaster_relief_ml_pipeline", "max_stars_repo_head_hexsha": "774b8459f2d6e337c8003cfb4012adf70461caeb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/train_classifier.py", "max_issues_repo_name": "lewi0332/disaster_relief_ml_pipeline", "max_issues_repo_head_hexsha": "774b8459f2d6e337c8003cfb4012adf70461caeb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/train_classifier.py", "max_forks_repo_name": "lewi0332/disaster_relief_ml_pipeline", "max_forks_repo_head_hexsha": "774b8459f2d6e337c8003cfb4012adf70461caeb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9411764706, "max_line_length": 119, "alphanum_fraction": 0.6520797227, "include": true, "reason": "import numpy", "num_tokens": 1626}
|
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import os
import glob
import torch
import numpy as np
from examples.mnist.gendata import get_projection_grid, project_2d_on_sphere_sun360, rand_rotation_matrix, rotate_grid
import cv2
from utils import rotate_map_given_R, calculate_Rmatrix_from_phi_theta, show_spheres
class SUN360Dataset(Dataset):
# class_names = ('bathroom', 'beach', 'cave', 'church',
# 'desert', 'field ', 'forest', 'mountain', 'theater',
# 'train_interior')
def __init__(self, root, split, vis=False, rotate=True):
# indoor vs outdoor classification
self.root = os.path.join(root, "pano1024x512")
# self.img_indoor_path = glob.glob(os.path.join(self.root, 'indoor', '*/*.jpg'))
# self.img_outdoor_path = glob.glob(os.path.join(self.root, 'outdoor', '*/*.jpg'))
self.img_indoor_path = glob.glob(os.path.join(self.root, 'indoor_sample', '*/*.jpg'))
self.img_outdoor_path = glob.glob(os.path.join(self.root, 'outdoor_sample', '*/*.jpg'))
self.img_path = self.img_indoor_path + self.img_outdoor_path
# self.img_others_path = glob.glob(os.path.join(self.root, 'others', '*.jpg'))
ratio = 0.7
num_train_data = int(ratio * len(self.img_path))
np.random.seed(1)
train_data_path = sorted(np.random.choice(self.img_path, num_train_data, replace=False))
test_data_path = sorted(list(set(self.img_path) - set(train_data_path)))
assert len(train_data_path) + len(test_data_path) == len(self.img_path)
self.split = split
if self.split == 'train':
self.img_path = train_data_path
elif self.split == 'test':
self.img_path = test_data_path
self.rotate = rotate
self.vis = vis
super().__init__()
def __getitem__(self, idx):
        img = cv2.imread(self.img_path[idx])  # BGR
        # A hard-coded debug image used to override the loaded image here; it is kept
        # only as a commented-out reference so the dataset actually iterates over img_path.
        # img = cv2.imread(r"D:\data\SUN360_panoramas_1024x512\pano1024x512\outdoor\others\pano_aaartbimirvryq.jpg")  # BGR
# print(self.img_path[idx])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB
img_np = cv2.resize(img, (224, 224))
# FIXME
bandwidth = 112
grid = get_projection_grid(b=bandwidth)
if self.rotate:
rot = rand_rotation_matrix()
rotated_grid = rotate_grid(rot, grid)
map_x, map_y = rotate_map_given_R(rot, bandwidth * 2, bandwidth * 2)
img_np = cv2.remap(img_np, map_x, map_y, cv2.INTER_CUBIC, borderMode=cv2.BORDER_TRANSPARENT)
else:
rotated_grid = grid
if self.vis:
img_np_vis = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) # RGB
cv2.imshow('rotated_img', img_np_vis)
cv2.waitKey(0)
img_np_ = np.transpose(img_np, (2, 0, 1))
show_spheres(scale=2, points=rotated_grid, rgb=img_np_)
# R = calculate_Rmatrix_from_phi_theta(0, 0)
img_np = np.transpose(img_np, (2, 0, 1)) # [3, 224, 224]
img_torch = torch.FloatTensor(img_np) # [3, 224, 224]
if "indoor" in self.img_path[idx]:
label = torch.zeros(1).type(torch.long)
elif "outdoor" in self.img_path[idx]:
label = torch.ones(1).type(torch.long)
return img_torch, label
def __len__(self):
return len(self.img_path)
if __name__ == '__main__':
root = "D:\data\SUN360_panoramas_1024x512"
dataset = SUN360Dataset(root, 'train')
dataloader = DataLoader(dataset=dataset,
batch_size=4,
shuffle=True,
pin_memory=True,
num_workers=4)
for img, label in dataloader:
print(label)
|
{"hexsha": "08d16f4441195a3cfb5129446014a1504eedcd93", "size": 3842, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/sun360/sun360_dataset.py", "max_stars_repo_name": "csm-kr/s2cnn", "max_stars_repo_head_hexsha": "09652af9811357c4bf6f7a6d3e912a06d7826f70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/sun360/sun360_dataset.py", "max_issues_repo_name": "csm-kr/s2cnn", "max_issues_repo_head_hexsha": "09652af9811357c4bf6f7a6d3e912a06d7826f70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/sun360/sun360_dataset.py", "max_forks_repo_name": "csm-kr/s2cnn", "max_forks_repo_head_hexsha": "09652af9811357c4bf6f7a6d3e912a06d7826f70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.42, "max_line_length": 127, "alphanum_fraction": 0.610619469, "include": true, "reason": "import numpy", "num_tokens": 984}
|
import argparse
import json
import os
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import numpy as np
import scipy.spatial  # import the subpackage explicitly so scipy.spatial.distance is available
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
def parse_arguments():
parser = argparse.ArgumentParser(description="TSNE Visualization of Papers in ML4Code")
parser.add_argument("json", default=False, help="the path the json containing all papers.")
parser.add_argument("outdir", default=False, help="the target path of the visualizations papers.")
parser.add_argument("--num-relwork", default=4, help="Number of related work per paper.", type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
num_relworks = args.num_relwork
with open(args.json) as f:
data = json.load(f)
print(f"Num papers: {len(data)}")
lemmatizer = WordNetLemmatizer()
stopwords = set(stopwords.words('english'))
stopwords.update(["one", "two", "using"])
tokens_per_paper = []
keys = []
for paper_info in data:
keys.append((paper_info["key"], paper_info["title"]))
text = paper_info["title"] + " " + paper_info["abstract"].replace("<p>", " ").replace("</p>", " ") + " ".join(paper_info["tags"])
lemmatized_tokens = [lemmatizer.lemmatize(w).lower() for w in nltk.word_tokenize(text) if w.lower() not in stopwords and w.isalpha()]
tokens_per_paper.append(lemmatized_tokens)
dictionary = Dictionary(tokens_per_paper)
dictionary.filter_extremes(no_below=2, no_above=0.5)
corpus = [dictionary.doc2bow(line) for line in tokens_per_paper]
model = TfidfModel(corpus)
tf_idf_vectors = []
for bow in corpus:
vec = np.zeros(len(dictionary), dtype=np.float64)
for i, v in model[bow]:
vec[i] = v
tf_idf_vectors.append(vec)
tf_idf_vectors = np.array(tf_idf_vectors)
distances = scipy.spatial.distance.cdist(tf_idf_vectors, tf_idf_vectors, metric='cosine')
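    # column 0 of the sorted distances is the paper itself (distance 0), so skip it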
sorted_idxs = np.argsort(distances, axis=-1)[:, 1:num_relworks+1]
os.makedirs(args.outdir, exist_ok=True)
for i, (bibkey, title) in enumerate(keys):
with open(os.path.join(args.outdir, bibkey + ".json"), "w") as f:
json.dump([keys[j] for j in sorted_idxs[i]], f)
|
{"hexsha": "93e232b6688bcc34af781c3319bd121cee2252fe", "size": 2401, "ext": "py", "lang": "Python", "max_stars_repo_path": "etc/compute_related.py", "max_stars_repo_name": "learning2hash/learning2hash.github.io", "max_stars_repo_head_hexsha": "71447a57e0288660ba5fc245e19b2cc748884be6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-04-23T12:54:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T08:46:43.000Z", "max_issues_repo_path": "etc/compute_related.py", "max_issues_repo_name": "learning2hash/learning2hash.github.io", "max_issues_repo_head_hexsha": "71447a57e0288660ba5fc245e19b2cc748884be6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-05-21T13:23:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-14T12:35:39.000Z", "max_forks_repo_path": "etc/compute_related.py", "max_forks_repo_name": "learning2hash/learning2hash.github.io", "max_forks_repo_head_hexsha": "71447a57e0288660ba5fc245e19b2cc748884be6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-05-17T19:47:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-06T02:13:56.000Z", "avg_line_length": 32.8904109589, "max_line_length": 141, "alphanum_fraction": 0.6817992503, "include": true, "reason": "import numpy,import scipy", "num_tokens": 602}
|
#!/usr/bin/env Rscript
library(readr)
library(lmerTest)
library(car)
library(psych)
library(scales)
speed_data <- read_csv('data.csv')
#calculate reading speed in WPM
speed_data$speed <- speed_data$num_words/(speed_data$adjust_rt/60000)
#remove retake participants
speed_data <- subset(speed_data, retake != 1)
#remove outliers
iqr = IQR(speed_data[speed_data$dyslexia_bin == 0,]$speed,na.rm=TRUE)
cutoff_high = median(speed_data$speed) +3*iqr #3*iqr=645, cutoff_high = 928
#-------remove trials based on speed-------
result_analysis <- speed_data[! speed_data$speed > cutoff_high, ]
result_analysis <- result_analysis[ ! result_analysis$speed < 10,]
#-------remove smartphone users-------
length(unique(subset(result_analysis$uuid, result_analysis$device=='smartphone')))
#remove 64 smartphone users, 363 trials
result_analysis <- result_analysis[! result_analysis$device == 'smartphone',]
#-------remove trials based on comprehension < 2/3-------
result_analysis <- result_analysis[ ! result_analysis$correct_rate < .6,]
#remove 111 trials
result_analysis$log_speed <- log(result_analysis$speed)
#dyslexia in three groups
model <- lmer(log_speed ~ img_width + num_words + page_condition*as.factor(dyslexia) + age + english_native + (1 | uuid), data = result_analysis)
AIC(model)
summary(model)
|
{"hexsha": "b5d79192eb1bf559f3d35a6c3ae09886d6e0ffaf", "size": 1306, "ext": "r", "lang": "R", "max_stars_repo_path": "example/reading/script.r", "max_stars_repo_name": "uwdata/boba", "max_stars_repo_head_hexsha": "80ff10ffd9a2ae99002bc7e88d173869b86c736c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2020-08-02T14:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T10:32:51.000Z", "max_issues_repo_path": "example/reading/script.r", "max_issues_repo_name": "uwdata/boba", "max_issues_repo_head_hexsha": "80ff10ffd9a2ae99002bc7e88d173869b86c736c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-09-26T23:05:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-20T18:34:31.000Z", "max_forks_repo_path": "example/reading/script.r", "max_forks_repo_name": "uwdata/boba", "max_forks_repo_head_hexsha": "80ff10ffd9a2ae99002bc7e88d173869b86c736c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-18T05:39:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-18T05:39:46.000Z", "avg_line_length": 32.65, "max_line_length": 145, "alphanum_fraction": 0.7488514548, "num_tokens": 333}
|
# Copyright (c) 2021 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import numpy as np
def get_filename_to_download(output_dir: str, data_uri: str):
if output_dir is None:
output_file = None
else:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_file = os.path.join(output_dir, data_uri.split('/')[-1])
return output_file
def save_list_to_csv(csv_data, csv_path, csv_file_name):
with open(os.path.join(csv_path, csv_file_name), 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(csv_data)
def split_data_into_train_val(data_list, val_size=10000, seed=0):
    n = len(data_list) - 1  # first row is the title row
np.random.seed(seed)
idx_val = np.random.permutation(n)[:val_size]
idx_train = np.setdiff1d(np.arange(n), idx_val)
data = data_list[1:]
train_data = [data_list[0]] + [data[i] for i in idx_train]
val_data = [data_list[0]] + [data[i] for i in idx_val]
return train_data, val_data
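# Illustrative usage (not part of the original module): with a list whose first
# row is a header, e.g.
#
#   rows = [["path", "label"], ["a.png", 0], ["b.png", 1], ["c.png", 0]]
#   train, val = split_data_into_train_val(rows, val_size=1)
#
# both outputs keep the header row, and together they cover every data row
# exactly once.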
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
|
{"hexsha": "6e1dd0bff4dd60afe467839a14f37f336d5a1ddf", "size": 1665, "ext": "py", "lang": "Python", "max_stars_repo_path": "responsible_ai/data_cleansing/datasets/utils.py", "max_stars_repo_name": "JonathanLehner/nnabla-examples", "max_stars_repo_head_hexsha": "2971b987484945e12fb171594181908789485a0f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "responsible_ai/data_cleansing/datasets/utils.py", "max_issues_repo_name": "JonathanLehner/nnabla-examples", "max_issues_repo_head_hexsha": "2971b987484945e12fb171594181908789485a0f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "responsible_ai/data_cleansing/datasets/utils.py", "max_forks_repo_name": "JonathanLehner/nnabla-examples", "max_forks_repo_head_hexsha": "2971b987484945e12fb171594181908789485a0f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3, "max_line_length": 74, "alphanum_fraction": 0.7045045045, "include": true, "reason": "import numpy", "num_tokens": 415}
|
\documentclass[english]{../thermomemo/thermomemo}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{array}% improves tabular environment.
\usepackage{dcolumn}% also improves tabular environment, with decimal centring.
\usepackage{booktabs}
\usepackage{todonotes}
\usepackage{subcaption,caption}
\usepackage{xspace}
\usepackage{tikz}
\usetikzlibrary{arrows}
\usetikzlibrary{snakes}
\usepackage{verbatim}
\usepackage{hyperref}
\usepackage{xcolor}
\hypersetup{
colorlinks,
linkcolor={red!50!black},
citecolor={blue!50!black},
urlcolor={blue!80!black}
}
%
% Egendefinerte
%
% Kolonnetyper for array.sty:
\newcolumntype{C}{>{$}c<{$}}
\newcolumntype{L}{>{$}l<{$}}
%
\newcommand*{\unit}[1]{\ensuremath{\,\mathrm{#1}}}
\newcommand*{\uunit}[1]{\ensuremath{\mathrm{#1}}}
%\newcommand*{\od}[3][]{\frac{\mathrm{d}^{#1}#2}{\mathrm{d}{#3}^{#1}}}% ordinary derivative
\newcommand*{\od}[3][]{\frac{\dif^{#1}#2}{\dif{#3}^{#1}}}% ordinary derivative
\newcommand*{\pd}[3][]{\frac{\partial^{#1}#2}{\partial{#3}^{#1}}}% partial derivative
\newcommand*{\pdc}[3]{\frac{\partial^{2}#1}{\partial{#2}\partial{#3}}}% partial derivative
\newcommand*{\pdt}[3][]{{\partial^{#1}#2}/{\partial{#3}^{#1}}}% partial
% derivative for inline use.
\newcommand{\pone}[3]{\frac{\partial #1}{\partial #2}_{#3}}% partial
% derivative with information of
% constant variables
\newcommand{\ponel}[3]{\frac{\partial #1}{\partial #2}\bigg|_{#3}} % partial derivative with informatio of constant variable. A line is added.
\newcommand{\ptwo}[3]{\frac{\partial^{2} #1}{\partial #2 \partial
#3}} % partial differential in two different variables
\newcommand{\pdn}[3]{\frac{\partial^{#1}#2}{\partial{#3}^{#1}}}% partial derivative
% Total derivative:
\newcommand*{\ttd}[2]{\frac{\mathrm{D} #1}{\mathrm{D} #2}}
\newcommand*{\td}[2]{\frac{\mathrm{d} #1}{\mathrm{d} #2}}
\newcommand*{\ddt}{\frac{\partial}{\partial t}}
\newcommand*{\ddx}{\frac{\partial}{\partial x}}
% Vectors etc:
% For Computer Modern:
\DeclareMathAlphabet{\mathsfsl}{OT1}{cmss}{m}{sl}
\renewcommand*{\vec}[1]{\boldsymbol{#1}}%
\newcommand*{\vektor}[1]{\boldsymbol{#1}}%
\newcommand*{\tensor}[1]{\mathsfsl{#1}}% 2. order tensor
\newcommand*{\matr}[1]{\tensor{#1}}% matrix
\renewcommand*{\div}{\boldsymbol{\nabla\cdot}}% divergence
\newcommand*{\grad}{\boldsymbol{\nabla}}% gradient
% fancy differential from Claudio Beccari, TUGboat:
% adjusts spacing automatically
\makeatletter
\newcommand*{\dif}{\@ifnextchar^{\DIfF}{\DIfF^{}}}
\def\DIfF^#1{\mathop{\mathrm{\mathstrut d}}\nolimits^{#1}\gobblesp@ce}
\def\gobblesp@ce{\futurelet\diffarg\opsp@ce}
\def\opsp@ce{%
\let\DiffSpace\!%
\ifx\diffarg(%
\let\DiffSpace\relax
\else
\ifx\diffarg[%
\let\DiffSpace\relax
\else
\ifx\diffarg\{%
\let\DiffSpace\relax
\fi\fi\fi\DiffSpace}
\makeatother
%
\newcommand*{\me}{\mathrm{e}}% e is not a variable (2.718281828...)
%\newcommand*{\mi}{\mathrm{i}}% nor i (\sqrt{-1})
\newcommand*{\mpi}{\uppi}% nor pi (3.141592...) (works for for Lucida)
%
% lav tekst-indeks/subscript/pedex
\newcommand*{\ped}[1]{\ensuremath{_{\text{#1}}}}
\newcommand*{\ap}[1]{\ensuremath{^{\text{#1}}}}
\newcommand*{\apr}[1]{\ensuremath{^{\mathrm{#1}}}}
\newcommand*{\pedr}[1]{\ensuremath{_{\mathrm{#1}}}}
%
\newcommand*{\volfrac}{\alpha}% volume fraction
\newcommand*{\surften}{\sigma}% coeff. of surface tension
\newcommand*{\curv}{\kappa}% curvature
\newcommand*{\ls}{\phi}% level-set function
\newcommand*{\ep}{\Phi}% electric potential
\newcommand*{\perm}{\varepsilon}% electric permittivity
\newcommand*{\visc}{\mu}% molecular (dymamic) viscosity
\newcommand*{\kvisc}{\nu}% kinematic viscosity
\newcommand*{\cfl}{C}% CFL number
\newcommand*{\cons}{\vec U}
\newcommand*{\flux}{\vec F}
\newcommand*{\dens}{\rho}
\newcommand*{\svol}{\ensuremath v}
\newcommand*{\temp}{\ensuremath T}
\newcommand*{\vel}{\ensuremath u}
\newcommand*{\mom}{\dens\vel}
\newcommand*{\toten}{\ensuremath E}
\newcommand*{\inten}{\ensuremath e}
\newcommand*{\press}{\ensuremath p}
\renewcommand*{\ss}{\ensuremath a}
\newcommand*{\jac}{\matr A}
%
\newcommand*{\abs}[1]{\lvert#1\rvert}
\newcommand*{\bigabs}[1]{\bigl\lvert#1\bigr\rvert}
\newcommand*{\biggabs}[1]{\biggl\lvert#1\biggr\rvert}
\newcommand*{\norm}[1]{\lVert#1\rVert}
%
\newcommand*{\e}[1]{\times 10^{#1}}
\newcommand*{\ex}[1]{\times 10^{#1}}%shorthand -- for use e.g. in tables
\newcommand*{\exi}[1]{10^{#1}}%shorthand -- for use e.g. in tables
\newcommand*{\nondim}[1]{\ensuremath{\mathit{#1}}}% italic iflg. ISO. (???)
\newcommand*{\rey}{\nondim{Re}}
\newcommand*{\acro}[1]{\textsc{\MakeLowercase{#1}}}%acronyms etc.
\newcommand*{\ousum}[2]{\overset{#1}{\underset{#2}{\sum}}}
\newcommand{\nto}{\ensuremath{\mbox{N}_{\mbox{\scriptsize 2}}}}
\newcommand{\chfire}{\ensuremath{\mbox{CH}_{\mbox{\scriptsize 4}}}}
%\newcommand*{\checked}{\ding{51}}
\newcommand{\coto}{\ensuremath{\text{CO}_{\text{\scriptsize 2}}}}
\newcommand{\celsius}{\ensuremath{^\circ\text{C}}}
\newcommand{\clap}{Clapeyron~}
\newcommand{\subl}{\ensuremath{\text{sub}}}
\newcommand{\spec}{\text{spec}}
\newcommand{\sat}{\text{sat}}
\newcommand{\sol}{\text{sol}}
\newcommand{\liq}{\text{liq}}
\newcommand{\vap}{\text{vap}}
\newcommand{\amb}{\text{amb}}
\newcommand{\tr}{\text{tr}}
\newcommand{\crit}{\text{crit}}
\newcommand{\entr}{\ensuremath{\text{s}}}
\newcommand{\fus}{\text{fus}}
\newcommand{\flash}[1]{\ensuremath{#1\text{-flash}}}
\newcommand{\spce}[2]{\ensuremath{#1\, #2\text{ space}}}
\newcommand{\spanwagner}{\text{Span--Wagner}}
\newcommand{\triplepoint}{\text{TP triple point}}
\newcommand{\wrpt}{\text{with respect to}\xspace}
\newcommand{\excess}{\text{E}\xspace}
\newcommand{\comb}{\text{comb}\xspace}
\newcommand{\FH}{\text{FH}\xspace}
\newcommand{\SG}{\text{SG}\xspace}
\newcommand{\NC}{\text{NC}\xspace}
\newcommand{\NGr}{\text{NG}\xspace}
\newcommand{\res}{\text{R}\xspace}
\title{UNIFAC excess Gibbs mixing rules}
\author{Morten Hammer}
\graphicspath{{gfx/}}
\begin{document}
\frontmatter
\tableofcontents
\section{Introduction}
The UNIFAC (\textit{UNI}QUAC \textit{F}unctional-group \textit{A}ctivity
\textit{C}oefficients) model \cite{Fredenslund1975}, is a group
contribution model, and a further development of the UNIQUAC model
\cite{Abrams1975}. Being a group contribution model, it accounts for
molecular groups like $\text{C-H}_2$ and $\text{C-H}_3$,
which can be thought of as monomers in a polymer.
The UNIFAC excess Gibbs mixing rule has found application in the
predictive SRK (PSRK) model \cite{Holderbaum1991} and in VTPR
\cite{Collinet2006}. It is also used as the universal mixing rule
(UMR) \cite{Voutsas2004} together with t-mPR
\cite{Magoulas1990,Avlonitis1994}. The combined model is denoted
UMR-PR.
\section{UNIFAC model}
The UNIFAC model \cite{Fredenslund1975} is given as follows,
\begin{equation}
\frac{A^\res}{RT} = \frac{A^\excess}{RT} - \frac{A^\excess}{RT_0} = - \ousum{\NC}{i} n_i \ousum{\NGr}{k} v_k^i Q_k(\Lambda_k - \Lambda_k^i).
\label{eq:Ae}
\end{equation}
The symbols and formalism of Michelsen \cite{Michelsen2007} are
used. $A^\excess/(RT_0)$ is the combinatorial term and is described in
a later subsection. It is assumed that $A^\excess = G^\excess$.
The different symbols are defined as follows,
\begin{align}
\Lambda_k &= \ln \ousum{\NGr}{j} \Theta_j \tilde{E}_{jk},\label{eq:Lambda_k}\\
\Lambda_k^i &= \ln \ousum{\NGr}{j} \Theta_j^i \tilde{E}_{jk}, \label{eq:Lambda_ki}\\
\tilde{E}_{jk} &= \exp \left(-\frac{\tilde{U}_{jk}}{RT}\right), \label{eq:E_jk}\\
\Theta_j &= \frac{Q_j \ousum{\NC}{l}n_lv_j^l}{\ousum{\NC}{l}n_l\ousum{\NGr}{m}v_m^lQ_m}, \label{eq:Theta_j}\\
\Theta_j^i &= \frac{Q_j v_j^i}{\ousum{\NGr}{k}v_k^iQ_k}. \label{eq:Theta_ji}
\end{align}
Here $Q_k$ is the group surface area of group $k$, and $v_k^i$ is the
number of groups $k$ in molecule $i$. Both $Q_k$ and $v_k^i$ are
constants. $\tilde{U}_{jk}$ is the interaction energy per unit surface
area of the $j-k$ group interaction. $\tilde{U}_{jk}$ can be a
constant or a function of temperature.
\subsection{Differentials}
Differentiating \ref{eq:Ae} \wrpt $n_\alpha$ we get,
\begin{equation}
\frac{1}{RT}\pd{A^\res}{n_\alpha} = \frac{A^\res_\alpha}{RT} = - \ousum{\NGr}{k} v_k^\alpha Q_k(\Lambda_k - \Lambda_k^\alpha) - \ousum{\NC}{i} n_i \ousum{\NGr}{k} v_k^i Q_k \pd{\Lambda_k}{n_\alpha}.
\label{eq:dAedna}
\end{equation}
Michelsen \cite[Chap.~5,Eq.~56]{Michelsen2007} shows that
\begin{equation}
\ousum{\NC}{i} n_i \ousum{\NGr}{k} v_k^i Q_k \pd{\Lambda_k}{n_\alpha} = \ousum{\NGr}{j}v_j^\alpha Q_j\left(\ousum{\NGr}{k}\frac{\Theta_j \tilde{E}_{jk}}{\ousum{\NGr}{l}\Theta_l \tilde{E}_{lk}} -1\right).
\label{eq:sumdLambdadna}
\end{equation}
However, since second differentials are required, it does not help much for
the compositional differentials. Using
\begin{equation}
\Lambda_k = \ln \ousum{\NGr}{j} \Theta_j \tilde{E}_{jk} = \ln \ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk} -\ln \ousum{\NC}{l}n_l\ousum{\NGr}{m}v_m^lQ_m,
\end{equation}
we get
\begin{equation}
\pd{\Lambda_k}{n_\alpha} = \frac{\ousum{\NGr}{j} v_j^\alpha Q_j \tilde{E}_{jk}}{\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk}} - \frac{\ousum{\NGr}{m}v_m^\alpha Q_m}{\ousum{\NC}{l}n_l\ousum{\NGr}{m}v_m^lQ_m}.
\label{eq:dLambdadna}
\end{equation}
Differentiating \ref{eq:dAedna} further \wrpt $n_\beta$ we get,
\begin{align}
\frac{A^\res_{\alpha\beta}}{RT} &= - \ousum{\NGr}{k} Q_k \left(v_k^\alpha \pd{\Lambda_k}{n_\beta} + v_k^\beta \pd{\Lambda_k}{n_\alpha}\right) - \ousum{\NC}{i} n_i \ousum{\NGr}{k} v_k^i Q_k \ptwo{\Lambda_k}{n_\alpha}{n_\beta}, \label{eq:d2Aedna2}\\
&= - \ousum{\NGr}{k} Q_k \left(v_k^\alpha \pd{\Lambda_k}{n_\beta} + v_k^\beta \pd{\Lambda_k}{n_\alpha}\right) - \ousum{\NGr}{k} \left(\ousum{\NC}{i} n_i v_k^i\right) Q_k \ptwo{\Lambda_k}{n_\alpha}{n_\beta}.
\end{align}
Differentiating Equation \ref{eq:dLambdadna} we get the second differential of $\Lambda_k$,
\begin{equation}
\ptwo{\Lambda_k}{n_\alpha}{n_\beta} = -\frac{\left(\ousum{\NGr}{j} v_j^\alpha Q_j \tilde{E}_{jk}\right) \left(\ousum{\NGr}{j} v_j^\beta Q_j \tilde{E}_{jk}\right)}{\left(\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk}\right)^2} + \frac{\left(\ousum{\NGr}{m}v_m^\alpha Q_m\right) \left(\ousum{\NGr}{m}v_m^\beta Q_m\right)}{\left(\ousum{\NC}{l}n_l\ousum{\NGr}{m}v_m^lQ_m\right)^2}.
\label{eq:ddLambdadnadnb}
\end{equation}
We immediately see that Equation \ref{eq:ddLambdadnadnb} gives a symmetric matrix of second differentials.
Differentiating \ref{eq:Ae} \wrpt $T$ we get,
\begin{align}
\pd{\left(\frac{A^\res}{RT}\right)}{T} &= - \ousum{\NC}{i} n_i \ousum{\NGr}{k} v_k^i Q_k\left(\pd{\Lambda_k}{T} - \pd{\Lambda_k^i}{T}\right), \label{eq:dAedT} \\
\pdn{2}{\left(\frac{A^\res}{RT}\right)}{T} &= - \ousum{\NC}{i} n_i \ousum{\NGr}{k} v_k^i Q_k\left(\pdn{2}{\Lambda_k}{T} - \pdn{2}{\Lambda_k^i}{T}\right). \label{eq:d2AedT2}
\end{align}
Here,
\begin{align}
\pd{\Lambda_k}{T} &= \frac{\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \pd{\tilde{E}_{jk}}{T}}{\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk}},\label{eq:dLkdT} \\
\pd{\Lambda_k^i}{T} &= \frac{\ousum{\NGr}{j} Q_j v_j^i \pd{\tilde{E}_{jk}}{T}}{\ousum{\NGr}{j} Q_j v_j^i \tilde{E}_{jk}},\label{eq:dLikdT} \\
\pdn{2}{\Lambda_k}{T} &= \frac{\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \pdn{2}{\tilde{E}_{jk}}{T}}{\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk}} - \left(\pd{\Lambda_k}{T}\right)^2,\label{eq:d2LkdT2} \\
\pdn{2}{\Lambda_k^i}{T} &= \frac{\ousum{\NGr}{j} Q_j v_j^i \pdn{2}{\tilde{E}_{jk}}{T}}{\ousum{\NGr}{j} Q_j v_j^i \tilde{E}_{jk}} - \left(\pd{\Lambda_k^i}{T}\right)^2.\label{eq:d2LikdT2}
\end{align}
Differentiating Equation \ref{eq:dAedna} we get
\begin{equation}
\pd{\left(\frac{A^\res_\alpha}{RT}\right)}{T} = - \ousum{\NGr}{k} v_k^\alpha Q_k\left(\pd{\Lambda_k}{T} - \pd{\Lambda_k^\alpha}{T}\right) - \ousum{\NGr}{k} \left(\ousum{\NC}{i} n_i v_k^i\right) Q_k \ptwo{\Lambda_k}{n_\alpha}{T}. \label{eq:d2AednadT}
\end{equation}
The cross differential of $\Lambda_k$ is found by differentiating Equation \ref{eq:dLambdadna} \wrpt $T$,
\begin{equation}
\ptwo{\Lambda_k}{n_\alpha}{T} = \frac{\ousum{\NGr}{j} v_j^\alpha Q_j \pd{\tilde{E}_{jk}}{T}}{\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk}} - \frac{\left(\ousum{\NGr}{j} v_j^\alpha Q_j \tilde{E}_{jk}\right) \left(\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \pd{\tilde{E}_{jk}}{T}\right)}{\left(\ousum{\NC}{l}n_l \ousum{\NGr}{j} v_j^l Q_j \tilde{E}_{jk}\right)^2} .
\label{eq:d2LambdadnadT}
\end{equation}
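For the special case where $\tilde{U}_{jk}$ is taken as a constant (it may in
general be a temperature function), Equation \ref{eq:E_jk} gives the
derivatives entering Equations \ref{eq:dLkdT}--\ref{eq:d2LikdT2} and
\ref{eq:d2LambdadnadT} directly,
\begin{align}
\pd{\tilde{E}_{jk}}{T} &= \frac{\tilde{U}_{jk}}{RT^2}\tilde{E}_{jk}, \\
\pdn{2}{\tilde{E}_{jk}}{T} &= \left(\frac{\tilde{U}_{jk}^2}{R^2T^4} - \frac{2\tilde{U}_{jk}}{RT^3}\right)\tilde{E}_{jk}.
\end{align}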
\subsection{The combinatorial term}
The combinatorial term consists of a Flory-Huggins (\FH) and a
Staverman-Guggenheim (\SG) contribution,
\begin{align}
G^{\excess,\comb} &= G^{\excess,\FH} + G^{\excess,\SG},\label{eq:comb}\\
G^{\excess,\FH} &= \underset{i}{\sum} x_i \ln \frac{\phi_i}{x_i}, \label{eq:fh}\\
G^{\excess,\SG} &= \frac{z}{2} \underset{i}{\sum} x_i q_i \ln \frac{\theta_i}{\phi_i}\label{eq:sg}.
\end{align}
Where $z=10$,
\begin{align}
\phi_i &= \frac{x_i r_i}{\underset{j}{\sum} x_j r_j},\label{eq:phii}\\
\theta_i &= \frac{x_i q_i}{\underset{j}{\sum} x_j q_j}.\label{eq:thetai}
\end{align}
$r_i$ and $q_i$ are molecular parameters, and none of the parameters are temperature dependent. $r_i$ is the molecular van der Waals volume and $q_i$ is the molecular van der Waals surface area. They are calculated from the group parameters as follows,
\begin{align}
r_i &= \ousum{\NGr}{k}v_k^i R_k,\label{eq:ri}\\
q_i &= \ousum{\NGr}{k}v_k^i Q_k.\label{eq:qi}
\end{align}
\subsubsection{Differentials of the Flory-Huggins combinatorial term}
Writing Equation \ref{eq:fh} as a function of mole numbers, we get,
\begin{align}
G^{\excess,\FH} &= \ousum{\NC}{i} n_i \left(\ln \phi_i - \ln n_i + \ln \ousum{\NC}{j}n_j\right), \\
&= \ousum{\NC}{i} n_i \left(\ln n_i r_i - \ln \ousum{\NC}{j} n_j r_j - \ln n_i + \ln \ousum{\NC}{j}n_j\right) = \ousum{\NC}{i} n_i \ln r_i -n\ln \ousum{\NC}{j}n_jr_j + n\ln n. \label{eq:fhn}
\end{align}
Differentiating $G^{\excess,\FH}$ \wrpt $n_\alpha$ we get,
\begin{align}
G^{\excess,\FH}_\alpha &= \ln r_\alpha -\ln \ousum{\NC}{j}n_jr_j + \ln \ousum{\NC}{j}n_j + 1-\frac{r_\alpha\ousum{\NC}{i} n_i}{\ousum{\NC}{j}n_jr_j},\\
&= \ln r_\alpha -\ln \ousum{\NC}{j}n_jr_j + \ln n + 1-\frac{n r_\alpha}{\ousum{\NC}{j}n_jr_j}, \label{eq:dfhndna}\\
&= \ln \left(\frac{n r_\alpha}{\ousum{\NC}{j}n_jr_j}\right) + 1-\frac{n r_\alpha}{\ousum{\NC}{j}n_jr_j}\\
&= \ln \left(\frac{\phi_\alpha}{x_\alpha}\right) + 1-\frac{\phi_\alpha}{x_\alpha}
\end{align}
Differentiating \ref{eq:dfhndna} \wrpt $n_\beta$ we get,
\begin{align}
G^{\excess,\FH}_{\alpha\beta} &= -\frac{r_\alpha + r_\beta}{\ousum{\NC}{j}n_jr_j} + \frac{1}{n} + \frac{n r_\alpha r_\beta}{\left(\ousum{\NC}{j}n_jr_j\right)^2}. \label{eq:d2fhndnadnb}
\end{align}
\subsubsection{Differentials of the Staverman-Guggenheim combinatorial term}
Writing Equation \ref{eq:sg} as a function of mole numbers, we get,
\begin{align}
G^{\excess,\SG} &= \frac{z}{2} \ousum{\NC}{i} n_i q_i \left(\ln \theta_i - \ln \phi_i\right),\\
&= \frac{z}{2} \ousum{\NC}{i} n_i q_i \left(\ln \frac{q_i}{r_i} - \ln \ousum{\NC}{j} n_j q_j + \ln \ousum{\NC}{j} n_j r_j \right)\label{eq:sgn}.
\end{align}
Differentiating $G^{\excess,\SG}$ \wrpt $n_\alpha$ we get,
\begin{align}
G^{\excess,\SG}_\alpha &= \frac{z}{2} q_\alpha \left( \ln \frac{q_\alpha}{r_\alpha} - \ln \ousum{\NC}{j} n_j q_j + \ln \ousum{\NC}{j} n_j r_j - 1 + \frac{r_\alpha \ousum{\NC}{i} n_i q_i}{q_\alpha\ousum{\NC}{j} n_j r_j}\right), \label{eq:dsgnda}\\
&= \frac{z}{2} q_\alpha \left( - \ln \left(\frac{r_\alpha \ousum{\NC}{j} n_j q_j}{q_\alpha \ousum{\NC}{j} n_j r_j}\right) - 1 + \frac{r_\alpha \ousum{\NC}{i} n_i q_i}{q_\alpha\ousum{\NC}{j} n_j r_j}\right),\\
&= \frac{z}{2} q_\alpha \left( \ln \left(\frac{\theta_\alpha}{\phi_\alpha}\right) - 1 + \frac{\phi_\alpha}{\theta_\alpha}\right).
\end{align}
Differentiating \ref{eq:dsgnda} \wrpt $n_\beta$ we get,
\begin{align}
G^{\excess,\SG}_{\alpha\beta} &= \frac{z}{2} \left( - \frac{q_\alpha q_\beta}{\ousum{\NC}{j} n_j q_j} + \frac{q_\alpha r_\beta + q_\beta r_\alpha}{\ousum{\NC}{j} n_j r_j} - \frac{r_\alpha r_\beta \ousum{\NC}{i} n_i q_i}{\left(\ousum{\NC}{j} n_j r_j\right)^2}\right). \label{eq:dssgndadb}
\end{align}
\subsubsection{Comparison with the combinatorial activity coefficient of Fredenslund et al.}
Fredenslund et al. \cite{Fredenslund1975} use the following expression for the combinatorial activity coefficient,
\begin{align}
\ln \gamma_{\text{c}} &= \ln \frac{\phi_i}{x_i} + \frac{z}{2} q_i \ln \frac{\theta_i}{\phi_i} + l_i - \frac{\phi_i}{x_i}\ousum{\NC}{j} x_j l_j , \label{eq:gamma_c} \\
l_i &= \frac{z}{2} \left(r_i - q_i\right) - \left(r_i - 1\right) . \label{eq:li}
\end{align}
Inserting for $l_i$ in the last term of Equation \ref{eq:gamma_c}, we get,
\begin{align}
\frac{\phi_i}{x_i}\ousum{\NC}{j} x_j l_j &= \frac{z\phi_i}{2x_i}\left(\ousum{\NC}{j} x_j r_j - \ousum{\NC}{j}x_jq_j\right) - \frac{\phi_i}{x_i}\left(\ousum{\NC}{j} x_j r_j - 1\right), \\
&= \frac{z}{2}\left(r_i - \frac{q_i\phi_i}{\theta_i}\right) - r_i + \frac{\phi_i}{x_i}. \label{eq:second_term}
\end{align}
Inserting Equation \ref{eq:second_term} and Equation \ref{eq:li} into Equation \ref{eq:gamma_c} we get,
\begin{align}
\ln \gamma_{\text{c}} &= \ln \frac{\phi_i}{x_i} + 1 - \frac{\phi_i}{x_i} + \frac{z}{2} q_i \left(\ln \frac{\theta_i}{\phi_i} -1 + \frac{\phi_i}{\theta_i}\right).
\end{align}
We see that
\begin{equation}
\ln \gamma_{\text{c}} = G^{\excess,\FH}_\alpha + G^{\excess,\SG}_\alpha.
\end{equation}
\section{UMR-PR model}
The UMR-PR model was developed by Voutsas et al.\ \cite{Voutsas2004}, and
uses the UNIFAC mixing rules together with a volume-translated
Peng-Robinson EOS, t-mPR \cite{Avlonitis1994}.
UMR-PR applies the following covolume mixing rule,
\begin{align}
b &= \underset{i}{\sum} \underset{j}{\sum} x_i x_j b_{ij},\\
b_{ij} &= \left[\frac{b_i^{\frac{1}{s}} + b_j^{\frac{1}{s}}}{2}\right]^s,
\label{eq:bij}
\end{align}
with $s=2$.
UMR-PR ignores the Flory-Huggins contribution, Equation \ref{eq:fh},
of the combinatorial term, Equation \ref{eq:comb}.
UMR-PR uses the original temperature independent UNIFAC parameters published by Hansen et al \cite{Hansen1991} and Dortmund Data Bank, Wittig et al \cite{Wittig2003}.
Data source:
\url{https://en.wikipedia.org/wiki/UNIFAC}
\url{http://www.ddbst.com/unifacga.html}
\url{http://www.aim.env.uea.ac.uk/aim/info/UNIFACgroups.html}
The temperature differentials of the volume correction used in UMR are not
continuous. This might be a good reason not to use the model.
\subsection{t-mPR model}
t-mPR \cite{Avlonitis1994} is an extension of the t-PR
\cite{Magoulas1990} to mixtures.
The t-mPR model take the following form,
\begin{equation}
\label{eq:t-mPR}
P = \frac{RT}{V+t-b} - \frac{a}{(V+t)(V+t+b) + b(V+t-b)},
\end{equation}
where,
\begin{equation}
\label{eq:tmix}
t = t(\vektor{x},T) = \underset{i}{\sum} x_i t_i(T).
\end{equation}
We see that by introducing $\tilde{V} = V + t$, the relations for this
equation of state can be related to the standard Peng-Robinson
equation of state. The translation is slightly more complicated than
the P{\'e}neloux \cite{Peneloux1982} volume shift, due to the
temperature dependency, and the lack of correction to the covolume.
\section{PSRK model}
PSRK \cite{Holderbaum1991} uses SRK with Mathias-Copeman
$\alpha$-correlation \cite{Mathias1983}, and a UNIFAC excess Gibbs
energy model.
The zero pressure limit, Equation \ref{eq:zero_limit},
is used when including the mixing rules into the SRK EOS.
$h_{\text{PSRK}}(\beta_0) = 0.64663$ is used.
The zero pressure limit is used when including the excess Gibbs energy into the equation of state,
\begin{equation}
\label{eq:zero_limit}
\frac{a}{RTb} = \underset{i}{\sum} x_i \frac{a_i}{RTb_i} - \frac{1}{h(\beta_0)}\left(\underset{i}{\sum} x_i \ln \frac{b}{b_i} + \frac{G^\excess}{RT} \right),
\end{equation}
where $h(\beta_0)$ is a constant that depend on the EOS. We have $h_{\text{PR}}(\beta_0) = 0.53$.
The linear mixing of the covolume is used in PSRK,
\begin{equation}
\label{eq:linbmix}
b = \underset{i}{\sum} x_i b_i.
\end{equation}
\textcolor{red}{Parameters: \cite{Horstmann2005,Holderbaum1991,Horstmann2000,Fischer1996,Gmehling1997}}
\section{VTPR model}
The Volume-Translated-Peng-Robinson (VTPR) EOS \cite{Collinet2006} uses a constant volume correction for each component. The volume correction therefore does not depend on temperature. The Twu, Bluck, Cunningham and Coon $\alpha$-correlation \cite{Twu1991} is used.
For the excess Gibbs energy, the UNIFAC model is used without the combinatorial term, Equation \ref{eq:comb}. The infinite pressure limit is used when including the activity coefficient model into the EOS.
Covolume mixing uses Equation \ref{eq:bij} with $s=4/3$.
\textcolor{red}{Parameters: \cite{Schmid2014}}
\section{The general mixing rule for the covolume}
The general mixing rules for the covolume take the following form,
\begin{align}
nB = n^2 b &= \underset{i}{\sum} \underset{j}{\sum} n_i n_j b_{ij}, \label{eq:B}\\
b_{ij}^{\frac{1}{s}} &= (1 - l_{ij}) \frac{b_i^{\frac{1}{s}} + b_j^{\frac{1}{s}}}{2}.
\label{eq:bij_mod}
\end{align}
Here $l_{ij}$ is assumed constant and symmetric, and has a default value of zero.
Differentiating and manipulating Equation \ref{eq:B}, $B_{i}$ and
$B_{ij}$ become,
\begin{align}
n B_i &= 2\underset{j}{\sum} n_j b_{ij} -B, \label{eq:Bi}\\
n B_{ij} &= 2b_{ij} - B_i - B_j.
\label{eq:Bij}
\end{align}
\clearpage
\bibliographystyle{plain}
\bibliography{../thermopack}
\end{document}
|
{"hexsha": "e65cadcd3f6e20efc5a4999ec5770ec963e87b93", "size": 21701, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/memo/UNIFAC/unifac.tex", "max_stars_repo_name": "SINTEF/Thermopack", "max_stars_repo_head_hexsha": "63c0dc82fe6f88dd5612c53a35f7fbf405b4f3f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2020-10-14T07:51:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:59:23.000Z", "max_issues_repo_path": "doc/memo/UNIFAC/unifac.tex", "max_issues_repo_name": "SINTEF/Thermopack", "max_issues_repo_head_hexsha": "63c0dc82fe6f88dd5612c53a35f7fbf405b4f3f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-10-26T11:43:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T22:06:30.000Z", "max_forks_repo_path": "doc/memo/UNIFAC/unifac.tex", "max_forks_repo_name": "SINTEF/Thermopack", "max_forks_repo_head_hexsha": "63c0dc82fe6f88dd5612c53a35f7fbf405b4f3f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-10-27T13:04:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T04:59:24.000Z", "avg_line_length": 50.2337962963, "max_line_length": 391, "alphanum_fraction": 0.6725496521, "num_tokens": 8529}
|
import unittest
import numpy as np
class TestCase(unittest.TestCase):
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape,
b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(
np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
a: The expected numpy ndarray (or anything can be converted to one), or
dict of same. Must be a dict iff `b` is a dict.
b: The actual numpy ndarray (or anything can be converted to one), or
dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a,
b))
if is_a_dict:
self.assertCountEqual(
a.keys(),
b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(),
b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k],
b[k],
rtol=rtol,
atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape,
b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
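# Illustrative usage sketch (not part of the original module): a concrete test
# would subclass TestCase and call the helpers directly, e.g.
#
#   class MyTest(TestCase):
#       def test_close(self):
#           self.assertAllEqual([1, 2, 3], np.array([1, 2, 3]))
#           self.assertAllClose({"a": [1.0, 2.0]}, {"a": [1.0, 2.0 + 1e-7]})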
|
{"hexsha": "10f5b9326be2b3bb5b30fcc514c98dc64b1a31a2", "size": 4524, "ext": "py", "lang": "Python", "max_stars_repo_path": "second/framework/test.py", "max_stars_repo_name": "jerry99s/second.pytorch", "max_stars_repo_head_hexsha": "80143908a349b9f3ff1642d21dacaf23455b3cf8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1541, "max_stars_repo_stars_event_min_datetime": "2018-10-04T00:32:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:54:59.000Z", "max_issues_repo_path": "second/framework/test.py", "max_issues_repo_name": "Karthik-Ragunath/second.pytorch", "max_issues_repo_head_hexsha": "414de8936a165d7cdba4a6eb15ce9603201d2e61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 466, "max_issues_repo_issues_event_min_datetime": "2018-10-06T01:05:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T08:49:44.000Z", "max_forks_repo_path": "second/framework/test.py", "max_forks_repo_name": "Karthik-Ragunath/second.pytorch", "max_forks_repo_head_hexsha": "414de8936a165d7cdba4a6eb15ce9603201d2e61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 537, "max_forks_repo_forks_event_min_datetime": "2018-10-04T07:36:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T10:12:02.000Z", "avg_line_length": 43.0857142857, "max_line_length": 79, "alphanum_fraction": 0.4913793103, "include": true, "reason": "import numpy", "num_tokens": 1037}
|
import numpy as np
from data_loading import load_data, store_song
from transitions_creation import fade
def main():
load_path = "../songs/dev_songs_house/"
store_path = "../listening_test/mixes/mix_A.wav"
store_path_transition_times = "../listening_test/mixes/mix_A_transition_times.txt"
# Load data
playlist = load_data(load_path)
with open(store_path_transition_times, "a") as myFile:
myFile.write(f"\n-----Tracklist-----\n\n")
for song in playlist:
myFile.write(f"{song['song_name']} - {song['artist_name']}\n")
myFile.write(f"\n-------------------\n\n")
mix = playlist[0]
for i in range(1, len(playlist)):
print(f"\tMixing tracks {i} and {i+1}...")
previous_mix, next_song = fade(mix, playlist[i])
mix, transition_time = combine_songs(previous_mix, next_song)
with open(store_path_transition_times, "a") as myFile:
myFile.write(f"Transition {i} at time {convert(transition_time)}.\n")
store_song(mix, store_path)
def combine_songs(previous_mix, next_song):
    mix = previous_mix.copy()
    # Sample index 20 seconds before the end of the previous mix; the next track
    # starts here so that both songs overlap during the 20 second fade.
    previous_ending = len(previous_mix["audio_array"]) - previous_mix["frame_rate"] * 20
    # Pad the next track with leading zeros so it starts at previous_ending,
    # then add the previous mix on top of the overlapping region.
    next_audio_padded = np.pad(next_song["audio_array"], (previous_ending, 0), constant_values=0)
    mix["audio_array"] = next_audio_padded
    mix["audio_array"][: previous_mix["audio_array"].size] += previous_mix["audio_array"]
    # Return the combined mix and the transition time in whole seconds.
    return mix, np.round(previous_ending / previous_mix["frame_rate"], 0)
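# Illustrative sketch, not part of the original script: the padding arithmetic
# used by combine_songs() on tiny made-up arrays. The frame rate and overlap
# length below are toy values chosen only to keep the numbers small.
def _demo_overlap_padding():
    frame_rate = 2                             # samples per second (toy value)
    previous = np.arange(10, dtype=float)      # 5 "seconds" of previous mix
    next_track = np.ones(6)                    # 3 "seconds" of the next track
    overlap_seconds = 2                        # stands in for the 20 s above
    start = len(previous) - frame_rate * overlap_seconds
    padded = np.pad(next_track, (start, 0), constant_values=0)
    padded[: previous.size] += previous
    return padded, start / frame_rate          # mixed samples, transition time (s)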
def convert(seconds):
    # Format a time in seconds as an MM:SS string (hours are discarded).
    seconds = seconds % (24 * 3600)
    seconds %= 3600
    minutes = seconds // 60
    seconds %= 60
    return "%02d:%02d" % (minutes, seconds)
if __name__ == "__main__":
main()
|
{"hexsha": "94c1706ac282bc2cc74ab3bdd6db85d2fcd95db0", "size": 1749, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/fadeinfadeout_mix.py", "max_stars_repo_name": "erikpiscator/song_mixing", "max_stars_repo_head_hexsha": "8fb430311e46d9e917d75ccdd85be57bac67f262", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/fadeinfadeout_mix.py", "max_issues_repo_name": "erikpiscator/song_mixing", "max_issues_repo_head_hexsha": "8fb430311e46d9e917d75ccdd85be57bac67f262", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/fadeinfadeout_mix.py", "max_forks_repo_name": "erikpiscator/song_mixing", "max_forks_repo_head_hexsha": "8fb430311e46d9e917d75ccdd85be57bac67f262", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6721311475, "max_line_length": 97, "alphanum_fraction": 0.6529445397, "include": true, "reason": "import numpy", "num_tokens": 449}
|
# Copyright (c) 2018-2020 Manfred Moitzi
# License: MIT License
from typing import Iterable, Tuple, List, Sequence, Union, Any
from itertools import repeat
import math
import reprlib
__all__ = [
'Matrix', 'gauss_vector_solver', 'gauss_matrix_solver', 'gauss_jordan_solver', 'gauss_jordan_inverse',
'LUDecomposition', 'freeze_matrix', 'tridiagonal_vector_solver', 'tridiagonal_matrix_solver',
'detect_banded_matrix', 'compact_banded_matrix', 'BandedMatrixLU', 'banded_matrix', 'quadratic_equation',
]
def zip_to_list(*args) -> Iterable[List]:
for e in zip(*args): # returns immutable tuples
yield list(e) # need mutable list
MatrixData = List[List[float]]
FrozenMatrixData = Tuple[Tuple[float, ...]]
Shape = Tuple[int, int]
def copy_float_matrix(A) -> MatrixData:
if isinstance(A, Matrix):
A = A.matrix
return [[float(v) for v in row] for row in A]
def freeze_matrix(A: Union[MatrixData, 'Matrix']) -> 'Matrix':
""" Returns a frozen matrix, all data is stored in immutable tuples. """
if isinstance(A, Matrix):
A = A.matrix
m = Matrix()
m.matrix = tuple(tuple(float(v) for v in row) for row in A)
return m
class Matrix:
"""
    Basic matrix implementation without any optimization for speed or memory usage. Matrix data is stored in
    row major order, i.e. as a list of rows, where each row is a list of floats. The data is directly
    accessible through the attribute :attr:`Matrix.matrix`.
    The matrix can be frozen by function :func:`freeze_matrix` or method :meth:`Matrix.freeze`, after which the data
is stored in immutable tuples.
Initialization:
- Matrix(shape=(rows, cols)) ... new matrix filled with zeros
- Matrix(matrix[, shape=(rows, cols)]) ... from copy of matrix and optional reshape
- Matrix([[row_0], [row_1], ..., [row_n]]) ... from Iterable[Iterable[float]]
- Matrix([a1, a2, ..., an], shape=(rows, cols)) ... from Iterable[float] and shape
.. versionadded:: 0.13
"""
__slots__ = ('matrix', 'abs_tol')
def __init__(self, items: Any = None, shape: Shape = None, matrix: MatrixData = None):
self.matrix: MatrixData = matrix
self.abs_tol: float = 1e-12
if items is None:
if shape is not None:
self.matrix = Matrix.reshape(repeat(0.), shape).matrix
else: # items is None, shape is None
return
elif isinstance(items, Matrix):
if shape is None:
shape = items.shape
self.matrix = Matrix.reshape(items, shape=shape).matrix
else:
items = list(items)
try:
self.matrix = [list(row) for row in items]
except TypeError:
self.matrix = Matrix.reshape(items, shape).matrix
def __iter__(self) -> Iterable[float]:
for row in self.matrix:
yield from row
def __copy__(self) -> 'Matrix':
m = Matrix()
m.abs_tol = self.abs_tol
m.matrix = [list(row) for row in self.rows()]
return m
def __str__(self) -> str:
return str(self.matrix)
def __repr__(self) -> str:
return f'Matrix({reprlib.repr(self.matrix)})'
@staticmethod
def reshape(items: Iterable[float], shape: Shape) -> 'Matrix':
""" Returns a new matrix for iterable `items` in the configuration of `shape`. """
items = iter(items)
rows, cols = shape
return Matrix(matrix=[[next(items) for _ in range(cols)] for _ in range(rows)])
@property
def nrows(self) -> int:
""" Count of matrix rows. """
return len(self.matrix)
@property
def ncols(self) -> int:
""" Count of matrix columns. """
return len(self.matrix[0])
@property
def shape(self) -> Shape:
""" Shape of matrix as (n, m) tuple for n rows and m columns. """
return self.nrows, self.ncols
def row(self, index) -> List[float]:
""" Returns row `index` as list of floats. """
return self.matrix[index]
def iter_row(self, index) -> Iterable[float]:
""" Yield values of row `index`. """
return iter(self.matrix[index])
def col(self, index) -> List[float]:
""" Return column `index` as list of floats. """
return [row[index] for row in self.matrix]
def iter_col(self, index) -> Iterable[float]:
""" Yield values of column `index`. """
return (row[index] for row in self.matrix)
def diag(self, index) -> List[float]:
""" Returns diagonal `index` as list of floats.
        An `index` of ``0`` specifies the main diagonal, negative values specify diagonals below the
        main diagonal and positive values specify diagonals above the main diagonal.
e.g. given a 4x4 matrix:
index ``0`` is [00, 11, 22, 33],
index ``-1`` is [10, 21, 32] and
index ``+1`` is [01, 12, 23]
"""
return list(self.iter_diag(index))
def iter_diag(self, index) -> Iterable[float]:
""" Yield values of diagonal `index`, see also :meth:`diag`. """
get = self.__getitem__
col_offset = max(index, 0)
row_offset = abs(min(index, 0))
for i in range(max(self.nrows, self.ncols)):
try:
yield get((i + row_offset, i + col_offset))
except IndexError:
break
def rows(self) -> MatrixData:
""" Return a list of all rows. """
return self.matrix
def cols(self) -> MatrixData:
""" Return a list of all columns. """
return [self.col(i) for i in range(self.ncols)]
def set_row(self, index: int, items: Union[float, Iterable[float]] = 1.0) -> None:
""" Set row values to a fixed value or from an iterable of floats. """
if isinstance(items, (float, int)):
items = [float(items)] * self.ncols
if len(items) != self.ncols:
raise ValueError('Invalid item count')
self.matrix[index] = items
def set_col(self, index: int, items: Union[float, Iterable[float]] = 1.0) -> None:
""" Set column values to a fixed value or from an iterable of floats. """
if isinstance(items, (float, int)):
items = [float(items)] * self.nrows
for row, item in zip(self.rows(), items):
row[index] = item
def set_diag(self, index: int = 0, items: Union[float, Iterable[float]] = 1.0) -> None:
""" Set diagonal values to a fixed value or from an iterable of floats.
        An `index` of ``0`` specifies the main diagonal, negative values specify diagonals below the
        main diagonal and positive values specify diagonals above the main diagonal.
e.g. given a 4x4 matrix:
index ``0`` is [00, 11, 22, 33],
index ``-1`` is [10, 21, 32] and
index ``+1`` is [01, 12, 23]
"""
if isinstance(items, (float, int)):
items = repeat(float(items))
col_offset = max(index, 0)
row_offset = abs(min(index, 0))
for index, value in zip(range(max(self.nrows, self.ncols)), items):
try:
self.matrix[index + row_offset][index + col_offset] = value
except IndexError:
return
@classmethod
def identity(cls, shape: Shape) -> 'Matrix':
"""Returns the identity matrix for configuration `shape`. """
m = Matrix(shape=shape)
m.set_diag(0, 1.0)
return m
def append_row(self, items: Sequence[float]) -> None:
""" Append a row to the matrix. """
if self.matrix is None:
self.matrix = [list(items)]
elif len(items) == self.ncols:
            self.matrix.append(list(items))  # copy to a mutable list like the other rows
else:
raise ValueError('Invalid item count.')
def append_col(self, items: Sequence[float]) -> None:
""" Append a column to the matrix. """
if self.matrix is None:
self.matrix = [[item] for item in items]
elif len(items) == self.nrows:
for row, item in zip(self.matrix, items):
row.append(item)
else:
raise ValueError('Invalid item count.')
def swap_rows(self, a: int, b: int) -> None:
""" Swap rows `a` and `b` inplace. """
m = self.matrix
m[a], m[b] = m[b], m[a]
def swap_cols(self, a: int, b: int) -> None:
""" Swap columns `a` and `b` inplace. """
for row in self.rows():
row[a], row[b] = row[b], row[a]
def freeze(self) -> 'Matrix':
""" Returns a frozen matrix, all data is stored in immutable tuples. """
return freeze_matrix(self.matrix)
def lu_decomp(self) -> 'LUDecomposition':
""" Returns the `LU decomposition`_ as :class:`LUDecomposition` object, a faster linear equation solver. """
return LUDecomposition(self)
def __getitem__(self, item: Tuple[int, int]) -> float:
""" Get value by (row, col) index tuple, fancy slicing as known from numpy is not supported. """
row, col = item
return self.matrix[row][col]
def __setitem__(self, item: Tuple[int, int], value: float):
""" Set value by (row, col) index tuple, fancy slicing as known from numpy is not supported. """
row, col = item
self.matrix[row][col] = value
    def __eq__(self, other: 'Matrix') -> bool:
        """ Returns ``True`` if matrices are equal, tolerance value for comparison
is adjustable by the attribute :attr:`Matrix.abs_tol`.
"""
if not isinstance(other, Matrix):
raise TypeError('Matrix class required.')
if self.shape != other.shape:
raise TypeError('Matrices have different shapes.')
for row1, row2 in zip(self.matrix, other.matrix):
for item1, item2 in zip(row1, row2):
if not math.isclose(item1, item2, abs_tol=self.abs_tol):
return False
return True
def __mul__(self, other: Union['Matrix', float]) -> 'Matrix':
""" Matrix multiplication by another matrix or a float, returns a new matrix. """
if isinstance(other, Matrix):
matrix = Matrix(
matrix=[[sum(a * b for a, b in zip(X_row, Y_col)) for Y_col in zip(*other.matrix)] for X_row in
self.matrix])
else:
factor = float(other)
matrix = Matrix(matrix=[[item * factor for item in row] for row in self.matrix])
return matrix
__imul__ = __mul__
def __add__(self, other: Union['Matrix', float]) -> 'Matrix':
""" Matrix addition by another matrix or a float, returns a new matrix. """
if isinstance(other, Matrix):
matrix = Matrix.reshape([a + b for a, b in zip(self, other)], shape=self.shape)
else:
value = float(other)
matrix = Matrix(matrix=[[item + value for item in row] for row in self.matrix])
return matrix
__iadd__ = __add__
def __sub__(self, other: Union['Matrix', float]) -> 'Matrix':
""" Matrix subtraction by another matrix or a float, returns a new matrix. """
if isinstance(other, Matrix):
matrix = Matrix.reshape([a - b for a, b in zip(self, other)], shape=self.shape)
else:
value = float(other)
matrix = Matrix(matrix=[[item - value for item in row] for row in self.matrix])
return matrix
__isub__ = __sub__
def transpose(self) -> 'Matrix':
""" Returns a new transposed matrix. """
return Matrix(matrix=list(zip_to_list(*self.matrix)))
def inverse(self) -> 'Matrix':
""" Returns inverse of matrix as new object. """
if self.nrows != self.ncols:
raise TypeError('Inverse of non-square matrix not supported.')
if self.nrows > 10:
return LUDecomposition(self).inverse()
else: # faster for small matrices
return gauss_jordan_inverse(self)
def determinant(self) -> float:
""" Returns determinant of matrix, raises :class:`ZeroDivisionError` if matrix is singular. """
return LUDecomposition(self).determinant()
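# Illustrative sketch, not part of the original module: the constructor forms
# listed in the Matrix docstring, exercised with arbitrary values.
def _demo_matrix_usage():
    zeros = Matrix(shape=(2, 3))                          # 2x3 matrix of zeros
    m = Matrix([[1.0, 2.0], [3.0, 4.0]])                  # from nested iterables
    reshaped = Matrix([1, 2, 3, 4, 5, 6], shape=(2, 3))   # from a flat iterable
    identity = Matrix.identity((2, 2))
    product = m * identity                                # matrix multiplication
    return zeros.shape, reshaped.nrows, product == m      # (2, 3), 2, True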
def quadratic_equation(a: float, b: float, c: float) -> Tuple[float, float]:
    """ Returns both real solutions of a*x^2 + b*x + c = 0; raises :class:`ValueError` if the discriminant is negative. """
    discriminant = math.sqrt(b ** 2 - 4 * a * c)
    return ((-b + discriminant) / (2.0 * a)), ((-b - discriminant) / (2.0 * a))
def gauss_vector_solver(A: Iterable[Iterable[float]], B: Iterable[float]) -> List[float]:
"""
Solves the linear equation system given by a nxn Matrix A . x = B,
right-hand side quantities as vector B with n elements by the `Gauss-Elimination`_
algorithm, which is faster than the `Gauss-Jordan`_ algorithm. The speed improvement
is more significant for solving multiple right-hand side quantities as matrix at once.
Reference implementation for error checking.
Args:
        A: matrix [[a11, a12, ..., a1n], [a21, a22, ..., a2n], [a31, a32, ..., a3n], ... [an1, an2, ..., ann]]
B: vector [b1, b2, ..., bn]
Returns:
vector as list of floats
Raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
# copy input data
A = copy_float_matrix(A)
B = list(B)
num = len(A)
if len(A[0]) != num:
raise ValueError('A square nxn matrix A is required.')
if len(B) != num:
raise ValueError('Item count of vector B has to be equal to matrix A row count.')
# inplace modification of A & B
_build_upper_triangle(A, B)
return _backsubstitution(A, B)
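# Illustrative sketch, not part of the original module: solving a small, made-up
# 2x2 system with gauss_vector_solver; its exact solution is x = [1, 2].
def _demo_gauss_vector_solver():
    A = [[2.0, 1.0],
         [1.0, 3.0]]
    B = [4.0, 7.0]
    return gauss_vector_solver(A, B)  # [1.0, 2.0]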
def gauss_matrix_solver(A: Iterable[Iterable[float]], B: Iterable[Iterable[float]]) -> Matrix:
"""
Solves the linear equation system given by a nxn Matrix A . x = B,
right-hand side quantities as nxm Matrix B by the `Gauss-Elimination`_ algorithm,
which is faster than the `Gauss-Jordan`_ algorithm.
Reference implementation for error checking.
Args:
        A: matrix [[a11, a12, ..., a1n], [a21, a22, ..., a2n], [a31, a32, ..., a3n], ... [an1, an2, ..., ann]]
B: matrix [[b11, b12, ..., b1m], [b21, b22, ..., b2m], ... [bn1, bn2, ..., bnm]]
Returns:
matrix as :class:`Matrix` object
Raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
# copy input data
A = copy_float_matrix(A)
B = copy_float_matrix(B)
num = len(A)
if len(A[0]) != num:
raise ValueError('A square nxn matrix A is required.')
if len(B) != num:
raise ValueError('Row count of matrices A and B has to match.')
# inplace modification of A & B
_build_upper_triangle(A, B)
columns = Matrix(matrix=B).cols()
result = Matrix()
for col in columns:
result.append_col(_backsubstitution(A, col))
return result
def _build_upper_triangle(A: MatrixData, B: List) -> None:
""" Build upper triangle for backsubstitution. Modifies A and B inplace!
Args:
A: row major matrix
B: vector of floats or row major matrix
"""
num = len(A)
try:
b_col_count = len(B[0])
except TypeError:
b_col_count = 1
for i in range(0, num):
# Search for maximum in this column
max_element = abs(A[i][i])
max_row = i
for row in range(i + 1, num):
value = abs(A[row][i])
if value > max_element:
max_element = value
max_row = row
# Swap maximum row with current row
A[max_row], A[i] = A[i], A[max_row]
B[max_row], B[i] = B[i], B[max_row]
# Make all rows below this one 0 in current column
for row in range(i + 1, num):
c = -A[row][i] / A[i][i]
for col in range(i, num):
if i == col:
A[row][col] = 0
else:
A[row][col] += c * A[i][col]
if b_col_count == 1:
B[row] += c * B[i]
else:
for col in range(b_col_count):
B[row][col] += c * B[i][col]
def _backsubstitution(A: MatrixData, B: List[float]) -> List[float]:
""" Solve equation A . x = B for an upper triangular matrix A by backsubstitution.
Args:
A: row major matrix
B: vector of floats
"""
num = len(A)
x = [0.0] * num
for i in range(num - 1, -1, -1):
x[i] = B[i] / A[i][i]
for row in range(i - 1, -1, -1):
B[row] -= A[row][i] * x[i]
return x
def gauss_jordan_solver(A: Iterable[Iterable[float]], B: Iterable[Iterable[float]]) -> Tuple[Matrix, Matrix]:
""" Solves the linear equation system given by a nxn Matrix A . x = B, right-hand side quantities as
nxm Matrix B by the `Gauss-Jordan`_ algorithm, which is the slowest of all, but
it is very reliable. Returns a copy of the modified input matrix `A` and the
result matrix `x`.
Internally used for matrix inverse calculation.
Args:
        A: matrix [[a11, a12, ..., a1n], [a21, a22, ..., a2n], [a31, a32, ..., a3n], ... [an1, an2, ..., ann]]
B: matrix [[b11, b12, ..., b1m], [b21, b22, ..., b2m], ... [bn1, bn2, ..., bnm]]
Returns:
2-tuple of :class:`Matrix` objects
Raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
# copy input data
A = copy_float_matrix(A)
B = copy_float_matrix(B)
n = len(A)
m = len(B[0])
if len(A[0]) != n:
raise ValueError('A square nxn matrix A is required.')
if len(B) != n:
raise ValueError('Row count of matrices A and B has to match.')
icol = 0
irow = 0
col_indices = [0] * n
row_indices = [0] * n
ipiv = [0] * n
for i in range(n):
big = 0.0
for j in range(n):
if ipiv[j] != 1:
for k in range(n):
if ipiv[k] == 0:
if abs(A[j][k]) >= big:
big = abs(A[j][k])
irow = j
icol = k
ipiv[icol] += 1
if irow != icol:
A[irow], A[icol] = A[icol], A[irow]
B[irow], B[icol] = B[icol], B[irow]
row_indices[i] = irow
col_indices[i] = icol
pivinv = 1.0 / A[icol][icol]
A[icol][icol] = 1.0
A[icol] = [v * pivinv for v in A[icol]]
B[icol] = [v * pivinv for v in B[icol]]
for row in range(n):
if row == icol:
continue
dum = A[row][icol]
A[row][icol] = 0.0
for col in range(n):
A[row][col] -= A[icol][col] * dum
for col in range(m):
B[row][col] -= B[icol][col] * dum
for i in range(n - 1, -1, -1):
irow = row_indices[i]
icol = col_indices[i]
if irow != icol:
for row in A:
row[irow], row[icol] = row[icol], row[irow]
return Matrix(matrix=A), Matrix(matrix=B)
def gauss_jordan_inverse(A: Iterable[Iterable[float]]) -> Matrix:
""" Returns the inverse of matrix `A` as :class:`Matrix` object.
.. hint::
        For small matrices (n<10) this function is faster than LUDecomposition(m).inverse()
        and just as fast even if the decomposition is already done.
Raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
if isinstance(A, Matrix):
A = A.matrix
else:
A = list(A)
nrows = len(A)
return gauss_jordan_solver(A, repeat([0.0], nrows))[0]
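# Illustrative sketch, not part of the original module: inverting a small,
# made-up matrix with gauss_jordan_inverse.
def _demo_gauss_jordan_inverse():
    inverse = gauss_jordan_inverse([[4.0, 7.0], [2.0, 6.0]])
    return inverse  # approximately [[0.6, -0.7], [-0.2, 0.4]]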
class LUDecomposition:
    """ Represents the `LU decomposition`_ of a matrix A, raises :class:`ZeroDivisionError` for a singular matrix.
This algorithm is a little bit faster than the `Gauss-Elimination`_ algorithm using CPython and
much faster when using pypy.
The :attr:`LUDecomposition.matrix` attribute gives access to the matrix data
as list of rows like in the :class:`Matrix` class, and the :attr:`LUDecomposition.index`
attribute gives access to the swapped row indices.
Args:
        A: matrix [[a11, a12, ..., a1n], [a21, a22, ..., a2n], [a31, a32, ..., a3n], ... [an1, an2, ..., ann]]
raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
__slots__ = ('matrix', 'index', '_det')
def __init__(self, A: Iterable[Iterable[float]]):
lu = copy_float_matrix(A)
n = len(lu)
det = 1.0
index = []
# find max value for each row, raises ZeroDivisionError for singular matrix!
scaling = [1.0 / max(abs(v) for v in row) for row in lu]
for k in range(n):
big = 0.0
imax = k
for i in range(k, n):
temp = scaling[i] * abs(lu[i][k])
if temp > big:
big = temp
imax = i
if k != imax:
for col in range(n):
temp = lu[imax][col]
lu[imax][col] = lu[k][col]
lu[k][col] = temp
det = -det
scaling[imax] = scaling[k]
index.append(imax)
for i in range(k + 1, n):
temp = lu[i][k] / lu[k][k]
lu[i][k] = temp
for j in range(k + 1, n):
lu[i][j] -= temp * lu[k][j]
self.index: List[int] = index
self.matrix: MatrixData = lu
self._det = det
def __str__(self) -> str:
return str(self.matrix)
def __repr__(self) -> str:
return f'{self.__class__} {reprlib.repr(self.matrix)}'
@property
def nrows(self) -> int:
""" Count of matrix rows (and cols). """
return len(self.matrix)
def solve_vector(self, B: Iterable[float]) -> List[float]:
"""
Solves the linear equation system given by the nxn Matrix A . x = B,
right-hand side quantities as vector B with n elements.
Args:
B: vector [b1, b2, ..., bn]
Returns:
vector as list of floats
"""
X = [float(v) for v in B]
lu = self.matrix
index = self.index
n = self.nrows
ii = 0
if len(X) != n:
raise ValueError('Item count of vector B has to be equal to matrix row count.')
for i in range(n):
ip = index[i]
sum_ = X[ip]
X[ip] = X[i]
if ii != 0:
for j in range(ii - 1, i):
sum_ -= lu[i][j] * X[j]
elif sum_ != 0.0:
ii = i + 1
X[i] = sum_
for row in range(n - 1, -1, -1):
sum_ = X[row]
for col in range(row + 1, n):
sum_ -= lu[row][col] * X[col]
X[row] = sum_ / lu[row][row]
return X
def solve_matrix(self, B: Iterable[Iterable[float]]) -> Matrix:
"""
Solves the linear equation system given by the nxn Matrix A . x = B,
right-hand side quantities as nxm Matrix B.
Args:
B: matrix [[b11, b12, ..., b1m], [b21, b22, ..., b2m], ... [bn1, bn2, ..., bnm]]
Returns:
matrix as :class:`Matrix` object
"""
if not isinstance(B, Matrix):
B = Matrix(matrix=[list(row) for row in B])
if B.nrows != self.nrows:
raise ValueError('Row count of self and matrix B has to match.')
return Matrix(matrix=[self.solve_vector(col) for col in B.cols()]).transpose()
def inverse(self) -> Matrix:
""" Returns the inverse of matrix as :class:`Matrix` object,
raise :class:`ZeroDivisionError` for a singular matrix. """
return self.solve_matrix(Matrix.identity(shape=(self.nrows, self.nrows)))
def determinant(self) -> float:
""" Returns the determinant of matrix, raises :class:`ZeroDivisionError` if matrix is singular.
"""
det = self._det
lu = self.matrix
for i in range(self.nrows):
det *= lu[i][i]
return det
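# Illustrative sketch, not part of the original module: reusing one LU
# decomposition of a made-up 2x2 system to solve it and to get the determinant.
def _demo_lu_decomposition():
    lu = LUDecomposition([[2.0, 1.0], [1.0, 3.0]])
    x = lu.solve_vector([4.0, 7.0])   # [1.0, 2.0]
    det = lu.determinant()            # 5.0
    return x, det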
def tridiagonal_vector_solver(A: Iterable[Iterable[float]], B: Iterable[float]) -> List[float]:
"""
Solves the linear equation system given by a tri-diagonal nxn Matrix A . x = B,
right-hand side quantities as vector B.
    Matrix A is a tri-diagonal matrix defined by 3 diagonals [-1 (a), 0 (b), +1 (c)].
Note: a0 is not used but has to be present, cn-1 is also not used and must not be present.
    If a :class:`ZeroDivisionError` exception occurs, the equation system can possibly be solved by
:code:`BandedMatrixLU(A, 1, 1).solve_vector(B)`
Args:
A: diagonal matrix [[a0..an-1], [b0..bn-1], [c0..cn-1]] ::
[[b0, c0, 0, 0, ...],
[a1, b1, c1, 0, ...],
[0, a2, b2, c2, ...],
... ]
        B: iterable of floats [b1, b2, ..., bn]
Returns:
list of floats
Raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
a, b, c = [list(v) for v in A]
return _solve_tridiagonal_matrix(a, b, c, list(B))
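# Illustrative sketch, not part of the original module: solving a made-up 3x3
# tri-diagonal system with tridiagonal_vector_solver. The full matrix is
# [[2, -1, 0], [-1, 2, -1], [0, -1, 2]] and the exact solution is x = [1, 1, 1].
def _demo_tridiagonal_solver():
    A = [
        [0.0, -1.0, -1.0],   # lower diagonal a (a0 is unused)
        [2.0, 2.0, 2.0],     # main diagonal b
        [-1.0, -1.0],        # upper diagonal c
    ]
    B = [1.0, 0.0, 1.0]
    return tridiagonal_vector_solver(A, B)  # approximately [1.0, 1.0, 1.0]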
def tridiagonal_matrix_solver(A: Iterable[Iterable[float]], B: Iterable[Iterable[float]]) -> Matrix:
"""
Solves the linear equation system given by a tri-diagonal nxn Matrix A . x = B,
right-hand side quantities as nxm Matrix B.
    Matrix A is a tri-diagonal matrix defined by 3 diagonals [-1 (a), 0 (b), +1 (c)].
Note: a0 is not used but has to be present, cn-1 is also not used and must not be present.
    If a :class:`ZeroDivisionError` exception occurs, the equation system can possibly be solved by
:code:`BandedMatrixLU(A, 1, 1).solve_vector(B)`
Args:
A: diagonal matrix [[a0..an-1], [b0..bn-1], [c0..cn-1]] ::
[[b0, c0, 0, 0, ...],
[a1, b1, c1, 0, ...],
[0, a2, b2, c2, ...],
... ]
B: matrix [[b11, b12, ..., b1m],
[b21, b22, ..., b2m],
...
[bn1, bn2, ..., bnm]]
Returns:
matrix as :class:`Matrix` object
Raises:
ZeroDivisionError: singular matrix
.. versionadded:: 0.13
"""
a, b, c = [list(v) for v in A]
if not isinstance(B, Matrix):
B = Matrix(matrix=[list(row) for row in B])
if B.nrows != len(b):
raise ValueError('Row count of matrices A and B has to match.')
return Matrix(matrix=[_solve_tridiagonal_matrix(a, b, c, col) for col in B.cols()]).transpose()
def _solve_tridiagonal_matrix(a: List[float], b: List[float], c: List[float], r: List[float]) -> List[float]:
""" Solves the linear equation system given by a tri-diagonal Matrix(a, b, c) . x = r.
Matrix configuration::
[[b0, c0, 0, 0, ...],
[a1, b1, c1, 0, ...],
[0, a2, b2, c2, ...],
... ]
Args:
a: lower diagonal [a0 .. an-1], a0 is not used but has to be present
b: central diagonal [b0 .. bn-1]
c: upper diagonal [c0 .. cn-1], cn-1 is not used and must not be present
r: right-hand side quantities
Returns:
vector x as list of floats
Raises:
ZeroDivisionError: singular matrix
"""
n = len(a)
u = [0.0] * n
gam = [0.0] * n
bet = b[0]
u[0] = r[0] / bet
for j in range(1, n):
gam[j] = c[j - 1] / bet
bet = b[j] - a[j] * gam[j]
u[j] = (r[j] - a[j] * u[j - 1]) / bet
for j in range((n - 2), -1, -1):
u[j] -= gam[j + 1] * u[j + 1]
return u
def banded_matrix(A: Matrix, check_all=True) -> Tuple[Matrix, int, int]:
"""
Transform matrix A into a compact banded matrix representation.
Returns compact representation as :class:`Matrix` object and
lower- and upper band count m1 and m2.
Args:
A: input :class:`Matrix`
check_all: check all diagonals if ``True`` or abort testing
after first all zero diagonal if ``False``.
"""
m1, m2 = detect_banded_matrix(A, check_all)
m = compact_banded_matrix(A, m1, m2)
return m, m1, m2
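# Illustrative sketch, not part of the original module: turning a small, made-up
# tri-diagonal matrix into its compact banded form. One lower and one upper band
# are detected (m1 = m2 = 1), so the compact matrix has one column per band:
# lower diagonal, main diagonal, upper diagonal, zero-padded where a band ends.
def _demo_banded_matrix():
    A = Matrix([
        [2.0, -1.0, 0.0],
        [-1.0, 2.0, -1.0],
        [0.0, -1.0, 2.0],
    ])
    compact, m1, m2 = banded_matrix(A)
    return compact.shape, m1, m2  # (3, 3), 1, 1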
def detect_banded_matrix(A: Matrix, check_all=True) -> Tuple[int, int]:
"""
Returns lower- and upper band count m1 and m2.
Args:
A: input :class:`Matrix`
check_all: check all diagonals if ``True`` or abort testing
after first all zero diagonal if ``False``.
"""
def detect_m2() -> int:
m2 = 0
for d in range(1, A.ncols):
if any(A.iter_diag(d)):
m2 = d
elif not check_all:
break
return m2
def detect_m1() -> int:
m1 = 0
for d in range(1, A.nrows):
if any(A.iter_diag(-d)):
m1 = d
elif not check_all:
break
return m1
return detect_m1(), detect_m2()
def compact_banded_matrix(A: Matrix, m1: int, m2: int) -> Matrix:
"""
Returns compact banded matrix representation as :class:`Matrix` object.
Args:
A: matrix to transform
m1: lower band count, excluding main matrix diagonal
m2: upper band count, excluding main matrix diagonal
"""
if A.nrows != A.ncols:
raise TypeError('Square matrix required.')
m = Matrix()
for d in range(m1, 0, -1):
col = [0.0] * d
col.extend(A.diag(-d))
m.append_col(col)
m.append_col(A.diag(0))
for d in range(1, m2 + 1):
col = A.diag(d)
col.extend([0.0] * d)
m.append_col(col)
return m
class BandedMatrixLU:
""" Represents a LU decomposition of a compact banded matrix.
Attributes:
- :attr:`upper` - upper triangle
- :attr:`lower` - lower triangle
- :attr:`m1` - lower band count, excluding main matrix diagonal
- :attr:`m2` - upper band count, excluding main matrix diagonal
- :attr:`index` - swapped indices
"""
def __init__(self, A: Matrix, m1: int, m2: int):
self.upper = copy_float_matrix(A) # upper triangle of LU decomposition
self.m1 = int(m1)
self.m2 = int(m2)
n = self.nrows
self.lower = [[0.0] * m1 for _ in range(n)] # lower triangle of LU decomposition
self.index = [0] * n
self._det = 1.0
m1 = self.m1
m2 = self.m2
upper = self.upper
lower = self.lower
mm = m1 + m2 + 1
l = m1
for i in range(m1):
for j in range(m1 - i, mm):
upper[i][j - l] = upper[i][j]
l -= 1
for j in range(mm - l - 1, mm):
upper[i][j] = 0.0
l = m1
for k in range(n):
dum = upper[k][0]
i = k
if l < n:
l += 1
for j in range(k + 1, l):
if abs(upper[j][0]) > abs(dum):
dum = upper[j][0]
i = j
self.index[k] = i + 1
if i != k:
self._det = -self._det
for j in range(mm):
upper[k][j], upper[i][j] = upper[i][j], upper[k][j]
for i in range(k + 1, l):
dum = upper[i][0] / upper[k][0]
lower[k][i - k - 1] = dum
for j in range(1, mm):
upper[i][j - 1] = upper[i][j] - dum * upper[k][j]
upper[i][mm - 1] = 0.0
@property
def nrows(self):
""" Count of matrix rows. """
return len(self.upper)
def solve_vector(self, B: Iterable[float]) -> List[float]:
"""
Solves the linear equation system given by the banded nxn Matrix A . x = B,
right-hand side quantities as vector B with n elements.
Args:
B: vector [b1, b2, ..., bn]
Returns:
vector as list of floats
"""
x = list(B)
if len(x) != self.nrows:
raise ValueError('Item count of vector B has to be equal to matrix row count.')
n = self.nrows
m1 = self.m1
m2 = self.m2
index = self.index
al = self.lower
au = self.upper
mm = m1 + m2 + 1
l = m1
for k in range(n):
j = index[k] - 1
if j != k:
x[k], x[j] = x[j], x[k]
if l < n:
l += 1
for j in range(k + 1, l):
x[j] -= al[k][j - k - 1] * x[k]
l = 1
for i in range(n - 1, -1, -1):
dum = x[i]
for k in range(1, l):
dum -= au[i][k] * x[k + i]
x[i] = dum / au[i][0]
if l < mm:
l += 1
return x
def solve_matrix(self, B: Iterable[Iterable[float]]) -> Matrix:
"""
Solves the linear equation system given by the banded nxn Matrix A . x = B,
right-hand side quantities as nxm Matrix B.
Args:
B: matrix [[b11, b12, ..., b1m], [b21, b22, ..., b2m], ... [bn1, bn2, ..., bnm]]
Returns:
matrix as :class:`Matrix` object
"""
if not isinstance(B, Matrix):
B = Matrix(matrix=[list(row) for row in B])
if B.nrows != self.nrows:
raise ValueError('Row count of self and matrix B has to match.')
return Matrix(matrix=[self.solve_vector(col) for col in B.cols()]).transpose()
def determinant(self) -> float:
""" Returns the determinant of matrix. """
dd = self._det
au = self.upper
for i in range(0, len(au)):
dd *= au[i][0]
return dd
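# Illustrative sketch, not part of the original module: combining banded_matrix()
# and BandedMatrixLU to solve the same made-up tri-diagonal system as above
# through its compact banded representation; the exact solution is x = [1, 1, 1].
def _demo_banded_lu_solver():
    A = Matrix([
        [2.0, -1.0, 0.0],
        [-1.0, 2.0, -1.0],
        [0.0, -1.0, 2.0],
    ])
    compact, m1, m2 = banded_matrix(A)
    lu = BandedMatrixLU(compact, m1, m2)
    return lu.solve_vector([1.0, 0.0, 1.0])  # approximately [1.0, 1.0, 1.0]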
|
{"hexsha": "0c493de70629d63ee4ee9d202c223c6925740a7e", "size": 33565, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ezdxf/math/linalg.py", "max_stars_repo_name": "hh-wu/ezdxf", "max_stars_repo_head_hexsha": "62509ba39b826ee9b36f19c0a5abad7f3518186a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ezdxf/math/linalg.py", "max_issues_repo_name": "hh-wu/ezdxf", "max_issues_repo_head_hexsha": "62509ba39b826ee9b36f19c0a5abad7f3518186a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ezdxf/math/linalg.py", "max_forks_repo_name": "hh-wu/ezdxf", "max_forks_repo_head_hexsha": "62509ba39b826ee9b36f19c0a5abad7f3518186a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0276717557, "max_line_length": 116, "alphanum_fraction": 0.5450320274, "include": true, "reason": "from numpy", "num_tokens": 8898}
|
\chapter{Guidelines on the preparation of theses} \label{ch-1}
The guidelines below set out the organization and formatting requirements of the OIST PhD thesis, in order to assist students in the preparation of theses for submission.
The academic requirements of the thesis are defined in the Academic Program Policies that you can find here: \url{https://groups.oist.jp/grad/academic-program-policies}. Please always refer to the website for the latest updates in the guidelines as there may be a delay in updating the guidelines in this template.
This particular document refers specifically to a thesis written in \LaTeX. As such, some points from the full guideline (for example page sizes) are not referenced directly here as they are already defined in this template. Some other points concerning specific pages (for example the abstract) are described in the specific pages themselves in this PDF.
\section{Guidelines on the preparation of theses}
\textbf{Plagiarism and Fraud}: Students are reminded that they must take all necessary precautions to avoid plagiarism and fraudulent misrepresentation of data. The Graduate School conducts plagiarism checks on all submitted theses, and may require rewriting if plagiarism is present. When submitting a thesis by dissertation, students should avoid self-plagiarism through rewriting earlier published work and/or self-citation.
\textbf{Reproducibility}: OIST is committed to openness in science. A cornerstone of this philosophy is reproducibility. Your thesis should present all data and methods necessary to allow complete repetition of the experiments and their results, and to allow expert review of your analysis of data. Accordingly, you must ensure that your methods are comprehensive, and that your data sets and code are available for subsequent review by lodging them in the OIST Institutional Repository or some other data repository or database, as appropriate.
\textbf{Inclusion of Published Material}: In some cases, inclusion of published material as chapters is desirable. Normally, however, when published material is included in the thesis, it should be modified in order to remove redundancy and achieve a coherent narrative. It is essential to indicate clearly any portion of the thesis that duplicates parts of articles that were previously published by the candidate. The candidate must cite the article and indicate any parts of a section or chapter of the thesis that depend on the previously published article. This does not apply to previous documents such as thesis proposals and reports written as part of the candidate’s research.
An appropriate level of independence on the part of the student is expected. If parts of the thesis are based on published work under joint authorship, the supervisor should provide a statement about the extent to which this is the candidate’s own work as part of the standard supervisor declaration.
When including material from publications in a thesis, students should be aware of the copyright policies of journals. It is recommended that students request journals to vary their normal copyright agreements to allow material from an article to be included in a thesis (as the thesis will be publicly available through the University’s library). If, for copyright reasons, material from previously published papers may not be included in the electronically published thesis, the electronically published thesis may cite papers that are already published.
\section{Organization of chapters and sections}
\textbf{Title Page}: This page is the first printed page.
\textbf{Choice of Title}: Select a descriptive and unique title that clearly communicates your research. Avoid brief or misleading titles. The title will be displayed on your graduation testamur. The title should be unique within OIST, to distinguish your thesis from those of others working on a similar subject.
\textbf{Declaration of Original Authorship}: Students must provide a signed declaration that the thesis is their own work and is original.
\textbf{Co-authorship}: Co-authorship is not allowed in an OIST PhD thesis. All research and analysis is to be the student’s own work. Where co-authors have contributed to papers arising from the research, this data should not be included unless essential to the scientific narrative. When included, full disclosure of the contribution is required. Any and all work conducted by others, either internal or external to OIST, must be acknowledged.
\textbf{Abstract, Acknowledgements, List of Abbreviations, Glossary, Nomenclature, Dedication, Table of Contents, List of Figures and Tables}: Those are commented directly in the template. Glossary, Nomenclature, and Dedication pages are optional.
\textbf{Main body}: The main body of text may be arranged as a single body of material, divided into subsections of Introduction (including a statement of the problem to be investigated), Methods, Results, Discussions, or, if preferred, in chapters that each deal with a smaller part of the research, each itself divided into subchapters as above.
\textbf{Bibliography}: A complete list of all articles and books cited within the thesis, once only, at the end of the thesis. Citations should provide the title of the reference, and list at least the first three authors (et al. format is acceptable). Articles not cited within the thesis should not be included.
\textbf{Appendices}: As required. Unlike a journal article, no data or discussion may be presented separately as unpublished supplementary documents or data. Appendices should be used instead for material that is tangentially relevant to the thesis but does not belong in the main narrative. If reference is needed to large volumes of data that cannot be printed (for example, an annotated genome, or a simulation including moving images), the data should be located on an OIST repository or public database and the URL of the dataset provided in the thesis.
\section{Formatting Requirements}
\textbf{Page size, Margins, Spacing, Justification, Pagination, Header, Fonts}: those are already built into the template. Do not modify them.
\textbf{Equations}: Equations are considered part of the main text. As such, they should be formatted consistently throughout the thesis, following the advice of the Thesis Supervisor. Equations should be numbered to the right-hand margin.
\textbf{Spelling}: American spelling.
\textbf{Colors}: Color may be used in images and charts where necessary to enhance comprehension, but not for normal text or headings. The combination of red and green for binary images should be avoided to assist those who have difficulty in discerning hues. All text should be in black unless color-coding is necessary for meaning or contrast.
\textbf{Figures, Tables, Images}: Those are detailed in a later Chapter with examples.
\textbf{Word length}: No minimum word length is imposed on OIST theses. However, students must be concise in language and succinct in expression. The average length of a PhD thesis will vary between fields and between authors, but typical PhD theses are 100-400 pages in length (20,000-80,000 words of main body text).
\textbf{Citations}: All papers cited in the thesis must be referenced in a style relevant to the student’s field. All referencing must include the full title, authors, reference location and the year of publication, all in the same style for all references. Citation style must be consistent throughout the thesis. Reference manager software, such as Endnote, or BibTex which offers similar functionality with \LaTeX, may be used.
Citing one reference can be done like so: \cite{Lee98} and multiple references in one go like so \cite{Fil09, Muc10, Kra27}.
\textbf{Editing}: The thesis must be entirely the work of the student. Minimal editing may be provided by the Thesis Supervisor(s) or peers, but only as a review of initial drafts. Assistance should not be sought from OIST internal or paid external editing services unless directed to do so by the Dean of Graduate School in revision stages.
|
{"hexsha": "146c1eb6ba434931e5b9368dab46cd4fa7fc97f3", "size": 8102, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "PhD Thesis/MainText/chapter1.tex", "max_stars_repo_name": "Pradeep20oist/LaTeX-templates", "max_stars_repo_head_hexsha": "658b7f8745cc4d1ae157c1b75bc197fb4fa146b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-01T08:42:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-01T08:42:20.000Z", "max_issues_repo_path": "PhD Thesis/MainText/chapter1.tex", "max_issues_repo_name": "HannibalWangLecter/LaTeX-templates", "max_issues_repo_head_hexsha": "0484c0ffb89fe90464242d04082c35e92fbefb7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PhD Thesis/MainText/chapter1.tex", "max_forks_repo_name": "HannibalWangLecter/LaTeX-templates", "max_forks_repo_head_hexsha": "0484c0ffb89fe90464242d04082c35e92fbefb7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 130.6774193548, "max_line_length": 689, "alphanum_fraction": 0.8070846705, "num_tokens": 1642}
|
type Configuration{T <: Parameter} <: Parameter
parameters::Dict{String, T}
name::String
value::Dict{String, Any}
function Configuration(parameters::Dict{String, T}, name::String, values::Dict{String, Any})
for key in keys(parameters)
@inbounds values[key] = parameters[key].value
end
new(parameters, name, values)
end
end
function Configuration{T <: Parameter}(parameters::Dict{String, T}, name::String)
Configuration{T}(parameters, name, Dict{String, Any}())
end
function Configuration{T <: Parameter}(parameters::Array{T, 1}, name::String)
params = Dict{String, T}()
for parameter in parameters
@inbounds params[parameter.name] = parameter
end
Configuration{T}(params, name, Dict{String, Any}())
end
function Configuration(name::String)
params = Dict{String, Parameter}()
Configuration{Parameter}(params, name, Dict{String, Any}())
end
function Base.convert{T <: Parameter}(::Type{Array{T}}, configuration::Configuration)
parameter_array = T[]
for key in collect(keys(configuration.parameters))
push!(parameter_array, configuration[key])
end
parameter_array
end
function getindex(configuration::Configuration, index::String)
configuration.parameters[index]
end
function setindex!{T <: Parameter}(configuration::Configuration, value::T, index::String)
configuration.parameters[index] = value
end
|
{"hexsha": "45e565b27b38d22a7b3db08fc69fb56d0b5b4caf", "size": 1429, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/core/configuration.jl", "max_stars_repo_name": "JuliaPackageMirrors/StochasticSearch.jl", "max_stars_repo_head_hexsha": "58e48c8812fb402e4a46ffff1d5bcb87fca3fd05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-06T07:56:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T13:06:59.000Z", "max_issues_repo_path": "src/core/configuration.jl", "max_issues_repo_name": "JuliaPackageMirrors/StochasticSearch.jl", "max_issues_repo_head_hexsha": "58e48c8812fb402e4a46ffff1d5bcb87fca3fd05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-03-09T09:07:06.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-13T12:51:03.000Z", "max_forks_repo_path": "src/core/configuration.jl", "max_forks_repo_name": "JuliaPackageMirrors/StochasticSearch.jl", "max_forks_repo_head_hexsha": "58e48c8812fb402e4a46ffff1d5bcb87fca3fd05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0652173913, "max_line_length": 96, "alphanum_fraction": 0.700489853, "num_tokens": 323}
|
import time
import numpy as np
import trajPlot
class TrajectoryController:
def __init__(self,speedMax,accMax,size=3):
self.initPoint = np.zeros((size,1))
self.endPoint = np.zeros((size,1))
self.speedMax = speedMax
self.accMax = accMax
self.speed = 0
self.D = np.zeros((size,1))
self.timef = 0
self.position = np.zeros((size,1))
def set(self,initPoint,endPoint):
self.initPoint = initPoint
self.endPoint = endPoint
self.timeInit = time.time()
self.D = np.subtract(endPoint,initPoint)
Dj = np.linalg.norm(self.D)
Dj2 = 2 * np.sqrt(Dj/ self.accMax)
Dj = 2 * Dj / self.speedMax
self.timef = max(Dj,Dj2)
# q(t) = qi + A *D * t^2
self.A = 2 / (self.timef * self.timef)
self.B = 4 / self.timef
def update(self):
        # The +0.01 is to anticipate the next value
currentTime = time.time() - self.timeInit +0.01
if currentTime < self.timef/2:
acc = self.accMax
speed = min(self.speedMax,self.accMax * currentTime)
pos = self.A * currentTime * currentTime
elif currentTime < self.timef:
acc = - self.accMax
speed = min(self.speedMax,- self.accMax * currentTime + self.accMax * self.timef)
pos = (-1 + self.B * currentTime - self.A * currentTime * currentTime )
else :
acc = 0
speed = 0
pos = (-1 + self.B * self.timef - self.A * self.timef * self.timef )
self.position = np.multiply(self.D,pos)
self.position += self.initPoint
self.speed = speed
return self.isEnded()
def isEnded(self):
if time.time() - self.timeInit > self.timef:
return True
else :
return False
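# Illustrative sketch, not part of the original module: driving the controller
# from the origin to a made-up target point and polling update() until the
# motion profile has finished. The speed and acceleration limits are arbitrary.
def _demo_trajectory():
    controller = TrajectoryController(speedMax=1.0, accMax=0.5, size=3)
    start = np.zeros((3, 1))
    goal = np.array([[1.0], [2.0], [0.5]])
    controller.set(start, goal)
    while not controller.update():
        time.sleep(0.01)           # poll until the profile is finished
    return controller.position     # ends at (or very close to) the goal point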
|
{"hexsha": "659b711ae9577aee656b3fcc011c788ac72b5587", "size": 1733, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/traj/trajectoryController.py", "max_stars_repo_name": "Fdepraetre/PinokioProject", "max_stars_repo_head_hexsha": "dfea3ee23f10a44d761597d2547db3a1ff196fb1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2015-01-30T09:24:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-17T09:02:56.000Z", "max_issues_repo_path": "src/traj/trajectoryController.py", "max_issues_repo_name": "rougier/PinokioProject", "max_issues_repo_head_hexsha": "0b8db638d0b8f6ad915aa585b5eb865f1c5f7a18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-02-12T02:39:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-17T09:12:13.000Z", "max_forks_repo_path": "src/traj/trajectoryController.py", "max_forks_repo_name": "rougier/PinokioProject", "max_forks_repo_head_hexsha": "0b8db638d0b8f6ad915aa585b5eb865f1c5f7a18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-01-20T14:11:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-16T10:29:04.000Z", "avg_line_length": 27.078125, "max_line_length": 87, "alphanum_fraction": 0.5926139642, "include": true, "reason": "import numpy", "num_tokens": 498}
|
from datetime import datetime
from lib.pyparsing import Word, Keyword, alphas, ParseException, Literal, CaselessLiteral \
, Combine, Optional, nums, Or, Forward, ZeroOrMore, StringEnd, alphanums, oneOf \
, QuotedString, quotedString, removeQuotes, delimitedList, nestedExpr, Suppress, Group, Regex, operatorPrecedence \
, opAssoc, ParserElement
import math
import sys
import traceback
import tools
from constants import *
import logging
ParserElement.enablePackrat()
class ExpressionParser(object):
opMap = {
"<" : lambda a,b : a < b,
"<=" : lambda a,b : a <= b,
">" : lambda a,b : a > b,
">=" : lambda a,b : a >= b,
"!=" : lambda a,b : a != b,
"==" : lambda a,b : a == b,
"AND" : lambda a,b : a and b,
"OR" : lambda a,b : a or b
# "NOT" : lambda x : not a
}
FUNCTIONS = [
"SUM",
"AVE",
"MAX",
"MIN",
"COUNT",
"ALARMS",
"DISTANCE",
"SQRT",
"SINCE",
"LAST_ALARM",
"NOW",
"DOT",
"DELTA"
]
def __init__(self, expr, column=None, analysis=None, run_ms=0, verbose=False):
self.verbose = verbose
if self.verbose:
logging.debug("Building expression parser for %s" % expr)
self.expr = expr
self.column = column
self.analysis = analysis
self.run_ms = run_ms
self.record_list = []
self.alarm_list = []
self.record = None
        # TODO: Pass prior record for accurate calculations such as distance
# self.prior_batch_last_record = prior_batch_last_record
self.pattern = self._getPattern()
logging.debug("Initialized expression parser with expression: %s" % expr)
# Generator to extract operators and operands in pairs
def operatorOperands(self, tokenlist):
it = iter(tokenlist)
while 1:
try:
yield (it.next(), it.next())
except StopIteration:
break
def __normalizeNumeric(self, value):
if not tools.is_numeric(value):
# Handle unexpected text addition
value = 0
return value
def __evalCurrentValue(self, toks):
return [self.analysis.columnValue(self.column, 0)]
def __evalAggregateColumn(self, toks):
column = toks[0]
if not self.record_list:
raise Exception("Can't evaluate aggregate column without record list")
if column == 'ts':
res = [tools.unixtime(r.dt_recorded) for r in self.record_list]
else:
res = [r.columnValue(column, 0) for r in self.record_list]
return [res]
def __evalSingleColumn(self, toks):
column = toks[0]
if not self.record:
raise Exception("Can't evaluate single column with no record")
val = self.record.columnValue(column)
return [val]
def __multOp(self, toks):
value = toks[0]
_prod = self.__normalizeNumeric(value[0])
for op,val in self.operatorOperands(value[1:]):
if op == '*': _prod *= val
if op == '/':
_prod /= val
return _prod
def __expOp(self, toks):
value = toks[0]
res = self.__normalizeNumeric(value[0])
for op,val in self.operatorOperands(value[1:]):
if op == '^': res = pow(res, val)
return res
def __addOp(self, toks):
value = toks[0]
_sum = self.__normalizeNumeric(value[0])
for op,val in self.operatorOperands(value[1:]):
if op == '+': _sum += val
if op == '-': _sum -= val
return _sum
def __evalLogicOp(self, toks):
args = toks[0]
if self.verbose:
logging.debug(args)
val1 = args[0]
for op, val in self.operatorOperands(args[1:]):
fn = self.opMap[op]
val2 = val
val1 = fn(val1, val2)
return val1
def __evalComparisonOp(self, tokens):
args = tokens[0]
val1 = args[0]
for op,val in self.operatorOperands(args[1:]):
fn = self.opMap[op]
val2 = val
if not fn(val1,val2):
break
val1 = val2
else:
return True
return False
def __evalConstant(self, toks):
return float(toks[0])
def __getArglist(self, args):
if type(args) is list and len(args) > 0:
first = args[0]
if type(first) is list:
return first
return args
return []
def __evalFunction(self, toks):
val = toks[0]
fnName = val[0].upper()
args = val[1:]
args = [arg for arg in args if arg is not None] # Filter nones
if not args:
return 0
if fnName == 'SUM':
args = self.__getArglist(args)
if args:
return [sum(args)]
return [0]
elif fnName == 'AVE':
from tools import average
args = self.__getArglist(args)
if args:
return [average(args)]
return [0]
elif fnName == 'MAX':
args = self.__getArglist(args)
if args:
res = max(args)
return [res]
return [0]
elif fnName == "MIN":
args = self.__getArglist(args)
if args:
return [min(args)]
return [0]
elif fnName == "COUNT":
args = self.__getArglist(args)
return [len(args)]
elif fnName == "ALARMS":
from models import Alarm
# Usage: ALARMS([rule_id])
# Returns list of alarms in processed batch, optionally filtered by rule_id
alarm_list = list(self.alarm_list)
if args and type(args[0]) in [int, long, float]:
rule_id = int(args[0])
if rule_id:
alarm_list = [al for al in alarm_list if tools.getKey(Alarm, 'rule', al, asID=True) == rule_id]
return [alarm_list]
elif fnName == "DISTANCE":
dist = 0
last_gp = None
args = self.__getArglist(args)
for gp in args:
gp = tools.safe_geopoint(gp)
if last_gp and gp:
dist += tools.calcLocDistance(last_gp, gp)
if gp:
last_gp = gp
return [dist] # m
elif fnName == "SQRT":
arg = args[0]
return [math.sqrt(arg)]
elif fnName == "SINCE":
# Returns ms since event (argument), or 0 if none found
event = args[0]
since = 0
now = self.run_ms
try:
if event:
if type(event) in [long, float]:
# Treat as ms timestamp
since = now - event
elif isinstance(event, basestring):
pass
elif event.kind() == 'Alarm':
since = now - tools.unixtime(event.dt_start)
elif event.kind() == 'Record':
since = now - tools.unixtime(event.dt_recorded)
except Exception, e:
logging.warning("Error in SINCE() - %s" % e)
return [since]
elif fnName == "LAST_ALARM":
# Takes optional argument of rule ID to filter alarms
from models import Alarm
rule_id = None
last_alarm = None
if args:
rule_id = int(args[0])
alarm_list = list(self.alarm_list)
if alarm_list:
if rule_id:
alarm_list = [al for al in alarm_list if tools.getKey(Alarm, 'rule', al, asID=True) == rule_id]
if alarm_list:
last_alarm = sorted(alarm_list, key=lambda al : al.dt_end, reverse=True)[0]
else:
last_alarm = self.analysis.sensor.alarm_set.order("-dt_end").get()
return [last_alarm]
elif fnName == "NOW":
return [self.run_ms]
elif fnName == "DOT":
# Calculate dot product. Args 1 and 2 must be numeric aggregate/lists of same size.
res = 0
if len(args) == 2:
if type(args[0]) is list and type(args[1]) is list:
import numpy as np
res = np.dot(args[0], args[1])
return [res]
elif fnName == "DELTA":
            # Calculate the delta between consecutive items in an array.
            # Input:  X = [1,2,2,2,5,6]
            # Deltas: [2-1, 2-2, 2-2, 5-2, 6-5, 0] (last item has no successor, see TODO below)
            # Result: [1, 0, 0, 3, 1, 0]
res = 0
li = args[0]
if type(li) is list:
res = []
for i, item in enumerate(li):
diff = 0 # TODO: Correct handling of edge diff (not actually 0)
if i+1 < len(li):
next = li[i+1]
diff = next - item
res.append(diff)
return [res]
else:
return [None]
return 0
def _getPattern(self):
arith_expr = Forward()
comp_expr = Forward()
logic_expr = Forward()
LPAR, RPAR, SEMI = map(Suppress, "();")
multop = oneOf('* /')
plusop = oneOf('+ -')
expop = Literal("^")
compop = oneOf('> < >= <= != ==')
andop = Literal("AND")
orop = Literal("OR")
current_value = Literal(".")
# notop = Literal('NOT')
function = oneOf(' '.join(self.FUNCTIONS))
function_call = Group(function.setResultsName('fn') + LPAR + Optional(delimitedList(arith_expr)) + RPAR)
aggregate_column = QuotedString(quoteChar='{', endQuoteChar='}')
single_column = QuotedString(quoteChar='[', endQuoteChar=']')
integer = Regex(r"-?\d+")
real = Regex(r"-?\d+\.\d*")
# quotedString enables strings without quotes to pass
operand = \
function_call.setParseAction(self.__evalFunction) | \
aggregate_column.setParseAction(self.__evalAggregateColumn) | \
single_column.setParseAction(self.__evalSingleColumn) | \
((real | integer).setParseAction(self.__evalConstant)) | \
current_value.setParseAction(self.__evalCurrentValue) | \
quotedString.setParseAction(removeQuotes)
arith_expr << operatorPrecedence(operand,
[
(expop, 2, opAssoc.LEFT, self.__expOp),
(multop, 2, opAssoc.LEFT, self.__multOp),
(plusop, 2, opAssoc.LEFT, self.__addOp),
])
# comp_expr = Group(arith_expr + compop + arith_expr)
comp_expr << operatorPrecedence(arith_expr,
[
(compop, 2, opAssoc.LEFT, self.__evalComparisonOp),
])
logic_expr << operatorPrecedence(comp_expr,
[
(andop, 2, opAssoc.LEFT, self.__evalLogicOp),
(orop, 2, opAssoc.LEFT, self.__evalLogicOp)
])
pattern = logic_expr + StringEnd()
return pattern
def _parse_it(self):
if self.expr:
# try parsing the input string
try:
L = self.pattern.parseString(self.expr)
except ParseException, err:
L = ['Parse Failure', self.expr]
if self.verbose:
logging.error('Parse Failure')
logging.error(err.line)
logging.error(" "*(err.column-1) + "^")
logging.error(err)
except:
e = sys.exc_info()[0]
detail = traceback.format_exc()
logging.error("Other error occurred in parse_it for < %s >: %s - %s" % (self.expr, e, detail))
else:
if self.verbose:
logging.debug("%s -> %s" % (self.expr, L[0]))
return L[0]
return None
def run(self, record=None, record_list=None, alarm_list=None):
self.record_list = record_list
self.alarm_list = alarm_list
self.record = record
return self._parse_it()
|
{"hexsha": "7ead86cd946d8d3a8f75bb1ae4c172e8013d5b15", "size": 12391, "ext": "py", "lang": "Python", "max_stars_repo_path": "expressionParser.py", "max_stars_repo_name": "lagvier/echo-sense", "max_stars_repo_head_hexsha": "fe8ab921e7f61c48b224f0cc2832103a395a6cf7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "expressionParser.py", "max_issues_repo_name": "lagvier/echo-sense", "max_issues_repo_head_hexsha": "fe8ab921e7f61c48b224f0cc2832103a395a6cf7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "expressionParser.py", "max_forks_repo_name": "lagvier/echo-sense", "max_forks_repo_head_hexsha": "fe8ab921e7f61c48b224f0cc2832103a395a6cf7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-20T13:22:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-20T13:22:22.000Z", "avg_line_length": 34.7086834734, "max_line_length": 115, "alphanum_fraction": 0.5107739488, "include": true, "reason": "import numpy", "num_tokens": 2908}
|
#!/usr/bin/env python
# coding=utf-8
import numpy as np
import cv2
file = open("../build/Record.txt")
cv2.namedWindow("Window")
for line in file.readlines():
background = np.zeros((1024, 1024, 3), dtype=np.uint8)
line = line[:-1]
points = line.split(" ")
oriPt = (-int(100 * float(points[0])), int(100 * float(points[1])) + 256)
prePt = (-int(100 * float(points[2])), int(100 * float(points[3])) + 256)
img = cv2.circle(background, oriPt, 10, (255, 0, 0), 3)
img = cv2.circle(background, prePt, 10, (0, 255, 0), 3)
cv2.imshow("Window", img)
key = cv2.waitKey(0)
if key == ord('q'):
break
|
{"hexsha": "b54cab0afadf764b7837d920324a2e014809e84a", "size": 644, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/drawCircle.py", "max_stars_repo_name": "srm2021/WMJ2021", "max_stars_repo_head_hexsha": "ce142019ed55ca591a27f5f79abb26cdb98fdb0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2021-08-30T01:46:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T06:41:21.000Z", "max_issues_repo_path": "test/drawCircle.py", "max_issues_repo_name": "srm2021/WMJ2021", "max_issues_repo_head_hexsha": "ce142019ed55ca591a27f5f79abb26cdb98fdb0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/drawCircle.py", "max_forks_repo_name": "srm2021/WMJ2021", "max_forks_repo_head_hexsha": "ce142019ed55ca591a27f5f79abb26cdb98fdb0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-08-30T09:46:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:19:19.000Z", "avg_line_length": 23.8518518519, "max_line_length": 77, "alphanum_fraction": 0.5885093168, "include": true, "reason": "import numpy", "num_tokens": 213}
|
[STATEMENT]
lemma Class_cover_imp_subset_or_disj:
assumes "A = (\<Union> (Class B ` C))" and "x \<in> G" and "C \<subseteq> G"
shows "Class B x \<subseteq> A \<or> Class B x \<inter> A = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Class B x \<subseteq> A \<or> Class B x \<inter> A = {}
[PROOF STEP]
by (simp add: Class_cover_imp_subset_or_disj assms)
|
{"llama_tokens": 147, "file": "Kneser_Cauchy_Davenport_Kneser_Cauchy_Davenport_preliminaries", "length": 1}
|
(==){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = ((a.lo == b.lo) & (a.hi == b.hi))
(!=){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = ((a.lo != b.lo) | (a.hi != b.hi))
(<=){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = ((a.lo <= b.lo) & (a.hi <= b.hi))
(>=){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = ((a.lo >= b.lo) & (a.hi >= b.hi))
(< ){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = (!(a >= b))
(> ){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = (!(a <= b))
(isequal){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = (a == b)
(isless ){G<:Grasp, R<:Real}(a::Rvl{G,R}, b::Rvl{G,R}) = (a < b)
|
{"hexsha": "3032c9655d47b699a8988f76ffa9a7c6bd67f5b8", "size": 606, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/number/compares.jl", "max_stars_repo_name": "J-Sarnoff/InterVal.jl", "max_stars_repo_head_hexsha": "320e6980b596fc89f50b460669481ea0d80645d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/number/compares.jl", "max_issues_repo_name": "J-Sarnoff/InterVal.jl", "max_issues_repo_head_hexsha": "320e6980b596fc89f50b460669481ea0d80645d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/number/compares.jl", "max_forks_repo_name": "J-Sarnoff/InterVal.jl", "max_forks_repo_head_hexsha": "320e6980b596fc89f50b460669481ea0d80645d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.0909090909, "max_line_length": 85, "alphanum_fraction": 0.4174917492, "num_tokens": 338}
|
import Random
struct VariableIndex
value::Int64
end
struct ConstraintIndex
value::Int64
end
const CI = ConstraintIndex
const VI = VariableIndex
function chooseNbVar(L::Vector{Float64})
x::Float64 = rand()
if x < L[1]
return 2
    elseif x < L[1] + L[2]   # cumulative probability of drawing 2 or 3 variables
return 3
else
return 4
end
end
# Idea for the data structure: the same as the input data!
function generate(nbrVar::Int64, nbrConst::Int64)
c::Dict{VI, Int64} = Dict( VI(ind) => Random.rand(1:28) for ind in 1:nbrVar)
# Mbis::Array{Int64, 2} = zeros(Int, nbrConst, nbrVar)
# Mtris::Dict{VI, Dict{VI, Int64}} = Dict(VI(var) => Dict(VI(varBis) => 0 for varBis = 1:nbrVar) for var = 1:nbrVar)
M::Dict{CI, Vector{VI}} = Dict()
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # The complexity of this!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# listOccur::Dict{VI, Dict{VI, Int64}} = Dict(VI(var) => Dict(VI(varBis) => 0 for varBis = 1:nbrVar) for var = 1:nbrVar)
listOccur::Dict{VI, Dict{VI, Int64}} = Dict()
listLiaison::Dict{VI, Vector{VI}} = Dict()
occurVar::Dict{VI, Int64} = Dict(VI(var) => 0 for var = 1:nbrVar)
listProba::Vector{Float64} = [0.75, 0.20, 0.05]
for ind in 1:nbrConst
varConst::Int64 = chooseNbVar(listProba)
ListTirage = []
for _ in 1:varConst
var = rand(1:nbrVar)
while var in ListTirage
var = rand(1:nbrVar)
end
push!(ListTirage, var)
# Mbis[ind, var] = 1
end
# println(ListTirage)
for var1 in VI.(ListTirage)
occurVar[var1] += 1
for var2 in VI.(ListTirage)
if var1 != var2
if !haskey(listOccur, var1)
push!(listOccur, var1 => Dict())
push!(listLiaison, var1 => [var1])
end
if !haskey(listOccur[var1], var2)
push!(listOccur[var1], var2 => 0)
push!(listLiaison[var1], var2)
end
listOccur[var1][var2] += 1
end
end
end
push!(M, CI(ind) => VI.(ListTirage))
end
return c, M, listOccur, listLiaison, occurVar
# for indVar = 1:nbrVar
# for indConst = 1:nbrConst
# if Mbis[indConst, indVar] == 1
# for indVarBis = 1:nbrVar
# if Mbis[indConst, indVarBis] == 1 && indVar != indVarBis
# Mtris[VI(indVar)][VI(indVarBis)]+=1
# end
# end
# end
# end
# end
# return c, M, Mbis
end
function loadSPP(fname)
f= open(fname)
nbConst, nbVar = parse.(Int, split(readline(f)))
c = parse.(Int, split(readline(f)))
c = Dict(VI(ind) => c[ind] for ind=1:nbVar)
# listOccur::Dict{VI, Dict{VI, Int64}} = Dict(var1 => Dict(var1 => 0 for var2 in VI.(1:nbVar)) for var1 in VI.(1:nbVar))
listOccur::Dict{VI, Dict{VI, Int64}} = Dict()
listLiaison::Dict{VI, Vector{VI}} = Dict()
occurVar::Dict{VI, Int64} = Dict(VI(var) => 0 for var = 1:nbVar)
M::Dict{CI, Vector{VI}} = Dict()
for ind = 1:nbConst
readline(f)
ListTirage = parse.(Int, split(readline(f)))
# println(ListTirage)
for var1 in VI.(ListTirage)
occurVar[var1] += 1
for var2 in VI.(ListTirage)
if var1 != var2
if !haskey(listOccur, var1)
push!(listOccur, var1 => Dict())
push!(listLiaison, var1 => [var1])
end
if !haskey(listOccur[var1], var2)
push!(listOccur[var1], var2 => 0)
push!(listLiaison[var1], var2)
end
listOccur[var1][var2] += 1
end
end
end
push!(M, CI(ind) => VI.(ListTirage))
end
return c, M, listOccur, listLiaison, occurVar
end
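# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A toy random instance: 10 variables, 5 constraints. Values are random, so only the
# container sizes are meaningful here.
#
#     c, M, listOccur, listLiaison, occurVar = generate(10, 5)
#     println(length(c), " costs, ", length(M), " constraints")
#     println("occurrences per variable: ", occurVar)
#
# `loadSPP(fname)` returns the same five structures, but read from an SPP instance
# file whose exact format is the one assumed by the parser above.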
|
{"hexsha": "45d46921ae5948d8d4b17f0c5e7865a5c5e6715a", "size": 4074, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Dm/SppGenerator.jl", "max_stars_repo_name": "LucasBaussay/AntOptim.jl", "max_stars_repo_head_hexsha": "d97041d2763a66d92fd3a7a205670aa963dabd68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Dm/SppGenerator.jl", "max_issues_repo_name": "LucasBaussay/AntOptim.jl", "max_issues_repo_head_hexsha": "d97041d2763a66d92fd3a7a205670aa963dabd68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Dm/SppGenerator.jl", "max_forks_repo_name": "LucasBaussay/AntOptim.jl", "max_forks_repo_head_hexsha": "d97041d2763a66d92fd3a7a205670aa963dabd68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9558823529, "max_line_length": 124, "alphanum_fraction": 0.4950908198, "num_tokens": 1277}
|
import numpy as np
from PyQt5.QtCore import pyqtSlot, QThread
from checkers.gui.worker import Worker
from checkers.image.pawn_colour import opposite
from checkers.logic.move import check_move
from checkers.logic.move_status import MoveStatus
first_matrix = np.array(
[[0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 2, 0],
[0, 0, 0, 2, 0, 0, 0, 0],
[2, 0, 0, 0, 0, 0, 2, 0],
[0, 2, 0, 2, 0, 2, 0, 2],
[2, 0, 2, 0, 2, 0, 2, 0]],
dtype=int
)
second_matrix = np.array(
[[0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0],
[0, 2, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[2, 0, 0, 0, 0, 0, 2, 0],
[0, 2, 0, 2, 0, 2, 0, 2],
[2, 0, 2, 0, 2, 0, 2, 0]],
dtype=int
)
third_matrix = np.array(
[[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[2, 0, 0, 0, 0, 0, 2, 0],
[0, 2, 0, 2, 0, 2, 0, 2],
[2, 0, 2, 0, 2, 0, 2, 0]],
dtype=int
)
class WorkerNoCam(Worker):
def __init__(self):
super().__init__()
self.i = 0
@pyqtSlot()
def capture_video(self):
while True:
QThread.usleep(16660)
if self.should_emit:
self.before_matrix = self.after_matrix
if self.i == 0:
self.after_matrix = first_matrix
if self.i == 1:
self.after_matrix = second_matrix
if self.i == 2:
self.after_matrix = third_matrix
self.emit_new_board(self.after_matrix)
self.emit_new_pawns_label(self.count_pawns_and_display(self.after_matrix))
if self.after_matrix is not None and self.i > 0:
print(check_move(self.before_matrix, self.after_matrix, self.player_colour))
if check_move(self.before_matrix, self.after_matrix, self.player_colour) == MoveStatus.CORRECT:
text = 'Correct move'
self.player_colour = opposite(self.player_colour)
elif check_move(self.before_matrix, self.after_matrix, self.player_colour) == MoveStatus.INCORRECT:
text = 'Incorrect move'
elif check_move(self.before_matrix, self.after_matrix, self.player_colour) == MoveStatus.UNDEFINED:
text = 'Undefined move'
elif check_move(self.before_matrix, self.after_matrix, self.player_colour) == MoveStatus.GAME_OVER:
text = 'Game over'
self.emit_new_label('Game over - ' + str(self.player_colour)[11:].lower() + ' wins')
else:
text = 'No change detected'
if text != 'Game over':
self.emit_new_label(text + ' - ' + str(self.player_colour)[11:].lower() + ' turn')
print(self.player_colour)
self.i = self.i + 1
self.should_emit = False
|
{"hexsha": "a1b5f9798a60312e498ea2d620a0ad4ab099bf6c", "size": 3170, "ext": "py", "lang": "Python", "max_stars_repo_path": "checkers/gui/worker_no_cam.py", "max_stars_repo_name": "mnajborowski/pt-projekt", "max_stars_repo_head_hexsha": "fa02580464579dbe3eb13b6b07f4f8f3cb4d44ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-26T17:20:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T17:20:35.000Z", "max_issues_repo_path": "checkers/gui/worker_no_cam.py", "max_issues_repo_name": "mnajborowski/pt-projekt", "max_issues_repo_head_hexsha": "fa02580464579dbe3eb13b6b07f4f8f3cb4d44ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "checkers/gui/worker_no_cam.py", "max_forks_repo_name": "mnajborowski/pt-projekt", "max_forks_repo_head_hexsha": "fa02580464579dbe3eb13b6b07f4f8f3cb4d44ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8604651163, "max_line_length": 119, "alphanum_fraction": 0.5, "include": true, "reason": "import numpy", "num_tokens": 1078}
|
-- Integration over the complex closed disk
import measure_theory.function.jacobian
import measure
import prod
import simple
import tactics
open complex (abs arg exp I)
open linear_map (to_matrix_apply)
open measure_theory
open metric (ball closed_ball sphere)
open real (cos sin)
open set (Icc Ioc)
open_locale real
noncomputable theory
section real_circle_map
-- circle_map as a map from ℝ² → ℝ²
def real_circle_map (c : ℂ) (x : ℝ × ℝ) : ℝ × ℝ := ⟨c.re + x.1 * cos x.2, c.im + x.1 * sin x.2⟩
def real_circle_map_eq_circle_map (c : ℂ) (x : ℝ × ℝ)
: real_circle_map c x = complex.equiv_real_prod (circle_map c x.1 x.2) :=
by simp [real_circle_map, circle_map]
-- The derivative of real_circle_map
def d1 := continuous_linear_map.fst ℝ ℝ ℝ
def d2 := continuous_linear_map.snd ℝ ℝ ℝ
def rcm_deriv (x : ℝ × ℝ) : ℝ × ℝ →L[ℝ] ℝ × ℝ :=
(0 + (x.1 • (-sin x.2) • d2 + cos x.2 • d1)).prod (0 + (x.1 • cos x.2 • d2 + sin x.2 • d1))
lemma real_circle_map.fderiv {c : ℂ} {x : ℝ × ℝ} : has_fderiv_at (λ x, real_circle_map c x) (rcm_deriv x) x := begin
simp_rw [real_circle_map],
apply_rules [has_fderiv_at_const, has_fderiv_at_fst, has_fderiv_at_snd, has_fderiv_at.cos, has_fderiv_at.sin,
has_fderiv_at.add, has_fderiv_at.mul, has_fderiv_at.prod],
end
-- det rcm_deriv
def rcm_matrix (x : ℝ × ℝ) := linear_map.to_matrix (basis.fin_two_prod ℝ) (basis.fin_two_prod ℝ) (rcm_deriv x)
lemma rcm00 (x : ℝ × ℝ) : rcm_matrix x 0 0 = cos x.2 := by simp [rcm_matrix, rcm_deriv, to_matrix_apply, d1, d2]
lemma rcm01 (x : ℝ × ℝ) : rcm_matrix x 0 1 = -x.1 * sin x.2 := by simp [rcm_matrix, rcm_deriv, to_matrix_apply, d1, d2]
lemma rcm10 (x : ℝ × ℝ) : rcm_matrix x 1 0 = sin x.2 := by simp [rcm_matrix, rcm_deriv, to_matrix_apply, d1, d2]
lemma rcm11 (x : ℝ × ℝ) : rcm_matrix x 1 1 = x.1 * cos x.2 := by simp [rcm_matrix, rcm_deriv, to_matrix_apply, d1, d2]
lemma rcm_deriv.det (x : ℝ × ℝ) : (rcm_deriv x).det = x.1 := begin
rw [continuous_linear_map.det, ←linear_map.det_to_matrix (basis.fin_two_prod ℝ), ←rcm_matrix],
rw [matrix.det_fin_two, rcm00, rcm01, rcm10, rcm11], ring_nf,
calc x.1 * cos x.2^2 + sin x.2^2 * x.1 = x.1 * (cos x.2^2 + sin x.2^2) : by ring
... = x.1 : by simp,
end
end real_circle_map
@[simp] lemma metric.sphere_eq_empty {S : Type} [is_R_or_C S] {c : S} {r : ℝ}
: sphere c r = ∅ ↔ r < 0 := begin
constructor, {
intros rp, contrapose rp, simp at rp,
refine set.nonempty.ne_empty ⟨c+r, _⟩,
simpa [is_R_or_C.norm_of_real],
}, {
intros n, contrapose n,
rw ←set.not_nonempty_iff_eq_empty at n, simp at n ⊢, assumption,
},
end
-- range (circle_map c r _) = sphere c r even when restricted to set.Ioc
lemma circle_map_Ioc {c z : ℂ} {r : ℝ} (zs : z ∈ sphere c r) : ∃ t, t ∈ Ioc 0 (2*π) ∧ z = circle_map c r t := begin
by_cases rp : r < 0, { simp [metric.sphere_eq_empty.mpr rp] at zs, finish },
simp at rp,
rw [←abs_of_nonneg rp, ←range_circle_map, set.mem_range] at zs,
rcases zs with ⟨t,ht⟩,
generalize ha : 2*π = a,
have ap : a > 0, { rw ←ha, exact real.two_pi_pos },
set s := t + a - a*⌈t/a⌉,
existsi s, constructor, {
simp, constructor, {
calc a*⌈t/a⌉ < a*(t/a+1) : by bound [(mul_lt_mul_left ap).mpr, int.ceil_lt_add_one]
... = a/a*t + a : by ring
... = t + a : by field_simp [ne_of_gt ap]
}, {
flip_ineq,
calc a + a*⌈t/a⌉ ≥ a + a*(t/a) : by bound [int.le_ceil]
... = a/a*t + a : by ring
... = t + a : by field_simp [ne_of_gt ap]
}
}, {
rw [←ht, circle_map], simp,
rw [mul_sub_right_distrib, right_distrib, complex.exp_sub, complex.exp_add],
rw [mul_comm _ ↑⌈_⌉, mul_assoc, complex.exp_int_mul, ←ha], simp,
},
end
section fubini_helper
-- The square that we'll map onto the ball
def square (r : ℝ) : set (ℝ × ℝ) := set.Ioc 0 r ×ˢ (Ioc 0 (2*π))
lemma square.rp {r : ℝ} {x : ℝ × ℝ} : x ∈ square r → x.1 > 0 := begin simp [square], finish end
lemma measurable.square {r : ℝ} : measurable_set (square r) := by apply_rules [measurable_set.prod, measurable_set_Ioc]
lemma square_eq {c : ℂ} {r : ℝ}
: complex.measurable_equiv_real_prod.symm ⁻¹' (closed_ball c r \ {c}) = real_circle_map c '' square r := begin
rw ←measurable_equiv.image_eq_preimage,
have e : real_circle_map c = (λ x : ℝ × ℝ, complex.measurable_equiv_real_prod (circle_map c x.1 x.2)), {
funext, rw real_circle_map_eq_circle_map, simp [complex.measurable_equiv_real_prod],
},
have i : (λ x : ℝ × ℝ, circle_map c x.1 x.2) '' square r = closed_ball c r \ {c}, {
apply set.ext, intro z, rw set.mem_image, constructor, {
intro gp, rcases gp with ⟨⟨s,t⟩,ss,tz⟩, simp at tz, simp [square] at ss, rw ←tz, simp,
exact ⟨by simp [circle_map, abs_of_pos ss.1.1, ss.1.2], ne_of_gt ss.1.1⟩,
}, {
intro zr, simp at zr, rw dist_comm at zr,
have zz : z ∈ sphere c (dist c z), { simp [complex.dist_eq], rw ←complex.abs_neg, simp },
rcases circle_map_Ioc zz with ⟨t,ts,tz⟩,
existsi (⟨dist c z, t⟩ : ℝ × ℝ), simp [dist_nonneg, zr, ne.symm zr.2, square, ts, tz.symm],
},
},
rw [e, set.image_comp _ _ (square r), i],
end
-- exp it = cos t + i sin t
lemma exp_of_im (t : ℝ) : exp (t * I) = cos t + sin t*I :=
by simp [complex.ext_iff, complex.cos_of_real_re, complex.sin_of_real_re]
lemma complex.cos_eq_cos (t : ℝ) : complex.cos t = ↑(real.cos t) := by simp
lemma complex.sin_eq_sin (t : ℝ) : complex.sin t = ↑(real.sin t) := by simp
-- arg e^(it)
lemma arg_exp_of_im (t : ℝ) : ∃ n : ℤ, arg (exp (t * I)) = t - 2*π*n := begin
generalize hn : ⌈t/(2*π) - 1/2⌉ = n, existsi n,
have en : exp (2*π*n*I) = 1, { rw [mul_comm _ ↑n, mul_assoc, complex.exp_int_mul], simp [complex.exp_neg] },
have e : exp (t*I) = exp (↑(t - 2*π*n)*I) := by simp [mul_sub_right_distrib, complex.exp_sub, en],
have ts : t - 2*π*n ∈ Ioc (-π) π, {
simp, constructor, {
have h : ↑n < t*(2*π)⁻¹ - 1/2 + 1, { rw ←hn, exact int.ceil_lt_add_one _ },
calc 2*π*↑n < 2*π*(t*(2*π)⁻¹ - 1/2 + 1) : by bound [(mul_lt_mul_left real.two_pi_pos).mpr]
... = π + (2*π)*(2*π)⁻¹*t : by ring
... = π + t : by field_simp [ne_of_gt real.two_pi_pos]
}, {
flip_ineq,
have h : ↑n ≥ t*(2*π)⁻¹ - 1/2, { rw ←hn, exact int.le_ceil _ },
calc π + 2*π*↑n ≥ π + 2*π*(t*(2*π)⁻¹ - 1/2) : by bound [real.two_pi_pos]
... = (2*π)*(2*π)⁻¹*t : by ring
... = t : by field_simp [ne_of_gt real.two_pi_pos]
}
},
rw [e, exp_of_im, ←complex.cos_eq_cos, ←complex.sin_eq_sin, complex.arg_cos_add_sin_mul_I ts],
end
-- real_circle_map is injective on the square
lemma rcm_inj {c : ℂ} {r : ℝ} : set.inj_on (real_circle_map c) (square r) := begin
intros x xs y ys e, simp [square] at xs ys,
simp_rw [real_circle_map_eq_circle_map, equiv.apply_eq_iff_eq] at e,
simp_rw [circle_map] at e, simp at e,
have re : abs (↑(x.1) * exp (x.2*I)) = abs (↑(y.1) * exp (y.2*I)) := by rw e,
simp [abs_of_pos xs.1.1, abs_of_pos ys.1.1] at re,
have ae : arg (↑(x.1) * exp (x.2*I)) = arg (↑(y.1) * exp (y.2*I)) := by rw e,
simp [complex.arg_real_mul _ xs.1.1, complex.arg_real_mul _ ys.1.1] at ae,
rcases arg_exp_of_im x.2 with ⟨nx,hx⟩,
rcases arg_exp_of_im y.2 with ⟨ny,h⟩,
rw [←ae, hx] at h, clear e ae hx,
have n0 : 2*π*(nx - ny) < (2*π)*1 := by linarith,
have n1 : (2*π)*-1 < 2*π*(nx - ny) := by linarith,
have hn : (nx : ℝ) - ny = ↑(nx - ny) := by simp,
have hn1 : (-1 : ℝ) = ↑(-1 : ℤ) := by simp,
have h1 : (1 : ℝ) = ↑(1 : ℤ) := by simp,
rw [mul_lt_mul_left real.two_pi_pos, hn] at n0 n1,
rw hn1 at n1, rw h1 at n0, rw int.cast_lt at n0 n1,
have n : nx = ny := by linarith,
rw n at h,
have i : x.2 = y.2 := by linarith,
have g : (x.1,x.2) = (y.1,y.2) := by rw [re, i],
simp at g, exact g,
end
end fubini_helper
-- Inverse lemma for fubini_ball
lemma measurable_symm_equiv_inverse {z : ℂ} : complex.measurable_equiv_real_prod.symm (complex.equiv_real_prod z) = z := begin
simp, rw [complex.measurable_equiv_real_prod, homeomorph.to_measurable_equiv_symm_coe], simp, apply complex.ext, simp, simp,
end
-- circle_map is continuous on ℝ × ℝ
lemma continuous_circle_map_full {c : ℂ} : continuous (λ x : ℝ × ℝ, circle_map c x.1 x.2) := by continuity
-- If x.to_real = y is positive, then x = of_real y
lemma invert_to_real {x : ennreal} {y : ℝ} (yp : y > 0) : x.to_real = y → x = ennreal.of_real y := begin
intro h, rw ←h, refine (ennreal.of_real_to_real _).symm,
contrapose yp, simp at yp, simp [yp] at h, simp [←h],
end
-- Integration over a complex ball using polar coordinates
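-- Informally (a sketch of the statement below, not a separate formal claim):
--   ∫_{closed_ball c r} f  =  ∫_{s ∈ (0,r]} s • ∫_{t ∈ (0,2π]} f (c + s·exp(t·I)) dt ds
-- i.e. the usual polar-coordinates change of variables, with the extra factor s
-- coming from the Jacobian determinant `rcm_deriv.det` computed above.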
lemma fubini_ball {E : Type} [normed_add_comm_group E] [normed_space ℝ E] [complete_space E]
{f : ℂ → E} {c : ℂ} {r : ℝ} (fc : continuous_on f (closed_ball c r))
: ∫ z in closed_ball c r, f z = ∫ s in set.Ioc 0 r, s • ∫ t in Ioc 0 (2*π), f (circle_map c s t) := begin
have center : closed_ball c r =ᵐ[volume] (closed_ball c r \ {c} : set ℂ) := ae_minus_point,
rw measure_theory.set_integral_congr_set_ae center, clear center,
have im := measure_preserving.symm _ complex.volume_preserving_equiv_real_prod,
rw ←measure_preserving.set_integral_preimage_emb im complex.measurable_equiv_real_prod.symm.measurable_embedding f _,
clear im,
rw square_eq,
have dc : ∀ x, x ∈ square r → has_fderiv_within_at (real_circle_map c) (rcm_deriv x) (square r) x :=
λ _ _, real_circle_map.fderiv.has_fderiv_within_at,
rw integral_image_eq_integral_abs_det_fderiv_smul volume measurable.square dc rcm_inj, clear dc,
simp_rw rcm_deriv.det,
simp_rw real_circle_map_eq_circle_map,
simp_rw measurable_symm_equiv_inverse,
have e : ∀ x : ℝ × ℝ, x ∈ square r → |x.1| • f (circle_map c x.1 x.2) = x.1 • f (circle_map c x.1 x.2), {
intros x xs, rw abs_of_pos (square.rp xs),
},
rw measure_theory.set_integral_congr measurable.square e, clear e, simp,
rw [square, measure.volume_eq_prod, measure_theory.set_integral_prod],
simp [integral_smul],
have fi : integrable_on (λ x : ℝ × ℝ, x.1 • f (circle_map c x.1 x.2)) (Icc 0 r ×ˢ Icc 0 (2*π)), {
apply continuous_on.integrable_on_compact,
exact is_compact.prod is_compact_Icc is_compact_Icc,
apply continuous_on.smul,
exact continuous_fst.continuous_on,
apply fc.comp (continuous_circle_map_full.continuous_on),
intros x xs, simp only, simp at xs,
apply metric.closed_ball_subset_closed_ball xs.2.1,
apply circle_map_mem_closed_ball _ xs.1.1,
},
exact fi.mono_set (set.prod_mono set.Ioc_subset_Icc_self set.Ioc_subset_Icc_self),
end
-- The volume of complex closed balls
lemma complex.volume_closed_ball' {c : ℂ} {r : ℝ} (rp : r ≥ 0) : (volume (closed_ball c r)).to_real = π * r^2 := begin
have c : continuous_on (λ _ : ℂ, (1 : ℝ)) (closed_ball c r) := continuous_on_const,
have f := fubini_ball c, clear c,
simp [ennreal.to_real_of_real (le_of_lt real.two_pi_pos), ←interval_integral.integral_of_le rp] at f,
have e : r ^ 2 / 2 * (2 * π) = π * r^2 := by ring, rwa e at f,
end
lemma complex.volume_closed_ball {c : ℂ} {r : ℝ} (rp : r ≥ 0) : volume (closed_ball c r) = ennreal.of_real (π * r^2) := begin
by_cases rp' : r > 0, {
exact invert_to_real (by bound [real.pi_pos]) (complex.volume_closed_ball' rp),
}, {
simp at rp', simp [le_antisymm rp' rp],
},
end
-- The volume of complex open balls
lemma complex.volume_ball' {c : ℂ} {r : ℝ} (rp : r ≥ 0) : (volume (ball c r)).to_real = π * r^2 := begin
by_cases r0 : r = 0, simp [r0],
have rs := lt_of_le_of_ne rp (ne.symm r0),
have hi' : volume (ball c r) ≤ volume (closed_ball c r) := measure_mono metric.ball_subset_closed_ball,
have hi := ennreal.to_real_mono (by simp [complex.volume_closed_ball rp]) hi',
have lo : (volume (ball c r)).to_real ≥ (volume (closed_ball c r)).to_real, {
simp [complex.volume_closed_ball' rp],
apply @le_of_forall_ge_of_dense _ _ _ (π * r^2) (volume (ball c r)).to_real,
intros a ar, by_cases an : a < 0, exact trans (le_of_lt an) (by simp), simp at an,
set s := real.sqrt (a / π),
have πp := real.pi_pos,
have sp : s ≥ 0 := real.sqrt_nonneg _,
have sr : s < r, {
calc s = real.sqrt (a / π) : rfl
... < real.sqrt (π * r^2 / π) : real.sqrt_lt_sqrt (by bound) ((div_lt_div_right (by bound)).mpr (by bound))
... = real.sqrt (π / π * r^2) : by ring_nf
... = real.sqrt (r^2) : by field_simp [ne_of_gt real.pi_pos]
... = r : real.sqrt_sq (by bound)
},
have e : a = (volume (closed_ball c s)).to_real, {
rw complex.volume_closed_ball' sp, symmetry,
have app : a / π ≥ 0 := by bound,
calc π * s^2 = π * real.sqrt (a / π)^2 : rfl
... = π * (a / π) : by rw real.sq_sqrt app
... = π / π * a : by ring
... = a : by field_simp [ne_of_gt real.pi_pos]
},
rw e, apply ennreal.to_real_mono, {
rw ←lt_top_iff_ne_top, refine lt_of_le_of_lt hi' _, simp [complex.volume_closed_ball rp],
}, {
apply measure_mono (metric.closed_ball_subset_ball sr),
}
},
have e := le_antisymm hi lo, rw e,
exact complex.volume_closed_ball' rp,
end
lemma complex.volume_ball {c : ℂ} {r : ℝ} (rp : r ≥ 0) : volume (ball c r) = ennreal.of_real (π * r^2) := begin
by_cases rp' : r > 0, {
exact invert_to_real (by bound [real.pi_pos]) (complex.volume_ball' rp),
}, {
simp at rp', simp [le_antisymm rp' rp],
},
end
-- closed_balls are nice
lemma nice_volume.closed_ball (c : ℂ) {r : ℝ} (rp : r > 0) : nice_volume (closed_ball c r) := {
measurable := measurable_set_closed_ball,
finite := by simp [complex.volume_closed_ball (le_of_lt rp)],
pos := begin simp [complex.volume_closed_ball (le_of_lt rp)], bound [real.pi_pos], end,
}
-- closed_balls have local volume
lemma local_volume.closed_ball {c : ℂ} {r : ℝ} (rp : r > 0) : local_volume_set (closed_ball c r) := begin
apply local_volume.closure_interior,
intros x r rp, rw complex.volume_ball (le_of_lt rp), simp, bound [real.pi_pos],
have rz := ne_of_gt rp,
simp [interior_closed_ball c rz, closure_ball c rz],
end
|
{"author": "girving", "repo": "ray", "sha": "e0c501756e067711e2d3667d4b1d18045d83a313", "save_path": "github-repos/lean/girving-ray", "path": "github-repos/lean/girving-ray/ray-e0c501756e067711e2d3667d4b1d18045d83a313/src/fubini_ball.lean"}
|
# -*- coding: utf-8 -*-
from numpy import linspace, zeros
from ....Classes.Segment import Segment
from ....Classes.Arc1 import Arc1
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
"""Return the full active surface
Parameters
----------
self : SlotM13
A SlotM13 object
alpha : float
float number for rotation (Default value = 0) [rad]
delta : complex
complex number for translation (Default value = 0)
Returns
-------
surf_act: Surface
Surface corresponding to the Active Area
"""
# get the name of the lamination
st = self.get_name_lam()
point_dict = self._comp_point_coordinate()
ZM0 = point_dict["ZM0"]
ZM1 = point_dict["ZM1"]
ZM2 = point_dict["ZM2"]
ZM3 = point_dict["ZM3"]
ZM4 = point_dict["ZM4"]
curve_list = list()
curve_list.append(Segment(ZM1, ZM2))
if self.is_outwards():
curve_list.append(Arc1(ZM2, ZM3, -self.Rtopm, is_trigo_direction=False))
else:
curve_list.append(Arc1(ZM2, ZM3, self.Rtopm))
curve_list.append(Segment(ZM3, ZM4))
curve_list.append(Segment(ZM4, ZM1))
Zmid = (ZM1 + ZM3) / 2
surface = SurfLine(
line_list=curve_list, label="Wind_" + st + "_R0_T0_S0", point_ref=Zmid
)
# Apply transformation
surface.rotate(alpha)
surface.translate(delta)
return surface
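# --- Hedged usage sketch (illustrative only; not part of pyleecan) ---
# `slot` is assumed to be a SlotM13 already attached to a lamination, built through
# the usual pyleecan machine-definition workflow:
#
#     from numpy import pi
#     surf = slot.get_surface_active(alpha=pi / 8, delta=0.05 + 0.02j)
#     print(surf.label)      # "Wind_<lam>_R0_T0_S0"
#     print(surf.point_ref)  # mid-point of the active area, rotated then translated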
|
{"hexsha": "461aff09744eef9c4242fb9f308e7cadb4eb6425", "size": 1416, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyleecan/Methods/Slot/SlotM13/get_surface_active.py", "max_stars_repo_name": "IrakozeFD/pyleecan", "max_stars_repo_head_hexsha": "5a93bd98755d880176c1ce8ac90f36ca1b907055", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 95, "max_stars_repo_stars_event_min_datetime": "2019-01-23T04:19:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T18:22:10.000Z", "max_issues_repo_path": "pyleecan/Methods/Slot/SlotM13/get_surface_active.py", "max_issues_repo_name": "IrakozeFD/pyleecan", "max_issues_repo_head_hexsha": "5a93bd98755d880176c1ce8ac90f36ca1b907055", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 366, "max_issues_repo_issues_event_min_datetime": "2019-02-20T07:15:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:37:23.000Z", "max_forks_repo_path": "pyleecan/Methods/Slot/SlotM13/get_surface_active.py", "max_forks_repo_name": "IrakozeFD/pyleecan", "max_forks_repo_head_hexsha": "5a93bd98755d880176c1ce8ac90f36ca1b907055", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 74, "max_forks_repo_forks_event_min_datetime": "2019-01-24T01:47:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T05:44:42.000Z", "avg_line_length": 24.4137931034, "max_line_length": 80, "alphanum_fraction": 0.6398305085, "include": true, "reason": "from numpy", "num_tokens": 408}
|
import torch
import numpy as np
from PIL import Image
import random
from ..model.vocab import Vocab
from ..tool.translate import process_image
import os
from collections import defaultdict
import math
from prefetch_generator import background
class BucketData(object):
def __init__(self, device):
self.max_label_len = 0
self.data_list = []
self.label_list = []
self.file_list = []
self.device = device
def append(self, datum, label, filename):
self.data_list.append(datum)
self.label_list.append(label)
self.file_list.append(filename)
self.max_label_len = max(len(label), self.max_label_len)
return len(self.data_list)
def flush_out(self):
"""
Shape:
- img: (N, C, H, W)
- tgt_input: (T, N)
- tgt_output: (N, T)
- tgt_padding_mask: (N, T)
"""
# encoder part
img = np.array(self.data_list, dtype=np.float32)
# decoder part
target_weights = []
tgt_input = []
for label in self.label_list:
label_len = len(label)
tgt = np.concatenate((
label,
np.zeros(self.max_label_len - label_len, dtype=np.int32)))
tgt_input.append(tgt)
one_mask_len = label_len - 1
target_weights.append(np.concatenate((
np.ones(one_mask_len, dtype=np.float32),
np.zeros(self.max_label_len - one_mask_len,dtype=np.float32))))
# reshape to fit input shape
tgt_input = np.array(tgt_input, dtype=np.int64).T
tgt_output = np.roll(tgt_input, -1, 0).T
tgt_output[:, -1]=0
tgt_padding_mask = np.array(target_weights)==0
filenames = self.file_list
self.data_list, self.label_list, self.file_list = [], [], []
self.max_label_len = 0
rs = {
'img': torch.FloatTensor(img).to(self.device),
'tgt_input': torch.LongTensor(tgt_input).to(self.device),
'tgt_output': torch.LongTensor(tgt_output).to(self.device),
'tgt_padding_mask':torch.BoolTensor(tgt_padding_mask).to(self.device),
'filenames': filenames
}
return rs
def __len__(self):
return len(self.data_list)
def __iadd__(self, other):
self.data_list += other.data_list
self.label_list += other.label_list
self.max_label_len = max(self.max_label_len, other.max_label_len)
self.max_width = max(self.max_width, other.max_width)
def __add__(self, other):
res = BucketData()
res.data_list = self.data_list + other.data_list
res.label_list = self.label_list + other.label_list
res.max_width = max(self.max_width, other.max_width)
res.max_label_len = max((self.max_label_len, other.max_label_len))
return res
class DataGen(object):
def __init__(self,data_root, annotation_fn, vocab, device, image_height=32, image_min_width=32, image_max_width=512):
self.image_height = image_height
self.image_min_width = image_min_width
self.image_max_width = image_max_width
self.data_root = data_root
self.annotation_path = os.path.join(data_root, annotation_fn)
self.vocab = vocab
self.device = device
self.clear()
def clear(self):
self.bucket_data = defaultdict(lambda: BucketData(self.device))
@background(max_prefetch=1)
def gen(self, batch_size, last_batch=True):
with open(self.annotation_path, 'r') as ann_file:
lines = ann_file.readlines()
np.random.shuffle(lines)
for l in lines:
img_path, lex = l.strip().split('\t')
img_path = os.path.join(self.data_root, img_path)
try:
img_bw, word = self.read_data(img_path, lex)
                except IOError:
                    print('ioread image:{}'.format(img_path))
                    continue
width = img_bw.shape[-1]
bs = self.bucket_data[width].append(img_bw, word, img_path)
if bs >= batch_size:
b = self.bucket_data[width].flush_out()
yield b
if last_batch:
for bucket in self.bucket_data.values():
if len(bucket) > 0:
b = bucket.flush_out()
yield b
self.clear()
def read_data(self, img_path, lex):
with open(img_path, 'rb') as img_file:
img = Image.open(img_file).convert('RGB')
img_bw = process_image(img, self.image_height, self.image_min_width, self.image_max_width)
word = self.vocab.encode(lex)
return img_bw, word
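# --- Hedged demo (added for illustration; not part of the original loader) ---
# Exercises BucketData.flush_out on synthetic data to show the documented shapes:
# img (N, C, H, W), tgt_input (T, N), tgt_output (N, T), tgt_padding_mask (N, T).
# The "images" and vocab indices below are made up; run as a module from the
# package root, since the relative imports above require the package context.
if __name__ == '__main__':
    bucket = BucketData(device='cpu')
    fake_img = np.zeros((3, 32, 128), dtype=np.float32)
    bucket.append(fake_img, [1, 5, 7, 2], 'fake_a.png')   # toy vocab indices
    bucket.append(fake_img, [1, 9, 2], 'fake_b.png')
    batch = bucket.flush_out()
    for key in ('img', 'tgt_input', 'tgt_output', 'tgt_padding_mask'):
        print(key, tuple(batch[key].shape))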
|
{"hexsha": "e021618745358585932b4a057a1a0790207879bb", "size": 5009, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/ocr/loader/dataloader_v1.py", "max_stars_repo_name": "martinhoang11/vietnamese-ocr-toolbox", "max_stars_repo_head_hexsha": "524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-09-05T10:42:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T16:27:26.000Z", "max_issues_repo_path": "modules/ocr/loader/dataloader_v1.py", "max_issues_repo_name": "martinhoang11/vietnamese-ocr-toolbox", "max_issues_repo_head_hexsha": "524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-16T11:35:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-16T11:35:24.000Z", "max_forks_repo_path": "modules/ocr/loader/dataloader_v1.py", "max_forks_repo_name": "martinhoang11/vietnamese-ocr-toolbox", "max_forks_repo_head_hexsha": "524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-09-05T13:26:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T07:49:45.000Z", "avg_line_length": 32.108974359, "max_line_length": 121, "alphanum_fraction": 0.567578359, "include": true, "reason": "import numpy", "num_tokens": 1079}
|
//#include <QtGui/QApplication>
#include <QApplication>
#include <QtDebug>
#include <QFile>
#include <QTextStream>
#include <QDateTime>
#include <QDir>
#include <QDesktopServices>
#include <boost/program_options.hpp>
namespace po = boost::program_options;
#include <iostream>
#include <algorithm>
#include <iterator>
using namespace std;
#include "mainwindow.h"
void myMessageOutput(QtMsgType type, const QMessageLogContext &context, const QString &msg)
{
QByteArray localMsg = msg.toLocal8Bit();
switch (type) {
case QtDebugMsg:
fprintf(stderr, "Debug: %s (%s:%u, %s)\n", localMsg.constData(), context.file, context.line, context.function);
break;
case QtWarningMsg:
fprintf(stderr, "Warning: %s (%s:%u, %s)\n", localMsg.constData(), context.file, context.line, context.function);
break;
case QtCriticalMsg:
fprintf(stderr, "Critical: %s (%s:%u, %s)\n", localMsg.constData(), context.file, context.line, context.function);
break;
case QtFatalMsg:
fprintf(stderr, "Fatal: %s (%s:%u, %s)\n", localMsg.constData(), context.file, context.line, context.function);
abort();
}
}
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
//QCoreApplication::setAttribute (Qt::AA_NativeWindows);
QCoreApplication::setAttribute (Qt::AA_DontCreateNativeWidgetSiblings);
//QFile outFile("pviz3.log");
//outFile.open(QIODevice::WriteOnly | QIODevice::Truncate);
std::vector<std::string> input_files;
std::string directory_name;
bool enableConnectToServer = false;
bool enableTileWindows = false;
bool enableFullscreen = false;
int colormap = 13;
QString location = QStandardPaths::writableLocation(QStandardPaths::DataLocation);
QDir dir(location);
if (!dir.exists())
dir.mkpath(location);
QFileInfo info = QFileInfo(dir.absoluteFilePath("pviz3.log"));
std::string logfile = info.absoluteFilePath().toUtf8().constData();
try
{
po::options_description desc("Allowed options");
desc.add_options()
("help,h", "produce help message")
("input-file,i", po::value< std::vector<std::string> >(&input_files), "input file")
("directory-open,d", po::value< std::string >(&directory_name), "directory open")
("connect", "connect to server")
("tile", "tile windows")
("fullscreen", "fullscreen")
("colormap,c", po::value< int>(&colormap), "colormap")
("logfile", po::value< std::string >(&logfile), "logfile")
;
po::positional_options_description p;
p.add("input-file", -1);
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).
options(desc).positional(p).run(), vm);
po::notify(vm);
if (vm.count("help"))
{
cout << "Usage: options_description [options]\n";
cout << desc;
return 0;
}
if (vm.count("connect"))
{
enableConnectToServer = true;
}
if (vm.count("tile"))
{
enableTileWindows = true;
}
if (vm.count("fullscreen"))
{
enableFullscreen = true;
}
}
catch(std::exception& e)
{
cout << e.what() << "\n";
return 1;
}
if (logfile != "-")
{
QFileInfo info = QFileInfo(QString::fromLatin1(logfile.c_str()));
qDebug() << "logfile ... " << info.absoluteFilePath();
freopen(info.absoluteFilePath().toUtf8().constData(), "w", stderr);
setvbuf(stderr, NULL, _IONBF, 0);
}
qInstallMessageHandler(myMessageOutput);
qDebug() << QDateTime::currentDateTime().toString() << "... Started. ";
MainWindow w;
w.show();
if (enableFullscreen)
w.setWindowState(Qt::WindowFullScreen);
for (std::vector<std::string>::iterator it = input_files.begin(); it < input_files.end(); it++)
{
w.OpenDataFile(QString((*it).c_str()));
}
if (directory_name.length() > 0)
{
qDebug() << "Ready to open directory: " << QDir(QString(directory_name.c_str())).canonicalPath();
w.OpenDirectory(QString(directory_name.c_str()));
}
if (enableConnectToServer)
{
qDebug() << "Connect to server";
w.ConnectToActiveMQServer();
}
if (enableTileWindows)
w.TileSubWindows();
w.SetColorMap(colormap);
//QString location = QDesktopServices::storageLocation(QDesktopServices::DataLocation);
//QFileInfo info = QFileInfo(location + "/PVIZ3/pviz3.ini");
return app.exec();
}
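// --- Hedged usage notes (added for illustration; the binary name "pviz3" is an
// --- assumption based on the repository and log-file name) ---
// Example invocations, using only the options declared in `desc` above:
//   pviz3 file1 file2 --tile --colormap 7      // positional input files, tiled windows
//   pviz3 -d /path/to/directory --connect --fullscreen
//   pviz3 --logfile -                          // "-" keeps messages on stderr instead of a file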
|
{"hexsha": "4987ca14bc46f54244fad87ba20aecc711c530f8", "size": 4816, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/main.cpp", "max_stars_repo_name": "jychoi-hpc/pviz3", "max_stars_repo_head_hexsha": "d55c84a45df0a5bf30ecb832b370e03f0c7ab4c1", "max_stars_repo_licenses": ["xpp"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main.cpp", "max_issues_repo_name": "jychoi-hpc/pviz3", "max_issues_repo_head_hexsha": "d55c84a45df0a5bf30ecb832b370e03f0c7ab4c1", "max_issues_repo_licenses": ["xpp"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.cpp", "max_forks_repo_name": "jychoi-hpc/pviz3", "max_forks_repo_head_hexsha": "d55c84a45df0a5bf30ecb832b370e03f0c7ab4c1", "max_forks_repo_licenses": ["xpp"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9130434783, "max_line_length": 126, "alphanum_fraction": 0.5884551495, "num_tokens": 1130}
|
"""
Helper routines to perform bias subtraction and overscan trimming of LRIS data.
"""
import scipy
def oneamp(data):
"""
Subtracts bias from data and returns the overscan region-subtracted
image.
"""
bias = (data[:,2:21].mean(axis=1)*18+data[:,2069:2148].mean(axis=1)*80)/98.
out_data = data[:,21:2069]-bias
return out_data
def redside(data):
"""
Subtracts bias from data and returns the overscan region-subtracted
image. CCD geometry is currently hardwired, so this won't work for
windowed or binned setups.
"""
if data.shape[1]==2148:
return oneamp(data)
bias = scipy.empty((2,data.shape[0]))
bias[0] = (data[:,1:20].mean(axis=1)*19+data[:,2088:2168].mean(axis=1)*80)/99.
bias[1] = (data[:,20:38].mean(axis=1)*18+data[:,2168:2248].mean(axis=1)*80)/98.
out_data = scipy.empty((2046,2048))
"""
Mask out the bad columns. Note this might not be appropriate for older
data (or if the CCDs change).
"""
mask = (data[0:1490,995]+data[0:1490,997])/2.
data[0:1490,996] = mask.copy()
mask = (data[0:1493,999]+data[0:1493,1001])/2.
data[0:1493,1000] = mask.copy()
data = data.transpose()
out_data[0:1023,:] = data[41:1064,:] - bias[0]
out_data[1023:2046,:] = data[1064:2087,:] - bias[1]
"""
Fix difference in amplifier gains. This does *not* convert from DN
to electrons.
"""
out_data[1023:2046,:] *= 1.0765 # Note this differs from the LRIS
# website that would suggest 1.0960
out_data = out_data.transpose()
return out_data
def blueside(data):
"""
Subtracts bias from data and returns the overscan region-subtracted
image.
"""
data = data.T
data = data[:,::-1]
size = data.shape[1]
bias = scipy.empty((4,size))
bias[0] = (data[0:50,:].mean(axis=0) + data[4300:4380,:].mean(axis=0))/2.
bias[1] = (data[52:101,:].mean(axis=0) + data[4380:4460,:].mean(axis=0))/2.
bias[2] = (data[102:153,:].mean(axis=0) + data[4460:4540,:].mean(axis=0))/2.
bias[3] = (data[153:202,:].mean(axis=0) + data[4540:4620,:].mean(axis=0))/2.
"""
Conversion factor from DN to electrons (from LRIS website; disabled for
now).
"""
gain = [1.55,1.56,1.63,1.70]
outdata = scipy.empty((4096,4096))
for i in range(4):
outstart = i*1024
datastart = i*1024 + 204
outdata[outstart:outstart+1024,:] = data[datastart:datastart+1024,:] - bias[i]
# outdata[outstart:outstart+1024,:] *= gain[i]
del bias
return outdata
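# --- Hedged sanity check (added for illustration; not part of the pipeline) ---
# Runs `redside` on a purely synthetic two-amp frame. The 2048 x 2248 shape is an
# assumption inferred from the slicing above (2048 data rows, 2248 columns including
# both overscan regions); real frames come from FITS files. This also assumes a
# SciPy version old enough to still expose scipy.empty, as used by the helpers above.
if __name__ == '__main__':
    import numpy
    fake_frame = numpy.full((2048, 2248), 1000.0)
    trimmed = redside(fake_frame)
    print(trimmed.shape)  # (2048, 2046) for this synthetic input, bias- and overscan-subtracted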
|
{"hexsha": "0a70da429f7935c17f2e70b5bb068a76a0895e83", "size": 2377, "ext": "py", "lang": "Python", "max_stars_repo_path": "keckcode/lris_redux/lris/lris_biastrim.py", "max_stars_repo_name": "cdfassnacht/keck_code", "max_stars_repo_head_hexsha": "a952b3806b3e64eef70deec2b2d1352e6ef6dfa0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keckcode/lris_redux/lris/lris_biastrim.py", "max_issues_repo_name": "cdfassnacht/keck_code", "max_issues_repo_head_hexsha": "a952b3806b3e64eef70deec2b2d1352e6ef6dfa0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keckcode/lris_redux/lris/lris_biastrim.py", "max_forks_repo_name": "cdfassnacht/keck_code", "max_forks_repo_head_hexsha": "a952b3806b3e64eef70deec2b2d1352e6ef6dfa0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-15T23:16:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-15T23:16:36.000Z", "avg_line_length": 26.4111111111, "max_line_length": 80, "alphanum_fraction": 0.6571308372, "include": true, "reason": "import scipy", "num_tokens": 818}
|
from tpot import TPOTRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from deap import creator
from sklearn.model_selection import cross_val_score, cross_val_predict
import numpy as np
from tempfile import mkdtemp
from shutil import rmtree
random_seed = 42
housing = load_boston()
X_train, X_test, y_train, y_test = \
train_test_split(housing.data, housing.target, train_size=0.75, test_size=0.25, random_state=random_seed)
# used scoring
scoring = 'neg_mean_absolute_error'
tpot_config = {
'sklearn.linear_model.ElasticNetCV': {
'l1_ratio': np.arange(0.0, 1.01),
'tol': [1e-5]
},
'sklearn.neighbors.KNeighborsRegressor': {
'n_neighbors': range(1,2),
'weights': ["uniform", "distance"],
'p': [1, 2]
},
# preprocessing
'sklearn.decomposition.PCA': {
'svd_solver': ['randomized'],
'iterated_power': range(1,2)
}
}
# create a directory where to cache the results
cachedir = mkdtemp()
tpot = TPOTRegressor(generations=5,
                     population_size=50,
                     verbosity=2,
                     random_state=random_seed,
                     config_dict='TPOT light',
                     scoring=scoring,
                     memory=cachedir  # wire up the temporary cache directory created above
                     )
tpot.fit(X_train, y_train)
print('Test score using optimal model: %f ' %tpot.score(X_test, y_test))
tpot.export('BayOptPy/tpot/debug/tpot_boston_pipeline.py')
# get the list of models analysed
analysed_models = list(tpot.evaluated_individuals_.items())
predicted_age = {}
for model in analysed_models:
model_name = model[0]
model_info = model[1]
# fit the data
optimised_pipeline = creator.Individual.from_string(model_name, tpot._pset)
# Transform the tree expression int a callable function
fitted_pipeline = tpot._toolbox.compile(expr=optimised_pipeline)
tpot._set_param_recursive(fitted_pipeline.steps, 'random_state', random_seed)
predicted_age[model_name] = {}
predicted_age[model_name]['age'] = cross_val_predict(fitted_pipeline, X_test, y_test, cv=5)
# remove the cached directory
rmtree(cachedir)
print('Done')
# Evaluate analysed pipelines
# print the top two values of the pipeline dictionary
print(dict(list(tpot.evaluated_individuals_.items())[0:2]))
# print a pipeline and its values
pipeline_str = list(tpot.evaluated_individuals_.keys())[0]
print(pipeline_str)
print(tpot.evaluated_individuals_[pipeline_str])
# convert pipeline string to scikit-learn pipeline object
optimized_pipeline = creator.Individual.from_string(pipeline_str, tpot._pset) # deap object
fitted_pipeline = tpot._toolbox.compile(expr=optimized_pipeline) # scikit-learn pipeline object
# print scikit-learn pipeline object
print(fitted_pipeline)
# Fix random state when the operator allows (optional) just for get consistent CV score
tpot._set_param_recursive(fitted_pipeline.steps, 'random_state', random_seed)
scores = cross_val_score(fitted_pipeline, X_train, y_train, cv=5, scoring=scoring, verbose=0)
print(np.mean(scores))
print(tpot.evaluated_individuals_[pipeline_str]['internal_cv_score'])  # assumes a TPOT version where evaluated_individuals_ stores dicts
|
{"hexsha": "d74dcddc44fa6a8d5664ba158fea073671809d7f", "size": 3065, "ext": "py", "lang": "Python", "max_stars_repo_path": "BayOptPy/tpot/debug/tpot_boston.py", "max_stars_repo_name": "Mind-the-Pineapple/tpot-age", "max_stars_repo_head_hexsha": "2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-09T16:53:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-21T16:49:52.000Z", "max_issues_repo_path": "BayOptPy/tpot/debug/tpot_boston.py", "max_issues_repo_name": "Mind-the-Pineapple/tpot-age", "max_issues_repo_head_hexsha": "2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BayOptPy/tpot/debug/tpot_boston.py", "max_forks_repo_name": "Mind-the-Pineapple/tpot-age", "max_forks_repo_head_hexsha": "2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8395061728, "max_line_length": 105, "alphanum_fraction": 0.7389885808, "include": true, "reason": "import numpy", "num_tokens": 760}
|
from Pipeline.main.PullData.Misc.PullCoinMarketCap import PullCoinMarketCap
import numpy as np
class MarketDetails:
"""
Period:
short: 1h, mid: 24h, long: 1w
"""
def __init__(self):
self.pull = PullCoinMarketCap()
def multiTicks(self, tickSizeList):
coinPage = self.pull.getPage()
tickDict = {}
for tickSize in tickSizeList:
tickDict[str(tickSize)] = self.getTick(noCoins=tickSize, page=coinPage)
return tickDict
def getTick(self, noCoins=1000, page=None):
n = 0
tickList = []
coinPage = self.pull.getPage() if not page else page
for coinDiv in coinPage.find("tbody").find_all("tr")[:noCoins]:
try:
percentSect = coinDiv.find_all("td", class_="percent-change")
if len(percentSect) == 3:
tickList.append(
[
1 if float(val["data-sort"]) > 0 else -1
for val in percentSect
]
)
else:
n += 1
except ValueError:
n += 1
ticks = [sum(tL) for tL in np.transpose(tickList)]
# print(tickList)
return {"short": ticks[0], "mid": ticks[1], "long": ticks[2]}
|
{"hexsha": "9d86a8c3dde79b322b53513d36394d8329a20965", "size": 1350, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pipeline/main/Monitor/MarketDetails.py", "max_stars_repo_name": "simonydbutt/b2a", "max_stars_repo_head_hexsha": "0bf4a6de8547d73ace22967780442deeaff2d5c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-07-01T03:36:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-13T17:22:46.000Z", "max_issues_repo_path": "Pipeline/main/Monitor/MarketDetails.py", "max_issues_repo_name": "simonydbutt/b2a", "max_issues_repo_head_hexsha": "0bf4a6de8547d73ace22967780442deeaff2d5c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pipeline/main/Monitor/MarketDetails.py", "max_forks_repo_name": "simonydbutt/b2a", "max_forks_repo_head_hexsha": "0bf4a6de8547d73ace22967780442deeaff2d5c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3953488372, "max_line_length": 83, "alphanum_fraction": 0.5074074074, "include": true, "reason": "import numpy", "num_tokens": 322}
|
"""RDF datasets
Datasets from "A Collection of Benchmark Datasets for
Systematic Evaluations of Machine Learning on
the Semantic Web"
"""
import os
from collections import OrderedDict
import itertools
import rdflib as rdf
import abc
import re
import networkx as nx
import numpy as np
import dgl
import dgl.backend as F
from .utils import download, extract_archive, get_download_dir, _get_dgl_url
__all__ = ['AIFB', 'MUTAG', 'BGS', 'AM']
class Entity:
"""Class for entities
Parameters
----------
id : str
ID of this entity
cls : str
Type of this entity
"""
def __init__(self, id, cls):
self.id = id
self.cls = cls
def __str__(self):
return '{}/{}'.format(self.cls, self.id)
class Relation:
"""Class for relations
Parameters
----------
cls : str
Type of this relation
"""
def __init__(self, cls):
self.cls = cls
def __str__(self):
return str(self.cls)
class RDFGraphDataset:
"""Base graph dataset class from RDF tuples.
To derive from this, implement the following abstract methods:
* ``parse_entity``
* ``parse_relation``
* ``process_tuple``
* ``process_idx_file_line``
* ``predict_category``
Preprocessed graph and other data will be cached in the download folder
to speedup data loading.
The dataset should contain a "trainingSet.tsv" and a "testSet.tsv" file
for training and testing samples.
Attributes
----------
graph : dgl.DGLHeteroGraph
Graph structure
num_classes : int
Number of classes to predict
predict_category : str
The entity category (node type) that has labels for prediction
train_idx : Tensor
Entity IDs for training. All IDs are local IDs w.r.t. to ``predict_category``.
test_idx : Tensor
Entity IDs for testing. All IDs are local IDs w.r.t. to ``predict_category``.
labels : Tensor
All the labels of the entities in ``predict_category``
Parameters
----------
url : str or path
URL to download the raw dataset.
name : str
Name of the dataset
force_reload : bool, optional
If true, force load and process from raw data. Ignore cached pre-processed data.
print_every : int, optional
Log for every X tuples.
insert_reverse : bool, optional
If true, add reverse edge and reverse relations to the final graph.
"""
def __init__(self, url, name,
force_reload=False,
print_every=10000,
insert_reverse=True):
download_dir = get_download_dir()
zip_file_path = os.path.join(download_dir, '{}.zip'.format(name))
download(url, path=zip_file_path)
self._dir = os.path.join(download_dir, name)
extract_archive(zip_file_path, self._dir)
self._print_every = print_every
self._insert_reverse = insert_reverse
if not force_reload and self.has_cache():
print('Found cached graph. Load cache ...')
self.load_cache()
else:
raw_tuples = self.load_raw_tuples()
self.process_raw_tuples(raw_tuples)
print('#Training samples:', len(self.train_idx))
print('#Testing samples:', len(self.test_idx))
print('#Classes:', self.num_classes)
print('Predict category:', self.predict_category)
def load_raw_tuples(self):
raw_rdf_graphs = []
for i, filename in enumerate(os.listdir(self._dir)):
fmt = None
if filename.endswith('nt'):
fmt = 'nt'
elif filename.endswith('n3'):
fmt = 'n3'
if fmt is None:
continue
g = rdf.Graph()
print('Parsing file %s ...' % filename)
g.parse(os.path.join(self._dir, filename), format=fmt)
raw_rdf_graphs.append(g)
return itertools.chain(*raw_rdf_graphs)
def process_raw_tuples(self, raw_tuples):
mg = nx.MultiDiGraph()
ent_classes = OrderedDict()
rel_classes = OrderedDict()
entities = OrderedDict()
src = []
dst = []
ntid = []
etid = []
for i, (sbj, pred, obj) in enumerate(raw_tuples):
if i % self._print_every == 0:
print('Processed %d tuples, found %d valid tuples.' % (i, len(src)))
sbjent = self.parse_entity(sbj)
rel = self.parse_relation(pred)
objent = self.parse_entity(obj)
processed = self.process_tuple((sbj, pred, obj), sbjent, rel, objent)
if processed is None:
# ignored
continue
# meta graph
sbjclsid = _get_id(ent_classes, sbjent.cls)
objclsid = _get_id(ent_classes, objent.cls)
relclsid = _get_id(rel_classes, rel.cls)
mg.add_edge(sbjent.cls, objent.cls, key=rel.cls)
if self._insert_reverse:
mg.add_edge(objent.cls, sbjent.cls, key='rev-%s' % rel.cls)
# instance graph
src_id = _get_id(entities, str(sbjent))
if len(entities) > len(ntid): # found new entity
ntid.append(sbjclsid)
dst_id = _get_id(entities, str(objent))
if len(entities) > len(ntid): # found new entity
ntid.append(objclsid)
src.append(src_id)
dst.append(dst_id)
etid.append(relclsid)
src = np.array(src)
dst = np.array(dst)
ntid = np.array(ntid)
etid = np.array(etid)
ntypes = list(ent_classes.keys())
etypes = list(rel_classes.keys())
# add reverse edge with reverse relation
if self._insert_reverse:
print('Adding reverse edges ...')
newsrc = np.hstack([src, dst])
newdst = np.hstack([dst, src])
src = newsrc
dst = newdst
etid = np.hstack([etid, etid + len(etypes)])
etypes.extend(['rev-%s' % t for t in etypes])
self.build_graph(mg, src, dst, ntid, etid, ntypes, etypes)
print('Load training/validation/testing split ...')
idmap = F.asnumpy(self.graph.nodes[self.predict_category].data[dgl.NID])
glb2lcl = {glbid : lclid for lclid, glbid in enumerate(idmap)}
def findidfn(ent):
if ent not in entities:
return None
else:
return glb2lcl[entities[ent]]
self.load_data_split(findidfn)
self.save_cache(mg, src, dst, ntid, etid, ntypes, etypes)
def build_graph(self, mg, src, dst, ntid, etid, ntypes, etypes):
# create homo graph
print('Creating one whole graph ...')
g = dgl.graph((src, dst))
g.ndata[dgl.NTYPE] = F.tensor(ntid)
g.edata[dgl.ETYPE] = F.tensor(etid)
print('Total #nodes:', g.number_of_nodes())
print('Total #edges:', g.number_of_edges())
# convert to heterograph
print('Convert to heterograph ...')
hg = dgl.to_hetero(g,
ntypes,
etypes,
metagraph=mg)
print('#Node types:', len(hg.ntypes))
print('#Canonical edge types:', len(hg.etypes))
print('#Unique edge type names:', len(set(hg.etypes)))
self.graph = hg
def save_cache(self, mg, src, dst, ntid, etid, ntypes, etypes):
nx.write_gpickle(mg, os.path.join(self._dir, 'cached_mg.gpickle'))
np.save(os.path.join(self._dir, 'cached_src.npy'), src)
np.save(os.path.join(self._dir, 'cached_dst.npy'), dst)
np.save(os.path.join(self._dir, 'cached_ntid.npy'), ntid)
np.save(os.path.join(self._dir, 'cached_etid.npy'), etid)
save_strlist(os.path.join(self._dir, 'cached_ntypes.txt'), ntypes)
save_strlist(os.path.join(self._dir, 'cached_etypes.txt'), etypes)
np.save(os.path.join(self._dir, 'cached_train_idx.npy'), F.asnumpy(self.train_idx))
np.save(os.path.join(self._dir, 'cached_test_idx.npy'), F.asnumpy(self.test_idx))
np.save(os.path.join(self._dir, 'cached_labels.npy'), F.asnumpy(self.labels))
def has_cache(self):
return (os.path.exists(os.path.join(self._dir, 'cached_mg.gpickle'))
and os.path.exists(os.path.join(self._dir, 'cached_src.npy'))
and os.path.exists(os.path.join(self._dir, 'cached_dst.npy'))
and os.path.exists(os.path.join(self._dir, 'cached_ntid.npy'))
and os.path.exists(os.path.join(self._dir, 'cached_etid.npy'))
and os.path.exists(os.path.join(self._dir, 'cached_ntypes.txt'))
and os.path.exists(os.path.join(self._dir, 'cached_etypes.txt'))
and os.path.exists(os.path.join(self._dir, 'cached_train_idx.npy'))
and os.path.exists(os.path.join(self._dir, 'cached_test_idx.npy'))
and os.path.exists(os.path.join(self._dir, 'cached_labels.npy')))
def load_cache(self):
mg = nx.read_gpickle(os.path.join(self._dir, 'cached_mg.gpickle'))
src = np.load(os.path.join(self._dir, 'cached_src.npy'))
dst = np.load(os.path.join(self._dir, 'cached_dst.npy'))
ntid = np.load(os.path.join(self._dir, 'cached_ntid.npy'))
etid = np.load(os.path.join(self._dir, 'cached_etid.npy'))
ntypes = load_strlist(os.path.join(self._dir, 'cached_ntypes.txt'))
etypes = load_strlist(os.path.join(self._dir, 'cached_etypes.txt'))
self.train_idx = F.tensor(np.load(os.path.join(self._dir, 'cached_train_idx.npy')))
self.test_idx = F.tensor(np.load(os.path.join(self._dir, 'cached_test_idx.npy')))
labels = np.load(os.path.join(self._dir, 'cached_labels.npy'))
self.num_classes = labels.max() + 1
self.labels = F.tensor(labels)
self.build_graph(mg, src, dst, ntid, etid, ntypes, etypes)
def load_data_split(self, ent2id):
label_dict = {}
labels = np.zeros((self.graph.number_of_nodes(self.predict_category),)) - 1
train_idx = self.parse_idx_file(
os.path.join(self._dir, 'trainingSet.tsv'),
ent2id, label_dict, labels)
test_idx = self.parse_idx_file(
os.path.join(self._dir, 'testSet.tsv'),
ent2id, label_dict, labels)
self.train_idx = F.tensor(train_idx)
self.test_idx = F.tensor(test_idx)
self.labels = F.tensor(labels).long()
self.num_classes = len(label_dict)
def parse_idx_file(self, filename, ent2id, label_dict, labels):
idx = []
with open(filename, 'r') as f:
for i, line in enumerate(f):
if i == 0:
continue # first line is the header
sample, label = self.process_idx_file_line(line)
#person, _, label = line.strip().split('\t')
ent = self.parse_entity(sample)
entid = ent2id(str(ent))
if entid is None:
print('Warning: entity "%s" does not have any valid links associated. Ignored.' % str(ent))
else:
idx.append(entid)
lblid = _get_id(label_dict, label)
labels[entid] = lblid
return idx
@abc.abstractmethod
def parse_entity(self, term):
"""Parse one entity from an RDF term.
Return None if the term does not represent a valid entity and the
whole tuple should be ignored.
Parameters
----------
term : rdflib.term.Identifier
RDF term
Returns
-------
Entity or None
An entity.
"""
pass
@abc.abstractmethod
def parse_relation(self, term):
"""Parse one relation from an RDF term.
Return None if the term does not represent a valid relation and the
whole tuple should be ignored.
Parameters
----------
term : rdflib.term.Identifier
RDF term
Returns
-------
Relation or None
A relation
"""
pass
@abc.abstractmethod
def process_tuple(self, raw_tuple, sbj, rel, obj):
"""Process the tuple.
Return (Entity, Relation, Entity) tuple for as the final tuple.
Return None if the tuple should be ignored.
Parameters
----------
raw_tuple : tuple of rdflib.term.Identifier
(subject, predicate, object) tuple
sbj : Entity
Subject entity
rel : Relation
Relation
obj : Entity
Object entity
Returns
-------
(Entity, Relation, Entity)
The final tuple, or None if it should be ignored
"""
pass
@abc.abstractmethod
def process_idx_file_line(self, line):
"""Process one line of ``trainingSet.tsv`` or ``testSet.tsv``.
Parameters
----------
line : str
One line of the file
Returns
-------
(str, str)
One sample and its label
"""
pass
@property
@abc.abstractmethod
def predict_category(self):
"""Return the category name that has labels."""
pass
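# Helper: return the id of `key` in `dict`, assigning the next consecutive integer id if the key is unseen.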
def _get_id(dict, key):
id = dict.get(key, None)
if id is None:
id = len(dict)
dict[key] = id
return id
def save_strlist(filename, strlist):
with open(filename, 'w') as f:
for s in strlist:
f.write(s + '\n')
def load_strlist(filename):
with open(filename, 'r') as f:
ret = []
for line in f:
ret.append(line.strip())
return ret
class AIFB(RDFGraphDataset):
"""AIFB dataset.
Examples
--------
>>> dataset = dgl.data.rdf.AIFB()
>>> print(dataset.graph)
"""
employs = rdf.term.URIRef("http://swrc.ontoware.org/ontology#employs")
affiliation = rdf.term.URIRef("http://swrc.ontoware.org/ontology#affiliation")
entity_prefix = 'http://www.aifb.uni-karlsruhe.de/'
relation_prefix = 'http://swrc.ontoware.org/'
def __init__(self,
force_reload=False,
print_every=10000,
insert_reverse=True):
url = _get_dgl_url('dataset/rdf/aifb-hetero.zip')
name = 'aifb-hetero'
super(AIFB, self).__init__(url, name,
force_reload=force_reload,
print_every=print_every,
insert_reverse=insert_reverse)
def parse_entity(self, term):
if isinstance(term, rdf.Literal):
return Entity(id=str(term), cls="_Literal")
if isinstance(term, rdf.BNode):
return None
entstr = str(term)
if entstr.startswith(self.entity_prefix):
sp = entstr.split('/')
return Entity(id=sp[5], cls=sp[3])
else:
return None
def parse_relation(self, term):
if term == self.employs or term == self.affiliation:
return None
relstr = str(term)
if relstr.startswith(self.relation_prefix):
return Relation(cls=relstr.split('/')[3])
else:
relstr = relstr.split('/')[-1]
return Relation(cls=relstr)
def process_tuple(self, raw_tuple, sbj, rel, obj):
if sbj is None or rel is None or obj is None:
return None
return (sbj, rel, obj)
def process_idx_file_line(self, line):
person, _, label = line.strip().split('\t')
return person, label
@property
def predict_category(self):
return 'Personen'
class MUTAG(RDFGraphDataset):
"""MUTAG dataset.
Examples
--------
>>> dataset = dgl.data.rdf.MUTAG()
>>> print(dataset.graph)
"""
d_entity = re.compile("d[0-9]")
bond_entity = re.compile("bond[0-9]")
is_mutagenic = rdf.term.URIRef("http://dl-learner.org/carcinogenesis#isMutagenic")
rdf_type = rdf.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
rdf_subclassof = rdf.term.URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf")
rdf_domain = rdf.term.URIRef("http://www.w3.org/2000/01/rdf-schema#domain")
entity_prefix = 'http://dl-learner.org/carcinogenesis#'
relation_prefix = entity_prefix
def __init__(self,
force_reload=False,
print_every=10000,
insert_reverse=True):
url = _get_dgl_url('dataset/rdf/mutag-hetero.zip')
name = 'mutag-hetero'
super(MUTAG, self).__init__(url, name,
force_reload=force_reload,
print_every=print_every,
insert_reverse=insert_reverse)
def parse_entity(self, term):
if isinstance(term, rdf.Literal):
return Entity(id=str(term), cls="_Literal")
elif isinstance(term, rdf.BNode):
return None
entstr = str(term)
if entstr.startswith(self.entity_prefix):
inst = entstr[len(self.entity_prefix):]
if self.d_entity.match(inst):
cls = 'd'
elif self.bond_entity.match(inst):
cls = 'bond'
else:
cls = None
return Entity(id=inst, cls=cls)
else:
return None
def parse_relation(self, term):
if term == self.is_mutagenic:
return None
relstr = str(term)
if relstr.startswith(self.relation_prefix):
cls = relstr[len(self.relation_prefix):]
return Relation(cls=cls)
else:
relstr = relstr.split('/')[-1]
return Relation(cls=relstr)
def process_tuple(self, raw_tuple, sbj, rel, obj):
if sbj is None or rel is None or obj is None:
return None
if not raw_tuple[1].startswith('http://dl-learner.org/carcinogenesis#'):
obj.cls = 'SCHEMA'
if sbj.cls is None:
sbj.cls = 'SCHEMA'
if obj.cls is None:
obj.cls = rel.cls
assert sbj.cls is not None and obj.cls is not None
return (sbj, rel, obj)
def process_idx_file_line(self, line):
bond, _, label = line.strip().split('\t')
return bond, label
@property
def predict_category(self):
return 'd'
class BGS(RDFGraphDataset):
"""BGS dataset.
BGS namespace convention:
http://data.bgs.ac.uk/(ref|id)/<Major Concept>/<Sub Concept>/INSTANCE
We ignored all literal nodes and the relations connecting them in the
output graph. We also ignored the relation used to mark whether a
term is CURRENT or DEPRECATED.
Examples
--------
>>> dataset = dgl.data.rdf.BGS()
>>> print(dataset.graph)
"""
lith = rdf.term.URIRef("http://data.bgs.ac.uk/ref/Lexicon/hasLithogenesis")
entity_prefix = 'http://data.bgs.ac.uk/'
status_prefix = 'http://data.bgs.ac.uk/ref/CurrentStatus'
relation_prefix = 'http://data.bgs.ac.uk/ref'
def __init__(self,
force_reload=False,
print_every=10000,
insert_reverse=True):
url = _get_dgl_url('dataset/rdf/bgs-hetero.zip')
name = 'bgs-hetero'
super(BGS, self).__init__(url, name,
force_reload=force_reload,
print_every=print_every,
insert_reverse=insert_reverse)
def parse_entity(self, term):
if isinstance(term, rdf.Literal):
return None
elif isinstance(term, rdf.BNode):
return None
entstr = str(term)
if entstr.startswith(self.status_prefix):
return None
if entstr.startswith(self.entity_prefix):
sp = entstr.split('/')
if len(sp) != 7:
return None
# instance
cls = '%s/%s' % (sp[4], sp[5])
inst = sp[6]
return Entity(id=inst, cls=cls)
else:
return None
def parse_relation(self, term):
if term == self.lith:
return None
relstr = str(term)
if relstr.startswith(self.relation_prefix):
sp = relstr.split('/')
if len(sp) < 6:
return None
assert len(sp) == 6, relstr
cls = '%s/%s' % (sp[4], sp[5])
return Relation(cls=cls)
else:
relstr = relstr.replace('.', '_')
return Relation(cls=relstr)
def process_tuple(self, raw_tuple, sbj, rel, obj):
if sbj is None or rel is None or obj is None:
return None
return (sbj, rel, obj)
def process_idx_file_line(self, line):
_, rock, label = line.strip().split('\t')
return rock, label
@property
def predict_category(self):
return 'Lexicon/NamedRockUnit'
class AM(RDFGraphDataset):
"""AM dataset.
Namespace convention:
Instance: http://purl.org/collections/nl/am/<type>-<id>
Relation: http://purl.org/collections/nl/am/<name>
We ignored all literal nodes and the relations connecting them in the
output graph.
Examples
--------
>>> dataset = dgl.data.rdf.AM()
>>> print(dataset.graph)
"""
objectCategory = rdf.term.URIRef("http://purl.org/collections/nl/am/objectCategory")
material = rdf.term.URIRef("http://purl.org/collections/nl/am/material")
entity_prefix = 'http://purl.org/collections/nl/am/'
relation_prefix = entity_prefix
def __init__(self,
force_reload=False,
print_every=10000,
insert_reverse=True):
url = _get_dgl_url('dataset/rdf/am-hetero.zip')
name = 'am-hetero'
super(AM, self).__init__(url, name,
force_reload=force_reload,
print_every=print_every,
insert_reverse=insert_reverse)
def parse_entity(self, term):
if isinstance(term, rdf.Literal):
return None
elif isinstance(term, rdf.BNode):
return Entity(id=str(term), cls='_BNode')
entstr = str(term)
if entstr.startswith(self.entity_prefix):
sp = entstr.split('/')
assert len(sp) == 7, entstr
spp = sp[6].split('-')
if len(spp) == 2:
# instance
cls, inst = spp
else:
cls = 'TYPE'
inst = spp
return Entity(id=inst, cls=cls)
else:
return None
def parse_relation(self, term):
if term == self.objectCategory or term == self.material:
return None
relstr = str(term)
if relstr.startswith(self.relation_prefix):
sp = relstr.split('/')
assert len(sp) == 7, relstr
cls = sp[6]
return Relation(cls=cls)
else:
relstr = relstr.replace('.', '_')
return Relation(cls=relstr)
def process_tuple(self, raw_tuple, sbj, rel, obj):
if sbj is None or rel is None or obj is None:
return None
return (sbj, rel, obj)
def process_idx_file_line(self, line):
proxy, _, label = line.strip().split('\t')
return proxy, label
@property
def predict_category(self):
return 'proxy'
if __name__ == '__main__':
AIFB()
|
{"hexsha": "ce223ba004fe589837c77572cfc6b4e184964e57", "size": 23627, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/dgl/data/rdf.py", "max_stars_repo_name": "vipermu/dgl", "max_stars_repo_head_hexsha": "c9ac6c9889423019977e431c8b74a7b6c70cdc01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-27T16:31:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T16:27:51.000Z", "max_issues_repo_path": "python/dgl/data/rdf.py", "max_issues_repo_name": "vipermu/dgl", "max_issues_repo_head_hexsha": "c9ac6c9889423019977e431c8b74a7b6c70cdc01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/dgl/data/rdf.py", "max_forks_repo_name": "vipermu/dgl", "max_forks_repo_head_hexsha": "c9ac6c9889423019977e431c8b74a7b6c70cdc01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-03-17T11:21:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-02T09:42:24.000Z", "avg_line_length": 33.6566951567, "max_line_length": 111, "alphanum_fraction": 0.5687137597, "include": true, "reason": "import numpy,import networkx", "num_tokens": 5528}
|
from newspaper import Article
import random
import string
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Download the punkt package
nltk.download('punkt', quiet=True)
article = Article('https://www.mayoclinic.org/diseases-conditions/depression/symptoms-causes/syc-20356007')
article.download()
article.parse()
article.nlp()
corpus = article.text
#print(corpus)
text = corpus
#Creating a list of sentences
sentence_list = nltk.sent_tokenize(text)
#print(sentence_list)
# Function to return a random greeting response to a user's greeting
def greeting_response(text):
text = text.lower()
# Bot's greeting responses
bot_greetings = ['howdy', 'hi', 'holla', 'hey you', 'hi there']
# User's greetings
user_greetings = ['hi', 'hey', 'hello', 'hola', 'greetings', 'wassup']
for word in text.split():
if word in user_greetings:
return random.choice(bot_greetings)
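# Return the indices of list_var sorted by descending value, so the most similar sentences come first.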
def index_sort(list_var):
length = len(list_var)
list_index = list(range(0, length))
x = list_var
for i in range(length):
for j in range(length):
if x[list_index[i]] > x[list_index[j]]:
#swap
temp = list_index[i]
list_index[i] = list_index[j]
list_index[j] = temp
return list_index
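# Build a reply from the (up to three) article sentences most similar to the user's input, using a bag-of-words count matrix and cosine similarity.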
def bot_response(user_input):
user_input = user_input.lower()
sentence_list.append(user_input)
bot_response = ''
cm = CountVectorizer().fit_transform(sentence_list) #cm = count matrix
similarity_scores = cosine_similarity(cm[-1], cm)
similarity_scores_list = similarity_scores.flatten()
index = index_sort(similarity_scores_list)
index = index[1:]
response_flag = 0
d = 0
for i in range(len(index)):
if similarity_scores_list[index[i]] > 0.0:
bot_response = bot_response+' '+sentence_list[index[i]]
response_flag = 1
d = d+1
if d > 2:
break
if response_flag == 0:
bot_response = bot_response + ' ' + "I apologize, I don't understand"
sentence_list.remove(user_input)
return bot_response
Intro = 'Doc Bot: I am Doctor Bot or Doc Bot for short. I will answer your questions about depression. To end the conversation, just type bye'
#Starting the chat
print(Intro)
exit_list = ['exit', 'see you later', 'bye', 'quit', 'break']
while(True):
user_input = input()
if user_input.lower() in exit_list:
print('Doc Bot: Chat with you later!')
break
else:
if greeting_response(user_input) is not None:
print('Doc Bot: ' + greeting_response(user_input))
else:
print('Doc Bot: ' + bot_response(user_input))
|
{"hexsha": "4714973d1b1dbb64d7f2e87f385f2bd08aa52f6c", "size": 2826, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/Chat_bot.py", "max_stars_repo_name": "Deborah-code/Chatbot", "max_stars_repo_head_hexsha": "db211bdd7032018c69e1c34fd933f3b81a47e208", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-06T09:50:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-06T09:50:59.000Z", "max_issues_repo_path": "Scripts/Chat_bot.py", "max_issues_repo_name": "Deborah-code/Chatbot", "max_issues_repo_head_hexsha": "db211bdd7032018c69e1c34fd933f3b81a47e208", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/Chat_bot.py", "max_forks_repo_name": "Deborah-code/Chatbot", "max_forks_repo_head_hexsha": "db211bdd7032018c69e1c34fd933f3b81a47e208", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7473684211, "max_line_length": 142, "alphanum_fraction": 0.6666666667, "include": true, "reason": "import numpy", "num_tokens": 664}
|
import numpy as np
import math
scale = 255.0/32768.0
scale_1 = 32768.0/255.0
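# mu-law style companding between 16-bit linear samples and 256 discrete levels:
# lin2ulaw compresses, ulaw2lin expands back.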
def ulaw2lin(u):
u = u - 128
s = np.sign(u)
u = np.abs(u)
return s*scale_1*(np.exp(u/128.*math.log(256))-1)
def lin2ulaw(x):
s = np.sign(x)
x = np.abs(x)
u = (s*(128*np.log(1+scale*x)/math.log(256)))
u = np.clip(128 + np.round(u), 0, 255)
return u.astype('int16')
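# Minimal round-trip sanity check (illustrative addition, not part of the original
# module): lin2ulaw followed by ulaw2lin should approximately recover the input,
# up to mu-law quantization error.
if __name__ == '__main__':
    x = np.linspace(-30000.0, 30000.0, 7)
    print(np.max(np.abs(ulaw2lin(lin2ulaw(x)) - x)))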
|
{"hexsha": "b79d4315bf1fa7cfc1236c71e79762e0511713fb", "size": 381, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ulaw.py", "max_stars_repo_name": "Mozilla-GitHub-Standards/ee089678ec78c1555fc3f1eff2962a95ae31dcf042f14e37b019b4fbb4b13288", "max_stars_repo_head_hexsha": "5d4d89070dc8da54a716bb3d0db7f394334b3325", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 865, "max_stars_repo_stars_event_min_datetime": "2018-10-26T20:59:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T09:42:39.000Z", "max_issues_repo_path": "src/ulaw.py", "max_issues_repo_name": "azraelkuan/LPCNet", "max_issues_repo_head_hexsha": "3a7ef33dcbade95ec08b85839b6268e35b8d3366", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 167, "max_issues_repo_issues_event_min_datetime": "2018-10-30T07:53:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T08:24:03.000Z", "max_forks_repo_path": "src/ulaw.py", "max_forks_repo_name": "azraelkuan/LPCNet", "max_forks_repo_head_hexsha": "3a7ef33dcbade95ec08b85839b6268e35b8d3366", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 267, "max_forks_repo_forks_event_min_datetime": "2018-10-30T15:46:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T09:23:32.000Z", "avg_line_length": 19.05, "max_line_length": 53, "alphanum_fraction": 0.5695538058, "include": true, "reason": "import numpy", "num_tokens": 151}
|
"""
Estimate a linear model with high dimensional categorical variables / instrumental variables
### Arguments
* `df::AbstractDataFrame`
* `model::Model`: A model created using [`@model`](@ref)
* `save::Union{Bool, Symbol} = false`: Should residuals and (if any) estimated fixed effects be saved in a dataframe? Use `save = :residuals` to only save residuals. Use `save = :fe` to only save fixed effects.
* `method::Symbol = :lsmr`: Method to demean regressors. `:lsmr` is akin to conjugate gradient descent. For a parallel version use `:lsmr_parallel`; for a multi-threaded version use `:lsmr_threads`. Other choices are `:qr` and `:cholesky` (factorization methods)
* `contrasts::Dict = Dict()` An optional Dict of contrast codings for each categorical variable in the `formula`. Any unspecified variables will have `DummyCoding`.
* `maxiter::Integer = 10000`: Maximum number of iterations
* `tol::Real =1e-8`: Tolerance
### Details
Models with instrumental variables are estimated using 2SLS. `reg` tests for weak instruments by computing the Kleibergen-Paap rk Wald F statistic, a generalization of the Cragg-Donald Wald F statistic for non-i.i.d. errors. The statistic is similar to the one returned by the Stata command `ivreg2`.
### Examples
```julia
using DataFrames, RDatasets, FixedEffectModels
df = dataset("plm", "Cigar")
df[:StateC] = categorical(df[:State])
df[:YearC] = categorical(df[:Year])
reg(df, @model(Sales ~ Price, fe = StateC + YearC))
reg(df, @model(Sales ~ NDI, fe = StateC + StateC&Year))
reg(df, @model(Sales ~ NDI, fe = StateC*Year))
reg(df, @model(Sales ~ (Price ~ Pimin)))
reg(df, @model(Sales ~ Price, weights = Pop))
reg(df, @model(Sales ~ NDI, subset = State .< 30))
reg(df, @model(Sales ~ NDI, vcov = robust))
reg(df, @model(Sales ~ NDI, vcov = cluster(StateC)))
reg(df, @model(Sales ~ NDI, vcov = cluster(StateC + YearC)))
reg(df, @model(Sales ~ YearC), contrasts = Dict(:YearC => DummyCoding(base = 80)))
```
"""
function reg(df::AbstractDataFrame, m::Model; kwargs...)
reg(df, m.f; m.dict..., kwargs...)
end
function reg(df::AbstractDataFrame, f::Formula;
fe::Union{Symbol, Expr, Nothing} = nothing,
vcov::Union{Symbol, Expr, Nothing} = :(simple()),
weights::Union{Symbol, Expr, Nothing} = nothing,
subset::Union{Symbol, Expr, Nothing} = nothing,
maxiter::Integer = 10000, contrasts::Dict = Dict(),
tol::Real= 1e-8, df_add::Integer = 0,
save::Union{Bool, Symbol} = false, method::Symbol = :lsmr, drop_singletons = true
)
feformula = fe
if isa(vcov, Symbol)
vcovformula = VcovFormula(Val{vcov})
else
vcovformula = VcovFormula(Val{vcov.args[1]}, (vcov.args[i] for i in 2:length(vcov.args))...)
end
##############################################################################
##
## Parse formula
##
##############################################################################
rf = deepcopy(f)
(has_iv, iv_formula, iv_terms, endo_formula, endo_terms) = decompose_iv!(rf)
rt = Terms(rf)
has_absorb = feformula != nothing
if has_absorb
# check depth 1 symbols in original formula are all CategoricalVector
if isa(feformula, Symbol)
x = feformula
!isa(df[x], CategoricalVector) && error("$x should be CategoricalVector")
elseif feformula.args[1] == :+
x = feformula.args
for i in 2:length(x)
isa(x[i], Symbol) && !isa(df[x[i]], CategoricalVector) && error("$(x[i]) should be CategoricalVector")
end
end
end
has_weights = (weights != nothing)
##############################################################################
##
## Save keyword argument
##
##############################################################################
if !isa(save, Bool)
if save ∉ (:residuals, :fe)
error("the save keyword argument must be a Bool or a Symbol equal to :residuals or :fe")
end
end
save_residuals = (save == :residuals) | (save == true)
save_fe = (save == :fe) | ((save == true) & has_absorb)
##############################################################################
##
## Construct new dataframe after removing missing values
##
##############################################################################
# create a dataframe without missing values & negative weights
vars = allvars(rf)
iv_vars = allvars(iv_formula)
endo_vars = allvars(endo_formula)
absorb_vars = allvars(feformula)
vcov_vars = allvars(vcovformula)
# create a dataframe without missing values & negative weights
all_vars = vcat(vars, vcov_vars, absorb_vars, endo_vars, iv_vars)
all_vars = unique(Symbol.(all_vars))
esample = completecases(df[all_vars])
if has_weights
esample .&= isnaorneg(df[weights])
end
if subset != nothing
subset = eval(evaluate_subset(df, subset))
if length(subset) != size(df, 1)
error("df has $(size(df, 1)) rows but the subset vector has $(length(subset)) elements")
end
esample .&= convert(BitArray, subset)
end
if has_absorb
fes, ids = parse_fixedeffect(df, Terms(@eval(@formula(nothing ~ $(feformula)))))
if drop_singletons
for fe in fes
@show sum(esample)
remove_singletons!(esample, fe)
@show sum(esample)
end
end
end
nobs = sum(esample)
(nobs > 0) || error("sample is empty")
# Compute weights
sqrtw = get_weights(df, esample, weights)
# Compute pfe, a FixedEffectMatrix
has_intercept = rt.intercept
if has_absorb
# in case some FixedEffect does not have interaction, remove the intercept
if any([isa(fe.interaction, Ones) for fe in fes])
rt.intercept = false
has_intercept = true
end
fes = FixedEffect[_subset(fe, esample) for fe in fes]
pfe = FixedEffectMatrix(fes, sqrtw, Val{method})
end
# Compute data for std errors
vcov_method_data = VcovMethod(df[esample, unique(Symbol.(vcov_vars))], vcovformula)
##############################################################################
##
## Dataframe --> Matrix
##
##############################################################################
mf = ModelFrame2(rt, df, esample; contrasts = contrasts)
# Obtain y
# for a Vector{Float64}, convert(Vector{Float64}, y) aliases y
y = convert(Vector{Float64}, model_response(mf)[:])
yname = rt.eterms[1]
y .= y .* sqrtw
# Obtain X
coef_names = coefnames(mf)
has_exo = !isempty(mf.terms.terms) | mf.terms.intercept
if has_exo
Xexo = ModelMatrix(mf).m
Xexo .= Xexo .* sqrtw
else
Xexo = Matrix{Float64}(undef, nobs, 0)
end
if has_iv
mf = ModelFrame2(endo_terms, df, esample)
coef_names = vcat(coef_names, coefnames(mf))
Xendo = ModelMatrix(mf).m
Xendo .= Xendo .* sqrtw
mf = ModelFrame2(iv_terms, df, esample)
Z = ModelMatrix(mf).m
Z .= Z .* sqrtw
else
Xendo = Matrix{Float64}(undef, nobs, 0)
Z = Matrix{Float64}(undef, nobs, 0)
end
# compute tss now before potentially demeaning y
tss = compute_tss(y, has_intercept, sqrtw)
if has_absorb
# used to compute tss even without save_fe
if save_fe
oldy = deepcopy(y)
oldX = hcat(Xexo, Xendo)
end
# initialize iterations and converged
iterations = Int[]
convergeds = Bool[]
y, b, c = solve_residuals!(y, pfe; maxiter = maxiter, tol = tol)
append!(iterations, b)
append!(convergeds, c)
Xexo, b, c = solve_residuals!(Xexo, pfe; maxiter = maxiter, tol = tol)
append!(iterations, b)
append!(convergeds, c)
Xendo, b, c = solve_residuals!(Xendo, pfe; maxiter = maxiter, tol = tol)
append!(iterations, b)
append!(convergeds, c)
Z, b, c = solve_residuals!(Z, pfe; maxiter = maxiter, tol = tol)
append!(iterations, b)
append!(convergeds, c)
iterations = maximum(iterations)
converged = all(convergeds)
if converged == false
@warn "convergence not achieved in $(iterations) iterations; try increasing maxiter or decreasing tol."
end
end
##############################################################################
##
## Get Linearly Independent Components of Matrix
##
##############################################################################
# Compute linearly independent columns + create the Xhat matrix
if has_iv
if size(Z, 2) < size(Xendo, 2)
error("Model not identified. There must be at least as many ivs as endogeneneous variables")
end
# get linearly independent columns
# note that I do it after residualizing
baseall = basecol(Z, Xexo, Xendo)
basecolXexo = baseall[(size(Z, 2)+1):(size(Z, 2) + size(Xexo, 2))]
basecolXendo = baseall[(size(Z, 2) + size(Xexo, 2) + 1):end]
Z = getcols(Z, baseall[1:size(Z, 2)])
Xexo = getcols(Xexo, basecolXexo)
Xendo = getcols(Xendo, basecolXendo)
basecoef = vcat(basecolXexo, basecolXendo)
# Build
X = hcat(Xexo, Xendo)
newZ = hcat(Xexo, Z)
crossz = cholesky!(Symmetric(newZ' * newZ))
Pi = crossz \ (newZ' * Xendo)
Xhat = hcat(Xexo, newZ * Pi)
# prepare residuals used for first stage F statistic
## partial out Xendo in place wrt (Xexo, Z)
Xendo_res = gemm!('N', 'N', -1.0, newZ, Pi, 1.0, Xendo)
## partial out Z in place wrt Xexo
Pi2 = cholesky!(Symmetric(Xexo' * Xexo)) \ (Xexo' * Z)
Z_res = gemm!('N', 'N', -1.0, Xexo, Pi2, 1.0, Z)
else
# get linearly independent columns
basecolXexo = basecol(Xexo)
Xexo = getcols(Xexo, basecolXexo)
Xhat = Xexo
X = Xexo
basecoef = basecolXexo
end
##############################################################################
##
## Do the regression
##
##############################################################################
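# Solve the normal equations (Xhat'Xhat) * coef = Xhat' * y via a Cholesky factorization.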
crossx = cholesky!(Symmetric(Xhat' * Xhat))
coef = crossx \ (Xhat' * y)
residuals = y - X * coef
##############################################################################
##
## Optionally save objects in a new dataframe
##
##############################################################################
augmentdf = DataFrame()
if save_residuals
augmentdf[:residuals] = Vector{Union{Missing, Float64}}(missing, length(esample))
augmentdf[esample, :residuals] = residuals ./ sqrtw
end
if save_fe
oldX = getcols(oldX, basecoef)
newfes, b, c = solve_coefficients!(oldy - oldX * coef, pfe; tol = tol, maxiter = maxiter)
for j in 1:length(fes)
augmentdf[ids[j]] = Vector{Union{Float64, Missing}}(missing, length(esample))
augmentdf[esample, ids[j]] = newfes[j]
end
end
##############################################################################
##
## Test Statistics
##
##############################################################################
# Compute degrees of freedom
df_intercept = 0
if has_absorb || rt.intercept
df_intercept = 1
end
df_absorb = 0
if has_absorb
for fe in fes
# adjust degree of freedom only if fe is not fully nested in a cluster variable:
if isa(vcovformula, VcovClusterFormula)
if any(isnested(fe, vcov_method_data.clusters[v]) for v in names(vcov_method_data.clusters))
df_absorb = 1 # if fe is nested you still lose 1 degree of freedom
break
end
end
# only count groups that exist
df_absorb += length(Set(fe.refs))
end
end
dof_residual = max(1, nobs - size(X, 2) - df_absorb - df_add)
# Compute rss, tss, r2, r2 adjusted
rss = sum(abs2, residuals)
mss = tss - rss
r2 = 1 - rss / tss
adjr2 = 1 - rss / tss * (nobs - has_intercept) / dof_residual
if has_absorb
r2_within = 1 - rss / compute_tss(y, rt.intercept, sqrtw)
end
# Compute standard error
vcov_data = VcovData(Xhat, crossx, residuals, dof_residual)
matrix_vcov = vcov!(vcov_method_data, vcov_data)
# Compute Fstat
(F, p) = compute_Fstat(coef, matrix_vcov, nobs, rt.intercept, vcov_method_data, vcov_data)
# Compute Fstat of First Stage
if has_iv
Pip = Pi[(size(Pi, 1) - size(Z_res, 2) + 1):end, :]
(F_kp, p_kp) = ranktest!(Xendo_res, Z_res, Pip,
vcov_method_data, size(X, 2), df_absorb)
end
##############################################################################
##
## Return regression result
##
##############################################################################
# add omitted variables
if !all(basecoef)
newcoef = fill(zero(Float64), length(basecoef))
newmatrix_vcov = fill(NaN, (length(basecoef), length(basecoef)))
newindex = [searchsortedfirst(cumsum(basecoef), i) for i in 1:length(coef)]
for i in 1:length(coef)
newcoef[newindex[i]] = coef[i]
for j in 1:length(coef)
newmatrix_vcov[newindex[i], newindex[j]] = matrix_vcov[i, j]
end
end
coef = newcoef
matrix_vcov = newmatrix_vcov
end
# return
if !has_iv && !has_absorb
return RegressionResult(coef, matrix_vcov, esample, augmentdf,
coef_names, yname, f, nobs, dof_residual,
rss, tss, r2, adjr2, F, p)
elseif has_iv && !has_absorb
return RegressionResultIV(coef, matrix_vcov, esample, augmentdf,
coef_names, yname, f, nobs, dof_residual,
rss, tss, r2, adjr2, F, p, F_kp, p_kp)
elseif !has_iv && has_absorb
return RegressionResultFE(coef, matrix_vcov, esample, augmentdf,
coef_names, yname, f, feformula, nobs, dof_residual,
rss, tss, r2, adjr2, r2_within, F, p, iterations, converged)
elseif has_iv && has_absorb
return RegressionResultFEIV(coef, matrix_vcov, esample, augmentdf,
coef_names, yname, f, feformula, nobs, dof_residual,
rss, tss, r2, adjr2, r2_within, F, p, F_kp, p_kp,
iterations, converged)
end
end
##############################################################################
##
## Fstat
##
##############################################################################
function compute_Fstat(coef::Vector{Float64}, matrix_vcov::Matrix{Float64},
nobs::Int, hasintercept::Bool,
vcov_method_data::AbstractVcovMethod, vcov_data::VcovData)
coefF = copy(coef)
# TODO: check I can't do better
length(coef) == hasintercept && return NaN, NaN
if hasintercept
coefF = coefF[2:end]
matrix_vcov = matrix_vcov[2:end, 2:end]
end
F = (Diagonal(coefF) * (matrix_vcov \ Diagonal(coefF)))[1]
df_ans = df_FStat(vcov_method_data, vcov_data, hasintercept)
dist = FDist(nobs - hasintercept, max(df_ans, 1))
return F, ccdf(dist, F)
end
function compute_tss(y::Vector{Float64}, hasintercept::Bool, sqrtw::AbstractVector)
if hasintercept
tss = zero(Float64)
m = (mean(y) / sum(sqrtw) * length(y))::Float64
@inbounds @simd for i in 1:length(y)
tss += abs2(y[i] - sqrtw[i] * m)
end
else
tss = sum(abs2, y)
end
return tss
end
##############################################################################
##
## Syntax without keywords
##
##############################################################################
function evaluate_subset(df, ex::Expr)
if ex.head == :call
return Expr(ex.head, ex.args[1], (evaluate_subset(df, ex.args[i]) for i in 2:length(ex.args))...)
else
return Expr(ex.head, (evaluate_subset(df, ex.args[i]) for i in 1:length(ex.args))...)
end
end
evaluate_subset(df, ex::Symbol) = df[ex]
evaluate_subset(df, ex) = ex
|
{"hexsha": "a90f70a4cbd3bb380924d54ca7e6f94b3c9d2426", "size": 16564, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/reg.jl", "max_stars_repo_name": "maxnorton/FixedEffectModels.jl", "max_stars_repo_head_hexsha": "b97a25dfbe2cc5c325df6133ead55b2d0e5609fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/reg.jl", "max_issues_repo_name": "maxnorton/FixedEffectModels.jl", "max_issues_repo_head_hexsha": "b97a25dfbe2cc5c325df6133ead55b2d0e5609fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/reg.jl", "max_forks_repo_name": "maxnorton/FixedEffectModels.jl", "max_forks_repo_head_hexsha": "b97a25dfbe2cc5c325df6133ead55b2d0e5609fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8908685969, "max_line_length": 299, "alphanum_fraction": 0.5391209853, "num_tokens": 4295}
|
import os
import numpy as np
import matplotlib.pyplot as plt
RESULTS_FOLDER = './results/'
NUM_BINS = 100
BITS_IN_BYTE = 8.0
MILLISEC_IN_SEC = 1000.0
M_IN_B = 1000000.0
VIDEO_LEN = 64
VIDEO_BIT_RATE = [1500, 4900, 8200, 11700, 32800, 152400]
COLOR_MAP = plt.cm.jet #nipy_spectral, Set1,Paired
SIM_DP = 'sim_dp'
SCHEMES = ['BB', 'RB', 'fastMPC', 'robustMPC']
def main():
time_all = {}
bit_rate_all = {}
buff_all = {}
bw_all = {}
raw_reward_all = {}
for scheme in SCHEMES:
time_all[scheme] = {}
raw_reward_all[scheme] = {}
bit_rate_all[scheme] = {}
buff_all[scheme] = {}
bw_all[scheme] = {}
log_files = os.listdir(RESULTS_FOLDER)
for log_file in log_files:
time_ms = []
bit_rate = []
buff = []
bw = []
reward = []
print(log_file)
with open(RESULTS_FOLDER + log_file, 'rb') as f:
# if SIM_DP in log_file:
# for line in f:
# parse = line.split()
# if len(parse) == 1:
# reward = float(parse[0])
# elif len(parse) >= 6:
# time_ms.append(float(parse[3]))
# bit_rate.append(VIDEO_BIT_RATE[int(parse[6])])
# buff.append(float(parse[4]))
# bw.append(float(parse[5]))
# else:
for line in f:
parse = line.split()
if len(parse) <= 1:
break
time_ms.append(float(parse[0]))
bit_rate.append(int(parse[1]))
buff.append(float(parse[2]))
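# bandwidth estimate: bytes per millisecond, converted to Mbit per second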
bw.append(float(parse[4]) / float(parse[5]) * BITS_IN_BYTE * MILLISEC_IN_SEC / M_IN_B)
reward.append(float(parse[6]))
# if SIM_DP in log_file:
# time_ms = time_ms[::-1]
# bit_rate = bit_rate[::-1]
# buff = buff[::-1]
# bw = bw[::-1]
time_ms = np.array(time_ms)
time_ms -= time_ms[0]
# print log_file
for scheme in SCHEMES:
if scheme in log_file:
time_all[scheme][log_file[len('log_' + str(scheme) + '_'):]] = time_ms
bit_rate_all[scheme][log_file[len('log_' + str(scheme) + '_'):]] = bit_rate
buff_all[scheme][log_file[len('log_' + str(scheme) + '_'):]] = buff
bw_all[scheme][log_file[len('log_' + str(scheme) + '_'):]] = bw
raw_reward_all[scheme][log_file[len('log_' + str(scheme) + '_'):]] = reward
break
# ---- ---- ---- ----
# Reward records
# ---- ---- ---- ----
log_file_all = []
reward_all = {}
for scheme in SCHEMES:
reward_all[scheme] = []
for l in time_all[SCHEMES[0]]:
schemes_check = True
# for scheme in SCHEMES:
# if l not in time_all[scheme] or len(time_all[scheme][l]) < VIDEO_LEN:
# schemes_check = False
# break
if schemes_check:
log_file_all.append(l)
for scheme in SCHEMES:
if scheme == SIM_DP:
reward_all[scheme].append(raw_reward_all[scheme][l])
else:
reward_all[scheme].append(np.sum(raw_reward_all[scheme][l][1:VIDEO_LEN]))
mean_rewards = {}
for scheme in SCHEMES:
mean_rewards[scheme] = np.mean(reward_all[scheme])
fig = plt.figure()
ax = fig.add_subplot(111)
for scheme in SCHEMES:
ax.plot(reward_all[scheme])
SCHEMES_REW = []
for scheme in SCHEMES:
SCHEMES_REW.append(scheme + ': ' + str(mean_rewards[scheme]))
colors = [COLOR_MAP(i) for i in np.linspace(0, 1, len(ax.lines))]
for i,j in enumerate(ax.lines):
j.set_color(colors[i])
ax.legend(SCHEMES_REW, loc=4)
plt.ylabel('total reward')
plt.xlabel('trace index')
plt.show()
# ---- ---- ---- ----
# CDF
# ---- ---- ---- ----
fig = plt.figure()
ax = fig.add_subplot(111)
for scheme in SCHEMES:
values, base = np.histogram(reward_all[scheme], bins=NUM_BINS)
cumulative = np.cumsum(values)
ax.plot(base[:-1], cumulative)
colors = [COLOR_MAP(i) for i in np.linspace(0, 1, len(ax.lines))]
for i,j in enumerate(ax.lines):
j.set_color(colors[i])
ax.legend(SCHEMES_REW, loc=4)
plt.ylabel('CDF')
plt.xlabel('total reward')
plt.show()
# ---- ---- ---- ----
# check each trace
# ---- ---- ---- ----
for l in time_all[SCHEMES[0]]:
schemes_check = True
for scheme in SCHEMES:
if l not in time_all[scheme] or len(time_all[scheme][l]) < VIDEO_LEN:
schemes_check = False
break
if schemes_check:
fig = plt.figure()
ax = fig.add_subplot(311)
for scheme in SCHEMES:
ax.plot(time_all[scheme][l][:VIDEO_LEN], bit_rate_all[scheme][l][:VIDEO_LEN])
colors = [COLOR_MAP(i) for i in np.linspace(0, 1, len(ax.lines))]
for i,j in enumerate(ax.lines):
j.set_color(colors[i])
plt.title(l)
plt.ylabel('bit rate selection (kbps)')
ax = fig.add_subplot(312)
for scheme in SCHEMES:
ax.plot(time_all[scheme][l][:VIDEO_LEN], buff_all[scheme][l][:VIDEO_LEN])
colors = [COLOR_MAP(i) for i in np.linspace(0, 1, len(ax.lines))]
for i,j in enumerate(ax.lines):
j.set_color(colors[i])
plt.ylabel('buffer size (sec)')
ax = fig.add_subplot(313)
for scheme in SCHEMES:
ax.plot(time_all[scheme][l][:VIDEO_LEN], bw_all[scheme][l][:VIDEO_LEN])
colors = [COLOR_MAP(i) for i in np.linspace(0, 1, len(ax.lines))]
for i,j in enumerate(ax.lines):
j.set_color(colors[i])
plt.ylabel('bandwidth (mbps)')
plt.xlabel('time (sec)')
SCHEMES_REW = []
for scheme in SCHEMES:
if scheme == SIM_DP:
SCHEMES_REW.append(scheme + ': ' + str(raw_reward_all[scheme][l]))
else:
SCHEMES_REW.append(scheme + ': ' + str(np.sum(raw_reward_all[scheme][l][1:VIDEO_LEN])))
ax.legend(SCHEMES_REW, loc=9, bbox_to_anchor=(0.5, -0.1), ncol=int(np.ceil(len(SCHEMES) / 2.0)))
plt.show()
if __name__ == '__main__':
main()
|
{"hexsha": "fc1577804d65b36b5888b7aedb81e894a07b5b95", "size": 5382, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_exp/plot_results.py", "max_stars_repo_name": "ahmadhassan997/pensieve", "max_stars_repo_head_hexsha": "d54f16bc398d2f24c7b0525dad90df002b31506a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_exp/plot_results.py", "max_issues_repo_name": "ahmadhassan997/pensieve", "max_issues_repo_head_hexsha": "d54f16bc398d2f24c7b0525dad90df002b31506a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_exp/plot_results.py", "max_forks_repo_name": "ahmadhassan997/pensieve", "max_forks_repo_head_hexsha": "d54f16bc398d2f24c7b0525dad90df002b31506a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 99, "alphanum_fraction": 0.6291341509, "include": true, "reason": "import numpy", "num_tokens": 1716}
|
"""
author: muzexlxl
email: muzexlxl@foxmail.com
time series factors
bias factors emit signals in {-1, 0, 1}
neut factors emit signals in {0, 1}
"""
import pandas as pd
import numpy as np
from datetime import datetime
import collections
import math
# import src.data.clickhouse_control as cc
class FactorX:
def __init__(self, id: list, timeframe: str, data_source: str, start: str, end: str):
# self.db_conn = cc.ClickHouse(data_source)
# self.db_conn = 0
# self.id = id
# if self.id[0] == 'symbol':
# self.database = self.db_conn.db_conf.db_processed
# self.data_table = self.db_conn.db_conf.processed_trade_data_main
# elif self.id[0] == 'code':
# self.database = self.db_conn.db_conf.db_raw
# self.data_table = self.db_conn.db_conf.raw_trade_data
# else:
# raise AttributeError(f'Wrong id type: {self.id[0]}')
# self.timeframe = timeframe
# self.data_source = data_source
# self.main_df = self.data_reader(start, end)
self.main_df = pd.DataFrame()
# def data_reader(self, start_date, end_date):
# sql_ = f"select `code`, `symbol`, `datetime`, `open`, `close`, " \
# f"`high`, `low`, `turnover`, `volume`, `open_interest` from " \
# f"{self.database}.{self.data_table} where `{self.id[0]}` = '{self.id[1]}' and " \
# f"`timeframe` = '{self.timeframe}' and `data_source` = '{self.data_source}' and " \
# f"`datetime` >= '{start_date}' and `datetime` <= '{end_date}'"
# df = self.db_conn.reader_to_dataframe(sql_)
# df['datetime'] = pd.to_datetime(df['datetime'])
# df['date'] = df['datetime'].dt.strftime("%Y-%m-%d")
# return df.set_index('datetime')
def reset_df(self, df: pd.DataFrame):
self.main_df = df
def factor_tmom_neut_01(self, w):
"""adx indicator"""
source_df = self.main_df.copy()
source_df['up'] = source_df['high'] - source_df['high'].shift()
source_df['down'] = source_df['low'].shift() - source_df['low']
source_df['dm+'] = np.where(
(source_df['up'] > source_df['down']) & (source_df['down'] > 0), source_df['up'], 0
)
source_df['dm-'] = np.where(
(source_df['down'] > source_df['up']) & (source_df['up'] > 0), source_df['down'], 0
)
source_df['hl'] = source_df['high'] - source_df['low']
source_df['hc'] = abs(source_df['high'] - source_df['close'])
source_df['lc'] = abs(source_df['low'] - source_df['close'])
source_df['atr'] = source_df[['hl', 'hc', 'lc']].max(axis=1).rolling(w).mean()
source_df['di+'] = (source_df['dm+'].rolling(w).mean() / source_df['atr']) * 100
source_df['di-'] = (source_df['dm-'].rolling(w).mean() / source_df['atr']) * 100
source_df['dx'] = ((source_df['di+'] - source_df['di-']) / (source_df['di+'] + source_df['di-'])) * 100
source_df['adx'] = source_df['dx'].rolling(w).mean()
source_df['factor'] = np.where(source_df['adx'] > 25, source_df['adx'], 0)
source_df['signal'] = np.where(
(source_df['factor'] / source_df['factor'].shift()).fillna(0) > 1,
1,
0
)
return source_df[['factor', 'signal']]
def factor_tmom_bias_01(self, w):
source_df = self.main_df.copy()
source_df['return_close'] = source_df['close'].diff() / source_df['close'].shift()
ls = source_df['return_close'].rolling(w).apply(
lambda x: pd.Series([(i/abs(i)) if abs(i) > 0 else 0 for i in x.cumsum()[::-5]]).mean()
)
source_df['factor'] = [i if abs(i) > 0.5 else 0 for i in ls]
source_df['signal'] = np.sign(source_df['factor'])
return source_df[['factor', 'signal']]
@staticmethod
def factor_compound(factors, w: [int, None], valve: int):
compounded_factor = pd.DataFrame(factors).T.mean(axis=1)
if w is not None:
compounded_factor = compounded_factor.rolling(w).mean().apply(
lambda x: np.where(abs(x) > valve, np.sign(x), 0)
)
else:
compounded_factor = compounded_factor.apply(lambda x: x/abs(x) if abs(x) >= valve else 0)
return compounded_factor
|
{"hexsha": "3d98bf2982ee336f49382e01bed153699d261da1", "size": 4278, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/factor/factor.py", "max_stars_repo_name": "jiangtiantu/JAB", "max_stars_repo_head_hexsha": "39d91043619c337c07ade87a86f3f876b05ad3e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-14T02:27:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T11:08:33.000Z", "max_issues_repo_path": "src/factor/factor.py", "max_issues_repo_name": "WangChen663/time_series_analysis", "max_issues_repo_head_hexsha": "e88cee1e414c0851853ef328264f4ffb1f3fa44c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/factor/factor.py", "max_forks_repo_name": "WangChen663/time_series_analysis", "max_forks_repo_head_hexsha": "e88cee1e414c0851853ef328264f4ffb1f3fa44c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.78, "max_line_length": 111, "alphanum_fraction": 0.5729312763, "include": true, "reason": "import numpy", "num_tokens": 1199}
|
import logging
from functools import reduce
from operator import mul
from torch import optim
import math
import numpy as np
import random
import torch
from torch import nn
import torch.nn.functional as F
class CdnnClassifier():
def __init__(self, vec_len, cnn_params=[(21, 12), (9, 6)], dnn_params=[(0.5, 0.2)], num_epochs=100, batch_size=32,
padding='auto', learning_rate = 1e-5):
'''
:param vec_len: length of vectors in vecframe
:param cnn_params: list of pairs (kernel_size, max_pool), will be applied one after another
:param dnn_params: list of pairs (fraction_of_neurons, dropout), will be applied one after another
:param num_epochs: number of training epochs
:param batch_size: training batch size
:param padding: vectors from vecframe will be padded up to a multiple of this value
if 'auto', then the product of max_pools will be taken
'''
super().__init__()
self.vec_len = vec_len
self.cnn_params = cnn_params
self.dnn_params = dnn_params
self.num_epochs = num_epochs
self.batch_size = batch_size
self.padding = padding if padding != 'auto' else self.find_smallest_padding()
self.learning_rate = learning_rate
self.logger = logging.getLogger('CdnnClassifier')
self.classifier_class = Classifier
self.model = None
def find_smallest_padding(self):
"""
automatically finds the smallest padding, which is the product of max_pools
:return: padding
"""
return reduce(mul, (max_pool for (_, max_pool) in self.cnn_params), 1)
def reshape(self, data):
nrows, ncols = data.shape
return data.reshape(nrows, 1, ncols)
def add_padding(self, data):
pad_len = self.vec_len - math.floor(self.vec_len / self.padding) * self.padding
self.vec_len += pad_len
before = pad_len // 2
after = pad_len - before
return np.pad(data, [(0, 0), (before, after)], 'constant')
def shuffle_in_unison(self, a, b):
assert len(a) == len(b)
shuffled_a = np.empty(a.shape, dtype=a.dtype)
shuffled_b = np.empty(b.shape, dtype=b.dtype)
permutation = np.random.permutation(len(a))
for old_index, new_index in enumerate(permutation):
shuffled_a[new_index] = a[old_index]
shuffled_b[new_index] = b[old_index]
return shuffled_a, shuffled_b
def fit(self, X_train, y_train):
# padding
assert X_train.shape[1] == self.vec_len
X_train = self.add_padding(X_train)
data_set = MyDataset(X_train, y_train)
data_loader = torch.utils.data.DataLoader(data_set, batch_size=self.batch_size, shuffle=True)
self.model = self.classifier_class(self.vec_len, self.cnn_params, self.dnn_params).cpu().double()
criterion = nn.BCELoss()
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) # , weight_decay=5e-5)
for epoch in range(self.num_epochs):
for local_X, local_y in data_loader:
# possible reshaping
local_y = local_y.double()
local_X = self.reshape(local_X)
# forward
output = self.model(local_X)
# print(output)
# self.logger.debug(output)
loss = criterion(output, local_y)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.logger.debug('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, self.num_epochs, loss.item()))
def predict(self, X_test):
# padding
X_test = self.add_padding(X_test)
assert X_test.shape[1] == self.vec_len
X_test = torch.from_numpy(X_test).double()
X_test = self.reshape(X_test)
output = self.model(X_test)
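# Threshold the sigmoid outputs at 0.5 to obtain boolean class predictions.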
return output > 0.5
class Classifier(nn.Module):
def __init__(self, vec_len, cnn_layer_params, dnn_layer_params):
super().__init__()
self.logger = logging.getLogger("CdnnClassifier")
self.cnn_layers = []
self.cnn_pools = []
curr_size = vec_len
# for kernel_size, max_pool in cnn_layer_params:
# self.cnn_layers.append(nn.Conv1d(in_channels=1, out_channels=1, kernel_size=kernel_size,
# padding= kernel_size//2))
# self.cnn_pools.append(nn.MaxPool1d(max_pool))
# curr_size //= max_pool
self.dnn_layers = []
self.dropout_layers = []
for frac, drop in dnn_layer_params:
next_size = int(curr_size * frac)
self.dnn_layers.append(nn.Linear(curr_size, next_size).double())
self.dropout_layers.append(nn.Dropout(drop).double())
curr_size = next_size
self.final_fc = nn.Linear(curr_size, 1)
def forward(self, x):
# for cnn, pool in zip(self.cnn_layers, self.cnn_pools):
# x = pool(F.relu(cnn(x)))
for dnn, dropout in zip(self.dnn_layers, self.dropout_layers):
x = dropout(F.relu(dnn(x)))
x = torch.sigmoid(self.final_fc(x))
x = x.squeeze()
return x
class MyDataset(torch.utils.data.Dataset):
def __init__(self, data, labels):
super().__init__()
self.data = data.astype('double')
self.labels = labels.astype('double')
def __len__(self):
'''
:return: Total number of samples
'''
return len(self.labels)
def __getitem__(self, index):
'Generates one sample of data'
return self.data[index], self.labels[index]
|
{"hexsha": "f625902306d993549f4c98b7aa34c4dae0eddc83", "size": 5738, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/attacks/CdnnClassifier.py", "max_stars_repo_name": "tahleen-rahman/linkability_stepcount", "max_stars_repo_head_hexsha": "ed873782453d391865ad15e7c2d538058f5db88a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/attacks/CdnnClassifier.py", "max_issues_repo_name": "tahleen-rahman/linkability_stepcount", "max_issues_repo_head_hexsha": "ed873782453d391865ad15e7c2d538058f5db88a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-04-30T21:20:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:35:33.000Z", "max_forks_repo_path": "src/attacks/CdnnClassifier.py", "max_forks_repo_name": "tahleen-rahman/linkability_stepcount", "max_forks_repo_head_hexsha": "ed873782453d391865ad15e7c2d538058f5db88a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5032679739, "max_line_length": 118, "alphanum_fraction": 0.6124085047, "include": true, "reason": "import numpy", "num_tokens": 1317}
|
# Rough outline
from typing import Union, Tuple, Callable
import numpy as np
import scipy.linalg
from general.Environment import Environment
from general.Exceptions import ConvergenceFailure
from utils.MutableFloat import MutableFloat
class Circuit:
def __init__(self, environment: Environment):
self.matrix_n = 0
self.node_mapping = {}
self.environment = environment
self.componentNodes = []
self.components = ()
self.groundNode = None
self.jacobian = None
self.resultVector = None
self.inputVector = None
def add(self, component, nodes):
"""
Adds a component instance to the circuit, connected to the appropriate nodes.
:param component: An instance of Component to add.
:param nodes: An iterable of nodes which the component is connected to.
:return: None
"""
for n in nodes:
if n not in self.node_mapping.keys():
self.node_mapping[n] = self.matrix_n
self.matrix_n += 1
# Matrix_n will never be the same for 2 distinct components, so it works as an arbitrary unique identifier
for n in component.getRequiredCrossNodes(nodes, self.matrix_n):
if n not in self.node_mapping.keys():
self.node_mapping[n] = self.matrix_n
self.matrix_n += 1
self.componentNodes.append((component, nodes))
def finalise(self, groundNode: int):
"""
Constructs the circuit using components that have been added.
:param groundNode: The node that is treated as the reference; all voltages are relative to this node.
:return: None
"""
# We remove rows and columns of ground...
ground_entry = self.node_mapping[groundNode]
for node in self.node_mapping.keys():
# Shift down once
if self.node_mapping[node] > ground_entry:
self.node_mapping[node] -= 1
self.matrix_n -= 1
self.jacobian = np.array([[MutableFloat() for _ in range(self.matrix_n)] for _ in range(self.matrix_n)])
self.resultVector = [MutableFloat() for _ in range(self.matrix_n)]
self.inputVector = [MutableFloat(0) for _ in range(self.matrix_n)]
self.groundNode = groundNode
for component in self.componentNodes:
component[0].connect(self, component[1])
self.components = [component[0] for component in self.componentNodes]
def getInputReference(self, node: Union[Tuple[int, int, int], int]) -> MutableFloat:
return self.inputVector[self.node_mapping[node]] if node != self.groundNode else MutableFloat()
def getResultReference(self, node: Union[Tuple[int, int, int], int]) -> MutableFloat:
return self.resultVector[self.node_mapping[node]] if node != self.groundNode else MutableFloat()
def getJacobianReference(self, nodeA: Union[Tuple[int, int, int], int],
nodeB: Union[Tuple[int, int, int], int]) -> MutableFloat:
return self.jacobian[self.node_mapping[nodeA], self.node_mapping[nodeB]] \
if nodeA != self.groundNode and nodeB != self.groundNode else MutableFloat()
def solve(self, convergence_limit: int, stamp_f: Callable[['Component', Environment], None]):
# TODO do better guessing!
extract_value = np.vectorize(lambda x: x.value, otypes=[np.float64])
# Set starting inputs to 1
for x in self.inputVector:
x.reset(x.value)
# Limit convergence
for _ in range(convergence_limit):
# Clear result vector
for x in self.resultVector:
x.reset(0.0)
# Clear jacobian (old not necessary?)
for x in np.nditer(self.jacobian, flags=['refs_ok']):
x.item().reset_without_old(0.0)
# Stamp each component's contributions
for component in self.components:
stamp_f(component, self.environment)
jac = extract_value(self.jacobian)
resultVector = [-x.value for x in self.resultVector]
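# Newton-Raphson step: solve J * delta = -f(x) for the update to the unknowns.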
# Solve matrices
inverseResult = scipy.linalg.lapack.dgesv(jac, resultVector)
delta_in = inverseResult[2]
for i, inputValue in enumerate(self.inputVector):
inputValue += delta_in[i]
# If the better guess is indistinguishable from the prior guess, we probably have the right value...
if (abs(delta_in) < 4e-5).all():
return
else:
raise ConvergenceFailure("Failed to converge.")
|
{"hexsha": "22424386e25fa6c6d434d391c46335a994b8df1b", "size": 4640, "ext": "py", "lang": "Python", "max_stars_repo_path": "general/Circuit.py", "max_stars_repo_name": "MrAttoAttoAtto/CircuitSimulatorC2", "max_stars_repo_head_hexsha": "4d821c86404fe3271363fd8c1438e4ca29c17a13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-25T14:46:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T07:58:33.000Z", "max_issues_repo_path": "general/Circuit.py", "max_issues_repo_name": "MrAttoAttoAtto/CircuitSimulatorC2", "max_issues_repo_head_hexsha": "4d821c86404fe3271363fd8c1438e4ca29c17a13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "general/Circuit.py", "max_forks_repo_name": "MrAttoAttoAtto/CircuitSimulatorC2", "max_forks_repo_head_hexsha": "4d821c86404fe3271363fd8c1438e4ca29c17a13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3220338983, "max_line_length": 114, "alphanum_fraction": 0.6297413793, "include": true, "reason": "import numpy,import scipy", "num_tokens": 985}
|
From iris.program_logic Require Export weakestpre.
From iris.heap_lang Require Export notation lang.
From iris.proofmode Require Export tactics.
From iris.heap_lang Require Import proofmode.
From iris.base_logic.lib Require Export invariants.
Set Default Proof Using "Type".
Section IncRA.
Inductive incRAT : Type :=
| S : incRAT
| F : incRAT
| Bot : incRAT.
(* From what I've read, this, roughly speaking, allows Coq's type
inference to view incRAT as a particular kind of OFE, which has some
additional structure. *)
Canonical Structure incRAC := leibnizC incRAT.
(* Supply the resource algebra operation by giving an instance of
the Op type class. *)
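(* The operation: F ⋅ F = F; any combination involving S or Bot is Bot (invalid). *)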
Instance incRAop : Op incRAT :=
λ p1 p2,
match (p1, p2) with
| (F, F) => F
| _ => Bot
end.
Instance incRAvalid : Valid incRAT :=
λ p,
match p with
| Bot => False
| _ => True
end.
Instance incRAcore : PCore incRAT :=
λ p, None.
(* Combine all the components of the RA and prove that it is an
RA *)
Definition incRA_mixin : RAMixin incRAT.
Proof.
split; try apply _; try done.
- (* associativity, simply show by case analysis *)
rewrite /op. rewrite /incRAop.
intros [] [] []; reflexivity.
- (* commutativity *)
rewrite /op. rewrite /incRAop.
intros [] []; reflexivity.
- (* validity of combination implies validity of parts *)
intros [] [] HV; try done.
(* There were a bunch more properties, such as the relation
between validity and the extension order, but apparently Coq could
prove those automatically. *)
Qed.
Canonical Structure incRA := discreteR incRAT incRA_mixin.
(* Let Coq know that our CMRA is discrete. *)
Instance incRA_cmra_discrete : CmraDiscrete incRA.
Proof. apply discrete_cmra_discrete. Qed.
(* Use an instantiation of Iris that has my new resource algebra. *)
Class incG Σ := IncG { inc_inG :> inG Σ incRA }.
Definition incΣ : gFunctors := #[GFunctor incRA].
Instance subG_incΣ {Σ} : subG incΣ Σ -> incG Σ.
Proof. solve_inG. Qed.
Context `{!heapG Σ, !incG Σ}.
Lemma incRA_F_duplicable γ: own γ F -∗ (own γ F ∗ own γ F).
Proof.
iIntros "HF".
iApply own_op.
iExact "HF".
Qed.
Lemma incRA_S_F_incompatible γ Φ: own γ S ∗ own γ F -∗ Φ.
Proof.
iIntros "HR".
iExFalso.
rewrite -own_op.
iDestruct (own_valid with "HR") as "HV".
iDestruct "HV" as %HV.
iPureIntro.
exact HV.
Qed.
Lemma incRA_S_F_update: S ~~> F.
Proof.
intros n mz H.
destruct mz.
- rewrite /opM. simpl.
rewrite <-cmra_discrete_valid_iff.
rewrite /opM in H.
rewrite <-cmra_discrete_valid_iff in H.
destruct c; naive_solver.
- simpl. rewrite <-cmra_discrete_valid_iff. done.
Qed.
End IncRA.
|
{"author": "ocecaco", "repo": "iris-iterators", "sha": "ce5c1bf34178e0cd7592dc08884956f0fad2403a", "save_path": "github-repos/coq/ocecaco-iris-iterators", "path": "github-repos/coq/ocecaco-iris-iterators/iris-iterators-ce5c1bf34178e0cd7592dc08884956f0fad2403a/experiments/IncRA.v"}
|
% corrected VD 98
\subsection{Requirements}
% Describe here all the properties that characterize the deliverables you
% produced. It should describe, for each main deliverable, what are the expected
% functional and non functional properties of the deliverables, who are the actors
% exploiting the deliverables. It is expected that you have at least one
% scientific deliverable (e.g. ``Scientific presentation of the Python programming
% language'', ``State of the art on quality models for human computer
% interaction'', \ldots.) and one technical deliverable (e.g. ``BSProSoft - A
% python/django web-site for IT job offers retrieval and analysis'', \ldots). \\
\begin{itemize}
\item \textbf{FR01} Present the feature extraction with MFCCs. \\
\item \textbf{FR02} Investigate the notions of deep learning and Artificial
Neural Networks (ANNs).\\
This should give an introduction to the domain of deep learning. It presents
an overview of four different ANN architectures, each covered by a brief
introduction and its theoretical background.\\
\item \textbf{NFR01} Performance evaluation and comparison.\\
In this section, we present and discuss the results obtained with the four
Neural Network architectures.
\end{itemize}
|
{"hexsha": "6dd00b77b8618641676737275b275aefe6e20d86", "size": 1269, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sections/scientific/requirements.tex", "max_stars_repo_name": "Lemswasabi/bsps3-report", "max_stars_repo_head_hexsha": "ca3f7bee2d4740c5c7ad9f586766ab04a0e5f58b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sections/scientific/requirements.tex", "max_issues_repo_name": "Lemswasabi/bsps3-report", "max_issues_repo_head_hexsha": "ca3f7bee2d4740c5c7ad9f586766ab04a0e5f58b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sections/scientific/requirements.tex", "max_forks_repo_name": "Lemswasabi/bsps3-report", "max_forks_repo_head_hexsha": "ca3f7bee2d4740c5c7ad9f586766ab04a0e5f58b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.76, "max_line_length": 82, "alphanum_fraction": 0.7643814027, "num_tokens": 294}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#Import Cancer data from the Sklearn library
# Dataset can also be found here (http://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+%28diagnostic%29)
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
df_cancer = pd.DataFrame(np.c_[cancer['data'], cancer['target']], columns = np.append(cancer['feature_names'], ['target']))
df_cancer.head()
# Let's plot out just the first 5 variables (features)
sns.pairplot(df_cancer, hue = 'target', vars = ['mean radius', 'mean texture', 'mean perimeter','mean area','mean smoothness'] )
df_cancer['target'].value_counts()
sns.countplot(df_cancer['target'], label = "Count")
plt.figure(figsize=(20,12))
sns.heatmap(df_cancer.corr(), annot=True)
|
{"hexsha": "6ac99f8c6a97ce7553603aba0211837dfb3b5635", "size": 842, "ext": "py", "lang": "Python", "max_stars_repo_path": "cancerbreast.py", "max_stars_repo_name": "axyroxxx/Breast-Cancer", "max_stars_repo_head_hexsha": "f7bbc00b43ddee0a810191e1fc1ee667f01586ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cancerbreast.py", "max_issues_repo_name": "axyroxxx/Breast-Cancer", "max_issues_repo_head_hexsha": "f7bbc00b43ddee0a810191e1fc1ee667f01586ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cancerbreast.py", "max_forks_repo_name": "axyroxxx/Breast-Cancer", "max_forks_repo_head_hexsha": "f7bbc00b43ddee0a810191e1fc1ee667f01586ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6086956522, "max_line_length": 129, "alphanum_fraction": 0.7304038005, "include": true, "reason": "import numpy", "num_tokens": 215}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 15:44:19 2018
@author: tmthydvnprt
This function is adapted from the discussion at:
https://stackoverflow.com/questions/6620471/fitting-empirical-distribution-to-theoretical-ones-with-scipy-python
Though I've made it easy to use, I did NOT write this awesome code - mrich
"""
from ModelFit import do_fit
import numpy as np
# test
# Data taken from: https://compliancy-group.com/hipaa-fines-directory-year/
finesdata = np.array([3500000, 100000, 4348000, 475000, 2200000, 3200000, 5500000,
400000, 31000, 2500000, 2400000, 387200, 2300000, 239800,
25000, 1550000, 3900000, 750000, 2200000, 650000, 2700000,
2750000, 5550000, 400000, 2140000, 650000])
do_fit(finesdata, "HIPAA Fines")
|
{"hexsha": "be5325b00d534f4d1b6de895e5173f2d8ac6392a", "size": 812, "ext": "py", "lang": "Python", "max_stars_repo_path": "Example/fineDataAnalysis.py", "max_stars_repo_name": "richmr/QuantitativeRiskSim", "max_stars_repo_head_hexsha": "f98d416d075dc6232fdc573844847f8c4843e7f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2017-08-01T19:07:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T11:22:17.000Z", "max_issues_repo_path": "Example/fineDataAnalysis.py", "max_issues_repo_name": "richmr/QuantitativeRiskSim", "max_issues_repo_head_hexsha": "f98d416d075dc6232fdc573844847f8c4843e7f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Example/fineDataAnalysis.py", "max_forks_repo_name": "richmr/QuantitativeRiskSim", "max_forks_repo_head_hexsha": "f98d416d075dc6232fdc573844847f8c4843e7f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-08-28T17:48:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-31T01:17:00.000Z", "avg_line_length": 36.9090909091, "max_line_length": 112, "alphanum_fraction": 0.6884236453, "include": true, "reason": "import numpy", "num_tokens": 268}
|
using BinaryBuilder, Pkg
name = "MKL"
version = v"2021.1.1"
sources = [
ArchiveSource("https://anaconda.org/intel/mkl/2021.1.1/download/linux-64/mkl-2021.1.1-intel_52.tar.bz2",
"bfb0fd056576cad99ae1d9c69ada2745420da9f9cf052551d5b91f797538bda2"; unpack_target = "mkl-x86_64-linux-gnu"),
ArchiveSource("https://anaconda.org/intel/mkl/2021.1.1/download/linux-32/mkl-2021.1.1-intel_52.tar.bz2",
"7b6f55a30886154bd96d4b4c6b7428494a59397b87779b58e5b3de00250343f9"; unpack_target = "mkl-i686-linux-gnu"),
ArchiveSource("https://anaconda.org/intel/mkl/2021.1.1/download/osx-64/mkl-2021.1.1-intel_50.tar.bz2",
"819fb8875909d4d024e2a936c54b561aebd1e3aebe58fc605c70aa1ad9a66b70"; unpack_target = "mkl-x86_64-apple-darwin14"),
ArchiveSource("https://anaconda.org/intel/mkl/2021.1.1/download/win-32/mkl-2021.1.1-intel_52.tar.bz2",
"dba6a12a481407ec55fba9895b68afacb15f044905dcb5e185db341b688e6177"; unpack_target = "mkl-i686-w64-mingw32"),
ArchiveSource("https://anaconda.org/intel/mkl/2021.1.1/download/win-64/mkl-2021.1.1-intel_52.tar.bz2",
"4024391b8a45836d5a7ee92405b7767874b3c3bbf2f490349fda042db3b60dfd"; unpack_target = "mkl-x86_64-w64-mingw32"),
]
# Bash recipe for building across all platforms
script = raw"""
cd ${WORKSPACE}/srcdir/mkl-${target}
if [[ ${target} == *-mingw* ]]; then
cp -r Library/bin/* ${libdir}
else
cp -r lib/* ${libdir}
fi
install_license info/licenses/*.txt
"""
platforms = [
Platform("x86_64", "linux"; libc="glibc"),
Platform("i686", "linux"; libc="glibc"),
Platform("x86_64", "macos"),
Platform("i686", "windows"),
Platform("x86_64", "windows"),
]
# The products that we will ensure are always built
products = [
LibraryProduct(["libmkl_core", "mkl_core", "mkl_core.1"], :libmkl_core),
LibraryProduct(["libmkl_rt", "mkl_rt", "mkl_rt.1"], :libmkl_rt),
]
# Dependencies that must be installed before this package can be built
dependencies = [
Dependency("IntelOpenMP_jll"),
]
non_reg_ARGS = filter(arg -> arg != "--register", ARGS)
include("../../fancy_toys.jl")
no_autofix_platforms = [Platform("i686", "windows"), Platform("x86_64", "windows"), Platform("x86_64", "macos")]
autofix_platforms = [Platform("x86_64", "linux"), Platform("i686", "linux")]
if any(should_build_platform.(triplet.(no_autofix_platforms)))
# Need to disable autofix: updating linkage of libmkl_intel_thread.dylib on
# macOS causes runtime issues:
# https://github.com/JuliaPackaging/Yggdrasil/issues/915.
build_tarballs(non_reg_ARGS, name, version, sources, script, no_autofix_platforms, products, dependencies; lazy_artifacts = true, autofix = false)
end
if any(should_build_platform.(triplet.(autofix_platforms)))
# ... but we need to run autofix on Linux, because here libmkl_rt doesn't
# have a soname, so we can't ccall it without specifying the path:
# https://github.com/JuliaSparse/Pardiso.jl/issues/69
build_tarballs(ARGS, name, version, sources, script, autofix_platforms, products, dependencies; lazy_artifacts = true)
end
|
{"hexsha": "1d84a86bfbb28d1c6197e3080603aa79f4cce59f", "size": 3120, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "M/MKL/build_tarballs.jl", "max_stars_repo_name": "waralex/Yggdrasil", "max_stars_repo_head_hexsha": "bba5443f75b221c6973d479e2c6727cf0ae3a0b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-10T07:57:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-10T07:57:39.000Z", "max_issues_repo_path": "M/MKL/build_tarballs.jl", "max_issues_repo_name": "waralex/Yggdrasil", "max_issues_repo_head_hexsha": "bba5443f75b221c6973d479e2c6727cf0ae3a0b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "M/MKL/build_tarballs.jl", "max_forks_repo_name": "waralex/Yggdrasil", "max_forks_repo_head_hexsha": "bba5443f75b221c6973d479e2c6727cf0ae3a0b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-24T15:29:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-24T15:29:42.000Z", "avg_line_length": 47.2727272727, "max_line_length": 150, "alphanum_fraction": 0.7169871795, "num_tokens": 1034}
|
#' Dates of different days within isoweekyears
#'
#' @format
#' \describe{
#' \item{yrwk}{Isoweek-isoyear.}
#' \item{mon}{Date of Monday.}
#' \item{tue}{Date of Tuesday.}
#' \item{wed}{Date of Wednesday.}
#' \item{thu}{Date of Thursday.}
#' \item{fri}{Date of Friday.}
#' \item{sat}{Date of Saturday.}
#' \item{sun}{Date of Sunday.}
#' }
"days"
# Creates the days data.table
gen_days <- function() {
. <- NULL
yrwk <- NULL
day <- NULL
mon <- NULL
tue <- NULL
wed <- NULL
thu <- NULL
fri <- NULL
sat <- NULL
sun <- NULL
days <- data.table(day = seq.Date(as.IDate("2000-01-01"), as.IDate("2030-01-01"), by = "days"))
days[, yrwk := format.Date(day, format = "%G-%V")]
days <- days[, .(mon = as.IDate(min(day))), by = .(yrwk)]
days[, tue := mon + 1]
days[, wed := mon + 2]
days[, thu := mon + 3]
days[, fri := mon + 4]
days[, sat := mon + 5]
days[, sun := mon + 6]
days <- days[yrwk >= "2000-01"]
return(days)
}
|
{"hexsha": "84b5f108c1a67237ffa6becb8fa0149c8292fa2f", "size": 966, "ext": "r", "lang": "R", "max_stars_repo_path": "R/days.r", "max_stars_repo_name": "folkehelseinstituttet/municipdata", "max_stars_repo_head_hexsha": "eae72bd8eb130adb6397b9d5f3f8c00a02982b8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/days.r", "max_issues_repo_name": "folkehelseinstituttet/municipdata", "max_issues_repo_head_hexsha": "eae72bd8eb130adb6397b9d5f3f8c00a02982b8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/days.r", "max_forks_repo_name": "folkehelseinstituttet/municipdata", "max_forks_repo_head_hexsha": "eae72bd8eb130adb6397b9d5f3f8c00a02982b8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0, "max_line_length": 97, "alphanum_fraction": 0.5693581781, "num_tokens": 345}
|
import os
from functools import partial
import tensorflow as tf
import numpy as np
def _parse(filename, channels):
image_string = tf.io.read_file(filename)
image_decoded = tf.image.decode_png(image_string, channels=channels)
return tf.cast(image_decoded, tf.float32)
def _flip(x):
x = tf.image.random_flip_left_right(x)
return x
def _crop(x, crop_size):
shape = x.shape
topleft_x = tf.random.uniform((1,), minval=0, maxval=(shape[0] - crop_size), dtype=tf.int32)
topleft_y = tf.random.uniform((1,), minval=0, maxval=(shape[1] - crop_size), dtype=tf.int32)
return tf.image.crop_to_bounding_box(x, topleft_y[0], topleft_x[0], crop_size, crop_size)
def _resize(x, resize_size):
return tf.image.resize(x, [resize_size, resize_size])
def iterator(path, dataset_size, batch_size, channels, resize_size, crop_size):
filenames = list(map(lambda p: os.path.join(path, p), os.listdir(path)))[:dataset_size]
files = tf.constant(filenames)
dataset = tf.data.Dataset.from_tensor_slices(files) \
.map(partial(_parse, channels=channels)) \
.map(lambda x: (x / 127.5) - 1) \
.map(_flip) \
.map(partial(_resize, resize_size=resize_size))\
.map(partial(_crop, crop_size=crop_size)) \
.shuffle(buffer_size=500) \
.batch(batch_size) \
.repeat()
return dataset.make_initializable_iterator()
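# --- Hedged usage sketch (illustrative, not part of the original pipeline) ---
# The directory path, dataset size and image dimensions below are placeholders;
# adjust them to the actual data layout. Assumes the TF1-style session API that
# the initializable iterator above targets.
if __name__ == "__main__":
    it = iterator("path/to/images", dataset_size=100, batch_size=4,
                  channels=3, resize_size=143, crop_size=128)
    next_batch = it.get_next()
    with tf.Session() as sess:
        sess.run(it.initializer)
        images = sess.run(next_batch)  # shape (4, 128, 128, 3), values in [-1, 1]
        print(images.shape)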
|
{"hexsha": "53efdd40403c43787dbcb7c6d85ccc659750060d", "size": 1403, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/datasets/from_images.py", "max_stars_repo_name": "cyprienruffino/CycleGAN-TensorFlow", "max_stars_repo_head_hexsha": "5eaa864e406d4ff0a1b86a85cf43a9096d0d0395", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-06T14:05:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-08T12:10:08.000Z", "max_issues_repo_path": "src/datasets/from_images.py", "max_issues_repo_name": "cyprienruffino/CycleGAN-TensorFlow", "max_issues_repo_head_hexsha": "5eaa864e406d4ff0a1b86a85cf43a9096d0d0395", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/datasets/from_images.py", "max_forks_repo_name": "cyprienruffino/CycleGAN-TensorFlow", "max_forks_repo_head_hexsha": "5eaa864e406d4ff0a1b86a85cf43a9096d0d0395", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-02T10:06:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-02T10:06:10.000Z", "avg_line_length": 31.1777777778, "max_line_length": 96, "alphanum_fraction": 0.6885245902, "include": true, "reason": "import numpy", "num_tokens": 374}
|
///////////////////////////////////////////////////////////////
// Copyright 2018 John Maddock. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt
//[eigen_eg
#include <iostream>
#include <boost/multiprecision/cpp_complex.hpp>
#include <boost/multiprecision/eigen.hpp>
#include <Eigen/Dense>
int main()
{
using namespace Eigen;
typedef boost::multiprecision::cpp_complex_quad complex_type;
//
// We want to solve Ax = b for x,
// define A and b first:
//
Matrix<complex_type, 2, 2> A, b;
A << complex_type(2, 3), complex_type(-1, -2), complex_type(-1, -4), complex_type(3, 6);
b << 1, 2, 3, 1;
std::cout << "Here is the matrix A:\n" << A << std::endl;
std::cout << "Here is the right hand side b:\n" << b << std::endl;
//
// Solve for x:
//
Matrix<complex_type, 2, 2> x = A.fullPivHouseholderQr().solve(b);
std::cout << "The solution is:\n" << x << std::endl;
//
// Compute the error in the solution by using the norms of Ax - b and b:
//
complex_type::value_type relative_error = (A*x - b).norm() / b.norm();
std::cout << "The relative error is: " << relative_error << std::endl;
return 0;
}
//]
/*
//[eigen_out
Here is the matrix A:
(2,3) (-1,-2)
(-1,-4) (3,6)
Here is the right hand side b:
1 2
3 1
The solution is:
(0.6,-0.6) (0.7,-0.7)
(0.64,-0.68) (0.58,-0.46)
The relative error is: 2.63132e-34
//]
*/
|
{"hexsha": "a70e3fbcf527f14e5a5c5261351e7e3ee173affe", "size": 1487, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.71.0/libs/multiprecision/example/eigen_example.cpp", "max_stars_repo_name": "rajeev02101987/arangodb", "max_stars_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "3rdParty/boost/1.71.0/libs/multiprecision/example/eigen_example.cpp", "max_issues_repo_name": "rajeev02101987/arangodb", "max_issues_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "3rdParty/boost/1.71.0/libs/multiprecision/example/eigen_example.cpp", "max_forks_repo_name": "rajeev02101987/arangodb", "max_forks_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 892.0, "max_forks_repo_forks_event_min_datetime": "2015-01-29T16:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T07:44:30.000Z", "avg_line_length": 28.0566037736, "max_line_length": 91, "alphanum_fraction": 0.5965030262, "num_tokens": 481}
|
import numpy as np
import torch
def filter_samples(Y_hat: torch.Tensor, Y: torch.Tensor, weights):
    """Keep only the rows of Y_hat and Y whose sample weight is nonzero."""
if weights is None:
return Y_hat, Y
if isinstance(weights, torch.Tensor):
idx = torch.nonzero(weights).view(-1)
else:
idx = torch.tensor(np.nonzero(weights)[0])
if Y.dim() > 1:
Y = Y[idx, :]
else:
Y = Y[idx]
if Y_hat.dim() > 1:
Y_hat = Y_hat[idx, :]
else:
Y_hat = Y_hat[idx]
return Y_hat, Y
def tensor_sizes(input):
    """Recursively map a (possibly nested) container of tensors to their shapes."""
if isinstance(input, dict):
return {k: tensor_sizes(v) for k, v in input.items()}
elif isinstance(input, tuple):
return tuple(tensor_sizes(v) for v in input)
elif isinstance(input, list):
return [tensor_sizes(v) for v in input]
else:
return input.shape
def preprocess_input(input, device, dtype=None, half=False):
    """Recursively move tensors in a nested dict/tuple/list onto a device, optionally casting them."""
if isinstance(input, dict):
input = {k: preprocess_input(v, device, dtype, half) for k, v in input.items()}
elif isinstance(input, tuple):
input = tuple(preprocess_input(v, device, dtype, half) for v in input)
elif isinstance(input, list):
input = [preprocess_input(v, device, dtype, half) for v in input]
else:
input = process_tensor(input, device=device, dtype=dtype, half=half)
return input
def process_tensor(input, device=None, dtype=None, half=False):
    """Convert a single value to a tensor and apply dtype/half/device conversions."""
if not isinstance(input, torch.Tensor):
input = torch.tensor(input)
if dtype:
input = input.type(dtype)
if half:
input = input.half()
if device:
input = input.to(device)
return input
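# --- Hedged usage sketch (illustrative) ---
# Shows how preprocess_input walks nested containers and how tensor_sizes can be
# used to inspect the result. The shapes and the "cpu" device below are made up.
def _example_preprocess_input():
    batch = {"x": np.zeros((4, 8)), "meta": [np.arange(4), np.ones(4)]}
    out = preprocess_input(batch, device="cpu", dtype=torch.float)
    # e.g. {'x': torch.Size([4, 8]), 'meta': [torch.Size([4]), torch.Size([4])]}
    return tensor_sizes(out)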
|
{"hexsha": "bd484694a685b35de9acbbd6ffcd1e8141a53461", "size": 1627, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "JonnyTran/LATTE", "max_stars_repo_head_hexsha": "613c976c1361560d1b5b78f1d8131002cbeabfc5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "JonnyTran/LATTE", "max_issues_repo_head_hexsha": "613c976c1361560d1b5b78f1d8131002cbeabfc5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "JonnyTran/LATTE", "max_forks_repo_head_hexsha": "613c976c1361560d1b5b78f1d8131002cbeabfc5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0307692308, "max_line_length": 87, "alphanum_fraction": 0.618930547, "include": true, "reason": "import numpy", "num_tokens": 399}
|
(*
This is the definition of formal syntax for Dan Grossman's Thesis,
"SAFE PROGRAMMING AT THE C LEVEL OF ABSTRACTION".
An attempt at a variable module in a context.
*)
Require Import List.
Export ListNotations.
Require Import ZArith.
Require Import Init.Datatypes.
Require Import Coq.Init.Logic.
Require Export CpdtTactics.
Require Export Case.
Require Export TacticNotations.
Require Export TypingInfoSigDef.
Set Implicit Arguments.
Module Type TypingInfoProofsSig.
Include TypingInfoSig.
Axiom beq_t_refl: forall a, beq_t a a = true.
Hint Resolve beq_t_refl.
Axiom beq_t_sym: forall a b, beq_t a b = beq_t b a.
Hint Immediate beq_t_sym.
Axiom beq_t_trans: forall a b c, beq_t a b = true -> beq_t b c = true -> beq_t a c = true.
Hint Resolve beq_t_trans.
Axiom beq_t_eq: forall a b, beq_t a b = true -> a = b.
Hint Resolve beq_t_eq.
Axiom beq_t_neq: forall a b, beq_t a b = false -> a <> b.
Hint Resolve beq_t_neq.
End TypingInfoProofsSig.
|
{"author": "briangmilnes", "repo": "CycloneCoqSemantics", "sha": "190c0fc57d5aebfde244efb06a119f108de7a150", "save_path": "github-repos/coq/briangmilnes-CycloneCoqSemantics", "path": "github-repos/coq/briangmilnes-CycloneCoqSemantics/CycloneCoqSemantics-190c0fc57d5aebfde244efb06a119f108de7a150/3/TypingInfoProofsSigDef.v"}
|
"""
This library contains metrics to quantify the shape of a waveform
1. threshold_amplitude - only look at a metric while oscillatory amplitude is above a set percentile threshold
2. rdratio - Ratio of rise time and decay time
3. pt_duration - Peak and trough durations and their ratio
4. symPT - symmetry between peak and trough
5. symRD - symmetry between rise and decay
6. pt_sharp - calculate sharpness of oscillatory extrema
7. rd_steep - calculate rise and decay steepness
8. ptsr - calculate extrema sharpness ratio
9. rdsr - calculate rise-decay steepness ratio
10. average_waveform_trigger - calculate the average waveform of an oscillation by triggering on peak or trough
11. gips_swm - identify a repeated waveform in the signal
12. rd_diff - normalized difference between rise and decay time
13. compute_shape_by_cycle - make a dataframe of shape features for each cycle
14. define_true_oscillating_periods - determine which cycles are part of an oscillating period
"""
from __future__ import division
import numpy as np
import pandas as pd
from misshapen.nonshape import ampT, bandpass_default, findpt, findzerox
def threshold_amplitude(x, metric, samples, percentile, frange, Fs, filter_fn=None, filter_kwargs=None):
"""
Exclude from analysis the samples in which the amplitude falls below a defined percentile
Parameters
----------
x : numpy array
raw time series
metric : numpy array
series of measures corresponding to time samples in 'samples' (e.g. peak sharpness)
samples : numpy array
time samples at which metric was computer (e.g. peaks)
percentile : float
percentile cutoff for exclusion (e.g. 10 = bottom 10% excluded)
frange : [lo, hi]
frequency range of interest for calculating amplitude
Fs : float
Sampling rate (Hz)
Returns
-------
metric_new : numpy array
same as input 'metric' but only for samples above the amplitude threshold
samples_new : numpy array
samples above the amplitude threshold
"""
# Do nothing if threshold is 0
if percentile == 0:
return metric, samples
# Default filter function
if filter_fn is None:
filter_fn = bandpass_default
if filter_kwargs is None:
filter_kwargs = {}
# Calculate amplitude time series and threshold
amp = ampT(x, frange, Fs, rmv_edge=False,
filter_fn=filter_fn, filter_kwargs=filter_kwargs)
amp = amp[samples]
amp_threshold = np.percentile(amp, percentile)
# Update samples used
samples_new = samples[amp >= amp_threshold]
metric_new = metric[amp >= amp_threshold]
return metric_new, samples_new
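# Hedged usage sketch (illustrative): keep only peak-sharpness values from cycles
# whose band amplitude is above the 10th percentile. Assumes ampT from
# misshapen.nonshape behaves as used above; the frequency range and Fs are made up.
def _example_threshold_amplitude(x, Psharps, Ps):
    return threshold_amplitude(x, Psharps, Ps, percentile=10,
                               frange=(8, 12), Fs=1000.)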
def rdratio(Ps, Ts):
"""
Calculate the ratio between rise time and decay time for oscillations
Note: must have the same number of peaks and troughs
Note: the final rise or decay is unused
Parameters
----------
Ps : numpy arrays 1d
time points of oscillatory peaks
Ts : numpy arrays 1d
        time points of oscillatory troughs
Returns
-------
rdr : array-like 1d
rise-decay ratios for each oscillation
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Assure Ps and Ts are numpy arrays
if type(Ps) == list or type(Ts) == list:
print('Converted Ps and Ts to numpy arrays')
Ps = np.array(Ps)
Ts = np.array(Ts)
# Calculate rise and decay times
if Ts[0] < Ps[0]:
riset = Ps[:-1] - Ts[:-1]
decayt = Ts[1:] - Ps[:-1]
else:
riset = Ps[1:] - Ts[:-1]
decayt = Ts[:-1] - Ps[:-1]
# Calculate ratio between each rise and decay time
rdr = riset / decayt.astype(float)
return riset, decayt, rdr
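# Hedged worked example (synthetic sample indices, not from a real recording).
# With the first trough before the first peak, rises are Ps[:-1] - Ts[:-1] and
# decays are Ts[1:] - Ps[:-1]; the final rise/decay is unused.
def _example_rdratio():
    Ps = np.array([120, 220, 330])
    Ts = np.array([80, 170, 290])
    riset, decayt, rdr = rdratio(Ps, Ts)
    # riset -> [40, 50], decayt -> [50, 70], rdr -> [0.8, ~0.71]
    return riset, decayt, rdr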
def pt_duration(Ps, Ts, zeroxR, zeroxD):
"""
Calculate the ratio between peak and trough durations
NOTE: must have the same number of peaks and troughs
NOTE: the durations of the first and last extrema will be estimated by using the only zerox they have
Parameters
----------
Ps : numpy arrays 1d
time points of oscillatory peaks
Ts : numpy arrays 1d
        time points of oscillatory troughs
zeroxR : array-like 1d
indices at which oscillatory rising zerocrossings occur
zeroxD : array-like 1d
indices at which oscillatory decaying zerocrossings occur
Returns
-------
    Ps_dur : array-like 1d
        duration of each peak
    Ts_dur : array-like 1d
        duration of each trough
    ptr : array-like 1d
        peak-trough duration ratios for each oscillation
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Assure Ps and Ts are numpy arrays
if type(Ps) == list or type(Ts) == list:
print('Converted Ps and Ts to numpy arrays')
Ps = np.array(Ps)
Ts = np.array(Ts)
# Calculate the duration of each peak and trough until last
Ps_dur = np.zeros(len(Ps))
Ts_dur = np.zeros(len(Ts))
if Ps[0] < Ts[0]:
# treat first extrema differently
Ps_dur[0] = 2 * (zeroxD[0] - Ps[0])
# duration of each peak
for i in range(1, len(Ps) - 1):
Ps_dur[i] = (zeroxD[i] - zeroxR[i - 1])
# duration of each trough
for i in range(len(Ts) - 1):
Ts_dur[i] = (zeroxR[i] - zeroxD[i])
else:
Ts_dur[0] = 2 * (zeroxR[0] - Ts[0])
for i in range(len(Ps) - 1):
Ps_dur[i] = (zeroxD[i] - zeroxR[i])
# duration of each trough
for i in range(1, len(Ts) - 1):
Ts_dur[i] = (zeroxR[i] - zeroxD[i - 1])
# Treat last extrema differently
if Ps[-1] < Ts[-1]:
Ps_dur[-1] = (zeroxD[-1] - zeroxR[-1])
Ts_dur[-1] = 2 * (Ts[-1] - zeroxD[-1])
else:
Ps_dur[-1] = 2 * (Ps[-1] - zeroxR[-1])
Ts_dur[-1] = (zeroxR[-1] - zeroxD[-1])
ptr = Ps_dur / Ts_dur
return Ps_dur, Ts_dur, ptr
def symPT(x, Ps, Ts, window_half):
"""
Measure of asymmetry between oscillatory peaks and troughs
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
window_half : int
Number of samples around extrema to analyze, in EACH DIRECTION
Returns
-------
sym : array-like 1d
measure of symmetry between each trough-peak pair
Result of 0 means the peak and trough are perfectly symmetric
Notes
-----
Opt 2: Roemer; The metric should be between 0 and 1
Inner product of Peak and Trough divided by the squareroot of the product of SSQ_peak and SSQ_trough
I'll need to fine tune this to make it more complicated and less susceptible to noise
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
E = len(Ps)
sym = np.zeros(E)
for e in range(E):
# Find region around each peak and trough. Make extrema be 0
peak = x[Ps[e] - window_half:Ps[e] + window_half + 1] - x[Ps[e]]
peak = -peak
trough = x[Ts[e] - window_half:Ts[e] + window_half + 1] - x[Ts[e]]
# Compare the two measures
peakenergy = np.sum(peak**2)
troughenergy = np.sum(trough**2)
energy = np.max((peakenergy, troughenergy))
diffenergy = np.sum((peak - trough)**2)
sym[e] = diffenergy / energy
return sym
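# Hedged worked example: for a pure sine the peak and trough are mirror images,
# so the asymmetry measure should be ~0. Extrema indices are written out by hand
# so the example does not depend on the findpt helper.
def _example_symPT():
    Fs = 1000.
    t = np.arange(0, 2, 1 / Fs)
    x = np.sin(2 * np.pi * 10 * t)
    Ps = np.arange(25, 1900, 100)  # peaks of the 10 Hz sine
    Ts = np.arange(75, 1950, 100)  # troughs of the 10 Hz sine
    return symPT(x, Ps, Ts, window_half=10)  # values close to 0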
def symRD(x, Ts, window_full):
"""
Measure of asymmetry between oscillatory peaks and troughs
Parameters
----------
x : array-like 1d
voltage time series
Ts : array-like 1d
time points of oscillatory troughs
window_full : int
Number of samples after peak to analyze for decay and before peak to analyze for rise
Returns
-------
sym : array-like 1d
measure of symmetry between each rise and decay
"""
T = len(Ts)
sym = np.zeros(T)
for t in range(T):
# Find regions for the rise and the decay
rise = x[Ts[t]:Ts[t] + window_full + 1] - x[Ts[t]]
decay = x[Ts[t] - window_full:Ts[t] + 1] - x[Ts[t]]
# Ensure the minimum value is 0
rise[rise < 0] = 0
decay[decay < 0] = 0
# Make rises and decays go the same direction
rise = np.flipud(rise)
# Calculate absolute difference between each point in the rise and
# decay
diffenergy = np.sum(np.abs(rise - decay))
# Normalize this difference by the max voltage value at each point
rise_decay_maxes = np.max(np.vstack((rise, decay)), axis=0)
energy = np.sum(rise_decay_maxes)
# Compare the two measures
sym[t] = diffenergy / energy
return sym
def pt_sharp(x, Ps, Ts, window_half, method='diff'):
"""
Calculate the sharpness of extrema
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
window_half : int
Number of samples in each direction around extrema to use for sharpness estimation
Returns
-------
Psharps : array-like 1d
sharpness of peaks
Tsharps : array-like 1d
sharpness of troughs
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Calculate the sharpness of each peak
P = len(Ps)
Psharps = np.zeros(P)
for e in range(P):
if method == 'deriv':
Edata = x[Ps[e] - window_half: Ps[e] + window_half + 1]
Psharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Psharps[e] = np.mean(
(x[Ps[e]] - x[Ps[e] - window_half], x[Ps[e]] - x[Ps[e] + window_half]))
T = len(Ts)
Tsharps = np.zeros(T)
for e in range(T):
if method == 'deriv':
Edata = x[Ts[e] - window_half: Ts[e] + window_half + 1]
Tsharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Tsharps[e] = np.mean(
(x[Ts[e] - window_half] - x[Ts[e]], x[Ts[e] + window_half] - x[Ts[e]]))
return Psharps, Tsharps
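# Hedged worked example: sharpness of a pure 10 Hz sine sampled at 1 kHz, with
# peak/trough indices written out by hand so the example does not depend on findpt.
def _example_pt_sharp():
    Fs = 1000.
    t = np.arange(0, 2, 1 / Fs)
    x = np.sin(2 * np.pi * 10 * t)
    Ps = np.arange(25, 1900, 100)
    Ts = np.arange(75, 1950, 100)
    return pt_sharp(x, Ps, Ts, window_half=5, method='diff')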
def rd_steep(x, Ps, Ts):
"""
Calculate the max steepness of rises and decays
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
Returns
-------
risesteep : array-like 1d
max steepness in each period for rise
decaysteep : array-like 1d
max steepness in each period for decay
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Calculate rise and decay steepness
E = len(Ps) - 1
risesteep = np.zeros(E)
for t in range(E):
if Ts[0] < Ps[0]:
rise = x[Ts[t]:Ps[t] + 1]
else:
rise = x[Ts[t]:Ps[t + 1] + 1]
risesteep[t] = np.max(np.diff(rise))
decaysteep = np.zeros(E)
for p in range(E):
if Ts[0] < Ps[0]:
decay = x[Ps[p]:Ts[p + 1] + 1]
else:
decay = x[Ps[p]:Ts[p] + 1]
decaysteep[p] = -np.min(np.diff(decay))
return risesteep, decaysteep
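# Hedged worked example: maximum per-sample rise/decay steepness of a pure sine,
# using hand-written extrema indices. For a sine both values should be close to
# the peak slope per sample, 2*pi*10/Fs ~ 0.063.
def _example_rd_steep():
    Fs = 1000.
    t = np.arange(0, 2, 1 / Fs)
    x = np.sin(2 * np.pi * 10 * t)
    Ps = np.arange(25, 1900, 100)
    Ts = np.arange(75, 1950, 100)
    return rd_steep(x, Ps, Ts)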
def ptsr(Psharp, Tsharp, log=True, polarity=True):
if polarity:
sharpnessratio = Psharp / Tsharp
else:
sharpnessratio = np.max((Psharp / Tsharp, Tsharp / Psharp))
if log:
sharpnessratio = np.log10(sharpnessratio)
return sharpnessratio
def rdsr(Rsteep, Dsteep, log=True, polarity=True):
if polarity:
steepnessratio = Rsteep / Dsteep
else:
steepnessratio = np.max((Rsteep / Dsteep, Dsteep / Rsteep))
if log:
steepnessratio = np.log10(steepnessratio)
return steepnessratio
def average_waveform_trigger(x, f_range, Fs, avgwave_halflen, trigger='trough'):
"""
Calculate the average waveform of a signal by triggering on the peaks or troughs
Parameters
----------
x : array-like 1d
voltage time series
f_range : (low, high), Hz
frequency range for narrowband signal of interest
Fs : float
The sampling rate
avgwave_halflen : float
length of time for the averaged signal to be recorded in the positive and negative direction
trigger : str
'trough' to trigger the averaging on each trough
'peak' to trigger the averaging on each peak
Returns
-------
avg_wave : array-like 1d
the average waveform in 'x' in the frequency 'f_range' triggered on 'trigger'
"""
# Set up the parameters for averaging
dt = 1 / float(Fs)
t_avg_wave = np.arange(-avgwave_halflen, avgwave_halflen + dt, dt)
N_samples_halflen = int(avgwave_halflen * Fs)
# Find the trigger points for averaging
Ps, Ts = findpt(x, f_range, Fs, boundary=N_samples_halflen + 1)
if trigger == 'trough':
trig_samps = Ts
elif trigger == 'peak':
trig_samps = Ps
else:
raise ValueError('Trigger not implemented')
# Do the averaging at each trigger
avg_wave = np.zeros(int(N_samples_halflen * 2 + 1))
N_triggers = len(trig_samps)
for i in range(N_triggers):
avg_wave += x[trig_samps[i] -
N_samples_halflen:trig_samps[i] + N_samples_halflen + 1]
avg_wave = avg_wave / N_triggers
return t_avg_wave, avg_wave
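# Hedged usage sketch: average waveform of a synthetic 8 Hz oscillation,
# triggered on troughs. Assumes the findpt helper from misshapen.nonshape
# resolves extrema as used above; the frequency range and half-length are made up.
def _example_average_waveform_trigger():
    Fs = 1000.
    t = np.arange(0, 5, 1 / Fs)
    x = np.sin(2 * np.pi * 8 * t) + .05 * np.random.randn(len(t))
    return average_waveform_trigger(x, (6, 10), Fs, avgwave_halflen=.2,
                                    trigger='trough')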
def gips_swm(x, Fs, L, G,
max_iterations=100, T=1, window_starts_custom=None):
"""
Sliding window matching methods to find recurring patterns in a time series
using the method by Bart Gips in J Neuro Methods 2017.
See matlab code at: https://github.com/bartgips/SWM
    Note: high-pass the signal when looking for high-frequency activity so that the algorithm does not converge on a low-frequency motif
L and G should be chosen to be about the size of the motif of interest, and the N derived should be about the number of occurrences
Parameters
----------
x : array-like 1d
voltage time series
Fs : float
The sampling rate (samples per second)
L : float
Window length (seconds)
G : float
Minimum window spacing (seconds)
T : float
temperature parameter. Controls acceptance probability
max_iterations : int
Maximum number of iterations for the pattern finder
window_starts_custom : np.ndarray (1d)
Pre-set locations of initial windows (instead of evenly spaced by 2G)
Returns
-------
    avg_wave : np.ndarray (1d)
        the average waveform across the final set of matched windows
window_starts : np.ndarray (1d)
indices at which each window begins for the final set of windows
J : np.ndarray (1d)
History of costs
"""
# Initialize window positions, separated by 2*G
L_samp = int(L * Fs)
G_samp = int(G * Fs)
if window_starts_custom is None:
window_starts = np.arange(0, len(x) - L_samp, 2 * G_samp)
else:
window_starts = window_starts_custom
# Calculate the total number of windows
N_windows = len(window_starts)
# Calculate initial cost
J = np.zeros(max_iterations)
J[0] = _gips_compute_J(x, window_starts, L_samp)
# Randomly sample windows with replacement
random_window_idx = np.random.choice(range(N_windows), size=max_iterations)
# Optimize X
iter_num = 1
while iter_num < max_iterations:
print(iter_num)
# Pick a random window position
window_idx_replace = random_window_idx[iter_num]
# Find a new allowed position for the window
        # Copy the current window positions and move only the randomly chosen window
window_starts_temp = np.copy(window_starts)
window_starts_temp[window_idx_replace] = _gips_find_new_windowidx(
window_starts, G_samp, L_samp, len(x) - L_samp)
# Calculate the cost
J_temp = _gips_compute_J(x, window_starts_temp, L_samp)
# Calculate the change in cost function
deltaJ = J_temp - J[iter_num - 1]
# Calculate the acceptance probability
p_accept = np.exp(-deltaJ / float(T))
# Accept update to J with a certain probability
if np.random.rand() < p_accept:
# Update J
J[iter_num] = J_temp
# Update X
window_starts = window_starts_temp
else:
# Update J
J[iter_num] = J[iter_num - 1]
# Update iteration number
iter_num += 1
# Calculate average wave
avg_wave = np.zeros(L_samp)
for w in range(N_windows):
avg_wave = avg_wave + x[window_starts[w]:window_starts[w] + L_samp]
avg_wave = avg_wave / float(N_windows)
return avg_wave, window_starts, J
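# Hedged usage sketch: look for a ~150 ms motif in a noisy 6 Hz signal. Fs, L, G
# and the iteration count are illustrative; L and G should be tuned so that the
# window length roughly matches the motif of interest.
def _example_gips_swm():
    np.random.seed(0)
    Fs = 1000.
    t = np.arange(0, 10, 1 / Fs)
    x = np.sin(2 * np.pi * 6 * t) + .1 * np.random.randn(len(t))
    return gips_swm(x, Fs, L=.15, G=.1, max_iterations=50)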
def _gips_compute_J(x, window_starts, L_samp):
"""Compute the cost, which is the average distance between all windows"""
# Get all windows and zscore them
N_windows = len(window_starts)
windows = np.zeros((N_windows, L_samp))
for w in range(N_windows):
temp = x[window_starts[w]:window_starts[w] + L_samp]
windows[w] = (temp - np.mean(temp)) / np.std(temp)
# Calculate distances for all pairs of windows
d = []
for i in range(N_windows):
for j in range(i + 1, N_windows):
window_diff = windows[i] - windows[j]
d_temp = 1 / float(L_samp) * np.sum(window_diff**2)
d.append(d_temp)
# Calculate cost
J = 1 / float(2 * (N_windows - 1)) * np.sum(d)
return J
def _gips_find_new_windowidx(window_starts, G_samp, L_samp, N_samp):
"""Find a new sample for the starting window"""
found = False
while found is False:
# Generate a random sample
new_samp = np.random.randint(N_samp)
# Check how close the sample is to other window starts
dists = np.abs(window_starts - new_samp)
if np.min(dists) > G_samp:
return new_samp
def rd_diff(Ps, Ts):
"""
Calculate the normalized difference between rise and decay times,
    which Gips, 2017 refers to as the "skewness index"
SI = (T_up-T_down)/(T_up+T_down)
Parameters
----------
Ps : numpy arrays 1d
time points of oscillatory peaks
Ts : numpy arrays 1d
        time points of oscillatory troughs
Returns
-------
    rdr : array-like 1d
        normalized rise-decay differences (skewness index) for each oscillation
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Assure Ps and Ts are numpy arrays
if type(Ps) == list or type(Ts) == list:
print('Converted Ps and Ts to numpy arrays')
Ps = np.array(Ps)
Ts = np.array(Ts)
# Calculate rise and decay times
if Ts[0] < Ps[0]:
riset = Ps[:-1] - Ts[:-1]
decayt = Ts[1:] - Ps[:-1]
else:
riset = Ps[1:] - Ts[:-1]
decayt = Ts[:-1] - Ps[:-1]
    # Calculate normalized difference between each rise and decay time
    rdr = (riset - decayt) / (riset + decayt).astype(float)
return riset, decayt, rdr
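# Hedged worked example for the skewness index: with rises of 40 and 50 samples
# and decays of 50 and 70 samples, SI = (rise - decay) / (rise + decay) gives
# roughly [-0.11, -0.17], i.e. decays slightly longer than rises.
def _example_rd_diff():
    Ps = np.array([120, 220, 330])
    Ts = np.array([80, 170, 290])
    riset, decayt, si = rd_diff(Ps, Ts)
    return si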
def compute_shape_by_cycle(x, f_range, Fs,
findpt_kwargs=None,
define_true_oscillating_periods_kwargs=None):
"""
Calculate several features of an oscillation's waveform
shape for each cycle in the recording.
Parameters
----------
x : array-like 1d
voltage time series
f_range : (low, high), Hz
frequency range for narrowband signal of interest
Fs : float
The sampling rate (default = 1000Hz)
findpt_kwargs : dict or None
Keyword arguments for function to find peaks and
troughs (nonshape.findpt)
define_true_oscillating_periods_kwargs : dict or None
Keyword arguments for function to find label cycles
as in or not in an oscillation
Returns
-------
df_P : pandas DataFrame
features of the waveform shape of each peak:
sample : sample of 'x' at which the peak occurs
sample_zeroxD : sample of the decaying zerocrossing
sample_zeroxR : sample of the rising zerocrossing
sample_lastE : sample of the last trough
sample_nextE : sample of the next trough
period : period of the cycle
half_decay_time : time between peak and decay zerocross
half_rise_time : time rise zerocross and peak
whole_decay_time : time between peak and next trough
whole_rise_time : time rise zerocross and previous trough
peak_time : time between rise and decay zerocrosses
half_decay_volt : voltage change between peak and decay zerocross
half_rise_volt : voltage change between peak and rise zerocross
whole_decay_volt : voltage change between peak and next trough
whole_rise_volt : voltage change between peak and previous trough
peak_volt : voltage at the peak
half_decay_sharp : steepness between peak and decay zerocross
half_rise_sharp : steepness between peak and rise zerocross
whole_decay_sharp : steepness between peak and next trough
whole_rise_sharp : steepness between peak and previous trough
peak_sharp : sharpness of peak
rdsym_time : asymmetry between the whole rise and decay times
rdsym_volt : asymmetry between the whole rise and decay voltages
rdsym_sharp : asymmetry between the whole rise and decay steepnesses
df_T : pandas DataFrame
features of the waveform shape of each trough.
Similar features as df_P
.. note:: First extrema is peak
The first extrema analyzed will be a peak,
and the final one a trough. In order to switch
the preference, simply invert the polarity of x.
"""
# Set defaults if user input is None
if findpt_kwargs is None:
findpt_kwargs = {}
else:
# Raise warning if switch from peak start to trough start
if 'forcestart' in findpt_kwargs.keys():
if findpt_kwargs['forcestart'] == 'trough':
print('WARNING: This function has been designed to assume that\
the first extrema identified will be a peak. This has\
been overwritten. Proceed with caution.')
if define_true_oscillating_periods_kwargs is None:
define_true_oscillating_periods_kwargs = {}
# Find peak and trough locations in the signal
Ps, Ts = findpt(x, f_range, Fs, boundary=None, **findpt_kwargs)
# Find zero-crossings
zeroxR, zeroxD = findzerox(x, Ps, Ts)
# Compute stats on peak-to-peak cycles (trough-centered)
N_p2p = len(Ps) - 1
trough_stats = {}
# Define important samples
trough_stats['sample'] = Ts[:-1]
trough_stats['sample_zeroxD'] = zeroxD[:-1]
trough_stats['sample_zeroxR'] = zeroxR
trough_stats['sample_lastE'] = Ps[:-1]
trough_stats['sample_nextE'] = Ps[1:]
# Compute other statistics of the cycle centered around the trough
trough_stats['period'] = trough_stats['sample_nextE'] - \
trough_stats['sample_lastE']
trough_stats['trough_volt'] = x[Ts[:-1]]
trough_stats['half_decay_time'] = (Ts[:-1] - zeroxD[:-1])
trough_stats['half_rise_time'] = (zeroxR - Ts[:-1])
trough_stats['trough_time'] = trough_stats['half_decay_time'] + \
trough_stats['half_rise_time']
trough_stats['half_decay_volt'] = x[zeroxD[:-1]] - x[Ts[:-1]]
trough_stats['half_rise_volt'] = x[zeroxR] - x[Ts[:-1]]
trough_stats['half_decay_sharp'] = trough_stats['half_decay_volt'] / \
trough_stats['half_decay_time']
trough_stats['half_rise_sharp'] = trough_stats['half_rise_volt'] / \
trough_stats['half_rise_time']
trough_stats['trough_sharp'] = (
trough_stats['half_decay_sharp'] + trough_stats['half_rise_sharp']) / 2.
trough_stats['whole_decay_time'] = (Ts[:-1] - Ps[:-1])
trough_stats['whole_rise_time'] = (Ps[1:] - Ts[:-1])
trough_stats['rdsym_time'] = trough_stats['whole_decay_time'] - \
trough_stats['whole_rise_time']
trough_stats['whole_decay_volt'] = x[Ps[:-1]] - x[Ts[:-1]]
trough_stats['whole_rise_volt'] = x[Ps[1:]] - x[Ts[:-1]]
trough_stats['rdsym_volt'] = trough_stats['whole_decay_volt'] - \
trough_stats['whole_rise_volt']
trough_stats['whole_decay_sharp'] = trough_stats['whole_decay_volt'] / \
trough_stats['whole_decay_time']
trough_stats['whole_rise_sharp'] = trough_stats['whole_rise_volt'] / \
trough_stats['whole_rise_time']
trough_stats['rdsym_sharp'] = trough_stats['whole_decay_sharp'] - \
trough_stats['whole_rise_sharp']
# Compute stats on trough-to-trough cycles (peak-centered)
N_t2t = len(Ts) - 1
peak_stats = {}
peak_stats['sample'] = Ps[1:]
peak_stats['sample_zeroxD'] = zeroxD[1:]
peak_stats['sample_zeroxR'] = zeroxR
peak_stats['sample_lastE'] = Ts[:-1]
peak_stats['sample_nextE'] = Ts[1:]
peak_stats['period'] = peak_stats['sample_nextE'] - \
peak_stats['sample_lastE']
peak_stats['peak_volt'] = x[Ps[1:]]
peak_stats['half_decay_time'] = (zeroxD[1:] - Ps[1:])
peak_stats['half_rise_time'] = (Ps[1:] - zeroxR)
peak_stats['peak_time'] = peak_stats['half_decay_time'] + \
peak_stats['half_rise_time']
peak_stats['half_decay_volt'] = x[Ps[1:]] - x[zeroxD[1:]]
peak_stats['half_rise_volt'] = x[Ps[1:]] - x[zeroxR]
peak_stats['half_decay_sharp'] = peak_stats['half_decay_volt'] / \
peak_stats['half_decay_time']
peak_stats['half_rise_sharp'] = peak_stats['half_rise_volt'] / \
peak_stats['half_rise_time']
peak_stats['peak_sharp'] = (peak_stats['half_decay_sharp'] +
peak_stats['half_rise_sharp']) / 2.
peak_stats['whole_decay_time'] = (Ts[1:] - Ps[1:])
peak_stats['whole_rise_time'] = (Ps[1:] - Ts[:-1])
peak_stats['rdsym_time'] = peak_stats['whole_decay_time'] - \
peak_stats['whole_rise_time']
peak_stats['whole_decay_volt'] = x[Ps[1:]] - x[Ts[1:]]
peak_stats['whole_rise_volt'] = x[Ps[1:]] - x[Ts[:-1]]
peak_stats['rdsym_volt'] = peak_stats['whole_decay_volt'] - \
peak_stats['whole_rise_volt']
peak_stats['whole_decay_sharp'] = peak_stats['whole_decay_volt'] / \
peak_stats['whole_decay_time']
peak_stats['whole_rise_sharp'] = peak_stats['whole_rise_volt'] / \
peak_stats['whole_rise_time']
peak_stats['rdsym_sharp'] = peak_stats['whole_decay_sharp'] - \
peak_stats['whole_rise_sharp']
# Compute features of peak-trough symmetry
# Peak relative to previous trough
peak_stats['ptsym_volt'] = peak_stats['peak_volt'] + trough_stats['trough_volt']
peak_stats['ptsym_time'] = peak_stats['peak_time'] - \
trough_stats['trough_time']
peak_stats['ptsym_sharp'] = peak_stats['peak_sharp'] - trough_stats['trough_sharp']
trough_stats['ptsym_volt'] = peak_stats['ptsym_volt']
trough_stats['ptsym_time'] = peak_stats['ptsym_time']
trough_stats['ptsym_sharp'] = peak_stats['ptsym_sharp']
# Compute amplitude features
amp = ampT(x, f_range, Fs)
trough_stats['amp_mean'] = [
np.mean(amp[zeroxD[i]:zeroxR[i]]) for i in range(len(zeroxR))]
peak_stats['amp_mean'] = [
np.mean(amp[zeroxR[i]:zeroxD[i + 1]]) for i in range(len(zeroxR))]
# Convert stats into a DataFrame
df_P = pd.DataFrame.from_dict(peak_stats)
df_T = pd.DataFrame.from_dict(trough_stats)
# Define whether or not each cycle is part of an oscillation
df_P, df_T = define_true_oscillating_periods(df_P, df_T, **define_true_oscillating_periods_kwargs)
return df_P, df_T
def define_true_oscillating_periods(dfP, dfT, ampdiff_th=.7, timediff_th=.6):
"""
Append two columns to cycle-by-cycle dataframes to label
which periods are in true oscillatory modes
Parameters
----------
df_P : pandas DataFrame
waveform features for each peak-centered cycle of a recording
df_T : pandas DataFrame
waveform features for each trough-centered cycle of a recording
    ampdiff_th : float between 0 and 1
        tolerance for the allowed difference between amplitudes of adjacent flanks
    timediff_th : float between 0 and 1
        tolerance for the allowed difference between durations of adjacent periods
Returns
-------
df_P : pandas DataFrame
waveform features for each peak-centered cycle of a recording.
Now including oscillatory mode features:
oscillating_amp: are the amplitudes of the two flanks (rise and
decay) within `ampdiff_th` fraction of one another?
oscillating_amp_time: are the amplitudes of the two flanks within
`ampdiff_th` fraction of one another? AND are the durations
of the two flanks within `timediff_th` fraction of one another?
df_T : pandas DataFrame
waveform features for each trough-centered cycle of a recording.
Now including oscillatory mode features
"""
# Make a binary array to indicate if a peak is good to analyze
P = len(dfP)
cycle_good = np.zeros(P, dtype=bool)
# Loop through each peak (skip first and last) and determine if meets
# criteria for good
rises = dfP['whole_rise_volt']
decays = dfP['whole_decay_volt']
for p in range(1, P - 1):
frac1 = np.min([rises[p], decays[p]]) / np.max([rises[p], decays[p]])
frac2 = np.min([rises[p], decays[p - 1]]) / \
np.max([rises[p], decays[p - 1]])
frac3 = np.min([rises[p + 1], decays[p]]) / \
np.max([rises[p + 1], decays[p]])
if np.min([frac1, frac2, frac3]) >= ampdiff_th:
cycle_good[p] = True
# Add oscillating feature to the dataframe
dfP['oscillating_amp'] = cycle_good
# Repeat process for troughs
T = len(dfT)
cycle_good = np.zeros(T, dtype=bool)
rises = dfT['whole_rise_volt']
decays = dfT['whole_decay_volt']
for p in range(1, P - 1):
frac1 = np.min([rises[p], decays[p]]) / np.max([rises[p], decays[p]])
frac2 = np.min([rises[p - 1], decays[p]]) / \
np.max([rises[p - 1], decays[p]])
frac3 = np.min([rises[p], decays[p + 1]]) / \
np.max([rises[p], decays[p + 1]])
if np.min([frac1, frac2, frac3]) >= ampdiff_th:
cycle_good[p] = True
dfT['oscillating_amp'] = cycle_good
# Make a binary array to indicate if a peak is good to analyze
cycle_good_P = np.copy(dfP['oscillating_amp'])
cycle_good_T = np.copy(dfT['oscillating_amp'])
# Loop through each peak (skip first and last) and determine if meets
# criteria for good
P_times = dfP['sample'].values
T_times = dfT['sample'].values
for p in range(1, P - 1):
if cycle_good_P[p]:
p1 = P_times[p] - P_times[p - 1]
p2 = P_times[p + 1] - P_times[p]
frac = np.min([p1, p2]) / np.max([p1, p2])
if frac < timediff_th:
cycle_good_P[p] = False
cycle_good_T[p] = False
cycle_good_T[p + 1] = False
if cycle_good_T[p]:
p1 = T_times[p] - T_times[p - 1]
p2 = T_times[p + 1] - T_times[p]
frac = np.min([p1, p2]) / np.max([p1, p2])
if frac < timediff_th:
cycle_good_T[p] = False
cycle_good_P[p] = False
cycle_good_P[p - 1] = False
dfP['oscillating_amp_time'] = cycle_good_P
dfT['oscillating_amp_time'] = cycle_good_T
return dfP, dfT
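# --- Hedged end-to-end sketch (illustrative) ---
# Runs the cycle-by-cycle feature extraction on a synthetic 10 Hz oscillation.
# The frequency range, sampling rate and noise level are made up, and the example
# assumes the misshapen.nonshape helpers (findpt, findzerox, ampT) resolve
# extrema and amplitude as used above.
def _example_compute_shape_by_cycle():
    np.random.seed(0)
    Fs = 1000.
    t = np.arange(0, 10, 1 / Fs)
    x = np.sin(2 * np.pi * 10 * t) + .05 * np.random.randn(len(t))
    df_P, df_T = compute_shape_by_cycle(x, (8, 12), Fs)
    return df_P[['sample', 'period', 'rdsym_time', 'oscillating_amp_time']].head()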
|
{"hexsha": "6ca3a110e510a0faaca648ad6254d3c19a732baa", "size": 32116, "ext": "py", "lang": "Python", "max_stars_repo_path": "shape.py", "max_stars_repo_name": "voytekresearch/misshapen", "max_stars_repo_head_hexsha": "8ee2afa2da3449789e52bcce63ecd852c191e6fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-04-20T18:27:09.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-11T14:57:39.000Z", "max_issues_repo_path": "shape.py", "max_issues_repo_name": "voytekresearch/misshapen", "max_issues_repo_head_hexsha": "8ee2afa2da3449789e52bcce63ecd852c191e6fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shape.py", "max_forks_repo_name": "voytekresearch/misshapen", "max_forks_repo_head_hexsha": "8ee2afa2da3449789e52bcce63ecd852c191e6fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-04-22T03:26:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-28T00:43:21.000Z", "avg_line_length": 34.6450916936, "max_line_length": 135, "alphanum_fraction": 0.6325507535, "include": true, "reason": "import numpy", "num_tokens": 8584}
|
import argparse
import os, sys
import numpy as np
import tensorflow as tf
from emd import tf_auctionmatch
from cd import tf_nndistance
import time
def f_score(label, predict, dist_label, dist_pred, threshold):
    """Compute the F-score at each threshold from per-point nearest-neighbour distances."""
num_label = label.shape[0]
num_predict = predict.shape[0]
f_scores = []
for i in range(len(threshold)):
num = len(np.where(dist_label <= threshold[i])[0])
recall = 100.0 * num / num_label
num = len(np.where(dist_pred <= threshold[i])[0])
precision = 100.0 * num / num_predict
f_scores.append((2*precision*recall)/(precision+recall+1e-8))
return np.array(f_scores)
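# Hedged worked example (toy arrays; distances in the same units as the threshold):
# 3 of 4 GT points and 4 of 5 predicted points fall within 0.01, so recall = 75,
# precision = 80 and the F-score is about 77.4 at that threshold.
def _example_f_score():
    toy_label = np.zeros((4, 3))
    toy_pred = np.zeros((5, 3))
    dist_label = np.array([0.005, 0.02, 0.009, 0.008])
    dist_pred = np.array([0.004, 0.03, 0.007, 0.001, 0.009])
    return f_score(toy_label, toy_pred, dist_label, dist_pred, threshold=[0.01])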
# data_path
data_dir = '../../data/ShapeNet/'
test_datapath = '../../data/test_list.txt'
# Load data
namelist = []
with open(test_datapath, 'r') as f:
while(True):
line = f.readline().strip()
if not line:
break
namelist.append(line)
# eval_path
eval_path = '../../result/result_shapenet_ply_out_smooth_pt1024/'
eval_path2 = eval_path
def get_chamfer_metrics(pcl_gt, pred):
'''
Calculate chamfer, forward, backward distance between ground truth and predicted
point clouds. They may or may not be scaled.
Args:
pcl_gt: tf placeholder of shape (B,N,3) corresponding to GT point cloud
pred: tensor of shape (B,N,3) corresponding to predicted point cloud
Returns:
Fwd, Bwd, Chamfer: (B,)
'''
#(B, NUM_POINTS) ==> ((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2) for nn pair of points (x1,y1,z1) <--> (x2, y2, z2)
dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pcl_gt, pred)
dists_forward = tf.reduce_mean(tf.sqrt(dists_forward), axis=1) # (B, )
dists_backward = tf.reduce_mean(tf.sqrt(dists_backward), axis=1) # (B, )
chamfer_distance = dists_backward + dists_forward
return dists_forward, dists_backward, chamfer_distance
def get_emd_metrics(pcl_gt, pred, batch_size, num_points):
'''
Calculate emd between ground truth and predicted point clouds.
They may or may not be scaled. GT and pred need to be of the same dimension.
Args:
pcl_gt: tf placeholder of shape (B,N,3) corresponding to GT point cloud
pred: tensor of shape (B,N,3) corresponding to predicted point cloud
Returns:
emd: (B,)
'''
X,_ = tf.meshgrid(tf.range(batch_size), tf.range(num_points), indexing='ij')
ind, _ = tf_auctionmatch.auction_match(pred, pcl_gt) # Ind corresponds to points in pcl_gt
ind = tf.stack((X, ind), -1)
emd = tf.reduce_mean(tf.sqrt(tf.reduce_sum((tf.gather_nd(pcl_gt, ind) - pred)**2, axis=-1)), axis=1) # (B, )
return emd
BATCH_SIZE = 1
NUM_EVAL_POINTS = 1024
# Initialize session
# xyz1:dataset_points * 3, xyz2:query_points * 3
xyz1=tf.placeholder(tf.float32,shape=(None, 3))
xyz2=tf.placeholder(tf.float32,shape=(None, 3))
xyz3 = tf.expand_dims(xyz1, 0)
xyz4 = tf.expand_dims(xyz2, 0)
# chamfer distance
dists_forward, dists_backward, chamfer_distance = get_chamfer_metrics(xyz3, xyz4)
emd_dist = get_emd_metrics(xyz3, xyz4, BATCH_SIZE, NUM_EVAL_POINTS)
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
###
class_name = {'02828884':'bench','03001627':'chair','03636649':'lamp','03691459':'speaker','04090263':'firearm','04379243':'table','04530566':'watercraft','02691156':'plane','02933112':'cabinet','02958343':'car','03211117':'monitor','04256520':'couch','04401088':'cellphone'}
model_number = {i:0 for i in class_name}
sum_f = {i:0 for i in class_name}
sum_cd = {i:0 for i in class_name}
sum_emd = {i:0 for i in class_name}
iters = 0
f_sum = 0.0
cd_sum = 0.0
emd_sum = 0.0
for file_list in namelist:
iters += 1
predict = np.loadtxt(eval_path+file_list[19:-4]+'_pred.npy')
label = np.loadtxt(eval_path2+file_list[19:-4]+'_gt.npy')
file_list_sub = file_list.split("_")
class_id = file_list_sub[0][19:]
cd, emd = sess.run([chamfer_distance, emd_dist], feed_dict={xyz1:label,xyz2:predict})
model_number[class_id] += 1.0
sum_cd[class_id] += cd # cd is the mean of all distance
sum_emd[class_id] += emd #emd[0] # emd is the sum of all distance
cd_sum += cd
emd_sum += emd #emd[0]
print(iters, cd_sum/iters*10, emd_sum/iters*10)
for item in model_number:
number = model_number[item] + 1e-8
cd = (sum_cd[item] / number) *100#* 1000 #cd is the mean of all distance, cd is L2
emd = (sum_emd[item] / number) *100 #* 0.01 #emd is the sum of all distance, emd is L1
print(class_name[item], int(number), cd, emd)#, f)
print('Testing Finished!')
|
{"hexsha": "723b84239cff71301a57f06567c8d45c5b405827", "size": 4651, "ext": "py", "lang": "Python", "max_stars_repo_path": "pix3d/eval/eval_shapenet_object_centered.py", "max_stars_repo_name": "zouchuhang/Silhouette-Guided-3D", "max_stars_repo_head_hexsha": "884504982f16567f6c9152baf7a676dbf50711e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2019-07-30T00:27:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T06:40:52.000Z", "max_issues_repo_path": "pix3d/eval/eval_shapenet_object_centered.py", "max_issues_repo_name": "zouchuhang/Silhouette-Guided-3D", "max_issues_repo_head_hexsha": "884504982f16567f6c9152baf7a676dbf50711e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-09-02T07:18:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-12T05:50:26.000Z", "max_forks_repo_path": "pix3d/eval/eval_shapenet_object_centered.py", "max_forks_repo_name": "zouchuhang/Silhouette-Guided-3D", "max_forks_repo_head_hexsha": "884504982f16567f6c9152baf7a676dbf50711e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-07-31T11:03:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-28T11:44:53.000Z", "avg_line_length": 36.9126984127, "max_line_length": 275, "alphanum_fraction": 0.6839389379, "include": true, "reason": "import numpy", "num_tokens": 1388}
|
/*
* Legal Notice
*
* This document and associated source code (the "Work") is a preliminary
* version of a benchmark specification being developed by the TPC. The
* Work is being made available to the public for review and comment only.
* The TPC reserves all right, title, and interest to the Work as provided
* under U.S. and international laws, including without limitation all patent
* and trademark rights therein.
*
* No Warranty
*
* 1.1 TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THE INFORMATION
* CONTAINED HEREIN IS PROVIDED "AS IS" AND WITH ALL FAULTS, AND THE
* AUTHORS AND DEVELOPERS OF THE WORK HEREBY DISCLAIM ALL OTHER
* WARRANTIES AND CONDITIONS, EITHER EXPRESS, IMPLIED OR STATUTORY,
* INCLUDING, BUT NOT LIMITED TO, ANY (IF ANY) IMPLIED WARRANTIES,
* DUTIES OR CONDITIONS OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR
* PURPOSE, OF ACCURACY OR COMPLETENESS OF RESPONSES, OF RESULTS, OF
* WORKMANLIKE EFFORT, OF LACK OF VIRUSES, AND OF LACK OF NEGLIGENCE.
* ALSO, THERE IS NO WARRANTY OR CONDITION OF TITLE, QUIET ENJOYMENT,
* QUIET POSSESSION, CORRESPONDENCE TO DESCRIPTION OR NON-INFRINGEMENT
* WITH REGARD TO THE WORK.
* 1.2 IN NO EVENT WILL ANY AUTHOR OR DEVELOPER OF THE WORK BE LIABLE TO
* ANY OTHER PARTY FOR ANY DAMAGES, INCLUDING BUT NOT LIMITED TO THE
* COST OF PROCURING SUBSTITUTE GOODS OR SERVICES, LOST PROFITS, LOSS
* OF USE, LOSS OF DATA, OR ANY INCIDENTAL, CONSEQUENTIAL, DIRECT,
* INDIRECT, OR SPECIAL DAMAGES WHETHER UNDER CONTRACT, TORT, WARRANTY,
* OR OTHERWISE, ARISING IN ANY WAY OUT OF THIS OR ANY OTHER AGREEMENT
* RELATING TO THE WORK, WHETHER OR NOT SUCH AUTHOR OR DEVELOPER HAD
* ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES.
*
* Contributors
* - Christopher Chan-Nui
*/
#include <iostream>
#include <boost/test/auto_unit_test.hpp>
#include "../inc/InputFlatFilesDeclarations.h"
#include "../inc/SecurityFile.h"
using boost::unit_test::test_suite;
struct SecurityFixture {
TPCE::CSecurityFile security_file;
TIdent num_securities;
TIdent num_companies;
TIdent configured_customers;
TIdent active_customers;
SecurityFixture(int configured=10000, int active=10000)
: security_file("../flat_in/Security.txt", configured, active)
, configured_customers(configured)
, active_customers(active)
{
struct TPCE::TSecurityLimits security_limits;
num_securities = security_limits.TotalElements();
struct TPCE::TCompanyLimits company_limits;
num_companies = company_limits.TotalElements();
}
};
BOOST_FIXTURE_TEST_SUITE( testsuite_securityfile, SecurityFixture )
BOOST_AUTO_TEST_CASE( test_securitylimits )
{
BOOST_CHECK_EQUAL( num_securities, 6850 );
BOOST_CHECK_EQUAL( num_companies, 5000 );
}
static void check_symbol_to_id_mapping(const TPCE::CSecurityFile& security_file, const char *symbolname, TIdent index) {
char tmpsymbol[256];
security_file.CreateSymbol( index, tmpsymbol, sizeof(tmpsymbol) );
BOOST_CHECK_EQUAL(symbolname, tmpsymbol);
BOOST_CHECK_EQUAL(security_file.GetIndex(tmpsymbol), index);
BOOST_CHECK_EQUAL(security_file.GetId(tmpsymbol), index+1);
}
BOOST_FIXTURE_TEST_CASE( test_securityfile_createsymbol, SecurityFixture )
{
check_symbol_to_id_mapping(security_file, "ZICA", 0);
check_symbol_to_id_mapping(security_file, "ZRAN", num_securities-1);
check_symbol_to_id_mapping(security_file, "ZICA-a", num_securities);
check_symbol_to_id_mapping(security_file, "ZICA-b", num_securities*2);
check_symbol_to_id_mapping(security_file, "ZICA-c", num_securities*3);
check_symbol_to_id_mapping(security_file, "ZICA-d", num_securities*4);
check_symbol_to_id_mapping(security_file, "ZICA-e", num_securities*5);
check_symbol_to_id_mapping(security_file, "ZICA-f", num_securities*6);
check_symbol_to_id_mapping(security_file, "ZICA-g", num_securities*7);
check_symbol_to_id_mapping(security_file, "ZICA-h", num_securities*8);
check_symbol_to_id_mapping(security_file, "ZICA-i", num_securities*9);
check_symbol_to_id_mapping(security_file, "ZICA-j", num_securities*10);
check_symbol_to_id_mapping(security_file, "ZICA-k", num_securities*11);
check_symbol_to_id_mapping(security_file, "ZICA-l", num_securities*12);
check_symbol_to_id_mapping(security_file, "ZICA-m", num_securities*13);
check_symbol_to_id_mapping(security_file, "ZICA-n", num_securities*14);
check_symbol_to_id_mapping(security_file, "ZICA-o", num_securities*15);
check_symbol_to_id_mapping(security_file, "ZICA-p", num_securities*16);
check_symbol_to_id_mapping(security_file, "ZICA-q", num_securities*17);
check_symbol_to_id_mapping(security_file, "ZICA-r", num_securities*18);
check_symbol_to_id_mapping(security_file, "ZICA-s", num_securities*19);
check_symbol_to_id_mapping(security_file, "ZICA-t", num_securities*20);
check_symbol_to_id_mapping(security_file, "ZICA-u", num_securities*21);
check_symbol_to_id_mapping(security_file, "ZICA-v", num_securities*22);
check_symbol_to_id_mapping(security_file, "ZICA-w", num_securities*23);
check_symbol_to_id_mapping(security_file, "ZICA-x", num_securities*24);
check_symbol_to_id_mapping(security_file, "ZICA-y", num_securities*25);
check_symbol_to_id_mapping(security_file, "ZICA-z", num_securities*26);
    // The extra +1s come because non-existent characters take up 1 base-26 worth of space
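    // Worked example of the multipliers asserted below (illustrative, inferred from the
    // checks themselves rather than from any generator documentation): suffixes "-a".."-z"
    // use multipliers 1..26, so "-aa" = 26*1+1 = 27, "-ba" = 26*2+1 = 53, and
    // "-aaa" = 26*(26+1)+1 = 703 -- every missing trailing character still occupies one
    // base-26 slot, which is where the +1 terms come from.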
check_symbol_to_id_mapping(security_file, "ZICA-aa", num_securities*(26+1));
check_symbol_to_id_mapping(security_file, "ZICA-az", num_securities*(26+26));
check_symbol_to_id_mapping(security_file, "ZICA-ba", num_securities*(26*2+1));
check_symbol_to_id_mapping(security_file, "ZICA-ca", num_securities*(26*3+1));
check_symbol_to_id_mapping(security_file, "ZICA-za", num_securities*(26*26+1));
check_symbol_to_id_mapping(security_file, "ZICA-aaa", num_securities*(26*(26+1)+1));
check_symbol_to_id_mapping(security_file, "ZICA-aaaa", num_securities*(26*(26*(26+1)+1)+1));
check_symbol_to_id_mapping(security_file, "ZICA-aaaaaaa", num_securities*(26*(26*(26*(26*(26*(26+1)+1)+1)+1)+1)+1));
// This will fail for suffixes larger than 7 characters in length
}
static int num_securities_for_customer_count(int count) {
return count * TPCE::iOneLoadUnitSecurityCount / TPCE::iDefaultLoadUnitSize;
}
BOOST_AUTO_TEST_CASE( test_securityfile_securitycount )
{
BOOST_CHECK_EQUAL(security_file.GetSize(), num_securities_for_customer_count(configured_customers));
BOOST_CHECK_EQUAL(security_file.GetConfiguredSecurityCount(), num_securities_for_customer_count(configured_customers));
BOOST_CHECK_EQUAL(security_file.GetActiveSecurityCount(), num_securities_for_customer_count(active_customers));
security_file.SetConfiguredSecurityCount(20000);
BOOST_CHECK_EQUAL(security_file.GetSize(), num_securities_for_customer_count(20000));
BOOST_CHECK_EQUAL(security_file.GetConfiguredSecurityCount(), num_securities_for_customer_count(20000));
BOOST_CHECK_EQUAL(security_file.GetActiveSecurityCount(), num_securities_for_customer_count(active_customers));
security_file.SetConfiguredSecurityCount(50000);
BOOST_CHECK_EQUAL(security_file.GetSize(), num_securities_for_customer_count(50000));
BOOST_CHECK_EQUAL(security_file.GetConfiguredSecurityCount(), num_securities_for_customer_count(50000));
BOOST_CHECK_EQUAL(security_file.GetActiveSecurityCount(), num_securities_for_customer_count(active_customers));
security_file.SetActiveSecurityCount(70000);
BOOST_CHECK_EQUAL(security_file.GetSize(), num_securities_for_customer_count(50000));
BOOST_CHECK_EQUAL(security_file.GetConfiguredSecurityCount(), num_securities_for_customer_count(50000));
BOOST_CHECK_EQUAL(security_file.GetActiveSecurityCount(), num_securities_for_customer_count(70000));
}
BOOST_AUTO_TEST_CASE( test_securityfile_exchangeid )
{
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(0), TPCE::ePCX);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(2), TPCE::eNYSE);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(3), TPCE::eAMEX);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(4), TPCE::eNASDAQ);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(0+num_securities), TPCE::ePCX);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(2+num_securities), TPCE::eNYSE);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(3+num_securities), TPCE::eAMEX);
BOOST_CHECK_EQUAL(security_file.GetExchangeIndex(4+num_securities), TPCE::eNASDAQ);
}
BOOST_AUTO_TEST_CASE( test_securityfile_companyid )
{
BOOST_CHECK_EQUAL(security_file.GetCompanyId(6841), 4995 + TPCE::iTIdentShift);
BOOST_CHECK_EQUAL(security_file.GetCompanyId(6841), security_file.GetCompanyIndex(6841)+TPCE::iTIdentShift+1);
BOOST_CHECK_EQUAL(security_file.GetCompanyId(6841+num_securities), 4995 + TPCE::iTIdentShift + num_companies);
BOOST_CHECK_EQUAL(security_file.GetCompanyId(6841+num_securities*2), 4995 + TPCE::iTIdentShift + num_companies*2);
}
BOOST_AUTO_TEST_CASE( test_securityfile_calculations )
{
BOOST_CHECK_EQUAL(security_file.CalculateSecurityCount(5000), num_securities_for_customer_count(5000));
BOOST_CHECK_EQUAL(security_file.CalculateSecurityCount(10000), num_securities_for_customer_count(10000));
BOOST_CHECK_EQUAL(security_file.CalculateSecurityCount(1000000), num_securities_for_customer_count(1000000));
BOOST_CHECK_EQUAL(security_file.CalculateStartFromSecurity(5000), num_securities_for_customer_count(5000));
BOOST_CHECK_EQUAL(security_file.CalculateStartFromSecurity(10000), num_securities_for_customer_count(10000));
BOOST_CHECK_EQUAL(security_file.CalculateStartFromSecurity(1000000), num_securities_for_customer_count(1000000));
}
BOOST_AUTO_TEST_CASE( test_securityfile_fetch_row )
{
const TPCE::TSecurityInputRow* row = security_file.GetRecord(8);
BOOST_CHECK_EQUAL(row->S_ID, 9);
BOOST_CHECK_EQUAL(row->S_ST_ID, "ACTV");
BOOST_CHECK_EQUAL(row->S_ISSUE, "PREF_B");
BOOST_CHECK_EQUAL(row->S_SYMB, "NDNPRB");
BOOST_CHECK_EQUAL(row->S_EX_ID, "AMEX");
BOOST_CHECK_EQUAL(row->S_CO_ID, 6);
const TPCE::TSecurityInputRow* row2 = security_file.GetRecord(8+num_securities);
BOOST_CHECK_EQUAL(row->S_ID, row2->S_ID);
BOOST_CHECK_EQUAL(row->S_ST_ID, row2->S_ST_ID);
BOOST_CHECK_EQUAL(row->S_ISSUE, row2->S_ISSUE);
BOOST_CHECK_EQUAL(row->S_SYMB, row2->S_SYMB);
BOOST_CHECK_EQUAL(row->S_EX_ID, row2->S_EX_ID);
BOOST_CHECK_EQUAL(row->S_CO_ID, row2->S_CO_ID);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "3274847bce6c35530710788d1f9af7192cddc93f", "size": 10627, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "egen/unittest/tc_securityfile.cpp", "max_stars_repo_name": "dotweiba/dbt5", "max_stars_repo_head_hexsha": "39e23b0a0bfd4dfcb80cb2231270324f6bbf4b42", "max_stars_repo_licenses": ["Artistic-1.0"], "max_stars_count": 23.0, "max_stars_repo_stars_event_min_datetime": "2021-05-11T13:14:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T05:59:07.000Z", "max_issues_repo_path": "egen/unittest/tc_securityfile.cpp", "max_issues_repo_name": "dotweiba/dbt5", "max_issues_repo_head_hexsha": "39e23b0a0bfd4dfcb80cb2231270324f6bbf4b42", "max_issues_repo_licenses": ["Artistic-1.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-09-28T05:36:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-15T10:38:29.000Z", "max_forks_repo_path": "egen/unittest/tc_securityfile.cpp", "max_forks_repo_name": "dotweiba/dbt5", "max_forks_repo_head_hexsha": "39e23b0a0bfd4dfcb80cb2231270324f6bbf4b42", "max_forks_repo_licenses": ["Artistic-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2017-01-18T20:16:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T12:23:50.000Z", "avg_line_length": 53.135, "max_line_length": 123, "alphanum_fraction": 0.7847934506, "num_tokens": 2708}
|
# -*- coding: utf-8 -*-
import gym
import numpy as np
# Target reward
goal_average_steps = 195
# Maximum number of time steps per episode
max_number_of_steps = 200
# Number of episodes
num_episodes = 5000
# Number of consecutive episodes to keep for evaluation
num_consecutive_iterations = 100
# Rewards of the most recent episodes
last_time_steps = np.zeros(num_consecutive_iterations)
def bins(clip_min, clip_max, num):
"""
    Histogram bins: returns the interior bin edges
"""
return np.linspace(clip_min, clip_max, num + 1)[1:-1]
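# For example (quick check, not from the original author): bins(-2.4, 2.4, 4)
# returns the three interior edges [-1.2, 0.0, 1.2].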
def digitize_state(observation):
"""
    Digitize: convert the continuous observation into a discrete state
"""
    # Convert each value into one of 4 discrete bins
    #
    # cart_pos: cart position (-2.4 ~ 2.4)
    # cart_v: cart velocity (-inf ~ inf) * exact range unclear
    # pole_angle: pole angle (-41.8 deg ~ 41.8 deg)
    # pole_v: pole angular velocity (-inf ~ inf) * exact range unclear
cart_pos, cart_v, pole_angle, pole_v = observation
digitized = [np.digitize(cart_pos, bins=bins(-2.4, 2.4, 4)),
np.digitize(cart_v, bins=bins(-3.0, 3.0, 4)),
np.digitize(pole_angle, bins=bins(-0.5, 0.5, 4)),
np.digitize(pole_v, bins=bins(-2.0, 2.0, 4))]
    # Combine the four bin indices into a single state index in 0~255
return sum([x * (4 ** i) for i, x in enumerate(digitized)])
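# For example (simple arithmetic on the encoding above, not from the original author):
# the centered observation [0.0, 0.0, 0.0, 0.0] falls into bin 2 on every axis,
# giving state 2 + 2*4 + 2*16 + 2*64 = 170.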
def get_action(state, action, observation, reward, episode):
"""
    Select the next action and update the Q-table
"""
next_state = digitize_state(observation)
    # epsilon-greedy
    # epsilon = 0.2 # pick the next action at random with a fixed probability
    epsilon = 0.5 * (0.99 ** episode) # rely more on experience as learning progresses
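    # For reference (simple arithmetic on the schedule above): epsilon starts at 0.5,
    # is roughly 0.18 around episode 100, and drops below 0.01 after about 400 episodes,
    # so the agent gradually shifts from exploration to exploitation.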
if epsilon <= np.random.uniform(0, 1):
        # exploit learned Q-values
next_action = np.argmax(q_table[next_state])
else:
        # explore with a random action
next_action = np.random.choice([0, 1])
    # Q-learning: learning rate
alpha = 0.2
    # Q-learning: discount rate for the discounted sum of rewards
    # (rewards further in the future count for less)
gamma = 0.99
    # Update the Q-table
q_table[state, action] = (1 - alpha) * q_table[state, action] +\
alpha * (reward + gamma * q_table[next_state, next_action])
return next_action, next_state
if __name__ == '__main__':
env = gym.make('CartPole-v0')
    # Q-table
q_table = np.random.uniform(low=-1, high=1, size=(4 ** 4, env.action_space.n))
for episode in range(num_episodes):
        # Initialize the environment
observation = env.reset()
        # Initial state
state = digitize_state(observation)
action = np.argmax(q_table[state])
episode_reward = 0
for t in range(max_number_of_steps):
            # Render the CartPole environment
env.render()
            # (alternative) pick an action at random
# action = np.random.choice([0, 1])
            # Execute the action and get feedback from the environment
            #
            # observation: environment-specific observation object
            # reward: reward obtained by the previous action
            # done: True when the episode has finished
            # info: diagnostic information for debugging
observation, reward, done, info = env.step(action)
            # Add a penalty on failure
if done:
reward = -200
            # Select the next action
action, state = get_action(state, action, observation, reward, episode)
episode_reward += reward
if done:
print('%d Episode finished after %f time steps / mean %f' % (episode, t + 1,
last_time_steps.mean()))
                # Store the result of the latest episode
# last_time_steps = np.hstack((last_time_steps[1:], [episode_reward]))
last_time_steps = np.hstack((last_time_steps[1:], [t + 1]))
break
        if (last_time_steps.mean() >= goal_average_steps): # success if the mean of the last 100 episodes is at least 195
            print('Episode %d train agent successfully!' % episode)
break
|
{"hexsha": "1329295127eaa15a8e0415c3165e75eb1d26686e", "size": 3393, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/q-leaning/cartpole.py", "max_stars_repo_name": "Silver-birder/reinforcement-learning-fx", "max_stars_repo_head_hexsha": "043e54015387b105669c7d047ca7f43c43dcc72b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-01T13:24:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T05:09:02.000Z", "max_issues_repo_path": "example/q-leaning/cartpole.py", "max_issues_repo_name": "Silver-birder/reinforcement-learning-fx", "max_issues_repo_head_hexsha": "043e54015387b105669c7d047ca7f43c43dcc72b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/q-leaning/cartpole.py", "max_forks_repo_name": "Silver-birder/reinforcement-learning-fx", "max_forks_repo_head_hexsha": "043e54015387b105669c7d047ca7f43c43dcc72b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7542372881, "max_line_length": 92, "alphanum_fraction": 0.5758915414, "include": true, "reason": "import numpy", "num_tokens": 1271}
|
# Copyright (C) 2016-2021 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tensorflow as tf
from efl.lib import ops as fed_ops
import numpy as np
class FixedPointTensor():
BASE = 16.
LOG2_BASE = math.log(BASE, 2)
FLOAT_MANTISSA_BITS = sys.float_info.mant_dig
Q = 293973345475167247070445277780365744413
TF_CONVERT_NUM_LENGTH = 31
def __init__(self, n=None, max_int=None):
if n is None:
n = Q
max_int = Q // 3 - 1
self._n = n
self._max_int = max_int
self._encoding = None
self._exponent = None
@property
def encoding(self):
return self._encoding
@property
def exponent(self):
return self._exponent
def set_encoding(self, encoding, exponent):
self._encoding = encoding
self._exponent = exponent
return self
def encode(self, scalar, max_exponent=None):
scalar = tf.where(tf.less_equal(tf.math.abs(scalar), 1e-200),
tf.zeros_like(scalar),
scalar)
if scalar.dtype in (tf.int8, tf.int16, tf.int32, tf.int64):
exponent = tf.zeros_like(scalar, dtype=tf.float32)
elif scalar.dtype in (tf.float16, tf.float32, tf.float64):
scalar = tf.cast(scalar, tf.float32)
_, flt_exponent = fed_ops.frexp(scalar)
lsb_exponent = FixedPointTensor.FLOAT_MANTISSA_BITS - flt_exponent
exponent = tf.math.floor(lsb_exponent / FixedPointTensor.LOG2_BASE)
else:
raise ValueError(f"FixedPointTensor not support encode for type: {scalar.dtype}")
if max_exponent is not None:
max_exponent = tf.ones_like(scalar, dtype=tf.float32) * max_exponent
max_exponent = tf.where(tf.greater(max_exponent, exponent), max_exponent, exponent)
diff_exponent = tf.cast(max_exponent - exponent, dtype=tf.int64)
else:
diff_exponent = tf.zeros_like(scalar, dtype=tf.int64)
max_exponent = exponent
n = tf.constant(str(self._n), dtype=tf.string, shape=[1] * len(scalar.get_shape()))
n = tf.tile(n, tf.shape(scalar))
int_fixpoint = tf.round(scalar * tf.pow(tf.cast(FixedPointTensor.BASE, tf.float32), exponent))
fixpoint = tf.strings.as_string(tf.cast(int_fixpoint, dtype=tf.int64))
base = tf.constant(str(int(FixedPointTensor.BASE)), dtype=tf.string, shape=[1] * len(scalar.get_shape()))
base = tf.tile(base, tf.shape(scalar))
pow_base = fed_ops.gmp_pow(base, diff_exponent)
fixpoint = fed_ops.gmp_mul(fixpoint, pow_base)
encoding = fed_ops.gmp_mod(fixpoint, n)
self._encoding = encoding
self._exponent = max_exponent
return self
def _format_encode(self, encoded, exponent):
expand_exponent = tf.zeros_like(exponent, dtype=tf.float32)
expand_length = tf.cast(tf.strings.length(encoded) - FixedPointTensor.TF_CONVERT_NUM_LENGTH, dtype=tf.float32)
expand_exponent = tf.where(tf.greater(expand_length, 0), expand_length, expand_exponent)
base = tf.constant(str(int(FixedPointTensor.BASE)), dtype=tf.string, shape=[1] * len(encoded.get_shape()))
base = tf.tile(base, tf.shape(encoded))
pow_base = fed_ops.gmp_pow(base, tf.cast(expand_exponent, dtype=tf.int64))
self._encoding = fed_ops.gmp_div(encoded, pow_base)
self._exponent = exponent - expand_exponent
def decode(self):
max_int = tf.constant(str(self._max_int), dtype=tf.string, shape=[1] * len(self._encoding.get_shape()))
max_int = tf.tile(max_int, tf.shape(self._encoding))
n = tf.constant(str(self._n), dtype=tf.string, shape=[1] * len(self._encoding.get_shape()))
n = tf.tile(n, tf.shape(self._encoding))
cmp_result = fed_ops.gmp_cmp(self._encoding, max_int)
pos_matrix = tf.less_equal(cmp_result, 0)
encoded = tf.where(pos_matrix, self.encoding, fed_ops.gmp_sub(n, self.encoding))
self._format_encode(encoded, self.exponent)
encoded = tf.strings.to_number(self.encoding, tf.float32)
pos_matrix = tf.cast(pos_matrix, tf.float32) * 2. - 1.
decoded = encoded * tf.pow(tf.cast(FixedPointTensor.BASE, tf.float32), -self.exponent)
return decoded * pos_matrix
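# Rough usage sketch (illustrative only; shapes, graph/session handling and exact
# numerical behaviour are assumptions, not taken from the surrounding project):
#
#   x = tf.constant([[0.5, -1.25], [3.0, 0.0]])
#   enc = FixedPointTensor().encode(x)   # string-encoded fixed-point residues mod n
#   dec = enc.decode()                   # float32 tensor approximately equal to x
#
# encode() stores round(x * BASE**exponent) (times BASE**diff_exponent) mod n as a
# decimal string, and decode() maps the residue back and rescales by BASE**(-exponent).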
|
{"hexsha": "03a917b75964cf8320e60e812530cbaea5ab2803", "size": 4758, "ext": "py", "lang": "Python", "max_stars_repo_path": "efls-train/python/efl/privacy/fixedpoint_tensor.py", "max_stars_repo_name": "finalljx/Elastic-Federated-Learning-Solution", "max_stars_repo_head_hexsha": "fb588fdc03a2c1598b40b36712b27bdffdd24258", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "efls-train/python/efl/privacy/fixedpoint_tensor.py", "max_issues_repo_name": "finalljx/Elastic-Federated-Learning-Solution", "max_issues_repo_head_hexsha": "fb588fdc03a2c1598b40b36712b27bdffdd24258", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "efls-train/python/efl/privacy/fixedpoint_tensor.py", "max_forks_repo_name": "finalljx/Elastic-Federated-Learning-Solution", "max_forks_repo_head_hexsha": "fb588fdc03a2c1598b40b36712b27bdffdd24258", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7368421053, "max_line_length": 114, "alphanum_fraction": 0.7036569987, "include": true, "reason": "import numpy", "num_tokens": 1229}
|
from ..plugins.state_init import *
import pytest
import numpy as np
State_initialiser
def test_index_based():
method ='index'
input_list = np.zeros(4)
my_S = State_initialiser(method,input_list)
assert my_S.logic == index_based
def test_energy_based():
method ='energy'
input_list = np.zeros(4)
my_S = State_initialiser(method,input_list)
assert my_S.logic == energy_based
def test_all():
method ='all'
input_list = np.zeros(4)
my_S = State_initialiser(method,input_list)
assert my_S.logic == return_all
@pytest.mark.xfail(raises=KeyError)
def test_invalid_method():
method ='foo_nafonew'
input_list = np.zeros(4)
my_S = State_initialiser(method,input_list)
def test_get_diags():
method ='all'
input_list = np.zeros(4)
my_S = State_initialiser(method,input_list)
ham = np.ones((3,3))
diags = my_S.get_diagonals(ham)
assert diags.all() == 1
@pytest.mark.xfail(raises=TypeError)
def test_get_diags_invalid_input():
method ='all'
input_list = np.zeros(4)
my_S = State_initialiser(method,input_list)
ham = 'not a matrix'
diags = my_S.get_diagonals(ham)
assert diags.all() == 1
# test the add state function
def test_add_states_all():
method ='all'
input_list = None
my_matrix = np.diag([1.0,2.0,3.0,4.0,5.0])
my_S = State_initialiser(method,input_list)
states = my_S(my_matrix)
assert states[0].vals[0] == 1.0 and states[2].vals[0] == 3.0
def test_add_states_energy():
method ='energy'
input_list = [1.4,3.6]
my_matrix = np.diag([1.0,2.0,3.0,4.0,5.0])
my_S = State_initialiser(method,input_list)
states = my_S(my_matrix)
assert states[0].vals[0] == 2.0 and states[1].vals[0] == 3.0
def test_add_states_energy1():
method ='energy'
input_list = [1.4,4.6]
my_matrix = np.diag([1.0,2.0,3.0,4.0,5.0])
my_S = State_initialiser(method,input_list)
states = my_S(my_matrix)
assert len(states) == 3
def test_add_states_index():
method ='index'
input_list = [1,2,3]
my_matrix = np.diag([1.0,2.0,3.0,4.0,5.0])
my_S = State_initialiser(method,input_list)
states = my_S(my_matrix)
assert len(states) == 3
def test_add_states_index1():
method ='index'
input_list = [1,2,4]
my_matrix = np.diag([1.0,2.0,3.0,4.0,5.0])
my_S = State_initialiser(method,input_list)
states = my_S(my_matrix)
assert states[0].vals[0] == 2.0 and states[2].vals[0] == 5.0
# test the option functions.
def test_energy_based_true():
input_list = np.array([-0.5,0.5])
index = 'foo'
value = 0.2
assert energy_based(index,value,input_list) == True
def test_energy_based_true1():
input_list = np.array([-0.5,0.5])
index = 'foo'
value = 0.5
assert energy_based(index,value,input_list) == True
def test_energy_based_false():
input_list = np.array([-0.5,0.5])
index = 'foo'
value = 0.51
assert energy_based(index,value,input_list) == False
def test_energy_based_false1():
input_list = np.array([-0.5,0.5])
index = 'foo'
value = -0.501
assert energy_based(index,value,input_list) == False
def test_index_based_true():
input_list = np.array([0,1,2,3,4,5])
index = 0
value = 'foo'
assert index_based(index,value,input_list) == True
def test_index_based_True1():
input_list = np.array([1])
index = 1
value = 'foo'
assert index_based(index,value,input_list) == True
def test_index_based_False():
input_list = np.array([0,1,2,3,4,5])
index = 6
value = 'foo'
assert index_based(index,value,input_list) == False
def test_index_based_False1():
input_list = np.array([3])
index = 2
value = 'foo'
assert index_based(index,value,input_list) == False
|
{"hexsha": "3bca2de37718830fc49cc570ad7527c7e000ea10", "size": 3843, "ext": "py", "lang": "Python", "max_stars_repo_path": "rydprop/hohi/adiabatic_solver/tests/test_state_init.py", "max_stars_repo_name": "jdrtommey/rydprops", "max_stars_repo_head_hexsha": "cdc7e14d61ff33929844ee5d779a18fd64f89f4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rydprop/hohi/adiabatic_solver/tests/test_state_init.py", "max_issues_repo_name": "jdrtommey/rydprops", "max_issues_repo_head_hexsha": "cdc7e14d61ff33929844ee5d779a18fd64f89f4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rydprop/hohi/adiabatic_solver/tests/test_state_init.py", "max_forks_repo_name": "jdrtommey/rydprops", "max_forks_repo_head_hexsha": "cdc7e14d61ff33929844ee5d779a18fd64f89f4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3219178082, "max_line_length": 64, "alphanum_fraction": 0.641946396, "include": true, "reason": "import numpy", "num_tokens": 1153}
|
#
# Copyright (c) 2015-2016,2018 CNRS
#
import numpy as np
from pinocchio.robot_wrapper import RobotWrapper
from . import libpinocchio_pywrap as pin
from . import utils
from .explog import exp, log
from .libpinocchio_pywrap import *
from .deprecated import *
from .shortcuts import *
pin.AngleAxis.__repr__ = lambda s: 'AngleAxis(%s)' % s.vector()
|
{"hexsha": "cc8ebdf341dc9adbc9f048e54b245f630bb19fa2", "size": 352, "ext": "py", "lang": "Python", "max_stars_repo_path": "bindings/python/scripts/__init__.py", "max_stars_repo_name": "matthieuvigne/pinocchio", "max_stars_repo_head_hexsha": "01f211eceda3ac2e5edc8cf101690afb6f3184d3", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-22T15:42:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-22T15:42:45.000Z", "max_issues_repo_path": "bindings/python/scripts/__init__.py", "max_issues_repo_name": "matthieuvigne/pinocchio", "max_issues_repo_head_hexsha": "01f211eceda3ac2e5edc8cf101690afb6f3184d3", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bindings/python/scripts/__init__.py", "max_forks_repo_name": "matthieuvigne/pinocchio", "max_forks_repo_head_hexsha": "01f211eceda3ac2e5edc8cf101690afb6f3184d3", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-21T09:14:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-21T09:14:26.000Z", "avg_line_length": 20.7058823529, "max_line_length": 63, "alphanum_fraction": 0.7613636364, "include": true, "reason": "import numpy", "num_tokens": 101}
|
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
@doc raw"""
ObjsChannel(;
accepted_user=nothing,
created=nothing,
creator=nothing,
id=nothing,
is_archived=nothing,
is_channel=nothing,
is_frozen=nothing,
is_general=nothing,
is_member=nothing,
is_moved=nothing,
is_mpim=nothing,
is_non_threadable=nothing,
is_org_shared=nothing,
is_pending_ext_shared=nothing,
is_private=nothing,
is_read_only=nothing,
is_shared=nothing,
is_thread_only=nothing,
last_read=nothing,
latest=nothing,
members=nothing,
name=nothing,
name_normalized=nothing,
num_members=nothing,
pending_shared=nothing,
previous_names=nothing,
priority=nothing,
purpose=nothing,
topic=nothing,
unlinked=nothing,
unread_count=nothing,
unread_count_display=nothing,
)
- accepted_user::DefsUserId
- created::Int32
- creator::DefsUserId
- id::DefsChannelId
- is_archived::Bool
- is_channel::Bool
- is_frozen::Bool
- is_general::Bool
- is_member::Bool
- is_moved::Int32
- is_mpim::Bool
- is_non_threadable::Bool
- is_org_shared::Bool
- is_pending_ext_shared::Bool
- is_private::Bool
- is_read_only::Bool
- is_shared::Bool
- is_thread_only::Bool
- last_read::DefsTs
- latest::Any
- members::Vector{DefsUserId}
- name::String
- name_normalized::String
- num_members::Int32
- pending_shared::Vector{DefsTeam}
- previous_names::Vector{DefsChannelName}
- priority::Float32
- purpose::ObjsChannelPurpose
- topic::ObjsChannelPurpose
- unlinked::Int32
- unread_count::Int32
- unread_count_display::Int32
"""
mutable struct ObjsChannel <: SwaggerModel
accepted_user::Any # spec type: Union{ Nothing, DefsUserId } # spec name: accepted_user
created::Any # spec type: Union{ Nothing, Int32 } # spec name: created
creator::Any # spec type: Union{ Nothing, DefsUserId } # spec name: creator
id::Any # spec type: Union{ Nothing, DefsChannelId } # spec name: id
is_archived::Any # spec type: Union{ Nothing, Bool } # spec name: is_archived
is_channel::Any # spec type: Union{ Nothing, Bool } # spec name: is_channel
is_frozen::Any # spec type: Union{ Nothing, Bool } # spec name: is_frozen
is_general::Any # spec type: Union{ Nothing, Bool } # spec name: is_general
is_member::Any # spec type: Union{ Nothing, Bool } # spec name: is_member
is_moved::Any # spec type: Union{ Nothing, Int32 } # spec name: is_moved
is_mpim::Any # spec type: Union{ Nothing, Bool } # spec name: is_mpim
is_non_threadable::Any # spec type: Union{ Nothing, Bool } # spec name: is_non_threadable
is_org_shared::Any # spec type: Union{ Nothing, Bool } # spec name: is_org_shared
is_pending_ext_shared::Any # spec type: Union{ Nothing, Bool } # spec name: is_pending_ext_shared
is_private::Any # spec type: Union{ Nothing, Bool } # spec name: is_private
is_read_only::Any # spec type: Union{ Nothing, Bool } # spec name: is_read_only
is_shared::Any # spec type: Union{ Nothing, Bool } # spec name: is_shared
is_thread_only::Any # spec type: Union{ Nothing, Bool } # spec name: is_thread_only
last_read::Any # spec type: Union{ Nothing, DefsTs } # spec name: last_read
latest::Any # spec type: Union{ Nothing, Any } # spec name: latest
members::Any # spec type: Union{ Nothing, Vector{DefsUserId} } # spec name: members
name::Any # spec type: Union{ Nothing, String } # spec name: name
name_normalized::Any # spec type: Union{ Nothing, String } # spec name: name_normalized
num_members::Any # spec type: Union{ Nothing, Int32 } # spec name: num_members
pending_shared::Any # spec type: Union{ Nothing, Vector{DefsTeam} } # spec name: pending_shared
previous_names::Any # spec type: Union{ Nothing, Vector{DefsChannelName} } # spec name: previous_names
priority::Any # spec type: Union{ Nothing, Float32 } # spec name: priority
purpose::Any # spec type: Union{ Nothing, ObjsChannelPurpose } # spec name: purpose
topic::Any # spec type: Union{ Nothing, ObjsChannelPurpose } # spec name: topic
unlinked::Any # spec type: Union{ Nothing, Int32 } # spec name: unlinked
unread_count::Any # spec type: Union{ Nothing, Int32 } # spec name: unread_count
unread_count_display::Any # spec type: Union{ Nothing, Int32 } # spec name: unread_count_display
function ObjsChannel(;accepted_user=nothing, created=nothing, creator=nothing, id=nothing, is_archived=nothing, is_channel=nothing, is_frozen=nothing, is_general=nothing, is_member=nothing, is_moved=nothing, is_mpim=nothing, is_non_threadable=nothing, is_org_shared=nothing, is_pending_ext_shared=nothing, is_private=nothing, is_read_only=nothing, is_shared=nothing, is_thread_only=nothing, last_read=nothing, latest=nothing, members=nothing, name=nothing, name_normalized=nothing, num_members=nothing, pending_shared=nothing, previous_names=nothing, priority=nothing, purpose=nothing, topic=nothing, unlinked=nothing, unread_count=nothing, unread_count_display=nothing)
o = new()
validate_property(ObjsChannel, Symbol("accepted_user"), accepted_user)
setfield!(o, Symbol("accepted_user"), accepted_user)
validate_property(ObjsChannel, Symbol("created"), created)
setfield!(o, Symbol("created"), created)
validate_property(ObjsChannel, Symbol("creator"), creator)
setfield!(o, Symbol("creator"), creator)
validate_property(ObjsChannel, Symbol("id"), id)
setfield!(o, Symbol("id"), id)
validate_property(ObjsChannel, Symbol("is_archived"), is_archived)
setfield!(o, Symbol("is_archived"), is_archived)
validate_property(ObjsChannel, Symbol("is_channel"), is_channel)
setfield!(o, Symbol("is_channel"), is_channel)
validate_property(ObjsChannel, Symbol("is_frozen"), is_frozen)
setfield!(o, Symbol("is_frozen"), is_frozen)
validate_property(ObjsChannel, Symbol("is_general"), is_general)
setfield!(o, Symbol("is_general"), is_general)
validate_property(ObjsChannel, Symbol("is_member"), is_member)
setfield!(o, Symbol("is_member"), is_member)
validate_property(ObjsChannel, Symbol("is_moved"), is_moved)
setfield!(o, Symbol("is_moved"), is_moved)
validate_property(ObjsChannel, Symbol("is_mpim"), is_mpim)
setfield!(o, Symbol("is_mpim"), is_mpim)
validate_property(ObjsChannel, Symbol("is_non_threadable"), is_non_threadable)
setfield!(o, Symbol("is_non_threadable"), is_non_threadable)
validate_property(ObjsChannel, Symbol("is_org_shared"), is_org_shared)
setfield!(o, Symbol("is_org_shared"), is_org_shared)
validate_property(ObjsChannel, Symbol("is_pending_ext_shared"), is_pending_ext_shared)
setfield!(o, Symbol("is_pending_ext_shared"), is_pending_ext_shared)
validate_property(ObjsChannel, Symbol("is_private"), is_private)
setfield!(o, Symbol("is_private"), is_private)
validate_property(ObjsChannel, Symbol("is_read_only"), is_read_only)
setfield!(o, Symbol("is_read_only"), is_read_only)
validate_property(ObjsChannel, Symbol("is_shared"), is_shared)
setfield!(o, Symbol("is_shared"), is_shared)
validate_property(ObjsChannel, Symbol("is_thread_only"), is_thread_only)
setfield!(o, Symbol("is_thread_only"), is_thread_only)
validate_property(ObjsChannel, Symbol("last_read"), last_read)
setfield!(o, Symbol("last_read"), last_read)
validate_property(ObjsChannel, Symbol("latest"), latest)
setfield!(o, Symbol("latest"), latest)
validate_property(ObjsChannel, Symbol("members"), members)
setfield!(o, Symbol("members"), members)
validate_property(ObjsChannel, Symbol("name"), name)
setfield!(o, Symbol("name"), name)
validate_property(ObjsChannel, Symbol("name_normalized"), name_normalized)
setfield!(o, Symbol("name_normalized"), name_normalized)
validate_property(ObjsChannel, Symbol("num_members"), num_members)
setfield!(o, Symbol("num_members"), num_members)
validate_property(ObjsChannel, Symbol("pending_shared"), pending_shared)
setfield!(o, Symbol("pending_shared"), pending_shared)
validate_property(ObjsChannel, Symbol("previous_names"), previous_names)
setfield!(o, Symbol("previous_names"), previous_names)
validate_property(ObjsChannel, Symbol("priority"), priority)
setfield!(o, Symbol("priority"), priority)
validate_property(ObjsChannel, Symbol("purpose"), purpose)
setfield!(o, Symbol("purpose"), purpose)
validate_property(ObjsChannel, Symbol("topic"), topic)
setfield!(o, Symbol("topic"), topic)
validate_property(ObjsChannel, Symbol("unlinked"), unlinked)
setfield!(o, Symbol("unlinked"), unlinked)
validate_property(ObjsChannel, Symbol("unread_count"), unread_count)
setfield!(o, Symbol("unread_count"), unread_count)
validate_property(ObjsChannel, Symbol("unread_count_display"), unread_count_display)
setfield!(o, Symbol("unread_count_display"), unread_count_display)
o
end
end # type ObjsChannel
const _property_map_ObjsChannel = Dict{Symbol,Symbol}(Symbol("accepted_user")=>Symbol("accepted_user"), Symbol("created")=>Symbol("created"), Symbol("creator")=>Symbol("creator"), Symbol("id")=>Symbol("id"), Symbol("is_archived")=>Symbol("is_archived"), Symbol("is_channel")=>Symbol("is_channel"), Symbol("is_frozen")=>Symbol("is_frozen"), Symbol("is_general")=>Symbol("is_general"), Symbol("is_member")=>Symbol("is_member"), Symbol("is_moved")=>Symbol("is_moved"), Symbol("is_mpim")=>Symbol("is_mpim"), Symbol("is_non_threadable")=>Symbol("is_non_threadable"), Symbol("is_org_shared")=>Symbol("is_org_shared"), Symbol("is_pending_ext_shared")=>Symbol("is_pending_ext_shared"), Symbol("is_private")=>Symbol("is_private"), Symbol("is_read_only")=>Symbol("is_read_only"), Symbol("is_shared")=>Symbol("is_shared"), Symbol("is_thread_only")=>Symbol("is_thread_only"), Symbol("last_read")=>Symbol("last_read"), Symbol("latest")=>Symbol("latest"), Symbol("members")=>Symbol("members"), Symbol("name")=>Symbol("name"), Symbol("name_normalized")=>Symbol("name_normalized"), Symbol("num_members")=>Symbol("num_members"), Symbol("pending_shared")=>Symbol("pending_shared"), Symbol("previous_names")=>Symbol("previous_names"), Symbol("priority")=>Symbol("priority"), Symbol("purpose")=>Symbol("purpose"), Symbol("topic")=>Symbol("topic"), Symbol("unlinked")=>Symbol("unlinked"), Symbol("unread_count")=>Symbol("unread_count"), Symbol("unread_count_display")=>Symbol("unread_count_display"))
const _property_types_ObjsChannel = Dict{Symbol,String}(Symbol("accepted_user")=>"DefsUserId", Symbol("created")=>"Int32", Symbol("creator")=>"DefsUserId", Symbol("id")=>"DefsChannelId", Symbol("is_archived")=>"Bool", Symbol("is_channel")=>"Bool", Symbol("is_frozen")=>"Bool", Symbol("is_general")=>"Bool", Symbol("is_member")=>"Bool", Symbol("is_moved")=>"Int32", Symbol("is_mpim")=>"Bool", Symbol("is_non_threadable")=>"Bool", Symbol("is_org_shared")=>"Bool", Symbol("is_pending_ext_shared")=>"Bool", Symbol("is_private")=>"Bool", Symbol("is_read_only")=>"Bool", Symbol("is_shared")=>"Bool", Symbol("is_thread_only")=>"Bool", Symbol("last_read")=>"DefsTs", Symbol("latest")=>"Any", Symbol("members")=>"Vector{DefsUserId}", Symbol("name")=>"String", Symbol("name_normalized")=>"String", Symbol("num_members")=>"Int32", Symbol("pending_shared")=>"Vector{DefsTeam}", Symbol("previous_names")=>"Vector{DefsChannelName}", Symbol("priority")=>"Float32", Symbol("purpose")=>"ObjsChannelPurpose", Symbol("topic")=>"ObjsChannelPurpose", Symbol("unlinked")=>"Int32", Symbol("unread_count")=>"Int32", Symbol("unread_count_display")=>"Int32")
Base.propertynames(::Type{ ObjsChannel }) = collect(keys(_property_map_ObjsChannel))
Swagger.property_type(::Type{ ObjsChannel }, name::Symbol) = Union{Nothing,eval(Base.Meta.parse(_property_types_ObjsChannel[name]))}
Swagger.field_name(::Type{ ObjsChannel }, property_name::Symbol) = _property_map_ObjsChannel[property_name]
function check_required(o::ObjsChannel)
(getproperty(o, Symbol("created")) === nothing) && (return false)
(getproperty(o, Symbol("creator")) === nothing) && (return false)
(getproperty(o, Symbol("id")) === nothing) && (return false)
(getproperty(o, Symbol("is_channel")) === nothing) && (return false)
(getproperty(o, Symbol("is_mpim")) === nothing) && (return false)
(getproperty(o, Symbol("is_org_shared")) === nothing) && (return false)
(getproperty(o, Symbol("is_private")) === nothing) && (return false)
(getproperty(o, Symbol("is_shared")) === nothing) && (return false)
(getproperty(o, Symbol("members")) === nothing) && (return false)
(getproperty(o, Symbol("name")) === nothing) && (return false)
(getproperty(o, Symbol("name_normalized")) === nothing) && (return false)
(getproperty(o, Symbol("purpose")) === nothing) && (return false)
(getproperty(o, Symbol("topic")) === nothing) && (return false)
true
end
function validate_property(::Type{ ObjsChannel }, name::Symbol, val)
end
|
{"hexsha": "b2f7bb24e8e0a6c8f931e84cc557f08e738f59a8", "size": 13528, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/web/model_ObjsChannel.jl", "max_stars_repo_name": "aviks/SlackSDK.jl", "max_stars_repo_head_hexsha": "5035e0d3c53c6812e364a84e81304b36f00f4340", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-26T12:31:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T12:32:09.000Z", "max_issues_repo_path": "src/web/model_ObjsChannel.jl", "max_issues_repo_name": "aviks/SlackAPI.jl", "max_issues_repo_head_hexsha": "5035e0d3c53c6812e364a84e81304b36f00f4340", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-01T21:06:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T21:06:58.000Z", "max_forks_repo_path": "src/web/model_ObjsChannel.jl", "max_forks_repo_name": "aviks/SlackSDK.jl", "max_forks_repo_head_hexsha": "5035e0d3c53c6812e364a84e81304b36f00f4340", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 66.6403940887, "max_line_length": 1479, "alphanum_fraction": 0.6990685985, "num_tokens": 3431}
|
'''
This project is written by Anqi Ni (anqini4@gmail.com)
following the algorithm in the paper
'The Split Bregman Method for L1-Regularized Problems' (2009)
by Tom Goldstein and Stanley Osher, published
in SIAM J. IMAGING SCIENCES, Vol. 2, No. 2, pp. 323-343.
It is for educational purposes only.
PLEASE do NOT duplicate or distribute these files
without citing the author (Anqi Ni).
'''
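# Rough outline of the split Bregman loop in itv() below (my paraphrase of the
# Goldstein & Osher scheme, phrased in terms of this file's variable names):
#
#   1. u-update : solve the quadratic subproblem approximately (one Gauss-Seidel
#                 style sweep, gaussian_method)
#   2. s        : pointwise magnitude of (grad(u) + b), via compute_s
#   3. d-update : isotropic soft shrinkage, d = shrink(s, lambda) * (grad(u) + b) / s
#   4. b-update : Bregman variable, b = b + grad(u) - d
#
# repeated until the relative change in u falls below tol.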
import numpy as np
import imageio
from util import *
def itv(noisy, real):
u = noisy
u_prev = np.zeros(u.shape)
# initializing
dx = np.zeros(u.shape)
dy = np.zeros(u.shape)
bx = np.zeros(u.shape)
by = np.zeros(u.shape)
lambd = 0.1
mu = 0.05
tol = 0.001
epsilon = 0.0001
iter = 0
    # termination condition: stop when u changes little or after 40 iterations
while np.linalg.norm(u - u_prev) / np.linalg.norm(u) > tol \
and iter < 40:
iter = iter + 1
u_prev = u
u = gaussian_method(u, noisy, dx, dy, bx, by, lambd, mu)
s = compute_s(u, bx, by)
# print(s)
dx = shrink(s, lambd) * (gradient_x(u) + bx) / (s + epsilon)
dy = shrink(s, lambd) * (gradient_y(u) + by) / (s + epsilon)
bx = bx + (gradient_x(u) - dx)
by = by + (gradient_y(u) - dy)
print('converge step ratio: {:.04f}'.format(np.linalg.norm(u - u_prev) / np.linalg.norm(u)))
# print('distance to real: {}'.format(norm2(u - real)))
return u
if __name__ == "__main__":
# read image
noisy, real = read_image()
u = itv(noisy, real)
plt.subplot(121)
plt.imshow(u, cmap='gray')
plt.subplot(122)
plt.imshow(noisy, cmap='gray')
plt.show()
|
{"hexsha": "70167a9e8258b68fe2d45e2534bc71084ac9c1a7", "size": 1643, "ext": "py", "lang": "Python", "max_stars_repo_path": "itv.py", "max_stars_repo_name": "ucas010/Split-Bregman-for-TV-Image-Recovery", "max_stars_repo_head_hexsha": "3baf24775f94ac491bc614ce032a74b36731a303", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-05-05T08:45:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T01:25:52.000Z", "max_issues_repo_path": "itv.py", "max_issues_repo_name": "anqini/Split-Bregman-for-TV-Image-Recovery", "max_issues_repo_head_hexsha": "3baf24775f94ac491bc614ce032a74b36731a303", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "itv.py", "max_forks_repo_name": "anqini/Split-Bregman-for-TV-Image-Recovery", "max_forks_repo_head_hexsha": "3baf24775f94ac491bc614ce032a74b36731a303", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-06-13T13:36:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-24T15:00:38.000Z", "avg_line_length": 27.8474576271, "max_line_length": 100, "alphanum_fraction": 0.5897748022, "include": true, "reason": "import numpy", "num_tokens": 491}
|
# This file is a part of SimilaritySearch.jl
# License is Apache 2.0: https://www.apache.org/licenses/LICENSE-2.0.txt
export l1_distance, l2_distance, squared_l2_distance, linf_distance, lp_distance
"""
l1_distance(a, b)::Float64
Computes the Manhattan distance between `a` and `b`
"""
function l1_distance(a, b)::Float64
d::Float64 = 0.0 #zero(eltype(a))
@inbounds @simd for i = 1:length(a)
m = a[i] - b[i]
d += ifelse(m > 0, m, -m)
end
d
end
"""
l2_distance(a, b)::Float64
Computes the Euclidean distance between `a` and `b`
"""
function l2_distance(a, b)::Float64
#d = zero(eltype(a))
d::Float64 = 0.0
@inbounds @simd for i = 1:length(a)
m = a[i] - b[i]
d += m * m
end
sqrt(d)
end
"""
squared_l2_distance(a, b)::Float64
Computes the squared Euclidean distance between `a` and `b`
"""
function squared_l2_distance(a, b)::Float64
# d = zero(eltype(a))
d::Float64 = 0.0
@inbounds @simd for i = 1:length(a)
m = a[i] - b[i]
d += m * m
end
d
end
"""
linf_distance(a, b)::Float64
Computes the max (Chebyshev) distance between `a` and `b`
"""
function linf_distance(a, b)::Float64
d::Float64 = 0.0 # d = zero(eltype(a))
@inbounds @simd for i = 1:length(a)
m = abs(a[i] - b[i])
d = max(d, m)
end
d
end
"""
    lp_distance(p::Real)
Creates a function that computes the generic Minkowski distance for the given `p`
"""
function lp_distance(p::Real)
p = convert(Float64, p)
invp = 1.0 / p
function _lp(a, b)::Float64
d::Float64 = 0.0 # d = zero(eltype(a))
@inbounds @simd for i = 1:length(a)
m = abs(a[i] - b[i])
d += m ^ p
end
d ^ invp
end
end
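# Quick usage sketch (illustrative only, not part of the original API surface):
#
#   a = rand(Float32, 8); b = rand(Float32, 8)
#   l2_distance(a, b) ≈ lp_distance(2)(a, b)         # p = 2 recovers the Euclidean distance
#   l1_distance(a, b) ≈ lp_distance(1)(a, b)         # p = 1 recovers the Manhattan distance
#   squared_l2_distance(a, b) ≈ l2_distance(a, b)^2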
|
{"hexsha": "69864c38233c7bf36725ea3c718be82d194df96e", "size": 1783, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/distances/vectors.jl", "max_stars_repo_name": "UnofficialJuliaMirror/SimilaritySearch.jl-053f045d-5466-53fd-b400-a066f88fe02a", "max_stars_repo_head_hexsha": "f6815ebd4f018ee3536f5b3be4e39640b344b5e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/distances/vectors.jl", "max_issues_repo_name": "UnofficialJuliaMirror/SimilaritySearch.jl-053f045d-5466-53fd-b400-a066f88fe02a", "max_issues_repo_head_hexsha": "f6815ebd4f018ee3536f5b3be4e39640b344b5e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/distances/vectors.jl", "max_forks_repo_name": "UnofficialJuliaMirror/SimilaritySearch.jl-053f045d-5466-53fd-b400-a066f88fe02a", "max_forks_repo_head_hexsha": "f6815ebd4f018ee3536f5b3be4e39640b344b5e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1720430108, "max_line_length": 90, "alphanum_fraction": 0.5765563657, "num_tokens": 586}
|
[STATEMENT]
lemma image_mset_ordering_eq:
assumes "M1 = {# (f1 u). u \<in># L #}"
assumes "M2 = {# (f2 u). u \<in># L #}"
assumes "\<forall>u. (u \<in># L \<longrightarrow> (((f1 u), (f2 u)) \<in> r \<or> (f1 u) = (f2 u)))"
shows "(M1 = M2) \<or> ( (M1,M2) \<in> (mult r) )"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
proof (cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?P \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
2. \<not> ?P \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
assume "M1 = M2"
[PROOF STATE]
proof (state)
this:
M1 = M2
goal (2 subgoals):
1. ?P \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
2. \<not> ?P \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
M1 = M2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
M1 = M2
goal (1 subgoal):
1. M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
M1 = M2 \<or> (M1, M2) \<in> mult r
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
assume "M1 \<noteq> M2"
[PROOF STATE]
proof (state)
this:
M1 \<noteq> M2
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
let ?L' = "{# u \<in># L. (f1 u) = (f2 u) #}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
let ?L'' = "{# u \<in># L. (f1 u) \<noteq> (f2 u) #}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
have "L = ?L' + ?L''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. L = {#u \<in># L. f1 u = f2 u#} + {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
by (simp)
[PROOF STATE]
proof (state)
this:
L = {#u \<in># L. f1 u = f2 u#} + {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
from assms(3)
[PROOF STATE]
proof (chain)
picking this:
\<forall>u. u \<in># L \<longrightarrow> (f1 u, f2 u) \<in> r \<or> f1 u = f2 u
[PROOF STEP]
have "\<forall>u. (u \<in># ?L'' \<longrightarrow> ((f1 u),(f2 u)) \<in> r)"
[PROOF STATE]
proof (prove)
using this:
\<forall>u. u \<in># L \<longrightarrow> (f1 u, f2 u) \<in> r \<or> f1 u = f2 u
goal (1 subgoal):
1. \<forall>u. u \<in># {#u \<in># L. f1 u \<noteq> f2 u#} \<longrightarrow> (f1 u, f2 u) \<in> r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>u. u \<in># {#u \<in># L. f1 u \<noteq> f2 u#} \<longrightarrow> (f1 u, f2 u) \<in> r
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
let ?M1' = "{# (f1 u). u \<in># ?L' #}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
let ?M2' = "{# (f2 u). u \<in># ?L' #}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
have "?M1' = ?M2'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. image_mset f1 {#u \<in># L. f1 u = f2 u#} = image_mset f2 {#u \<in># L. f1 u = f2 u#}
[PROOF STEP]
by (metis (mono_tags, lifting) mem_Collect_eq multiset.map_cong0 set_mset_filter)
[PROOF STATE]
proof (state)
this:
image_mset f1 {#u \<in># L. f1 u = f2 u#} = image_mset f2 {#u \<in># L. f1 u = f2 u#}
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
let ?M1'' = "{# (f1 u). u \<in># ?L'' #}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
let ?M2'' = "{# (f2 u). u \<in># ?L'' #}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
from \<open>L = ?L' + ?L''\<close>
[PROOF STATE]
proof (chain)
picking this:
L = {#u \<in># L. f1 u = f2 u#} + {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
have "M1 = ?M1' + ?M1''"
[PROOF STATE]
proof (prove)
using this:
L = {#u \<in># L. f1 u = f2 u#} + {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. M1 = image_mset f1 {#u \<in># L. f1 u = f2 u#} + image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
by (metis assms(1) image_mset_union)
[PROOF STATE]
proof (state)
this:
M1 = image_mset f1 {#u \<in># L. f1 u = f2 u#} + image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
from \<open>L = ?L' + ?L''\<close>
[PROOF STATE]
proof (chain)
picking this:
L = {#u \<in># L. f1 u = f2 u#} + {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
have "M2 = ?M2' + ?M2''"
[PROOF STATE]
proof (prove)
using this:
L = {#u \<in># L. f1 u = f2 u#} + {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. M2 = image_mset f2 {#u \<in># L. f1 u = f2 u#} + image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
by (metis assms(2) image_mset_union)
[PROOF STATE]
proof (state)
this:
M2 = image_mset f2 {#u \<in># L. f1 u = f2 u#} + image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
have dom: "(\<forall>k \<in> set_mset ?M1''. \<exists>j \<in> set_mset ?M2''. (k, j) \<in> r)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>k\<in>#image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}. \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>k. k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#} \<Longrightarrow> \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
fix k
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>k. k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#} \<Longrightarrow> \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
assume "k \<in> set_mset ?M1''"
[PROOF STATE]
proof (state)
this:
k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. \<And>k. k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#} \<Longrightarrow> \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
obtain u where "k = (f1 u)" and "u \<in># ?L''"
[PROOF STATE]
proof (prove)
using this:
k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. (\<And>u. \<lbrakk>k = f1 u; u \<in># {#u \<in># L. f1 u \<noteq> f2 u#}\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
k = f1 u
u \<in># {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. \<And>k. k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#} \<Longrightarrow> \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
from \<open>u \<in># ?L''\<close>
[PROOF STATE]
proof (chain)
picking this:
u \<in># {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
have "(f2 u) \<in># ?M2''"
[PROOF STATE]
proof (prove)
using this:
u \<in># {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. f2 u \<in># image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
f2 u \<in># image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. \<And>k. k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#} \<Longrightarrow> \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
from \<open>\<forall>u. (u \<in># ?L'' \<longrightarrow> ((f1 u),(f2 u)) \<in> r)\<close> and \<open>u \<in># ?L''\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>u. u \<in># {#u \<in># L. f1 u \<noteq> f2 u#} \<longrightarrow> (f1 u, f2 u) \<in> r
u \<in># {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
have "((f1 u),(f2 u)) \<in> r"
[PROOF STATE]
proof (prove)
using this:
\<forall>u. u \<in># {#u \<in># L. f1 u \<noteq> f2 u#} \<longrightarrow> (f1 u, f2 u) \<in> r
u \<in># {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. (f1 u, f2 u) \<in> r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(f1 u, f2 u) \<in> r
goal (1 subgoal):
1. \<And>k. k \<in># image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#} \<Longrightarrow> \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
from this and \<open>k = (f1 u)\<close> and \<open>(f2 u) \<in> set_mset ?M2''\<close>
[PROOF STATE]
proof (chain)
picking this:
(f1 u, f2 u) \<in> r
k = f1 u
f2 u \<in># image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
[PROOF STEP]
show "\<exists>j \<in> set_mset ?M2''. (k, j) \<in> r"
[PROOF STATE]
proof (prove)
using this:
(f1 u, f2 u) \<in> r
k = f1 u
f2 u \<in># image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
goal (1 subgoal):
1. \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>k\<in>#image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}. \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
from \<open>M1 \<noteq> M2\<close>
[PROOF STATE]
proof (chain)
picking this:
M1 \<noteq> M2
[PROOF STEP]
have "?M2'' \<noteq> {#}"
[PROOF STATE]
proof (prove)
using this:
M1 \<noteq> M2
goal (1 subgoal):
1. image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#} \<noteq> {#}
[PROOF STEP]
using \<open>M1 = image_mset f1 {# u \<in># L. f1 u = f2 u#} + image_mset f1 {# u \<in># L. f1 u \<noteq> f2 u#}\<close> \<open>M2 = image_mset f2 {# u \<in># L. f1 u = f2 u#} + image_mset f2 {# u \<in># L. f1 u \<noteq> f2 u#}\<close> \<open>image_mset f1 {# u \<in># L. f1 u = f2 u#} = image_mset f2 {# u \<in># L. f1 u = f2 u#}\<close>
[PROOF STATE]
proof (prove)
using this:
M1 \<noteq> M2
M1 = image_mset f1 {#u \<in># L. f1 u = f2 u#} + image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
M2 = image_mset f2 {#u \<in># L. f1 u = f2 u#} + image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
image_mset f1 {#u \<in># L. f1 u = f2 u#} = image_mset f2 {#u \<in># L. f1 u = f2 u#}
goal (1 subgoal):
1. image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#} \<noteq> {#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#} \<noteq> {#}
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
from this and dom and \<open>M1 = ?M1' + ?M1''\<close> \<open>M2 = ?M2' + ?M2''\<close> \<open>?M1'=?M2'\<close>
[PROOF STATE]
proof (chain)
picking this:
image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#} \<noteq> {#}
\<forall>k\<in>#image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}. \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
M1 = image_mset f1 {#u \<in># L. f1 u = f2 u#} + image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
M2 = image_mset f2 {#u \<in># L. f1 u = f2 u#} + image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
image_mset f1 {#u \<in># L. f1 u = f2 u#} = image_mset f2 {#u \<in># L. f1 u = f2 u#}
[PROOF STEP]
have "(M1,M2) \<in> (mult r)"
[PROOF STATE]
proof (prove)
using this:
image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#} \<noteq> {#}
\<forall>k\<in>#image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}. \<exists>j\<in>#image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}. (k, j) \<in> r
M1 = image_mset f1 {#u \<in># L. f1 u = f2 u#} + image_mset f1 {#u \<in># L. f1 u \<noteq> f2 u#}
M2 = image_mset f2 {#u \<in># L. f1 u = f2 u#} + image_mset f2 {#u \<in># L. f1 u \<noteq> f2 u#}
image_mset f1 {#u \<in># L. f1 u = f2 u#} = image_mset f2 {#u \<in># L. f1 u = f2 u#}
goal (1 subgoal):
1. (M1, M2) \<in> mult r
[PROOF STEP]
by (simp add: one_step_implies_mult)
[PROOF STATE]
proof (state)
this:
(M1, M2) \<in> mult r
goal (1 subgoal):
1. M1 \<noteq> M2 \<Longrightarrow> M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
(M1, M2) \<in> mult r
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(M1, M2) \<in> mult r
goal (1 subgoal):
1. M1 = M2 \<or> (M1, M2) \<in> mult r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
M1 = M2 \<or> (M1, M2) \<in> mult r
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6653, "file": "SuperCalc_multisets_continued", "length": 54}
|
[STATEMENT]
lemma sd_1r_correct:
assumes "s\<^sub>o - s\<^sub>e > safe_distance_1r"
shows "no_collision_react {0..}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. no_collision_react {0..}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. no_collision_react {0..}
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
safe_distance_1r < s\<^sub>o - s\<^sub>e
[PROOF STEP]
have "u_max < s\<^sub>o"
[PROOF STATE]
proof (prove)
using this:
safe_distance_1r < s\<^sub>o - s\<^sub>e
goal (1 subgoal):
1. u_max < s\<^sub>o
[PROOF STEP]
using sd_1r_eq
[PROOF STATE]
proof (prove)
using this:
safe_distance_1r < s\<^sub>o - s\<^sub>e
(safe_distance_1r < s\<^sub>o - s\<^sub>e) = (u_max < s\<^sub>o)
goal (1 subgoal):
1. u_max < s\<^sub>o
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
u_max < s\<^sub>o
goal (1 subgoal):
1. no_collision_react {0..}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
u_max < s\<^sub>o
goal (1 subgoal):
1. no_collision_react {0..}
[PROOF STEP]
by (rule cond_1r)
[PROOF STATE]
proof (state)
this:
no_collision_react {0..}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 544, "file": "Safe_Distance_Safe_Distance_Reaction", "length": 8}
|
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is set up using the script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
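# Each parameter line is expected to carry at least three whitespace-separated
# tokens (key, separator, value), e.g. a line of the form "grid = 0.2";
# plist[0] is read as the key and plist[2] as the value (the exact separator
# token in the file is assumed here and is not checked by this parser).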
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
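# ec is the eccentricity of the search ellipse, e = sqrt(1 - b^2/a^2), and
# mdist = a*e is its linear eccentricity (center-to-focus distance); mdist is
# passed to the node-filtering loops below.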
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
if polyname == 'sum' or polyname == 'man' or polyname == 'phi' or polyname =='sam' or polyname == 'sco' or polyname == 'mak' or polyname == 'jap':
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
#slab1data.to_csv('slab1data.csv',header=True,index=False)
''' ------ (9) Define Trench Locations ------'''
TR_data = pd.read_csv(trenches)
if slab == 'izu' or slab == 'kur':
TR_data = TR_data[TR_data.slab == 'jap']
else:
TR_data = TR_data[TR_data.slab == slab]
TR_data = TR_data.reset_index(drop=True)
TR_data.loc[TR_data.lon < 0, 'lon']+=360
''' ------ (10) Open and modify input dataset ------'''
eventlistALL = pd.read_table('%s' % inFile, sep=',', dtype={
'lon': np.float64, 'lat': np.float64,'depth': np.float64,
'unc': np.float64, 'etype': str, 'ID': np.int64, 'mag': np.float64,
'S1': np.float64, 'D1': np.float64, 'R1': np.float64,
'S2': np.float64, 'D2': np.float64, 'R2': np.float64,
'src': str, 'time': str, 'mlon': np.float64, 'mlat': np.float64,
'mdep': np.float64})
ogcolumns = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src']
kagancols = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src', 'mlon', 'mlat', 'mdep']
eventlist = eventlistALL[kagancols]
if printtest:
lat65 = eventlist[eventlist.lat>65]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.lat <= 65]', datainfo,'df')
dataGP = eventlist[eventlist.etype == 'GP']
if len(dataGP) > 0:
s2f.addToDataInfo(dataGP, 0, 'eventlist = eventlist[eventlist.etype != GP]', datainfo,'df')
eventlist = eventlist[eventlist.lat <= 65]
eventlist = eventlist[eventlist.etype != 'GP']
maxID = eventlistALL['ID'].max()
# Add/Remove manually identified points that don't follow general rules
remdir = 'library/points_to_remove/current_files'
for badFile in os.listdir(remdir):
if badFile[0:3] == slab or badFile[0:3] == 'ALL' or ((slab == 'izu' or slab == 'kur') and badFile[0:3] == 'jap'):
print (' manually removing points listed in:',badFile)
donotuse = pd.read_csv('%s/%s'%(remdir,badFile))
eventlist = s2f.removePoints(donotuse, eventlist, lonmin,
lonmax, latmin, latmax, printtest, datainfo, True, slab)
doubleuse = pd.read_csv(addFile)
eventlist, maxID = s2f.doublePoints(doubleuse, eventlist, maxID)
if slab == 'kur':
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 100
if slab == 'sul' or slab == 'man':
eventlist = eventlist[eventlist.etype != 'CP']
if slab == 'him':
eventlist = eventlist[eventlist.src != 'schulte']
if slab == 'sumz' or slab == 'kur' or slab == 'jap' or slab == 'izu':
if printtest:
lat65 = eventlist[eventlist.etype=='TO']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != TO]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'TO']
if slab == 'kurz':
if printtest:
lat65 = eventlist[eventlist.etype=='ER']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != ER]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'ER']
if slab == 'sol':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon <= 149)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lon > 149)]
TR_data = TR_data[TR_data.lon>149]
if slab == 'man':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon >= 120)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype == BA) & (eventlist.lon >= 120)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | ((eventlist.lon < 120)|(eventlist.lat > 15))]
if slab == 'sum':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lat > 21)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lat <= 21)]
if slab == 'ryu':
ryutodata = eventlist[(eventlist.etype == 'TO')&(eventlist.lon>133)]
if slab == 'hel':
eventlist.loc[eventlist.etype == 'RF', 'etype'] = 'CP'
if slab == 'puyz' or slab == 'mak':
eventlist = eventlist[eventlist.src != 'ddgap']
# Set default uncertainties for events without uncertainties
eventlist.loc[eventlist.etype == 'EQ', 'unc'] = 15.0
eventlist.loc[eventlist.etype == 'CP', 'unc'] = 5.0
eventlist.loc[eventlist.etype == 'BA', 'unc'] = 1.0
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 40.0
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <5), 'unc'] = 5.0
if slab == 'puy':
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <15), 'unc'] = 15.0
eventlist.loc[eventlist.mlon < 0, 'mlon'] += 360
# Ensure all data are within longitudes 0-360
eventlist.loc[eventlist.lon < 0, 'lon']+=360
# Define mean depth of bathymetry (for constraining the interpolation outboard of the trench)
meanBAlist = eventlist[eventlist.etype == 'BA']
meanBA = meanBAlist['depth'].mean()
del eventlistALL
''' ----- (11) Calculate seismogenic zone thickness ------ '''
# define seismogenic thickness parameters. change if needed
maxdep = 65
maxdepdiff = 20
origorcentl = 'c'
origorcentd = 'c'
slaborev = 'e'
lengthlim = -50
ogcolumns = eventlist.columns
eventlist = s2f.getReferenceKagan(slab1data, eventlist, origorcentl, origorcentd)
if slab != 'hin':
seismo_thick, taper_start = s2f.getSZthickness(eventlist,folder,slab,maxdep,maxdepdiff,origorcentl,origorcentd,slaborev,savedir,lengthlim)
else:
seismo_thick = 20
taper_start = 20
if slab == 'hel' or slab == 'car' or slab == 'mak':
seismo_thick = 40
if slab == 'sol':
seismo_thick = 40
if slab == 'alu' or slab == 'cot' or slab == 'sul':
seismo_thick = 10
if slab == 'sol':
eventlistE = eventlist[eventlist.lon>148]
eventlistW = eventlist[eventlist.lon<=148]
eventlistE = s2f.cmtfilter(eventlistE,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistE,eventlistW],sort=True)
if slab == 'sum':
eventlistS = eventlist[eventlist.lat<=22]
eventlistN = eventlist[eventlist.lat>22]
eventlistS = s2f.cmtfilter(eventlistS,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistS,eventlistN],sort=True)
if slab != 'hal' and slab != 'him' and slab != 'pam' and slab != 'hin' and slab != 'sol' and slab != 'sum' and slab != 'cas':
eventlist = s2f.cmtfilter(eventlist,seismo_thick,printtest,datainfo,slab)
eventlist = eventlist[ogcolumns]
''' ------ (12) Record variable parameters used for this model ------'''
f = open(these_params, 'w+')
f.write('Parameters used to create file for slab_Date_time: %s_%s_%s \n' \
%(slab, date, time))
f.write('\n')
f.close()
f = open(these_params, 'a')
f.write('inFile: %s \n' % inFile)
f.write('use_box: %s \n' % use_box)
f.write('latmin: %s \n' % str(latmin))
f.write('latmax: %s \n' % str(latmax))
f.write('lonmin: %s \n' % str(lonmin))
f.write('lonmax: %s \n' % str(lonmax))
f.write('slab: %s \n' % slab)
f.write('grid: %s \n' % str(grid))
f.write('radius1: %s \n' % str(radius1))
f.write('radius2: %s \n' % str(radius2))
f.write('alen: %s \n' % str(alen))
f.write('blen: %s \n' % str(blen))
f.write('sdr: %s \n' % str(sdr))
f.write('ddr: %s \n' % str(ddr))
f.write('taper: %s \n' % str(taper))
f.write('T: %s \n' % str(T))
f.write('node: %s \n' % str(node))
f.write('filt: %s \n' % str(filt))
f.write('maxdist: %s \n' % str(maxdist))
f.write('mindip: %s \n' % str(mindip))
f.write('minstk: %s \n' % str(minstk))
f.write('maxthickness: %s \n' % str(maxthickness))
f.write('seismo_thick: %s \n' % str(seismo_thick))
f.write('dipthresh: %s \n' % str(dipthresh))
f.write('fracS: %s \n' % str(fracS))
f.write('knot_no: %s \n' % str(knot_no))
f.write('kdeg: %s \n' % str(kdeg))
f.write('rbfs: %s \n' % str(rbfs))
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
f.write('undergrid: %s \n' % str(undergrid))
f.close()
''' ------ (13) Define search grid ------ '''
print(' Creating search grid...')
#Creates a grid over the slab region
regular_grid = s2f.create_grid_nodes3(grid, lonmin, lonmax, latmin, latmax)
grid_in_polygon = s2f.createGridInPolygon2(regular_grid, slab, polygonFile)
lons = grid_in_polygon[:, 0]
lats = grid_in_polygon[:, 1]
lons = np.round(lons,decimals=1)
lats = np.round(lats,decimals=1)
lons[lons <0] += 360
slab1guide,slab1query = s2f.makeReference(slab1data,lons,lats,grid,printtest,slab)
''' ------ (14) Identify tomography datasets ------ '''
## Identify how many tomography datasets are included
tomo_data = eventlist[eventlist.etype == 'TO']
if len(tomo_data) > 0 and slab != 'sam':
sources = tomo_data.src
TOsrc = set()
for x in sources:
TOsrc.add(x)
tomo_sets = TOsrc
tomo = True
else:
tomo_sets = 0
tomo = False
premulti = pd.DataFrame()
postmulti = pd.DataFrame()
OGmulti = pd.DataFrame()
elistAA = pd.DataFrame()
loncuts,latcuts,elistcuts = s2f.getlatloncutoffs(lons,lats,eventlist,printtest)
''' ------ (15) Initialize arrays for Section 2 ------ '''
# Creates list of events that were used for the model based on ID
used_all = np.zeros((1, 2))
used_TO = np.zeros((1, 2))
warnings.filterwarnings('ignore', 'Mean of empty slice.')
pd.options.mode.chained_assignment = None
'''Section 2: First loop
This Accomplishes:
1) Calculate error for each used tomography model.
This is accomplished by determining the difference between measured
depths for tomography and earthquake data, which will be used
outside of the loop.
2) Identify data to constrain depth/coordinate of center of Benioff Zone.
2a) Identify local strike, dip, and depth of Slab1.0.
If Slab 1.0 does not exist, acquire strike from closest trench
location with a strike oriented perpendicularly to this lon/lat.
If extending beyond Slab1.0 depths perpendicularly, find nearest and
most perpendicular point on Slab1.0, and define depth to
search from based on dip and depth of that point on Slab1.0. The
dip is defined as the dip of the local Slab1.0 point.
If extending along strike from Slab1.0, define depth to search from
based on mean depth of data within defined radius of node. The
dip of the node is defined as 0.
2b) Filter by ellipsoid oriented perpendicularly to Slab1.0.
If the local dip is less than mindip, orient ellipsoid vertically
and along strike found in (2a).
If the local dip is greater than mindip, orient ellipsoid
perpendicular to strike/dip found in (2a).
The long axis of the ellipse is defined as radius1, the short axis
is defined as radius2.
The shallow extent of the ellipsoid is defined as sdr at depths
above seismo_thick, and is tapered to 3*sdr at depths greater
than seismo_thick.
The deep extent of the ellipsoid is defined as sdr at depths above
seismo_thick, and is tapered to ddr at depths greater than
seismo_thick.
2c) Nodes outboard of the trench are only constrained by bathymetry.
Nodes inboard of the trench are constrained by all but bathymetry.
2d) Conditionally add average active source/average receiver functions.
If within the distance of the longest AS profile from the trench
identify the average AS profile depth at that distance from
trench. If there is no active source point within the search
ellipsoid defined in (2b), add an average active source data
point to the set of data to constrain the depth at this node.
Receiver functions in cam and alu are utilized similarly, with
defined distances from trench and distances along strike from
key profiles that need to be utilized in the absence of
seismicity.
2e) If information other than tomography is available above 300 km
depth, all tomography is filtered at that node.
2f) If less than two data points are available to constrain a node, no
depth is resolved at that node.
2g) If |strike of Slab1.0 at node - strike of Slab1.0 at farthest data|
> minstk, filter data at ends until < minstk.
If this node is outside of Slab1.0, reduce long axis of search
ellipsoid prior to starting filters.
The output of this loop is two numpy arrays and list of nodes with data:
used_TO: local difference between tomography and earthquake depths and
a tomography dataset identifier
used_all: indices for the data used and their associated nodes
This one is created to prevent the need for re-filtering
in later loops
'''
print("Start Section 2 of 7: First loop")
lons1 = (np.ones(len(lons))*-9999).astype(np.float64)
lats1 = (np.ones(len(lons))*-9999).astype(np.float64)
deps1 = (np.ones(len(lons))*-9999).astype(np.float64)
strs1 = (np.ones(len(lons))*-9999).astype(np.float64)
dips1 = (np.ones(len(lons))*-9999).astype(np.float64)
nIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
aleng = (np.ones(len(lons))*-9999).astype(np.float64)
bleng = (np.ones(len(lons))*-9999).astype(np.float64)
cleng = (np.ones(len(lons))*-9999).astype(np.float64)
sleng = (np.ones(len(lons))*-9999).astype(np.float64)
dleng = (np.ones(len(lons))*-9999).astype(np.float64)
elons1 = (np.ones(len(lons))*-9999).astype(np.float64)
elats1 = (np.ones(len(lons))*-9999).astype(np.float64)
edeps1 = (np.ones(len(lons))*-9999).astype(np.float64)
estrs1 = (np.ones(len(lons))*-9999).astype(np.float64)
edips1 = (np.ones(len(lons))*-9999).astype(np.float64)
enIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
ealeng = (np.ones(len(lons))*-9999).astype(np.float64)
ebleng = (np.ones(len(lons))*-9999).astype(np.float64)
ecleng = (np.ones(len(lons))*-9999).astype(np.float64)
esleng = (np.ones(len(lons))*-9999).astype(np.float64)
edleng = (np.ones(len(lons))*-9999).astype(np.float64)
if args.nCores is not None:
if args.nCores > 1 and args.nCores < 8:
pooling = True
elif args.nCores == 1:
pooling = False
else:
pooling = False
else:
pooling = False
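# Pooling is only enabled for 2-7 requested cores; a single core (or 8 and
# above) falls back to the serial per-node loop.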
cutcount = 1
allnewnodes = None
for cut in range(len(loncuts)):
theselats = latcuts[cut]
theselons = loncuts[cut]
theseevents = elistcuts[cut]
indices = range(len(theselats))
if cut == 0:
i2 = 0
cutcount+=1
if pooling:
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, theseevents,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices) #$$#
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
lons1[i2] = thisnode[0]
lats1[i2] = thisnode[1]
deps1[i2] = thisnode[2]
strs1[i2] = thisnode[3]
dips1[i2] = thisnode[4]
nIDs1[i2] = thisnode[5]
aleng[i2] = thisnode[6]
bleng[i2] = thisnode[7]
cleng[i2] = thisnode[8]
sleng[i2] = thisnode[14]
dleng[i2] = thisnode[15]
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
newnodes = thisnode[12]
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not thisnode[13] and np.isfinite(thisnode[2]):
elons1[i2] = thisnode[0]
elats1[i2] = thisnode[1]
edeps1[i2] = thisnode[2]
estrs1[i2] = thisnode[3]
edips1[i2] = thisnode[4]
enIDs1[i2] = thisnode[5]
ealeng[i2] = thisnode[6]
ebleng[i2] = thisnode[7]
ecleng[i2] = thisnode[8]
esleng[i2] = thisnode[14]
edleng[i2] = thisnode[15]
i2 += 1
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aaleng, ableng, acleng, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, asleng, adleng = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, theseevents, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
lons1[i2] = alon
lats1[i2] = alat
deps1[i2] = alocdep
strs1[i2] = alocstr
dips1[i2] = alocdip
nIDs1[i2] = anID
aleng[i2] = aaleng
bleng[i2] = ableng
cleng[i2] = acleng
sleng[i2] = asleng
dleng[i2] = adleng
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not anydata and np.isfinite(alocdep):
elons1[i2] = alon
elats1[i2] = alat
edeps1[i2] = alocdep
estrs1[i2] = alocstr
edips1[i2] = alocdip
enIDs1[i2] = anID
ealeng[i2] = aaleng
ebleng[i2] = ableng
ecleng[i2] = acleng
esleng[i2] = asleng
edleng[i2] = adleng
i2 += 1
lons1 = lons1[lons1>-999]
lats1 = lats1[lats1>-999]
deps1 = deps1[(deps1>-999)|np.isnan(deps1)]
strs1 = strs1[strs1>-999]
dips1 = dips1[dips1>-999]
nIDs1 = nIDs1[nIDs1>-999]
aleng = aleng[aleng>-999]
bleng = bleng[bleng>-999]
cleng = cleng[cleng>-999]
sleng = sleng[sleng>-999]
dleng = dleng[dleng>-999]
elons1 = elons1[edleng>-999]
elats1 = elats1[edleng>-999]
edeps1 = edeps1[(edeps1>-999)|np.isnan(edeps1)]
estrs1 = estrs1[edleng>-999]
edips1 = edips1[edleng>-999]
enIDs1 = enIDs1[edleng>-999]
ealeng = ealeng[edleng>-999]
ebleng = ebleng[edleng>-999]
ecleng = ecleng[edleng>-999]
esleng = esleng[edleng>-999]
edleng = edleng[edleng>-999]
testdf = pd.DataFrame({'lon':lons1,'lat':lats1,'depth':deps1,'strike':strs1,'dip':dips1,'id':nIDs1,'alen':aleng,'blen':bleng,'clen':cleng,'slen':sleng,'dlen':dleng})
testdf.to_csv('firstloop.csv',header=True,index=False,na_rep=np.nan)
if allnewnodes is not None:
theseIDs = []
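# Node IDs for the supplemental nodes encode position to 0.1 degree by
# concatenating int(lon*10) and int(lat*10); a '0' is inserted before the
# latitude digits to flag negative latitudes.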
for i in range(len(allnewnodes)):
if allnewnodes[i,1]>0:
thisnID = int('%i%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*10))
else:
thisnID = int('%i0%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*-10))
theseIDs.append(thisnID)
newlonsdf1 = pd.DataFrame({'lon':allnewnodes[:,0],'lat':allnewnodes[:,1],'nID':theseIDs})
newlonsdf = newlonsdf1.drop_duplicates(['nID'])
theselons = newlonsdf['lon'].values
theselats = newlonsdf['lat'].values
if grid == 0.2:
grid2 = 0.1
elif grid == 0.1:
grid2 = 0.05
else:
grid2 = grid
slab1guide,slab1query = s2f.makeReference(slab1data,theselons,theselats,grid2,printtest,slab)
newlats = []
newlons = []
newdeps = []
newstrs = []
newdips = []
newnIDs = []
newalen = []
newblen = []
newclen = []
newslen = []
newdlen = []
enewlats = []
enewlons = []
enewdeps = []
enewstrs = []
enewdips = []
enewnIDs = []
enewalen = []
enewblen = []
enewclen = []
enewslen = []
enewdlen = []
if pooling:
indices = range(len(theselons))
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, eventlist,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices)
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
newlons.append(thisnode[0])
newlats.append(thisnode[1])
newdeps.append(thisnode[2])
newstrs.append(thisnode[3])
newdips.append(thisnode[4])
newnIDs.append(thisnode[5])
newalen.append(thisnode[6])
newblen.append(thisnode[7])
newclen.append(thisnode[8])
newslen.append(thisnode[14])
newdlen.append(thisnode[15])
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not thisnode[13] and np.isfinite(thisnode[2]):
enewlons.append(thisnode[0])
enewlats.append(thisnode[1])
enewdeps.append(thisnode[2])
enewstrs.append(thisnode[3])
enewdips.append(thisnode[4])
enewnIDs.append(thisnode[5])
enewalen.append(thisnode[6])
enewblen.append(thisnode[7])
enewclen.append(thisnode[8])
enewslen.append(thisnode[14])
enewdlen.append(thisnode[15])
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aalen, ablen, aclen, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, aslen, adlen = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, eventlist, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
newlons.append(alon)
newlats.append(alat)
newdeps.append(alocdep)
newstrs.append(alocstr)
newdips.append(alocdip)
newnIDs.append(anID)
newalen.append(aalen)
newblen.append(ablen)
newclen.append(aclen)
newslen.append(aslen)
newdlen.append(adlen)
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not anydata and np.isfinite(alocdep):
enewlons.append(alon)
enewlats.append(alat)
enewdeps.append(alocdep)
enewstrs.append(alocstr)
enewdips.append(alocdip)
enewnIDs.append(anID)
enewalen.append(aalen)
enewblen.append(ablen)
enewclen.append(aclen)
enewslen.append(aslen)
enewdlen.append(adlen)
#np.savetxt('%s_diptest.csv'%slab, allnewnodes, header='lon,lat,depth,strike,dip',fmt='%.2f', delimiter=',',comments='')
if printtest:
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(131)
con = ax1.scatter(lons1,lats1,c=dips1,s=10,edgecolors='none',cmap='plasma')
ax1.set_ylabel('Latitude')
ax1.axis('equal')
plt.grid()
title = 'Diptest'
ax1.set_title(title)
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax2 = fig.add_subplot(132)
con = ax2.scatter(allnewnodes[:,0], allnewnodes[:,1],c=allnewnodes[:,1],s=10,edgecolors='none',cmap='plasma')
ax2.set_xlabel('Longitude')
ax2.set_ylabel('Latitude')
ax2.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax3 = fig.add_subplot(133)
con = ax3.scatter(newlons, newlats,c=newdips,s=10,edgecolors='none',cmap='plasma')
ax3.set_xlabel('Longitude')
ax3.set_ylabel('Latitude')
ax3.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
figtitle = 'diptest.png'
fig.savefig(figtitle)
plt.close()
lons1 = np.append(lons1, [newlons])
lats1 = np.append(lats1, [newlats])
deps1 = np.append(deps1, [newdeps])
strs1 = np.append(strs1, [newstrs])
dips1 = np.append(dips1, [newdips])
nIDs1 = np.append(nIDs1, [newnIDs])
aleng = np.append(aleng, [newalen])
bleng = np.append(bleng, [newblen])
cleng = np.append(cleng, [newclen])
sleng = np.append(sleng, [newslen])
dleng = np.append(dleng, [newdlen])
elons1 = np.append(elons1, [enewlons])
elats1 = np.append(elats1, [enewlats])
edeps1 = np.append(edeps1, [enewdeps])
estrs1 = np.append(estrs1, [enewstrs])
edips1 = np.append(edips1, [enewdips])
enIDs1 = np.append(enIDs1, [enewnIDs])
ealeng = np.append(ealeng, [enewalen])
ebleng = np.append(ebleng, [enewblen])
ecleng = np.append(ecleng, [enewclen])
esleng = np.append(esleng, [enewslen])
edleng = np.append(edleng, [enewdlen])
#print ('lon',len(elons1),'lat',len(elats1),'ogdep',len(edeps1),'ogstr',len(estrs1),'ogdip',len(edips1),'nID',len(enIDs1),'alen',len(ealeng),'blen',len(ebleng),'clen',len(ecleng),'slen',len(esleng),'dlen',len(edleng))
emptynodes = pd.DataFrame({'lon':elons1,'lat':elats1,'ogdep':edeps1,'ogstr':estrs1,'ogdip':edips1,'nID':enIDs1,'alen':ealeng,'blen':ebleng,'clen':ecleng,'slen':esleng,'dlen':edleng})
#emptynodes.to_csv('emptynodes.csv',header=True,index=False)
refdeps = pd.DataFrame({'lon':lons1, 'lat':lats1, 'ogdep':deps1})
if global_average:
''' # need to fix this after adjusting based on BA depth at trench
AA_global['depthtest'] = (AA_global['depth'].values*100).astype(int)
for index, row in elistAA.iterrows():
depthAA = row['depth']
depthtestAA = int(100*row['depth'])
thisdepth = AA_global[AA_global.depthtest == depthtestAA]
uncAA = thisdepth['unc'].values[0]
elistAA.loc[elistAA.depth == depthAA, 'unc'] = uncAA*2
'''
elistAA['unc'] = 10.0
elistcuts.append(elistAA)
eventlist2 = pd.concat(elistcuts,sort=True)
eventlist = eventlist2.reset_index(drop=True)
del eventlist2
eventlist = eventlist.drop_duplicates(['ID'])
eventlist = eventlist.reset_index(drop=True)
# Remove first line of zeros
used_TO = used_TO[~np.all(used_TO ==0, axis=1)]
used_all = used_all[~np.all(used_all ==0, axis=1)]
'''Section 3: Calculate tomography uncertainties
Here we use the output from the first loop to calculate tomography uncertainties.
For each tomography dataset, we calculate the standard deviation of the distribution of differences between tomography and earthquake depths collected in the first loop.
We apply this standard deviation as the uncertainty value for each tomography datum from that dataset.
'''
print("Start Section 3 of 7: Assigning tomography uncertainties")
if tomo:
for idx, src in enumerate(tomo_sets):
tomog = used_TO[:][used_TO[:, 1] == idx]
tmp_std = np.std(tomog[:, 0])
if tmp_std > 40.:
tmp_std = 40.
elif tmp_std < 15.:
tmp_std = 15.
elif np.isnan(tmp_std):
tmp_std = 40
eventlist['unc'][eventlist['src'] == src] = tmp_std
'''Section 4: Second loop
The purpose of this loop is to determine a set of "pre-shifted" slab points that do not utilize receiver function data.
This output dataset will represent a transition from slab surface at shallow depths to slab center at deeper depths.
The only output from this loop is an array of the form [ lat lon dep unc nodeID ]
'''
print("Start Section 4 of 7: Second loop")
bzlons, bzlats, bzdeps, stds2, nIDs2 = [], [], [], [], []
lats2, lons2, str2, dip2, centsurf = [], [], [], [], []
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
baleng, bbleng, bcleng, onlyto = [], [], [], []
rlist = pd.DataFrame()
if pooling:
pool2 = Pool(args.nCores)
npass = args.nCores
partial_loop2 = partial(loops.loop2, testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng)
indices = range(len(lats1))
pts2 = pool2.map(partial_loop2, indices)
pool2.close()
pool2.join()
for i in range(len(indices)):
thisnode = pts2[i]
if np.isfinite(thisnode[0]):
bzlons.append(thisnode[0])
bzlats.append(thisnode[1])
bzdeps.append(thisnode[2])
stds2.append(thisnode[3])
nIDs2.append(thisnode[4])
lats2.append(thisnode[5])
lons2.append(thisnode[6])
str2.append(thisnode[7])
dip2.append(thisnode[8])
centsurf.append(thisnode[9])
baleng.append(thisnode[20])
bbleng.append(thisnode[21])
bcleng.append(thisnode[22])
onlyto.append(thisnode[23])
if np.isfinite(thisnode[10]):
bilats.append(thisnode[10])
bilons.append(thisnode[11])
binods.append(thisnode[12])
bistds.append(thisnode[13])
biindx.append(thisnode[14])
bistrs.append(thisnode[15])
bidips.append(thisnode[16])
bideps.append(thisnode[17])
rlist = thisnode[18]
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*thisnode[4]
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = thisnode[19]
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
del pts2
else:
npass = 1
for nodeno in range(len(lats1)):
bpeak_lon, bpeak_lat, bpeak_depth, bstd, bnID, blat, blon, bcstr, bcdip, bcentsurf, bbilats, bbilons, bbinods, bbistds, bbiindx, bbistrs, bbidips, bbideps, brlist, bpremulti, alen, blen, clen, onlyt = loops.loop2(testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng, nodeno)
if np.isfinite(bpeak_lon):
bzlons.append(bpeak_lon)
bzlats.append(bpeak_lat)
bzdeps.append(bpeak_depth)
stds2.append(bstd)
nIDs2.append(bnID)
lats2.append(blat)
lons2.append(blon)
str2.append(bcstr)
dip2.append(bcdip)
centsurf.append(bcentsurf)
baleng.append(alen)
bbleng.append(blen)
bcleng.append(clen)
onlyto.append(onlyt)
if np.isfinite(bbilats):
bilats.append(bbilats)
bilons.append(bbilons)
binods.append(bbinods)
bistds.append(bbistds)
biindx.append(bbiindx)
bistrs.append(bbistrs)
bidips.append(bbidips)
bideps.append(bbideps)
rlist = brlist
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*bnID
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = bpremulti
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
tmp_res = pd.DataFrame({'bzlon':bzlons,'bzlat':bzlats,'depth':bzdeps,'stdv':stds2,'nID':nIDs2,'lat':lats2,'lon':lons2,'ogstr':str2,'ogdip':dip2,'centsurf':centsurf,'alen':baleng,'blen':bbleng,'clen':bcleng,'onlyto':onlyto})
for j in range(len(bilats)):
lon = bilons[j]
lat = bilats[j]
nID = binods[j]
stdv = bistds[j]
stk = bistrs[j]
dep = bideps[j]
dip = bidips[j]
if dip <= mindip:
peak_depth = s2f.findMultiDepth(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, alen, printtest)
peak_lon = lon
peak_lat = lat
else:
peak_lon, peak_lat, peak_depth = s2f.findMultiDepthP(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, dip, alen, printtest)
tmp_res.loc[tmp_res.nID == nID, 'bzlon'] = peak_lon
tmp_res.loc[tmp_res.nID == nID, 'bzlat'] = peak_lat
tmp_res.loc[tmp_res.nID == nID, 'depth'] = peak_depth
tmp_res = s2f.addGuidePoints(tmp_res, slab)
if slab == 'sol':
tmp_res = tmp_res[(tmp_res.bzlon>142) & (tmp_res.bzlon<164)]
if slab == 'sul':
tmp_res = tmp_res[(tmp_res.bzlon<123.186518923) | (tmp_res.depth<100)]
tmp_res = tmp_res[(tmp_res.bzlon<122.186518923) | (tmp_res.depth<200)]
# Save data used to file
used_IDs = used_all[:, 1]
used_data = eventlist[eventlist['ID'].isin(used_IDs)]
used_data = used_data[['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', 'S1', 'D1', 'R1', 'S2', 'D2', 'R2', 'src']]
used_data = used_data.drop_duplicates(['ID'])
used_data.loc[used_data.lon < 0, 'lon']+=360
if slab == 'hel':
used_data.loc[used_data.etype == 'CP', 'etype']='RF'
used_data.to_csv(dataFile, header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
#tmp_res.to_csv('nodetest.csv', header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
'''Section 5: Calculate shifts
Here we use the output of the second loop to calculate shifting locations for non-RF results.
A user-specified lithospheric thickness can be read in; otherwise, lithospheric thickness is calculated using the nearest oceanic plate age.
The taper and fracshift are set in the parameter file for each subduction zone. fracshift was determined by testing each individual
subduction zone to match seismicity. Shift direction is determined by the strike and dip of a surface created using the output from the second loop.
A clipping mask is also created in this section using the shifted output data.
'''
print("Start Section 5 of 7: Calculate shifts")
# Calculate shift for each node
print(" Calculating shift...")
surfnode = 0.5
data0 = tmp_res[(tmp_res.stdv > -0.000001)&(tmp_res.stdv < 0.000001)]
tmp_res = tmp_res[(tmp_res.stdv < -0.000001)|(tmp_res.stdv > 0.000001)]
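# Nodes whose depth uncertainty is effectively zero (|stdv| < 1e-6) are split
# off into data0 and excluded from the shift calculation performed on tmp_res.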
if use_box == 'yes':
if lonmin<0:
lonmin+=360
if lonmax<0:
lonmax+=360
TR_data = TR_data[(TR_data.lon<lonmax)&(TR_data.lon>lonmin)]
TR_data = TR_data[(TR_data.lat<latmax)&(TR_data.lat>latmin)]
TR_data = TR_data.reset_index(drop=True)
# Read in age grid files
ages = gmt.GMTGrid.load(agesFile)
ages_error = gmt.GMTGrid.load(ageerrorsFile)
shift_out, maxthickness = s2f.slabShift_noGMT(tmp_res, node, T, TR_data, seismo_thick, taper, ages, ages_error, filt, slab, maxthickness, grid, 'bzlon', 'bzlat', 'depth', fracS, npass, meanBA, printtest, kdeg, knot_no, rbfs, use_box)
del ages
del ages_error
tmp_res['pslon'] = tmp_res['lon'].values*1.0
tmp_res['pslat'] = tmp_res['lat'].values*1.0
tmp_res['psdepth'] = tmp_res['depth'].values*1.0
tmp_res = tmp_res[['pslon', 'pslat', 'bzlon', 'bzlat', 'psdepth', 'stdv', 'nID', 'ogstr', 'ogdip', 'centsurf', 'alen', 'blen', 'clen']]
shift_out = shift_out.merge(tmp_res)
shift_out.loc[shift_out.pslon < 0, 'pslon']+=360
shift_out['avstr'] = np.nan
shift_out['avdip'] = np.nan
shift_out['avrke'] = np.nan
'''Section 6: Third loop
The purpose of this loop is to produce the final location measurements for the slab.
Here we edit the input data by adding the shift to the depths, then calculate a PDF with receiver functions included.
The only output from this loop is a 10 column array with all results necessary to build the output.
Output is of the format [ lat lon dep unc shift_mag shift_unc avg_str avg_dip avg_rak pre-shift_dep pre-shift_str pre-shift_dip nodeID ]
'''
print("Start Section 6 of 7: Third (final) loop")
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
if pooling:
pool3 = Pool(args.nCores)
partial_loop3 = partial(loops.loop3, shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper)
indices = shift_out['nID'].values
pts3 = pool3.map(partial_loop3, indices)
pool3.close()
pool3.join()
for i in range(len(indices)):
thisnode = pts3[i]
if np.isfinite(thisnode[0]):
nID = thisnode[13]
shift_out.loc[shift_out.nID == nID, 'depth'] = thisnode[0]
shift_out.loc[shift_out.nID == nID, 'stdv'] = thisnode[1]
shift_out.loc[shift_out.nID == nID, 'avstr'] = thisnode[2]
shift_out.loc[shift_out.nID == nID, 'avdip'] = thisnode[3]
shift_out.loc[shift_out.nID == nID, 'avrke'] = thisnode[4]
shift_out.loc[shift_out.nID == nID, 'lon'] = thisnode[15]
shift_out.loc[shift_out.nID == nID, 'lat'] = thisnode[16]
if np.isfinite(thisnode[5]):
bilats.append(thisnode[5])
bilons.append(thisnode[6])
binods.append(thisnode[7])
bistds.append(thisnode[8])
biindx.append(thisnode[9])
bistrs.append(thisnode[10])
bidips.append(thisnode[11])
bideps.append(thisnode[12])
multi = thisnode[14]
if len(multi) > 0:
postmulti = pd.concat([postmulti, multi],sort=True)
del pts3
else:
for nodeno in shift_out['nID'].values:
crdepth, crstd, crstrike, crdip, crrake, cbilats, cbilons, cbinods, cbistds, cbiindx, cbistrs, cbidips, cbideps, cnID, cpostmulti, cpeak_lon, cpeak_lat = loops.loop3(shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper, nodeno)
if np.isfinite(crdepth):
nID = cnID
shift_out.loc[shift_out.nID == nID, 'depth'] = crdepth
shift_out.loc[shift_out.nID == nID, 'stdv'] = crstd
shift_out.loc[shift_out.nID == nID, 'avstr'] = crstrike
shift_out.loc[shift_out.nID == nID, 'avdip'] = crdip
shift_out.loc[shift_out.nID == nID, 'avrke'] = crrake
shift_out.loc[shift_out.nID == nID, 'lon'] = cpeak_lon
shift_out.loc[shift_out.nID == nID, 'lat'] = cpeak_lat
if np.isfinite(cbilats):
bilats.append(cbilats)
bilons.append(cbilons)
binods.append(cbinods)
bistds.append(cbistds)
biindx.append(cbiindx)
bistrs.append(cbistrs)
bidips.append(cbidips)
bideps.append(cbideps)
multi = cpostmulti
if len(multi) > 0:
postmulti = pd.concat([postmulti, multi],sort=True)
shift_out.loc[shift_out.lon < 0, 'lon']+=360
for j in range(len(bilats)):
lon = bilons[j]
lat = bilats[j]
nID = binods[j]
stdv = bistds[j]
stk = bistrs[j]
dep = bideps[j]
dip = bidips[j]
if dip <= mindip:
peak_depth = s2f.findMultiDepth(lon, lat, nID, shift_out, grid, postmulti, stk, slab, dep, alen, printtest)
peak_lon = lon
peak_lat = lat
else:
peak_lon, peak_lat, peak_depth = s2f.findMultiDepthP(lon, lat, nID, shift_out, grid, postmulti, stk, slab, dep, dip, alen, printtest)
shift_out.loc[shift_out.nID == nID, 'lon'] = peak_lon
shift_out.loc[shift_out.nID == nID, 'lat'] = peak_lat
shift_out.loc[shift_out.nID == nID, 'depth'] = peak_depth
# Save nodes to file
shift_out.loc[shift_out.lon < 0, 'lon']+=360
dip90s = 90.0-shift_out['ogdip'].values
vertunc = shift_out['stdv'].values * (np.sin(np.radians(dip90s)))
horzunc = shift_out['stdv'].values * (np.cos(np.radians(dip90s)))
shift_out['vstdv'] = vertunc
shift_out['hstdv'] = horzunc
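# The node uncertainty (presumably measured normal to the slab surface) is
# projected into vertical and horizontal components using the local dip:
# vstdv = stdv*cos(dip) and hstdv = stdv*sin(dip), written here via the
# complement (90 - dip).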
if slab == 'sum' or slab == 'kur':
shift_out, rempts = s2f.removeSZnodes(shift_out, fracS, 0.4, seismo_thick)
elif slab == 'camz' or slab == 'sulz':
shift_out, rempts = s2f.removeSZnodes(shift_out, fracS, 0.8, seismo_thick)
elif slab != 'sol' and slab != 'phi' and slab != 'sul' and slab != 'alu' and slab != 'sum':
shift_out, rempts = s2f.removeSZnodes(shift_out, fracS, 0.1, seismo_thick)
else:
rempts = pd.DataFrame()
if len(rempts) > 0:
rempts = rempts[['lon', 'lat', 'depth', 'stdv', 'smag', 'shiftstd', 'avstr', 'avdip', 'avrke', 'psdepth', 'sstr', 'sdip', 'nID', 'pslon', 'pslat', 'bzlon', 'bzlat', 'centsurf','thickness', 'alen', 'blen', 'clen', 'ogstr', 'ogdip','hstdv','vstdv']]
rempts.to_csv(rempFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
shift_out = shift_out[['lon', 'lat', 'depth', 'stdv', 'smag', 'shiftstd', 'avstr', 'avdip', 'avrke', 'psdepth', 'sstr', 'sdip', 'nID', 'pslon', 'pslat', 'bzlon', 'bzlat', 'centsurf','thickness', 'alen', 'blen', 'clen', 'ogstr', 'ogdip','hstdv','vstdv']]
shift_out.to_csv(nodeFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
if slab == 'manz' or slab == 'solz' or slab == 'phiz':
lowernodes, shift_out = s2f.nodesift(shift_out, grid)
if slab == 'izuz':
midshiftout = shift_out[(shift_out.lat > 15)&(shift_out.lat < 28)]
outshiftout = shift_out[(shift_out.lat <= 15)|(shift_out.lat >= 28)]
midshiftout = midshiftout[midshiftout.depth<300]
shift_out = pd.concat([midshiftout,outshiftout],sort=True)
if slab == 'solz' or slab == 'sumz':
nodesOG, projnodes = s2f.extendEdges(shift_out,grid,slab)
shift_out = pd.concat([projnodes, shift_out],sort=True)
'''Section 7: Create output
Here we put together all of the output data into the correct form for saving to output files.
First we create a surface with fine spacing of the final data, then we filter it and apply the clipping mask.
Second we populate the output array, and finally we save it.
The output file is of the format [lon lat dep_raw str_raw dip_raw shift_mag dep_shift dep_shift_smooth str_shift_smooth dip_shift_smooth dz1 dz2 dz3 avg_str avg_dip avg_rak]
This file has a regular spacing of fine nodes corresponding to the final surface
The columns for shift_mag, avg_str, avg_dip, and avg_rak are only populated where there was a pre-shift datum.
'''
print("Start Section 7 of 7: Create output")
# Create final surfaces for output
print(" Creating surfaces...")
shift_out = shift_out[(shift_out.nID != 2642178)& (shift_out.nID != 2646182)& (shift_out.nID != 2646184)& (shift_out.nID != 2646186)& (shift_out.nID != 1454068)& (shift_out.nID != 1122062)& (shift_out.nID != 1123062)&(shift_out.nID !=1454068)& (shift_out.nID != 16790448) & (shift_out.nID != 16790449)]
if slab == 'man':
shift_out = shift_out[(shift_out.bzlat > 13.5)|(shift_out.bzlon < 121)]
surfdata = np.zeros((len(shift_out), 4))
surfdata[:, 0], surfdata[:, 1], surfdata[:, 2], surfdata[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['depth'].values, shift_out['stdv'].values
errordata = np.zeros((len(shift_out), 4))
errordata[:, 0], errordata[:, 1], errordata[:, 2], errordata[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['stdv'].values, np.ones(len(shift_out))
errordataB = np.zeros((len(shift_out), 4))
errordataB[:, 0], errordataB[:, 1], errordataB[:, 2], errordataB[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['shiftstd'].values, np.ones(len(shift_out))
thickdata = np.zeros((len(shift_out),4))
thickdata[:, 0], thickdata[:, 1], thickdata[:, 2], thickdata[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['thickness'].values, np.ones(len(shift_out))
if slab == 'sum':
Surfgrid, xi, dl = s2f.chunksurface(surfdata, node, T, slab, grid, 'depth', time, 'test.txt', filt, pd.DataFrame(), npass, TR_data, meanBA, kdeg, knot_no, rbfs, shift_out,'fin','og','lon',100,110,105)
flipornot = 'flip'
elif slab == 'jap':
Surfgrid, xi, dl = s2f.chunksurface(surfdata, node, T, slab, grid, 'depth', time, 'test.txt', filt, pd.DataFrame(), npass, TR_data, meanBA, kdeg, knot_no, rbfs, shift_out,'fin','og','lat',30,40,35)
flipornot = 'flip'
else:
Surfgrid, xi, dl = s2f.pySurface3(surfdata, node, T, slab, grid, 'depth', time, 'test.txt', filt, pd.DataFrame(), npass, TR_data, meanBA, kdeg, knot_no, rbfs, shift_out,'fin','og')
flipornot = 'dontflip'
sigma = (filt/2.0) / node
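# sigma converts the filter half-width (filt, in the same units as the node
# spacing) into a number of grid cells for the Gaussian smoothing applied
# below.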
Errorgrid = s2f.makeErrorgrid(Surfgrid, xi, errordata)
Errorgrid2 = s2f.makeErrorgrid(Surfgrid, xi, errordataB)
thickgrid = s2f.makeErrorgrid(Surfgrid, xi, thickdata)
if slab == 'puy':
filt2 = 0.6
Filtgrid = s2f.specialpuyfilt(Surfgrid,xi,filt,filt2,node)
Errorgrid = s2f.specialpuyfilt(Errorgrid,xi,filt,filt2,node)
Errorgrid2 = s2f.specialpuyfilt(Errorgrid2,xi,filt,filt2,node)
thickgrid = s2f.specialpuyfilt(thickgrid,xi,filt,filt2,node)
elif slab == 'kur':
filt2 = 1.5
Filtgrid = s2f.specialkurfilt(Surfgrid,xi,filt,filt2,node)
Errorgrid = s2f.specialkurfilt(Errorgrid,xi,filt,filt2,node)
Errorgrid2 = s2f.specialkurfilt(Errorgrid2,xi,filt,filt2,node)
thickgrid = s2f.specialkurfilt(thickgrid,xi,filt,filt2,node)
elif slab == 'izu':
filt2 = 1.5
Filtgrid = s2f.specializufilt(Surfgrid,xi,filt,filt2,node)
Errorgrid = s2f.specializufilt(Errorgrid,xi,filt,filt2,node)
Errorgrid2 = s2f.specializufilt(Errorgrid2,xi,filt,filt2,node)
thickgrid = s2f.specializufilt(thickgrid,xi,filt,filt2,node)
else:
Filtgrid = ndimage.filters.gaussian_filter(Surfgrid, sigma, mode='reflect')
Errorgrid = ndimage.filters.gaussian_filter(Errorgrid, sigma, mode='reflect')
Errorgrid2 = ndimage.filters.gaussian_filter(Errorgrid2, sigma, mode='reflect')
thickgrid = ndimage.filters.gaussian_filter(thickgrid, sigma, mode='reflect')
strgrid3, dipgrid3 = s2f.mkSDgrddata(xi, Filtgrid, flipornot)
resdata = np.zeros((len(xi),5))
resdata[:,0] = xi[:,0]
resdata[:,1] = xi[:,1]
resdata[:,2] = Filtgrid.flatten()
resdata[:,3] = strgrid3.flatten()
resdata[:,4] = dipgrid3.flatten()
print(" Identifying contour extents for clipping mask...")
newres = s2f.mkContourClip(shift_out, TR_data, node, resdata, False,slab)
print(" Assigning and sorting clipping mask polygon...")
if len(TR_data)>0:
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
else:
clip = s2f.noTrenchPolygon(newres, node, False, slab)
mask = s2f.maskdatag(clip, xi)
mask.shape = Surfgrid.shape
Filtgrid = (Filtgrid*mask)
Surfgrid = (Surfgrid*mask)
Errorgrid = (Errorgrid*mask)
Errorgrid2 = (Errorgrid2*mask)
thickgrid = (thickgrid*mask)
dipgrid3 = (dipgrid3*mask)
strgrid3 = (strgrid3*mask)
smooth_dif = Surfgrid.flatten()-Filtgrid.flatten()
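# smooth_dif (dz3 in the output) is the smoothing-induced error, taken as the
# per-node difference between the unsmoothed and smoothed surfaces.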
# Create output array
print(" Populating output array...")
output = (np.zeros([len(xi), 10]) * np.nan)
output[:, 0] = xi[:, 0] # lon Longitude at node (not shifted)
output[:, 1] = xi[:, 1] # lat Latitude at node
output[:, 2] = Surfgrid.flatten() # dep_shift Post-shift surface depth before smoothing
output[:, 3] = Filtgrid.flatten() # dep_shift_smooth Post-shift surface depth after smoothing
output[:, 4] = strgrid3.flatten() # str_shift_smooth Post-shift surface strike after smoothing (strike was not smoothed - only depth was smoothed)
output[:, 5] = dipgrid3.flatten() # dip_shift_smooth Post-shift surface dip after smoothing
output[:, 6] = Errorgrid.flatten() # dz1 Interpolated, but unsmoothed uncertainty from raw data
output[:, 7] = Errorgrid2.flatten() #dz2 Interpolated, unsmoothed uncertainty from shift
output[:, 8] = smooth_dif.flatten() # dz3 error induced by smoothing (taken as the standard deviation of smoothed-unsmoothed)
output[:, 9] = thickgrid.flatten() # thickness Interpolated, unsmoothed thickness
output[:, 0][output[:, 0]<0]+=360
clip.loc[clip.lon < 0, 'lon']+=360
output[:,2][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,3][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,4][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,5][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,6][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,7][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,8][output[:,3] > shift_out['depth'].max()] = np.nan
output[:,9][output[:,3] > shift_out['depth'].max()] = np.nan
if slab == 'phi' or slab == 'sul' or slab == 'cot':
halfolder = 'hal_slab2_12.22.17'
print ('clipping grid by underriding model: %s ... '%undergrid)
output = s2f.underclip(output,undergrid)
finoutput = output[np.isfinite(output[:,3])]
newres = pd.DataFrame({'lon':finoutput[:,0], 'lat':finoutput[:,1], 'depth':finoutput[:,3], 'strike':finoutput[:,4], 'dip':finoutput[:,5]})
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
if slab == 'ryu':
kurfolder = 'kur_slab2_12.22.17'
print ('clipping grid by underriding model: %s ... '%undergrid)
output = s2f.underclip(output,undergrid)
finoutput = output[np.isfinite(output[:,3])]
newres = pd.DataFrame({'lon':finoutput[:,0], 'lat':finoutput[:,1], 'depth':finoutput[:,3], 'strike':finoutput[:,4], 'dip':finoutput[:,5]})
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
if slab == 'mue':
carfolder = 'car_slab2_12.22.17'
print ('clipping grid by underriding model: %s ... '%undergrid)
output = s2f.underclip(output,undergrid)
finoutput = output[np.isfinite(output[:,3])]
newres = pd.DataFrame({'lon':finoutput[:,0], 'lat':finoutput[:,1], 'depth':finoutput[:,3], 'strike':finoutput[:,4], 'dip':finoutput[:,5]})
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
if slab == 'kur':
output = output[output[:,1] >= 35]
clip = clip[clip.lat >= 35]
clip['dist1'] = np.abs(35-clip['lat'].values)
closest = clip[clip.dist1 == clip['dist1'].min()]
lonc, latc = closest['lon'].values[0], closest['lat'].values[0]
clip['dist2'] = np.abs(lonc-clip['lon'].values)
clip['dist3'] = clip['dist1'].values/clip['dist2'].values/clip['dist2'].values
closest2 = clip[clip.dist3 == clip['dist3'].min()]
lonc2, latc2 = closest2['lon'].values[0], closest2['lat'].values[0]
clip.loc[(clip.lon == lonc)&(clip.lat == latc), 'lat'] = 35.0
clip.loc[(clip.lon == lonc2)&(clip.lat == latc2), 'lat'] = 35.0
if clip['lon'].values[0] != clip['lon'].values[-1] or clip['lat'].values[0] != clip['lat'].values[-1]:
pointbeg = clip.iloc[[0]]
clip = pd.concat([clip, pointbeg],sort=True)
clip = clip[['lon','lat']]
# Save results to file
print(" Saving results and data to file...")
np.savetxt(outFile, output, header='lon,lat,raw_dep,dep_shift_smooth,str_shift_smooth,dip_shift_smooth,dz1,dz2,dz3,thickness',fmt='%.2f', delimiter=',',comments='')
# Save clipping mask to file
clip = clip[['lon', 'lat']]
clip.to_csv(clipFile, float_format='%.2f', sep=' ', header=False, index=False)
if slab == 'izu' or slab == 'jap' or slab == 'sol' or slab == 'man' or slab == 'ker' or slab == 'hinz' or slab == 'pamz':
print(" PSYCH! Solving for vertical component of this slab region ...")
clip, output, supplement, nodes, deepnodes = s2f.splitsurface(nodeFile,outFile,clipFile,trenches,node,filt,grid,slab, knot_no, kdeg, rbfs, folder)
supplement = supplement[['lon','lat','depth','strike','dip','dz1','dz2','dz3','thickness']]
nodes.to_csv(nodeuFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
deepnodes.to_csv(nodexFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
supplement.to_csv(suppFile, header=True, index=False, na_rep=np.nan, float_format='%.4f')
if slab == 'izu':
output = output[output[:,1] <= 35]
clip = clip[clip.lat <= 35]
clip['dist1'] = np.abs(35-clip['lat'].values)
closest = clip[clip.dist1 == clip['dist1'].min()]
lonc, latc = closest['lon'].values[0], closest['lat'].values[0]
clip['dist2'] = np.abs(lonc-clip['lon'].values)
clip['dist3'] = clip['dist1'].values/clip['dist2'].values/clip['dist2'].values
closest2 = clip[clip.dist3 == clip['dist3'].min()]
lonc2, latc2 = closest2['lon'].values[0], closest2['lat'].values[0]
clip.loc[(clip.lon == lonc)&(clip.lat == latc), 'lat'] = 35.0
clip.loc[(clip.lon == lonc2)&(clip.lat == latc2), 'lat'] = 35.0
if clip['lon'].values[0] != clip['lon'].values[-1] or clip['lat'].values[0] != clip['lat'].values[-1]:
pointbeg = clip.iloc[[0]]
clip = pd.concat([clip, pointbeg],sort=True)
clip = clip[['lon','lat']]
print(" Saving results and data to file...")
clip = clip[['lon', 'lat']]
clip.to_csv(clipFile, float_format='%.2f', sep=' ', header=False, index=False)
np.savetxt(outFile, output, header='lon,lat,raw_dep,dep_shift_smooth,str_shift_smooth,dip_shift_smooth,dz1,dz2,dz3,thickness',fmt='%.2f', delimiter=',',comments='')
xmin = np.min(output[:,0])
xmax = np.max(output[:,0])
ymin = np.min(output[:,1])
ymax = np.max(output[:,1])
deps = pd.DataFrame({'lon':output[:,0], 'lat': output[:,1], 'depth':output[:,3]*-1.0})
strs = pd.DataFrame({'lon':output[:,0], 'lat': output[:,1], 'str':output[:,4]})
dips = pd.DataFrame({'lon':output[:,0], 'lat': output[:,1], 'dip':output[:,5]})
uncs = pd.DataFrame({'lon':output[:,0], 'lat': output[:,1], 'unc':output[:,6]})
thicks = pd.DataFrame({'lon':output[:,0], 'lat': output[:,1], 'thick':output[:,9]})
deps = deps[['lon','lat','depth']]
strs = strs[['lon','lat','str']]
dips = dips[['lon','lat','dip']]
uncs = uncs[['lon','lat','unc']]
thicks = thicks[['lon','lat','thick']]
deps.to_csv(depTextFile, header=False, index=False, sep=' ', na_rep=np.nan)
strs.to_csv(strTextFile, header=False, index=False, sep=' ', na_rep=np.nan)
dips.to_csv(dipTextFile, header=False, index=False, sep=' ', na_rep=np.nan)
uncs.to_csv(uncTextFile, header=False, index=False, sep=' ', na_rep=np.nan)
thicks.to_csv(thickTextFile, header=False, index=False, sep=' ', na_rep=np.nan)
clip.to_csv(clipFile, float_format='%.2f', header=False, index=False)
# Write ascii files out to netCDF4 grid (python version of GMT command xyz2grd)
# KLH 11/01/2019
s2f.xyz2grd(depTextFile,np.floor(xmin),np.ceil(xmax),np.floor(ymin),np.ceil(ymax),node,depGridFile,slab)
s2f.xyz2grd(strTextFile,np.floor(xmin),np.ceil(xmax),np.floor(ymin),np.ceil(ymax),node,strGridFile,slab)
s2f.xyz2grd(dipTextFile,np.floor(xmin),np.ceil(xmax),np.floor(ymin),np.ceil(ymax),node,dipGridFile,slab)
s2f.xyz2grd(uncTextFile,np.floor(xmin),np.ceil(xmax),np.floor(ymin),np.ceil(ymax),node,uncGridFile,slab)
s2f.xyz2grd(thickTextFile,np.floor(xmin),np.ceil(xmax),np.floor(ymin),np.ceil(ymax),node,thickGridFile,slab)
os.system("rm %s" % depTextFile)
os.system("rm %s" % strTextFile)
os.system("rm %s" % dipTextFile)
os.system("rm %s" % uncTextFile)
os.system("rm %s" % thickTextFile)
if slab == 'izu' or slab == 'jap' or slab == 'sol' or slab == 'man' or slab == 'ker' or slab == 'hinz' or slab == 'pamz':
os.system("rm Output/%s/%s_slab2_con_%s.txt"%(folder,slab,date))
# make array of contours and depths in file
cint = 20
contourlist = [cint]
thisc = cint
while thisc < supplement['depth'].max():
thisc += cint
contourlist.append(thisc)
depthlist = np.array(list((set(supplement.depth))))
# identify value spacing
n = 0
sumd = 0
for i in range(1,len(depthlist)):
diff = depthlist[i] - depthlist[i-1]
sumd += diff
n += 1
maxdiff = math.ceil(sumd/n)
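# maxdiff is intended as the average spacing between supplement depth levels;
# contour values with no supplement depth within maxdiff are skipped below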
with open('Output/%s/%s_slab2_con_%s.txt'%(folder,slab,date),'a') as f:
for c in contourlist:
distdepths = np.abs(c-depthlist)
supdep = depthlist[np.argmin(distdepths)]
dat = supplement[supplement.depth == supdep]
if len(dat) > 0 and min(distdepths) <= maxdiff:
if slab == 'izu' or slab == 'man' or slab == 'ker':
dat = dat.sort_values(by=['lat'], ascending=False)
if slab == 'sol' or slab == 'hin' or slab == 'pam':
dat = dat.sort_values(by=['lon'], ascending=False)
f.write('> %i \n'%c)
dat = dat[['lon','lat']]
dat.to_csv(f,header=False,index=False,sep=' ')
f.close()
print(" All files have been saved in directory: %s/Output/%s"%(os.getcwd(),folder))
print(" File descriptions:")
print(" %s_slab2_res_%s.csv: ascii format of output grids listed below"%(slab,date))
print(" %s_slab2_dep_%s.grd: depth grid (res columns: lon,lat,dep_shift_smooth)"%(slab,date))
print(" %s_slab2_str_%s.grd: strike grid (res columns: lon,lat,str_shift_smooth)"%(slab,date))
print(" %s_slab2_dip_%s.grd: dip grid (res columns: lon,lat,dip_shift_smooth)"%(slab,date))
print("        %s_slab2_thk_%s.grd: thickness grid (res columns: lon,lat,thickness)"%(slab,date))
print("        %s_slab2_unc_%s.grd: uncertainty grid (res columns: lon,lat,dz1)"%(slab,date))
print(" %s_slab2_nod_%s.csv: info for all grid nodes constraining final surface "%(slab,date))
print(" %s_slab2_dat_%s.csv: filtered input dataset "%(slab,date))
print(" %s_slab2_clp_%s.csv: clipping mask "%(slab,date))
print(" %s_slab2_par_%s.csv: list of parameters used in this model "%(slab,date))
print(" %s_slab2_szt_%s.csv: file listing events used to determine seismogenic width "%(slab,date))
print(" %s_slab2_szt_%s.png: depth histogram and PDF of slab related events "%(slab,date))
# Help/description and command line argument parser
if __name__=='__main__':
desc = '''
Expected slab regions include:
Aleutians alu
Calabria cal
Central America cam
Caribbean car
Cascadia cas
Cotabato cot
Halmahera hal
Hellenic hel
Himalaya him
Hindu Kush hin
Izu-Bonin izu
Kermadec ker
Kuril kur
Makran mak
Manila man
Muertos mue
Pamir pam
New Guinea png
Philippines phi
Puysegur puy
Ryukyu ryu
South America sam
Scotia sco
Solomon Islands sol
Sulawesi sul
Sumatra/Java sum
Vanuatu van
'''
parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-p', '--parFile', dest='parFile', type=str,
required=True, help='file listing slab parameters')
parser.add_argument('-c', '--nCores', dest='nCores', type=int,
help='number of cores to run with')
parser.add_argument('-t', '--test', metavar=('lonmin', 'lonmax',
'latmin', 'latmax'),
dest='test', type=float, nargs=4,
help='test box [lonmin lonmax latmin latmax]')
parser.add_argument('-u', '--undergrid', dest='undergrid', type=str,
help='depth grid for slab abutting this one, required for ryu (kur grid), mue (car grid), phi, sul, cot (hal grid)')
pargs = parser.parse_args()
#cProfile.run('main(pargs)')
main(pargs)
|
{"hexsha": "49fa5b9ce9b469153fc198ba4a2f0fc2f5e253b8", "size": 82778, "ext": "py", "lang": "Python", "max_stars_repo_path": "slab2code/slab2.py", "max_stars_repo_name": "ftbernales/slab2", "max_stars_repo_head_hexsha": "0070903421eb2ede8cb86bd06609389b0ecf52dd", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2015-06-10T16:16:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T18:47:05.000Z", "max_issues_repo_path": "slab2code/slab2.py", "max_issues_repo_name": "ftbernales/slab2", "max_issues_repo_head_hexsha": "0070903421eb2ede8cb86bd06609389b0ecf52dd", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-10-23T14:23:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-10T18:23:41.000Z", "max_forks_repo_path": "slab2code/slab2.py", "max_forks_repo_name": "ftbernales/slab2", "max_forks_repo_head_hexsha": "0070903421eb2ede8cb86bd06609389b0ecf52dd", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-06-10T16:16:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-11T13:38:08.000Z", "avg_line_length": 45.159847245, "max_line_length": 449, "alphanum_fraction": 0.5702239726, "include": true, "reason": "import numpy,from scipy", "num_tokens": 23265}
|
module DBdatatype # Note: correspondence here is complicated by platform issues.
# See http://julia.readthedocs.org/en/release-0.3/manual/calling-c-and-fortran-code/
using Compat
const DB_INT = 16
const DB_SHORT = 17
const DB_LONG = 18
const DB_FLOAT = 19
const DB_DOUBLE = 20
const DB_CHAR = 21
const DB_LONG_LONG = 22
const DB_NOTYPE = 25 #=unknown type =#
const JuliaSilotypemap= @compat Dict(Int64 => DB_LONG_LONG, Int32 => DB_INT, Int16 =>DB_SHORT, Int8=>DB_CHAR, Float32 => DB_FLOAT, Float64 => DB_DOUBLE)
# const CJuliaSilotypemap=[Clonglong => DB_LONG_LONG, Cint => DB_INT, Cshort =>DB_SHORT, Cchar=>DB_CHAR, Cfloat => DB_FLOAT, Cdouble => DB_DOUBLE]
const SiloJuliatypemap = @compat Dict(DB_LONG_LONG => Int64, DB_INT=> Int32, DB_SHORT=>Int16, DB_CHAR=>Int8, DB_FLOAT=>Float32, DB_DOUBLE=>Float64 )
# const SiloCJuliatypemap=[DB_LONG_LONG => Clonglong, DB_INT=> Cint, DB_SHORT=>Cshort , DB_CHAR=>Cchar, Cfloat => DB_FLOAT, DB_DOUBLE=>Cdouble]
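# usage sketch: JuliaSilotypemap[Float64] == DB_DOUBLE and SiloJuliatypemap[DB_FLOAT] == Float32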
end #module
|
{"hexsha": "68d681896852c12e7d01ff46653d70d1f3c745f7", "size": 976, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/DBdatatype.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Silo.jl-1d21c727-5350-5715-a0f1-d07632c10ec8", "max_stars_repo_head_hexsha": "f33c1166064914ab67eb9acf8398c70551bcdb15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/DBdatatype.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Silo.jl-1d21c727-5350-5715-a0f1-d07632c10ec8", "max_issues_repo_head_hexsha": "f33c1166064914ab67eb9acf8398c70551bcdb15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2015-01-04T14:32:58.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:29:54.000Z", "max_forks_repo_path": "src/DBdatatype.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Silo.jl-1d21c727-5350-5715-a0f1-d07632c10ec8", "max_forks_repo_head_hexsha": "f33c1166064914ab67eb9acf8398c70551bcdb15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-01-30T18:26:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:27:09.000Z", "avg_line_length": 51.3684210526, "max_line_length": 152, "alphanum_fraction": 0.7489754098, "num_tokens": 302}
|
[STATEMENT]
theorem (in graph) init_root [simp]:
"DataRefinement ({.Init.} o Q2_a) R1_a R2_a Q2'_a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. DataRefinement ({. Init .} \<circ> Q2_a) R1_a R2_a Q2'_a
[PROOF STEP]
by (simp add: data_refinement_hoare hoare_demonic Q2'_a_def Init_def
Loop'_def R1_a_def R2_a_def Q2_a_def angelic_def subset_eq)
|
{"llama_tokens": 168, "file": "GraphMarkingIBP_StackMark", "length": 1}
|
/*
* The MIT License - see LICENSE file for details
*/
#include "DebugPanel.h"
#include "DebugThread.h"
#include "FileUtils.h"
#include <boost/foreach.hpp>
#include <boost/format.hpp>
DEFINE_EVENT_TYPE(wxEVT_MY_EVENT_PRINT_LINE)
DEFINE_EVENT_TYPE(wxEVT_MY_EVENT_NOTIFY_FILE_AND_LINE)
DEFINE_EVENT_TYPE(wxEVT_MY_EVENT_NOTIFY_BREAKPOINT_UPDATE)
DebugPanel::DebugPanel(wxWindow* parent, wxWindowID id, const wxPoint& pos,
const wxSize& size, long style) :
DebugPanelLayout(parent, id, pos, size, style), debugThread(NULL)
{
this->Connect(wxID_ANY, wxEVT_MY_EVENT_PRINT_LINE,
wxCommandEventHandler( DebugPanel::OnPrintLine ));
this->Connect(wxID_ANY, wxEVT_MY_EVENT_NOTIFY_FILE_AND_LINE,
wxCommandEventHandler( DebugPanel::OnNotifyFileAndLine ));
this->Connect(wxID_ANY, wxEVT_MY_EVENT_NOTIFY_BREAKPOINT_UPDATE,
wxCommandEventHandler( DebugPanel::OnNotifyBreakpointUpdate ));
}
DebugPanel::~DebugPanel()
{
this->Disconnect(wxID_ANY, wxEVT_MY_EVENT_PRINT_LINE,
wxCommandEventHandler( DebugPanel::OnPrintLine ));
this->Disconnect(wxID_ANY, wxEVT_MY_EVENT_NOTIFY_FILE_AND_LINE,
wxCommandEventHandler( DebugPanel::OnNotifyFileAndLine ));
this->Disconnect(wxID_ANY, wxEVT_MY_EVENT_NOTIFY_BREAKPOINT_UPDATE,
wxCommandEventHandler( DebugPanel::OnNotifyBreakpointUpdate ));
}
void DebugPanel::evalCurrentSelection()
{
wxString selection = m_code->GetSelectedText();
if (selection.Length() > 0)
{
assert(debugThread != NULL);
debugThread->postline(std::string((wxT("eval ") + selection).mb_str()));
}
}
void DebugPanel::updateBreakpoints()
{
assert(debugThread != NULL);
m_code->MarkerDefine(1, wxSCI_MARK_CIRCLE);
m_code->MarkerDeleteAll(1);
std::vector<unsigned int> breakpoints = debugThread->listBreakpointOfFile(
currentFile);
BOOST_FOREACH(unsigned int line, breakpoints)
{
// mark current line
m_code->MarkerAdd(line - 1, 1);
}
}
void DebugPanel::OnNotifyBreakpointUpdate(wxCommandEvent& event)
{
updateBreakpoints();
}
void DebugPanel::OnNotifyFileAndLine(wxCommandEvent& event)
{
wxString file = event.GetString();
unsigned int line = (unsigned int) event.GetInt();
currentFile = std::string(file.mb_str());
// set current source
std::string src = FileUtils::getContent(std::string(file.mb_str()));
m_code->SetReadOnly(false);
m_code->SetText(wxString(src.c_str(), wxConvUTF8));
m_code->SetReadOnly(true);
// scroll to current line
m_code->EnsureVisible(line - 1);
m_code->ScrollToLine(line - 1 - 4);
// line numbers
int marginWidth = m_code->TextWidth(wxSCI_STYLE_LINENUMBER, _T("99999"));
m_code->SetMarginWidth(0, marginWidth);
m_code->SetWrapMode(wxSCI_WRAP_NONE);
// mark current line
m_code->MarkerDefine(0, wxSCI_MARK_ARROW);
m_code->MarkerDeleteAll(0);
m_code->MarkerAdd(line - 1, 0);
wxFont font(10, wxTELETYPE, wxFIXED, wxNORMAL);
m_code->StyleSetFont(wxSCI_STYLE_DEFAULT, font);
m_code->StyleSetForeground(wxSCI_STYLE_DEFAULT, wxColour(wxT("BLACK")));
m_code->StyleSetBackground(wxSCI_STYLE_DEFAULT, wxColour(wxT("WHITE")));
m_code->StyleSetForeground(wxSCI_STYLE_LINENUMBER,
wxColour(wxT("DARK BLUE")));
m_code->StyleSetBackground(wxSCI_STYLE_LINENUMBER, wxColour(wxT("WHITE")));
m_code->StyleSetForeground(wxSCI_STYLE_INDENTGUIDE,
wxColour(wxT("DARK GREY")));
// enable lua syntax highlight
m_code->SetLexerLanguage(wxT("lua"));
m_code->Colourise(0, wxSCI_INVALID_POSITION);
updateBreakpoints();
}
void DebugPanel::OnPrintLine(wxCommandEvent& event)
{
m_textCtrlOutput->AppendText(event.GetString() + wxT("\n"));
}
void DebugPanel::assignDebugThread(DebugThread *thread)
{
debugThread = thread;
}
void DebugPanel::toggleBreakpoint()
{
assert(debugThread != NULL);
unsigned int line = m_code->GetCurrentLine();
line = line + 1;
if (debugThread->hasBreakpointAt(currentFile, line))
{
debugThread ->postline(
(boost::format("delb %1% %2%") % currentFile % line).str());
}
else
{
debugThread ->postline(
(boost::format("setb %1% %2%") % currentFile % line).str());
}
updateBreakpoints();
}
void DebugPanel::OnTextEnter(wxCommandEvent& event)
{
assert(debugThread != NULL);
std::string line(m_textCtrlConsole->GetValue().mb_str());
debugThread->postline(line);
m_textCtrlConsole->SetValue(wxT(""));
}
|
{"hexsha": "a6da309bc52d64972aaa7bf6432e20445a33773a", "size": 4261, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "gui/DebugPanel.cpp", "max_stars_repo_name": "hagish/lua-debugger", "max_stars_repo_head_hexsha": "ea74561dec68e09896f42ad49b65cc721227d781", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-10-07T15:10:09.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-07T15:10:09.000Z", "max_issues_repo_path": "gui/DebugPanel.cpp", "max_issues_repo_name": "hagish/lua-debugger", "max_issues_repo_head_hexsha": "ea74561dec68e09896f42ad49b65cc721227d781", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gui/DebugPanel.cpp", "max_forks_repo_name": "hagish/lua-debugger", "max_forks_repo_head_hexsha": "ea74561dec68e09896f42ad49b65cc721227d781", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-05-21T18:13:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-21T18:13:26.000Z", "avg_line_length": 27.8496732026, "max_line_length": 76, "alphanum_fraction": 0.7533442854, "num_tokens": 1101}
|
import matplotlib.pyplot as plt
import numpy as np
import cv2
SIZE_RATIO = 5
def is_black_square(square, threshold = 0.8):
N = square.shape[0]
total_area = N*N
black_area = np.sum(square == 0)
print("ratio", black_area/total_area)
if (black_area/total_area) > threshold:
return True
else:
return False
def is_white_square(square, threshold = 0.8):
N = square.shape[0]
total_area = N*N
white_area = np.sum(square == 255)
print("ratio", white_area/total_area)
if (white_area/total_area) > threshold:
return True
else:
return False
def is_main_square(image, draw, box):
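# Checks a candidate box against the 5x5 template `config` below: the box is split
# into SIZE_RATIO x SIZE_RATIO cells, and every cell marked 0 must pass
# is_black_square (mostly 0-valued pixels) while every cell marked 1 must pass
# is_white_square (mostly 255-valued pixels). Debug circles mark the cell corners.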
config = [
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1]
]
a, b, c, d = box
u = (b - a) / SIZE_RATIO
v = (d - a) / SIZE_RATIO
sx, sy = a
dx = u[0] + v[0]
dy = u[1] + v[1]
print(dx, dy)
for i in range(SIZE_RATIO):
for j in range(SIZE_RATIO):
x = int(sx + i*dx)
y = int(sy + j*dy)
xp = int(sx + (i+1)*dx)
yp = int(sy + (j+1)*dy)
if xp < x:
[x, xp] = [xp, x]
if yp < y:
[y, yp] = [yp, y]
print("center", x, y)
square = image[x:xp, y:yp]
# print("square", square.shape)
cv2.circle(draw, (x, y), 5, (255, 0, 0), -1)
if config[i][j] == 0 and not is_black_square(square):
return False
if config[i][j] == 1 and not is_white_square(square):
return False
x += dx
y += dy
return True
def is_corner_square(image, draw, box):
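# Same cell subdivision as is_main_square, but only the centre cell (i == j == 2)
# must pass is_black_square; every other cell must pass is_white_square.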
a, b, c, d = box
u = (b - a) / SIZE_RATIO
v = (d - a) / SIZE_RATIO
sx, sy = a
dx = u[0] + v[0]
dy = u[1] + v[1]
print(dx, dy)
for i in range(SIZE_RATIO):
for j in range(SIZE_RATIO):
x = int(sx + i*dx)
y = int(sy + j*dy)
xp = int(sx + (i+1)*dx)
yp = int(sy + (j+1)*dy)
if xp < x:
[x, xp] = [xp, x]
if yp < y:
[y, yp] = [yp, y]
print("center", x, y)
square = image[x:xp, y:yp]
# print("square", square.shape)
cv2.circle(draw, (x, y), 5, (255, 0, 0), -1)
if i == 2 and j == 2:
if not is_black_square(square):
return False
else:
if not is_white_square(square):
print("==========================")
print("==========================")
return False
x += dx
y += dy
return True
# def is_corner_square(image, box):
# a, b, c, d = box
# u = (b - a) / SIZE_RATIO
# v = (d - a) / SIZE_RATIO
# # norm_u = np.linalg.norm(u)
# # norm_v = np.linalg.norm(v)
# # u = u / norm_u
# # v = v / norm_v
# for i in range(SIZE_RATIO):
# for j in range(SIZE_RATIO):
# center = a + 0.5*u + 0.5*v + u*i + v*j
# center = (int(center[0]), int(center[1]))
# print(center)
# cv2.circle(image, center, 5, (255, 0, 0), -1)
image = cv2.imread("qr.png")
black_lower = (0, 0, 0)
black_upper = (180, 255, 30)
blurred = cv2.GaussianBlur(image, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, black_lower, black_upper)
# mask = cv2.erode(mask, None, iterations=2)
# mask = cv2.dilate(mask, None, iterations=2)
print(mask.shape)
print(mask.dtype)
# print(mask[80:100, 80:100])
contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
image = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
# cv2.drawContours(image, contours, -1, (0,255,0), 3)
for i, cnt in enumerate(contours):
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
if i == 2 or i == 6 or i == 7:
# cv2.drawContours(image, [box] ,0, (0, 0, 255), 2)
if is_corner_square(mask, image, box):
cv2.drawContours(image, [box] ,0, (0, 0, 255), 2)
if is_main_square(mask, image, box):
cv2.drawContours(image, [box] ,0, (0, 0, 255), 2)
# print(box)
# print()
# is_corner_square(image, box)
# image = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)
cv2.imshow("frame", image)
while True:
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
|
{"hexsha": "37348b3c0535ab53af8482c787d2b34657224708", "size": 4547, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "tomasr8/qr", "max_stars_repo_head_hexsha": "40eda9a040139b2e800abc798c6d67c6e864fa32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "tomasr8/qr", "max_issues_repo_head_hexsha": "40eda9a040139b2e800abc798c6d67c6e864fa32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "tomasr8/qr", "max_forks_repo_head_hexsha": "40eda9a040139b2e800abc798c6d67c6e864fa32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6218905473, "max_line_length": 65, "alphanum_fraction": 0.4860347482, "include": true, "reason": "import numpy", "num_tokens": 1450}
|
!***********************************************************************************************************************************
!** S U B R O U T I N E G A T E F L O W **
!***********************************************************************************************************************************
SUBROUTINE GATE_FLOW
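! Computes the discharge QGT(JG) for each gate. Gates flagged ' FLOW' pass the
! specified rate BGT directly; otherwise upstream/downstream water-surface
! elevations are interpolated to the structure and rating equations are applied,
! switching between free and submerged flow (tailwater/energy-head ratio > 0.67)
! and between gate (orifice) and weir coefficients when the opening exceeds 0.8 of the head.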
USE STRUCTURES; USE GLOBAL; USE GEOMC
IMPLICIT NONE
INTEGER :: JG,ISUB,IGT
REAL(R8) :: ELIU,ELID,HTAIL,HENERGY,DLEL
DO JG=1,NGT
IF(DYNGTC(JG) == ' FLOW')THEN
QGT(JG) = BGT(JG)
ELSE
! ELIU = ELWS(IUGT(JG)) !EL(KTWB(JWUGT(JG)),IUGT(JG))-Z(IUGT(JG))*COSA(BS(JWUGT(JG)))
IF (LATERAL_GATE(JG)) THEN
ELIU = ELWS(IUGT(JG)) !EL(KTWB(JWUGT(JG)),IUGT(JG))-Z(IUGT(JG))*COSA(BS(JWUGT(JG)))
ELSE
!ELIU=ELWS(IUGT(JG))-SINA(JBUGT(JG))*DLX(IUGT(JG))*0.5 !EL(KTWB(JWUGT(JG)),IUGT(JG))-Z(IUGT(JG))*COSA(BS(JWUGT(JG)))-SINA(JBUGT(JG))*DLX(IUGT(JG))*0.5
ELIU= ELWS(IUGT(JG)) + (ELWS(IUGT(JG))-ELWS(IUGT(JG)-1))/(0.5*(DLX(IUGT(JG))+DLX(IUGT(JG)-1)))*DLX(IUGT(JG))*0.5 ! LINEAR INTERPOLATION OF THE WATER LEVEL TO THE EDGE
END IF
IF (IDGT(JG) /= 0)THEN
IF (US(JBDGT(JG)) /= IDGT(JG)) THEN
ELID = ELWS(IDGT(JG)) !EL(KTWB(JWDGT(JG)),IDGT(JG))-Z(IDGT(JG))*COSA(BS(JWDGT(JG)))
ELSE
!ELID = ELWS(IDGT(JG))+SINA(JBDGT(JG))*DLX(IDGT(JG))*0.5 !EL(KTWB(JWDGT(JG)),IDGT(JG))-Z(IDGT(JG))*COSA(BS(JWDGT(JG)))+SINA(JBDGT(JG))*DLX(IDGT(JG))*0.5
ELID = ELWS(IDGT(JG)) - (ELWS(IDGT(JG)+1) - ELWS(IDGT(JG)))/(0.5*(DLX(IDGT(JG))+DLX(IDGT(JG)+1)))*DLX(IDGT(JG))*0.5
END IF
ELSE
ELID = -100.0
END IF
IF (BGT(JG) /= 0.0) THEN
IF (ELID > EGT(JG) .OR. ELIU > EGT(JG)) THEN
ISUB = 0
IF (A2GT(JG) /= 0.0 .AND. IDGT(JG) /= 0) THEN ! SW 8/21/2013
HTAIL = ELID-EGT(JG) ! SW 5/10/05
IF (HTAIL > 0) THEN
HENERGY = (U(KTWB(JWUGT(JG)),IUGT(JG))**2)/(2.0*G)+ELIU-EGT(JG) ! SW 5/10/05
IF (HTAIL/HENERGY > 0.67) ISUB = 1
END IF
END IF
IGT = 0
IF (BGT(JG) >= 0.8*(ELIU-EGT(JG)) .AND. GTA1(JG) /= 0.0) IGT = 1
IF (IGT == 0) THEN
IF (ISUB == 0) THEN
DLEL = ELIU-EGT(JG)
IF (A2GT(JG) == 0.0 .AND. G2GT(JG) /= 0.0) DLEL = ELIU-G2GT(JG)
IF (DLEL < 0.0) THEN
DLEL = -DLEL
QGT(JG) = -A1GT(JG)*(DLEL**B1GT(JG))*BGT(JG)**G1GT(JG)
ELSE
QGT(JG) = A1GT(JG)*(DLEL**B1GT(JG))*BGT(JG)**G1GT(JG)
END IF
ELSE IF (ELID > ELIU) THEN
DLEL = ELID-ELIU
QGT(JG) = -A2GT(JG)*DLEL**B2GT(JG)*BGT(JG)**G2GT(JG)
ELSE
DLEL = ELIU-ELID
QGT(JG) = A2GT(JG)*DLEL**B2GT(JG)*BGT(JG)**G2GT(JG)
END IF
ELSE IF (ISUB == 0) THEN
DLEL = ELIU-EGT(JG)
IF (ELID > EGT(JG)) DLEL = ELIU-ELID
IF (DLEL < 0.0) THEN
DLEL = -DLEL
QGT(JG) = -GTA1(JG)*DLEL**GTB1(JG)
ELSE
QGT(JG) = GTA1(JG)*DLEL**GTB1(JG)
END IF
ELSE IF (ELID > ELIU) THEN
DLEL = ELID-ELIU
QGT(JG) = -GTA2(JG)*DLEL**GTB2(JG)
ELSE
DLEL = ELIU-ELID
QGT(JG) = GTA2(JG)*DLEL**GTB2(JG)
END IF
ELSE
QGT(JG) = 0.0
END IF
ELSE
QGT(JG) = 0.0
END IF
endif
END DO
END SUBROUTINE GATE_FLOW
!***********************************************************************************************************************************
!** S U B R O U T I N E S P I L L W A Y F L O W **
!***********************************************************************************************************************************
SUBROUTINE SPILLWAY_FLOW
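! Computes the spillway discharge QSP(JS) from upstream/downstream water-surface
! elevations using the free-flow (A1SP,B1SP) and submerged-flow (A2SP,B2SP) rating
! equations; submergence is assumed when the tailwater-to-energy-head ratio exceeds 0.67.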
USE STRUCTURES; USE GLOBAL; USE GEOMC
INTEGER :: JS,ISUB
REAL(R8):: ELIU,ELID,HTAIL,HENERGY,DLEL
DO JS=1,NSP
IF (LATERAL_SPILLWAY(JS)) THEN
ELIU = ELWS(IUSP(JS)) !EL(KTWB(JWUSP(JS)),IUSP(JS))-Z(IUSP(JS))*COSA(BS(JWUSP(JS)))
ELSE
! ELIU = ELWS(IUSP(JS))-SINA(JBUSP(JS))*DLX(IUSP(JS))*0.5 !EL(KTWB(JWUSP(JS)),IUSP(JS))-Z(IUSP(JS))*COSA(BS(JWUSP(JS)))-SINA(JBUSP(JS))*DLX(IUSP(JS))*0.5
ELIU= ELWS(IUSP(JS)) + (ELWS(IUSP(JS))-ELWS(IUSP(JS)-1))/(0.5*(DLX(IUSP(JS))+DLX(IUSP(JS)-1)))*DLX(IUSP(JS))*0.5 ! LINEAR INTERPOLATION OF THE WATER LEVEL TO THE EDGE
END IF
IF (IDSP(JS) /= 0) THEN
IF (US(JBDSP(JS)) /= IDSP(JS)) THEN
ELID = ELWS(IDSP(JS)) !EL(KTWB(JWDSP(JS)),IDSP(JS))-Z(IDSP(JS))*COSA(BS(JWDSP(JS)))
ELSE
! ELID = ELWS(IDSP(JS))+SINA(JBDSP(JS))*DLX(IDSP(JS))*0.5 !EL(KTWB(JWDSP(JS)),IDSP(JS))-Z(IDSP(JS))*COSA(BS(JWDSP(JS)))+SINA(JBDSP(JS))*DLX(IDSP(JS))*0.5
ELID = ELWS(IDSP(JS)) - (ELWS(IDSP(JS)+1) - ELWS(IDSP(JS)))/(0.5*(DLX(IDSP(JS))+DLX(IDSP(JS)+1)))*DLX(IDSP(JS))*0.5
END IF
ELSE
ELID = -1.0
END IF
IF (ELID >= ESP(JS) .OR. ELIU >= ESP(JS)) THEN
ISUB = 0
IF (A2SP(JS) /= 0.0 .AND. IDSP(JS) /= 0) THEN
HTAIL = ELID-ESP(JS) ! SW 5/10/05
IF (HTAIL > 0) THEN
HENERGY = (U(KTWB(JWUSP(JS)),IUSP(JS))**2)/(2.0*G)+ELIU-ESP(JS) ! SW 5/10/05
IF (HTAIL/HENERGY > 0.67) ISUB = 1
END IF
END IF
IF (ISUB == 0) THEN
DLEL = ELIU-ESP(JS)
IF (DLEL < 0.0) THEN
DLEL = -DLEL
QSP(JS) = -A1SP(JS)*DLEL**B1SP(JS)
ELSE
QSP(JS) = A1SP(JS)*DLEL**B1SP(JS)
END IF
ELSE IF (ELID > ELIU) THEN
DLEL = ELID-ELIU
QSP(JS) = -A2SP(JS)*DLEL**B2SP(JS)
ELSE
DLEL = ELIU-ELID
QSP(JS) = A2SP(JS)*DLEL**B2SP(JS)
END IF
ELSE
QSP(JS) = 0.0
END IF
END DO
END SUBROUTINE SPILLWAY_FLOW
!***********************************************************************************************************************************
!** S U B R O U T I N E P I P E F L O W **
!***********************************************************************************************************************************
SUBROUTINE PIPE_FLOW_INITIALIZE
USE GLOBAL; USE GEOMC; USE STRUCTURES; USE SCREENC, ONLY: NIT
REAL(R8) :: DTQ,DLTX,EL1,EL2,HIE,EPS,DCHECK,D1,D2,DTEST,DCRIT,DEPTHCRIT,VTOT,TOTT
INTEGER :: JP !,NIT
SAVE
ALLOCATE (BEGIN(NPI), WLFLAG(NPI), VMAX(NPI))
QOLD = 0.01; VMAX = 0.01
BEGIN = .TRUE.; WLFLAG = .TRUE.
RETURN
ENTRY PIPE_FLOW !(NIT)
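! Computes culvert/pipe discharge QPI(JP) each time step by sub-stepping the
! OPEN_CHANNEL solver (sub-step limited by OMEGA*DLTX/VMAX); handles closed or dry
! pipes, critical-depth-controlled inlets/outlets, and fully submerged conditions.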
DTQ = DLT/10.0
DO JP=1,NPI
DIA = WPI(JP)
CLEN = DLXPI(JP)
FMAN = FPI(JP)
CLOSS = FMINPI(JP)
UPIE = EUPI(JP)
DNIE = EDPI(JP)
DLTX = CLEN/(REAL(NC-1)*0.5)
IF (LATERAL_PIPE(JP)) THEN
EL1 = ELWS(IUPI(JP)) !EL(KTWB(JWUPI(JP)),IUPI(JP))-Z(IUPI(JP))*COSA(JBUPI(JP))
ELSE
! EL1 = ELWS(IUPI(JP))-SINA(JBDPI(JP))*DLX(IUPI(JP))*0.5 !EL(KTWB(JWUPI(JP)),IUPI(JP))-Z(IUPI(JP))*COSA(JBUPI(JP))-SINA(JBDPI(JP))*DLX(IUPI(JP))*0.5
EL1 = ELWS(IUPI(JP)) + (ELWS(IUPI(JP))-ELWS(IUPI(JP)-1))/(0.5*(DLX(IUPI(JP))+DLX(IUPI(JP)-1)))*DLX(IUPI(JP))*0.5 ! LINEAR INTERPOLATION OF THE WATER LEVEL TO THE EDGE
END IF
IF (IDPI(JP) /= 0) THEN
IF (US(JBDPI(JP)) /= IDPI(JP)) THEN
EL2 = ELWS(IDPI(JP)) !EL(KTWB(JWDPI(JP)),IDPI(JP))-Z(IDPI(JP))*COSA(JBDPI(JP))
ELSE
! EL2 = ELWS(IDPI(JP))+SINA(JBDPI(JP))*DLX(IDPI(JP))*0.5 !EL(KTWB(JWDPI(JP)),IDPI(JP))-Z(IDPI(JP))*COSA(JBDPI(JP))+SINA(JBDPI(JP))*DLX(IDPI(JP))*0.5
EL2 = ELWS(IDPI(JP)) - (ELWS(IDPI(JP)+1) - ELWS(IDPI(JP)))/(0.5*(DLX(IDPI(JP))+DLX(IDPI(JP)+1)))*DLX(IDPI(JP))*0.5
END IF
ELSE
EL2 = -1.0
END IF
HIE = MAX(UPIE,DNIE)
IF (DIA == 0.0) THEN
QPI(JP) = 0.0
WLFLAG(JP) = .TRUE.
GO TO 140
END IF
EPS = 0.001
IF ((HIE+EPS) >= EL1 .AND. (HIE+EPS) >= EL2) THEN
QPI(JP) = 0.0
WLFLAG(JP) = .TRUE.
GO TO 140
END IF
IF (EL1 > EL2) THEN
DCHECK = EL1-UPIE
ELSE
DCHECK = EL2-DNIE
END IF
IF (DCHECK < 0.02) THEN
QPI(JP) = 0.0
WLFLAG(JP) = .TRUE.
GO TO 140
END IF
IF (ABS(QOLD(JP)) < 0.001) QOLD(JP) = 0.001
IF (EL1 >= (UPIE+DIA) .AND. EL2 >= (DNIE+DIA)) THEN
D1 = EL1
D2 = EL2
GO TO 120
END IF
IF (EL1 > EL2) THEN
DTEST = EL2-DNIE
ELSE
DTEST = EL1-UPIE
END IF
DCRIT = DEPTHCRIT(ABS(QOLD(JP)))
IF (DTEST <= DCRIT) THEN
IF (EL1 <= EL2) THEN
D1 = UPIE+DCRIT
D2 = EL2
ELSE
D1 = EL1
D2 = DNIE+DCRIT
END IF
VTOT = 0.0
TOTT = 0.0
110 CONTINUE
IF (NIT /= 0) THEN
DTQ = OMEGA*DLTX/VMAX(JP)
IF (DTQ > (DLT-TOTT)) THEN
DTQ = DLT-TOTT
ELSE IF ((2.0*DTQ) > (DLT-TOTT)) THEN
DTQ = (DLT-TOTT)*0.5
END IF
END IF
CALL OPEN_CHANNEL (D1,D2,QPI(JP),JP,DTQ)
DCRIT = DEPTHCRIT(ABS(QPI(JP)))
IF (EL1 <= EL2) THEN
D1 = UPIE+DCRIT
ELSE
D2 = DNIE+DCRIT
END IF
VTOT = VTOT+DTQ*QPI(JP)
TOTT = DTQ+TOTT
IF (TOTT < (DLT-EPS2)) GO TO 110
QPI(JP) = VTOT/DLT
GO TO 140
END IF
D1 = EL1
D2 = EL2
120 CONTINUE
TOTT = 0.0
VTOT = 0.0
130 CONTINUE
IF (NIT /= 0) THEN
DTQ = OMEGA*DLTX/VMAX(JP)
IF (DTQ > (DLT-TOTT)) THEN
DTQ = DLT-TOTT
ELSE IF ((2.0*DTQ) > (DLT-TOTT)) THEN
DTQ = (DLT-TOTT)*0.5
END IF
END IF
CALL OPEN_CHANNEL (D1,D2,QPI(JP),JP,DTQ)
VTOT = VTOT+DTQ*QPI(JP)
TOTT = DTQ+TOTT
IF (TOTT < (DLT-EPS2)) GO TO 130
QPI(JP) = VTOT/DLT
140 CONTINUE
QOLD(JP) = QPI(JP)
IF (QPI(JP) == 0.0) WLFLAG(JP) = .TRUE.
END DO
RETURN
ENTRY DEALLOCATE_PIPE_FLOW
DEALLOCATE (BEGIN, WLFLAG, VMAX)
RETURN
END SUBROUTINE PIPE_FLOW_INITIALIZE
!***********************************************************************************************************************************
!** S U B R O U T I N E O P E N C H A N N E L **
!***********************************************************************************************************************************
SUBROUTINE OPEN_CHANNEL_INITIALIZE
USE GLOBAL; USE STRUCTURES
REAL(R8), PARAMETER :: THETA=0.55
REAL(R8) :: DLTX,PHI,VTOT,SLOPE,DIST,BEPR1,BEPR2,BC1,EL1,BC2,EL2,WLSLOPE,DLTX2,BAR1,BAREA,RAD1,RAD2
REAL(R8) :: TWIDTH,VAVG,QSUM,QAVG,QOUT,WETPER,BAR2
! Type declarations
INTEGER :: J,IC,N,NP,NQCNT
REAL(R8) :: DT,D
REAL(R8), ALLOCATABLE, DIMENSION(:) :: Y, B, V, CAREA, TOPW, BELEV, Q, VOLD, YOLD ! CB 10/4/07
REAL(R8), ALLOCATABLE, DIMENSION(:) :: YT, VT, VPR, YPR, TAREA, TOPWT, RT
REAL(R8), ALLOCATABLE, DIMENSION(:,:) :: DAA, AL
INTEGER, ALLOCATABLE, DIMENSION(:) :: INDX
LOGICAL :: SMOOTH_WATER_LEVELS !, OPENWRN
SAVE
! Allocation declarations
ALLOCATE (Y(NN), V(NN), CAREA(NN), TOPW(NN), BELEV(NN), Q(NN), VOLD(NN), YOLD(NN), B(NN)) ! CB 10/4/07
ALLOCATE (YT(NN), VT(NN), VPR(NN), YPR(NN), TAREA(NN), TOPWT(NN), RT(NN), INDX(NN))
ALLOCATE (AL(NN,2), DAA(NN,NN))
RETURN
ENTRY OPEN_CHANNEL (EL1,EL2,QOUT,IC,DT)
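! Solves the 1-D unsteady continuity and momentum equations inside the culvert on a
! staggered grid with a theta-weighted (THETA=0.55) implicit scheme; the linear
! system is solved with LUDCMP/LUBKSB and the average discharge is returned in QOUT.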
! Variable initialization
B = 0.0; Y = 0.0; V = 0.0; VT = 0.0; YT = 0.0; RT = 0.0; DAA = 0.0; YPR = 0.0; VPR = 0.0; TOPW = 0.0; TOPWT = 0.0
CAREA = 0.0; TAREA = 0.0
BELEV(1) = UPIE
BELEV(NC) = DNIE
PHI = ASIN((UPIE-DNIE)/CLEN)
DLTX = CLEN/(REAL(NC-1)*0.5)
DO J=2,NC-1
DLTX2 = DLTX*0.5
SLOPE = (UPIE-DNIE)/CLEN
DIST = (REAL(J-1)*DLTX2)
BELEV(J) = UPIE-SLOPE*DIST
END DO
BEPR1 = UPIE+SLOPE*DLTX2
BEPR2 = DNIE-SLOPE*DLTX2
BC1 = (EL1-BEPR1)*COS(PHI)
IF (BC1 <= 0.0) BC1 = EL1-UPIE
BC2 = (EL2-BEPR2)*COS(PHI)
IF (BC2 <= 0.0) BC2 = EL2-DNIE
IF (.NOT. BEGIN(IC)) THEN
IF (WLFLAG(IC)) THEN
DO J=2,NC-1,2
WLSLOPE = ((BC1-BC2)/(CLEN+DLTX))*DCOS(PHI)
DIST = (REAL(J-1)*0.5*DLTX)+DLTX2
Y(J) = BC1-WLSLOPE*DIST
YT(J) = Y(J)
DTP(IC) = DT
END DO
ELSE
DO I=2,NC-1,2
Y(I) = YS(I,IC)
YT(I) = YST(I,IC)
END DO
END IF
END IF
DO I=1,NC,2
V(I) = VS(I,IC)
VT(I) = VST(I,IC)
END DO
IF (BEGIN(IC)) THEN
BEGIN(IC) = .FALSE.
DO J=2,NC-1,2
WLSLOPE = ((BC1-BC2)/(CLEN+DLTX))*DCOS(PHI)
DIST = (REAL(J-1)*0.5*DLTX)+DLTX2
Y(J) = BC1-WLSLOPE*DIST
YT(J) = Y(J)
DTP(IC) = DT
END DO
DO J=1,NC,2
V(J) = 0.0
VT(J) = V(J)
END DO
! OPENWRN = .TRUE.
END IF
SMOOTH_WATER_LEVELS = .FALSE.
DO N=1,NC,2
IF (N == NC) THEN
BAR1 = BAREA(BC2,DIA)
RAD1 = BAR1/WETPER(BC2,DIA)
ELSE
BAR1 = BAREA(Y(N+1),DIA)
RAD1 = BAR1/WETPER(Y(N+1),DIA)
END IF
IF (N == 1) THEN
BAR2 = BAREA(BC1,DIA)
RAD2 = BAR2/WETPER(BC1,DIA)
ELSE
BAR2 = BAREA(Y(N-1),DIA)
RAD2 = BAR2/WETPER(Y(N-1),DIA)
END IF
RT(N) = (RAD1+RAD2)*0.5
END DO
DO N=2,NC-1,2
TAREA(N) = BAREA(Y(N),DIA)
TOPWT(N) = TWIDTH(Y(N),DIA)
CAREA(N) = BAREA(Y(N),DIA)
END DO
! Projected water levels and velocities
DO J=1,NC,2
VPR(J) = V(J)+DT*(V(J)-VT(J))/DTP(IC)
END DO
DO J=2,NC-1,2
YPR(J) = Y(J)+DT*(Y(J)-YT(J))/DTP(IC)
END DO
! Matrix setup
VTOT = 0.0
DO J=1,NC,2
VTOT = VTOT+V(J)
END DO
VAVG = VTOT/(REAL(NC-1)*0.5)
! Continuity
DO N=2,NC-1,2
VPR(N) = (VPR(N-1)+VPR(N+1))*0.5D0
V(N) = (V(N-1)+V(N+1))*0.5D0
IF (N /= 2) DAA(N,N-2) = -THETA*(DT/DLTX)*(VPR(N)*0.5)
DAA(N,N-1) = -THETA*(DT/DLTX)*(TAREA(N)/TOPWT(N))
DAA(N,N) = 1.0D0
DAA(N,N+1) = THETA*(DT/DLTX)*(TAREA(N)/TOPWT(N))
IF (N /= NC-1) DAA(N,N+2) = THETA*(DT/DLTX)*(VPR(N)*0.5D0)
IF (N == 2) THEN
B(N) = Y(N)-(1.0D0-THETA)*(DT/DLTX)*(TAREA(N)/TOPWT(N))*(V(N+1)-V(N-1))-(1.0D0-THETA)*(DT/DLTX)*(V(N)*0.5D0)*(Y(N+2)-BC1) &
+THETA*(DT/DLTX)*(VPR(N)*0.5D0)*BC1
ELSE IF (N == NC-1) THEN
B(N) = Y(N)-(1.0D0-THETA)*(DT/DLTX)*(TAREA(N)/TOPWT(N))*(V(N+1)-V(N-1))-(1.0D0-THETA)*(DT/DLTX)*(V(N)*0.5D0)*(BC2-Y(N-2)) &
-THETA*(DT/DLTX)*(VPR(N)*0.5D0)*BC2
ELSE
B(N) = Y(N)-(1.0D0-THETA)*(DT/DLTX)*(TAREA(N)/TOPWT(N))*(V(N+1)-V(N-1))-(1.0D0-THETA)*(DT/DLTX)*(V(N)*0.5D0)*(Y(N+2)-Y(N-2))
END IF
END DO
IF (VAVG > 0.0 .OR. (VAVG == 0.0 .AND. EL1 > EL2)) THEN
!** Momentum
DO N=1,NC,2
IF (N /= 1) THEN
DAA(N,N-2) = -THETA*(DT/DLTX)*VPR(N)
DAA(N,N-1) = -THETA*(DT/DLTX)*G*DCOS(PHI)
END IF
DAA(N,N) = 1.0+THETA*DT*G*(FMAN**2)*DABS(VPR(N))/(RT(N)**(4.0/3.0))+THETA*(DT/DLTX)*VPR(N)+THETA*(CLOSS*0.5D0)*(DT/CLEN) &
*DABS(VPR(N))
IF (N /= NC) DAA(N,N+1) = THETA*(DT/DLTX)*G*DCOS(PHI)
IF (N == 1) THEN
B(N) = V(N)-(1.0D0-THETA)*(DT/DLTX)*G*(Y(N+1)-BC1)*DCOS(PHI)-(1.0D0-THETA)*V(N)*(DT/DLTX)*V(N)-(1.0D0-THETA)*DT*G*(FMAN**2) &
/(RT(N)**(4.0/3.0))*V(N)*DABS(V(N))+DT*G*DSIN(PHI)-(1.0D0-THETA)*(DT/CLEN)*(CLOSS*0.5D0)*V(N)*DABS(V(N))+THETA*(DT/DLTX) &
*G*DCOS(PHI)*BC1
ELSE IF (N == NC) THEN
B(N) = V(N)-(1.0D0-THETA)*(DT/DLTX)*G*(BC2-Y(N-1))*DCOS(PHI)-(1.0D0-THETA)*V(N)*(DT/DLTX)*(V(N)-V(N-2))-(1.0D0-THETA) &
*DT*G*(FMAN**2)/(RT(N)**(4.0/3.0))*V(N)*DABS(V(N))+DT*G*DSIN(PHI)-(1.0D0-THETA)*(DT/CLEN)*(CLOSS*0.5D0)*V(N)*DABS(V(N)) &
-THETA*(DT/DLTX)*G*DCOS(PHI)*BC2
ELSE
B(N) = V(N)-(1.0D0-THETA)*(DT/DLTX)*G*(Y(N+1)-Y(N-1))*COS(PHI)-(1.0D0-THETA)*V(N)*(DT/DLTX)*(V(N)-V(N-2))-(1.0D0-THETA) &
*DT*G*(FMAN**2)/(RT(N)**(4.0/3.0))*V(N)*DABS(V(N))+DT*G*DSIN(PHI)-(1.0D0-THETA)*(DT/CLEN)*(CLOSS*0.5D0)*V(N)*DABS(V(N))
END IF
END DO
ELSE
DO N=1,NC,2
IF (N /= NC) THEN
DAA(N,N+2) = THETA*(DT/DLTX)*VPR(N)
DAA(N,N+1) = THETA*(DT/DLTX)*G*DCOS(PHI)
END IF
DAA(N,N) = 1.0+THETA*DT*G*(FMAN**2)*DABS(VPR(N))/(RT(N)**(4.0/3.0))-THETA*(DT/DLTX)*VPR(N)+THETA*(CLOSS*0.5D0)*(DT/CLEN) &
*DABS(VPR(N))
IF (N /= 1) DAA(N,N-1) = -THETA*(DT/DLTX)*G*DCOS(PHI)
IF (N == NC) THEN
B(N) = V(N)-(1.0D0-THETA)*(DT/DLTX)*G*(BC2-Y(N-1))*DCOS(PHI)-(1.0-THETA)*V(N)*(DT/DLTX)*(-V(N))-(1.0D0-THETA)*DT*G*(FMAN**2) &
/(RT(N)**(4.0/3.0))*V(N)*DABS(V(N))+DT*G*DSIN(PHI)-(1.0-THETA)*(DT/CLEN)*(CLOSS*0.5)*V(N)*DABS(V(N))-THETA*(DT/DLTX) &
*G*DCOS(PHI)*BC2
ELSE IF (N == 1) THEN
B(N) = V(N)-(1.0D0-THETA)*(DT/DLTX)*G*(Y(N+1)-BC1)*DCOS(PHI)-(1.0-THETA)*V(N)*(DT/DLTX)*(V(N+2)-V(N))-(1.0D0-THETA) &
*DT*G*(FMAN**2)/(RT(N)**(4.0/3.0))*V(N)*ABS(V(N))+DT*G*SIN(PHI)-(1.0-THETA)*(DT/CLEN)*(CLOSS*0.5D0)*V(N)*DABS(V(N)) &
+THETA*(DT/DLTX)*G*DCOS(PHI)*BC1
ELSE
B(N) = V(N)-(1.0D0-THETA)*(DT/DLTX)*G*(Y(N+1)-Y(N-1))*DCOS(PHI)-(1.0D0-THETA)*V(N)*(DT/DLTX)*(V(N+2)-V(N))-(1.0D0-THETA) &
*DT*G*(FMAN**2)/(RT(N)**(4.0/3.0))*V(N)*DABS(V(N))+DT*G*DSIN(PHI)-(1.0D0-THETA)*(DT/CLEN)*(CLOSS*0.5D0)*V(N)*DABS(V(N))
END IF
END DO
END IF
NP = NN
CALL LUDCMP (DAA,NC,NP,INDX,D)
CALL LUBKSB (DAA,NC,NP,INDX,B)
DO I=2,NC-1,2
YOLD(I) = Y(I)
YST(I,IC) = Y(I)
END DO
DO I=2,NC-1,2
Y(I) = B(I)
END DO
! Smooth water levels
DO I=2,NC-1,2
IF (Y(I) <= 0.0) THEN
! IF (OPENWRN) THEN
! OPEN (391,FILE='culvert.wrn',STATUS='unknown')
! OPENWRN = .FALSE.
! END IF
SMOOTH_WATER_LEVELS = .TRUE.
END IF
END DO
IF (SMOOTH_WATER_LEVELS) THEN
DO J=2,NC-1,2
WLSLOPE = ((BC1-BC2)/(CLEN+DLTX))*DCOS(PHI)
DIST = (REAL(J-1)*0.5D0*DLTX)+DLTX2
Y(J) = BC1-WLSLOPE*DIST
END DO
! WRITE (391,10010) IC, JDAY
SMOOTH_WATER_LEVELS = .FALSE.
END IF
! Flows
NQCNT = 0
QSUM = 0.0
DO I=1,NC,2
VOLD(I) = V(I)
VST(I,IC) = V(I)
V(I) = B(I)
IF (I == NC) THEN
BAR1 = BAREA(BC2,DIA)
ELSE
BAR1 = BAREA(Y(I+1),DIA)
END IF
IF (I == 1) THEN
BAR2 = BAREA(BC1,DIA)
ELSE
BAR2 = BAREA(Y(I-1),DIA)
END IF
CAREA(I) = (BAR1+BAR2)*0.5D0
Q(I) = V(I)*CAREA(I)
NQCNT = NQCNT+1
QSUM = QSUM+Q(I)
END DO
QAVG = QSUM/REAL(NQCNT)
DO I=2,NC-1,2
YS(I,IC) = Y(I)
END DO
VMAX(IC) = 0.0
DO I=1,NC,2
VS(I,IC) = V(I)
VMAX(IC) = MAX(ABS(V(I)),VMAX(IC))
END DO
DTP(IC) = DT
QOUT = QAVG
QOLD(IC) = QOUT
WLFLAG(IC) = .FALSE.
10010 FORMAT ('water levels for culvert ',I3,' on Julian Day ',F10.3,' are <= 0 - predictions have been smoothed')
RETURN
ENTRY DEALLOCATE_OPEN_CHANNEL
DEALLOCATE (Y, V, CAREA, TOPW, BELEV, Q, VOLD, YOLD, B, YT, VT, VPR, YPR, TAREA, TOPWT, RT, INDX, AL, DAA) ! CB 10/4/07
RETURN
END SUBROUTINE OPEN_CHANNEL_INITIALIZE
!***********************************************************************************************************************************
!** S U B R O U T I N E G R I D A R E A 1 **
!***********************************************************************************************************************************
SUBROUTINE GRID_AREA1 (EL1,EL2,DIFF,BTOP)
USE GLOBAL; USE GEOMC; USE PREC
REAL(R8) :: BAREA1,BAREA2,DIST,DIST1,SLPE,DIST2
INTEGER :: K,K1,K2
REAL(R8) :: EL1,EL2,DIFF,BTOP
! Difference in areas for trapezoidal geometry
DO K=2,KB(I)
IF (EL(K,I) <= EL1) THEN
K1 = K
EXIT
END IF
END DO
DO K=2,KB(I)
IF (EL(K,I) <= EL2) THEN
K2 = K
EXIT
END IF
END DO
BAREA1 = 0.0
BAREA2 = 0.0
DO K=KB(I),K1,-1
BAREA1 = BAREA1+BH(K,I)
END DO
DIST = EL1-EL(K1,I)
IF (H(K1-1,JW)/2.0 < DIST) THEN
DIST1 = H(K1-1,JW)*0.5
SLPE = (B(K1-1,I)-BB(K1-1,I))/(0.5*H(K1-1,JW))
BAREA1 = BAREA1+BB(K1-1,I)*DIST1+0.5*SLPE*DIST1*DIST1
DIST2 = DIST-H(K1-1,JW)*0.5
SLPE = (BB(K1-2,I)-B(K1-1,I))/(0.5*H(K1-1,JW))
BAREA1 = BAREA1+B(K1-1,I)*DIST2+0.5*SLPE*DIST2*DIST2
BTOP = B(K1-1,I)+DIST2*SLPE
ELSE
SLPE = (B(K1-1,I)-BB(K1-1,I))/(0.5*H(K1-1,JW))
BAREA1 = BAREA1+BB(K1-1,I)*DIST+0.5*SLPE*DIST*DIST
BTOP = BB(K1-1,I)+DIST*SLPE
END IF
DO K=KB(I),K2,-1
BAREA2 = BAREA2+BH(K,I)
END DO
DIST = EL2-EL(K2,I)
IF (H(K2-1,JW)/2. < DIST) THEN
DIST1 = H(K2-1,JW)*0.5
SLPE = (B(K2-1,I)-BB(K2-1,I))/(0.5*H(K2-1,JW))
BAREA2 = BAREA2+BB(K2-1,I)*DIST1+0.5*SLPE*DIST1*DIST1
DIST2 = DIST-H(K2-1,JW)*0.5
SLPE = (BB(K2-2,I)-B(K2-1,I))/(0.5*H(K2-1,JW))
BAREA2 = BAREA2+B(K2-1,I)*DIST2+0.5*SLPE*DIST2*DIST2
ELSE
SLPE = (B(K2-1,I)-BB(K2-1,I))/(0.5*H(K2-1,JW))
BAREA2 = BAREA2+BB(K2-1,I)*DIST+0.5*SLPE*DIST*DIST
END IF
DIFF = BAREA1-BAREA2
RETURN
END SUBROUTINE
!***********************************************************************************************************************************
!** S U B R O U T I N E G R I D A R E A 2 **
!***********************************************************************************************************************************
SUBROUTINE GRID_AREA2
USE GLOBAL; USE GEOMC; USE RSTART
INTEGER :: K
REAL(R8) :: AREA,SL,A_COEF,B_COEF,C_COEF
AREA = (EL(KT,I)-SZ(I)-(EL(KT,I)-Z(I)))*BI(KT,I)
SL = (B(KT,I)-BB(KT,I))/(0.5*H(KT,JW))
A_COEF = -1.0
B_COEF = SZ(I)*2.+BI(KT,I)/(0.5*SL)
C_COEF = -AREA/(0.5*SL)-SZ(I)**2-BI(KT,I)*2.*SZ(I)/SL
Z(I) = (-B_COEF+SQRT(B_COEF**2-4.*A_COEF*C_COEF))/(2.0*A_COEF)
KTI(I) = 2
DO K=2,KB(I)
IF (EL(K,I) <= EL(KT,I)-Z(I)) THEN
KTI(I) = K-1
EXIT
END IF
END DO
RETURN
END SUBROUTINE
!***********************************************************************************************************************************
!** S U B R O U T I N E L U D C M P **
!***********************************************************************************************************************************
SUBROUTINE LUDCMP (A,N,NP,INDX,D)
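! LU decomposition of A with implicit partial pivoting (Crout's algorithm,
! Numerical Recipes style); INDX records the row permutation and D its parity.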
USE PREC
INTEGER :: I,J,K,IMAX,NP,N
INTEGER :: INDX(NP)
REAL(R8) :: A(NP,NP),D
REAL(R8) :: VV(500),AAMAX,SUM,DUM
REAL, PARAMETER :: TINY=1.0E-20
D = 1.0
DO I=1,N
AAMAX = 0.0
DO J=1,N
IF (ABS(A(I,J)) > AAMAX) AAMAX = ABS(A(I,J))
END DO
VV(I) = 1.0/AAMAX
END DO
DO J=1,N
DO I=1,J-1
SUM = A(I,J)
DO K=1,I-1
SUM = SUM-A(I,K)*A(K,J)
END DO
A(I,J) = SUM
END DO
AAMAX = 0.0
DO I=J,N
SUM = A(I,J)
DO K=1,J-1
SUM = SUM-A(I,K)*A(K,J)
END DO
A(I,J) = SUM
DUM = VV(I)*ABS(SUM)
IF (DUM >= AAMAX) THEN
IMAX = I
AAMAX = DUM
END IF
END DO
IF (J /= IMAX) THEN
DO K=1,N
DUM = A(IMAX,K)
A(IMAX,K) = A(J,K)
A(J,K) = DUM
END DO
D = -D
VV(IMAX) = VV(J)
END IF
INDX(J) = IMAX
IF (A(J,J) == 0.0) A(J,J) = TINY
IF (J /= N) THEN
DUM = 1.0/A(J,J)
DO I=J+1,N
A(I,J) = A(I,J)*DUM
END DO
END IF
END DO
END SUBROUTINE LUDCMP
!***********************************************************************************************************************************
!** S U B R O U T I N E L U B K S B **
!***********************************************************************************************************************************
SUBROUTINE LUBKSB (A,N,NP,INDX,B)
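! Solves A*x = b by forward and back substitution using the LU factors and row
! permutation produced by LUDCMP; B is overwritten with the solution.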
USE PREC
INTEGER :: N, NP
REAL(R8):: A(NP,NP), B(N),SUM
INTEGER :: INDX(NP)
INTEGER :: I, II, J, LL
II = 0
DO I=1,N
LL = INDX(I)
SUM = B(LL)
B(LL) = B(I)
IF (II /= 0) THEN
DO J=II,I-1
SUM = SUM-A(I,J)*B(J)
END DO
ELSE IF (SUM /= 0.0) THEN
II = I
END IF
B(I) = SUM
END DO
DO I=N,1,-1
SUM = B(I)
DO J=I+1,N
SUM = SUM-A(I,J)*B(J)
END DO
B(I) = SUM/A(I,I)
END DO
END SUBROUTINE LUBKSB
!***********************************************************************************************************************************
!** F U N C T I O N B A R E A **
!***********************************************************************************************************************************
REAL (R8) FUNCTION BAREA (DEPTH,DIA)
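! Wetted cross-sectional area of a circular pipe of diameter DIA at water depth
! DEPTH (circular-segment formula); full-pipe area PI*DIA**2/4 when DEPTH >= DIA.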
USE PREC
REAL(R8), PARAMETER ::PI=3.14159265359D0
REAL(R8) :: DEPTH,DIA
IF (DEPTH < DIA) THEN
BAREA = (DEPTH-DIA*0.5D0)*DSQRT(DEPTH*DIA-DEPTH**2)+(DIA**2*0.25D0)*DASIN((2.0D0/DIA)*(DEPTH-DIA*0.5D0))+(PI*DIA**2)/8.0D0
ELSE
BAREA = (PI*DIA**2)*0.25D0
END IF
END FUNCTION BAREA
!***********************************************************************************************************************************
!** F U N C T I O N T W I D T H **
!***********************************************************************************************************************************
REAL (R8) FUNCTION TWIDTH (DEPTH,DIA)
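! Free-surface (top) width of flow in a circular pipe at depth DEPTH; a small
! nominal width (0.005*DIA) is returned when the pipe is full.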
USE PREC
REAL(R8) :: DEPTH,DIA
IF (DEPTH < DIA) THEN
TWIDTH = 2.0D0*DSQRT((DIA*DEPTH)-DEPTH**2)
ELSE
TWIDTH = 0.005D0*DIA
END IF
END FUNCTION TWIDTH
!***********************************************************************************************************************************
!** F U N C T I O N W E T P E R **
!***********************************************************************************************************************************
REAL (R8) FUNCTION WETPER (DEPTH,DIA)
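! Wetted perimeter of a circular pipe at water depth DEPTH; full circumference
! PI*DIA when the pipe is full.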
USE PREC
REAL(R8) :: DEPTH,DIA
REAL(R8), PARAMETER :: PI=3.14159265359D0
IF (DEPTH < DIA) THEN
WETPER = DIA*(DASIN((2.0D0/DIA)*(DEPTH-DIA*0.5D0))+PI*0.5D0)
ELSE
WETPER = PI*DIA
END IF
END FUNCTION WETPER
!***********************************************************************************************************************************
!** F U N C T I O N D E P T H C R I T **
!***********************************************************************************************************************************
REAL (R8) FUNCTION DEPTHCRIT (FLOW)
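! Critical depth for the given FLOW in a circular pipe of diameter DIA, found by
! bracketing and solving CDFUNC = 0 with the Brent root finder ZBRENT1.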
USE STRUCTURES
REAL(R8) :: FLOW,X1,X2,TOL,ZBRENT1
X1 = DIA/1.0D7
X2 = DIA
TOL = 0.001
DEPTHCRIT = ZBRENT1(X1,X2,TOL,FLOW)
END FUNCTION DEPTHCRIT
!***********************************************************************************************************************************
!** F U N C T I O N C D F U N C **
!***********************************************************************************************************************************
REAL (R8) FUNCTION CDFUNC (DEPTH,FLOW)
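! Critical-flow condition Q**2*T/(g*A**3) - 1; zero when DEPTH equals the critical
! depth for the given FLOW.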
USE STRUCTURES
REAL(R8) :: DEPTH,FLOW,BAREA,TWIDTH
CDFUNC = (FLOW**2*TWIDTH(DEPTH,DIA))/(BAREA(DEPTH,DIA)**3*9.81D0)-1.0D0
END FUNCTION CDFUNC
!***********************************************************************************************************************************
!** F U N C T I O N Z B R E N T **
!***********************************************************************************************************************************
REAL (R8) FUNCTION ZBRENT1 (X1,X2,TOL,BARG)
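! Root finder: expands the initial bracket [X1,X2] until CDFUNC changes sign, then
! applies Brent's method (bisection / secant / inverse quadratic) to tolerance TOL.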
USE PREC
EXTERNAL CDFUNC
REAL, PARAMETER :: FACTOR=0.1, EPS=3.E-8
INTEGER, PARAMETER :: NTRY=50, ITMAX=100
INTEGER :: I,J,ITER
REAL(R8) :: F1,F2,X1,X2,TOL,BARG,BA,B,FA,FB,FC,CDFUNC
REAL(R8) :: C,D,E,TOL1,XM,S,P,Q,R
F1 = CDFUNC(X1,BARG)
F2 = CDFUNC(X2,BARG)
IF (F1 <= 0.0) THEN
DO I=1,40
X1 = X1/10.0
F1 = CDFUNC(X1,BARG)
IF (F1 > 0.0) EXIT
END DO
END IF
DO J=1,NTRY
IF (F1*F2 < 0.0) EXIT
IF (ABS(F1) < ABS(F2)) THEN
X1 = X1+FACTOR*(X1-X2)
F1 = CDFUNC(X1,BARG)
ELSE
X2 = X2+FACTOR*(X2-X1)
F2 = CDFUNC(X2,BARG)
END IF
END DO
BA = X1
B = X2
FA = CDFUNC(BA,BARG)
FB = CDFUNC(B,BARG)
FC = FB
DO ITER=1,ITMAX
IF (FB*FC > 0.0) THEN
C = BA
FC = FA
D = B-BA
E = D
END IF
IF (ABS(FC) < ABS(FB)) THEN
BA = B
B = C
C = BA
FA = FB
FB = FC
FC = FA
END IF
TOL1 = 2.0*EPS*ABS(B)+0.5*TOL
XM = 0.5*(C-B)
IF (ABS(XM) <= TOL1 .OR. FB == 0.0) THEN
ZBRENT1 = B; EXIT
END IF
IF (ABS(E) >= TOL1 .AND. ABS(FA) > ABS(FB)) THEN
S = FB/FA
IF (BA == C) THEN
P = 2.0*XM*S
Q = 1.0-S
ELSE
Q = FA/FC
R = FB/FC
P = S*(2.*XM*Q*(Q-R)-(B-BA)*(R-1.0))
Q = (Q-1.0)*(R-1.0)*(S-1.0)
END IF
IF (P > 0.0) Q = -Q
P = ABS(P)
IF (2.0*P < MIN(3.0*XM*Q-ABS(TOL1*Q),ABS(E*Q))) THEN
E = D
D = P/Q
ELSE
D = XM
E = D
END IF
ELSE
D = XM
E = D
END IF
BA = B
FA = FB
IF (ABS(D) > TOL1) THEN
B = B+D
ELSE
B = B+SIGN(TOL1,XM)
END IF
FB = CDFUNC(B,BARG)
END DO
ZBRENT1 = B
END FUNCTION ZBRENT1
|
{"hexsha": "7fcd15aa4a862305381e85643078be3666d65f48", "size": 30489, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "gate-spill-pipe.f90", "max_stars_repo_name": "WQDSS/CE-QUAL-W2-Linux", "max_stars_repo_head_hexsha": "62479d6c1ae8a2dcb632327d96e5084b52d6f9b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-12-20T15:21:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-04T23:04:11.000Z", "max_issues_repo_path": "gate-spill-pipe.f90", "max_issues_repo_name": "WQDSS/CE-QUAL-W2-Linux", "max_issues_repo_head_hexsha": "62479d6c1ae8a2dcb632327d96e5084b52d6f9b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-05T01:09:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-10T19:57:41.000Z", "max_forks_repo_path": "gate-spill-pipe.f90", "max_forks_repo_name": "WQDSS/CE-QUAL-W2-Linux", "max_forks_repo_head_hexsha": "62479d6c1ae8a2dcb632327d96e5084b52d6f9b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-18T23:29:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-14T00:14:48.000Z", "avg_line_length": 34.0659217877, "max_line_length": 185, "alphanum_fraction": 0.4166092689, "num_tokens": 11824}
|
#####################################################################
# Task 2 : identify an image region by hue
#####################################################################
import cv2
import numpy as np
#####################################################################
# define video capture with access to camera 0
camera = cv2.VideoCapture(0)
# read an image from the camera
_, image = camera.read()
# convert the RGB images to HSV
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# print the HSV values of the middle pixel
height, width, _ = image.shape
print('Middle pixel HSV: ', image_hsv[int(height/2)][int(width/2)])
# define the range of hues to detect - adjust these to detect different colours
lower_green = np.array([55, 50, 50])
upper_green = np.array([95, 255, 255])
# create a mask that identifies the pixels in the range of hues
mask = cv2.inRange(image_hsv, lower_green, upper_green)
mask_inverted = cv2.bitwise_not(mask)
# create a grey image and black out the masked area
image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_grey = cv2.bitwise_and(image_grey, image_grey, mask=mask_inverted)
# black out unmasked area of original image
image_masked = cv2.bitwise_and(image, image, mask=mask)
# combine the two images for display
image_grey = cv2.cvtColor(image_grey, cv2.COLOR_GRAY2BGR)
image_combined = cv2.add(image_grey, image_masked)
# display the image in the window
cv2.imshow("HSV - colour selected image", image_combined)
# wait indefinitely for any key press to exist
cv2.waitKey(0)
#####################################################################
# Author : Toby Breckon / Magnus Bordewich
# Copyright (c) 2022 Dept Computer Science, Durham University, UK
#####################################################################
|
{"hexsha": "b3814c3f98426c7a6236dcb75e444fa2e197da2d", "size": 1811, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/hsv_colour.py", "max_stars_repo_name": "tobybreckon/colour-filtering", "max_stars_repo_head_hexsha": "1679db2c075036f68dcc8a75c575c8f362e9ec94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2022-03-23T23:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T21:48:31.000Z", "max_issues_repo_path": "src/hsv_colour.py", "max_issues_repo_name": "tobybreckon/chroma-keying", "max_issues_repo_head_hexsha": "1679db2c075036f68dcc8a75c575c8f362e9ec94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hsv_colour.py", "max_forks_repo_name": "tobybreckon/chroma-keying", "max_forks_repo_head_hexsha": "1679db2c075036f68dcc8a75c575c8f362e9ec94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0298507463, "max_line_length": 79, "alphanum_fraction": 0.6096079514, "include": true, "reason": "import numpy", "num_tokens": 402}
|
import os
import numpy as np
import pandas as pd
from models.predictor import predict
from evaluation.metrics import evaluate
from plots.rec_plots import pandas_bar_plot
def getGroup(user_counts):
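# Buckets each user's interaction count into one of four ranges bounded by the
# first quartile, median and third quartile of the sorted counts, and returns the
# range (as a string label) for every user. Note the upper bound of the last
# bucket is len(user_counts), which assumes no count exceeds the number of users.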
sorted_user_counts = np.sort(user_counts)
full_length = len(user_counts)
first_quater = sorted_user_counts[full_length//4]
median = sorted_user_counts[full_length // 2]
third_quater = sorted_user_counts[full_length // 4 * 3]
patents = [[0, first_quater], [first_quater+1, median], [median+1, third_quater], [third_quater+1, full_length]]
group = []
for user_count in user_counts:
for patent in patents:
if user_count >= patent[0] and user_count <= patent[1]:
group.append(str(patent))
return group
def usercategory(Rtrain, Rvalid, df_input, topK, metric, problem, model_folder, gpu_on=True):
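# For every model listed in df_input: load its saved user/item factors (and bias,
# if present), build top-K recommendations from the training matrix, evaluate the
# requested metrics per user, then group users by training-interaction count and
# bar-plot each metric against user-count group.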
user_observation_counts = np.array(np.sum(Rtrain, axis=1)).flatten()
user_observation_counts = user_observation_counts[np.array(np.sum(Rvalid, axis=1)).flatten() != 0]
index = None
evaluated_metrics = None
medians = []
giant_dataframes = []
for idx, row in df_input.iterrows():
row = row.to_dict()
RQ = np.load('{2}/U_{0}_{1}.npy'.format(row['model'], row['rank'], model_folder))
Y = np.load('{2}/V_{0}_{1}.npy'.format(row['model'], row['rank'], model_folder))
if os.path.isfile('{2}/B_{0}_{1}.npy'.format(row['model'], row['rank'], model_folder)):
Bias = np.load('{2}/B_{0}_{1}.npy'.format(row['model'], row['rank'], model_folder))
else:
Bias = None
prediction = predict(matrix_U=RQ,
matrix_V=Y,
bias=Bias,
topK=topK[-1],
matrix_Train=Rtrain,
measure=row['similarity'],
gpu=gpu_on)
result = evaluate(prediction, Rvalid, metric, topK, analytical=True)
df = pd.DataFrame(result)
df['model'] = row['model']
df['user_count'] = user_observation_counts
giant_dataframes.append(df)
if evaluated_metrics is None:
evaluated_metrics = result.keys()
giant_df = pd.concat(giant_dataframes)
giant_df['group'] = getGroup(giant_df['user_count'].values)
giant_df = giant_df.sort_values('group', ascending=True).reset_index(drop=True)
for metric in evaluated_metrics:
pandas_bar_plot(x='group', y=metric, hue='model', x_name='User Category', y_name=metric, df=giant_df,
folder='analysis/{0}/numofrating'.format(problem), name=metric)
|
{"hexsha": "cb9141f713e83dad1e1f7ad534bed70cf376b14e", "size": 2685, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment/usercategory.py", "max_stars_repo_name": "wuga214/MultiModesPreferenceEstimation", "max_stars_repo_head_hexsha": "f80c2feb196cb498a8b417f2037aadad151cceb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2018-11-06T22:17:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T23:32:10.000Z", "max_issues_repo_path": "experiment/usercategory.py", "max_issues_repo_name": "wuga214/MultiModesPreferenceEstimation", "max_issues_repo_head_hexsha": "f80c2feb196cb498a8b417f2037aadad151cceb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-12-17T06:07:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-09T00:17:14.000Z", "max_forks_repo_path": "experiment/usercategory.py", "max_forks_repo_name": "wuga214/MultiModesPreferenceEstimation", "max_forks_repo_head_hexsha": "f80c2feb196cb498a8b417f2037aadad151cceb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-12-07T05:48:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-28T13:49:42.000Z", "avg_line_length": 34.4230769231, "max_line_length": 116, "alphanum_fraction": 0.6175046555, "include": true, "reason": "import numpy", "num_tokens": 645}
|
# import os
# import numpy as np
# from PIL import Image
# from .. import utils
# import logging
# logger = logging.getLogger()
# # ----- parsers
# # These objects are muxes; they consume and streamline the output.
# # Don't know what a mux is? Study electronics.
# class BaseParser:
# """This is the base parser class which has the required methods to process
# the three kind of structures:
# #. None
# #. list: ``process_list()``
# #. dict: ``process_dict()``
# and a function dedicated to processing the primitives
# #. ``process_primitive()``
# These functions are called from the ``__call__`` method.
# """
# def __init__(self):
# pass
# def process_primitive(self):
# raise NotImplementedError
# def process_dict(self):
# raise NotImplementedError
# def process_list(self):
# raise NotImplementedError
# def __call__(self, input_object):
# if isinstance(input_object, list):
# logger.info(f" - {self.__class__.__name__} - (A1)")
# out = self.process_list(input_object)
# elif isinstance(input_object, dict):
# logger.info(f" - {self.__class__.__name__} - (A2)")
# out = self.process_dict(input_object)
# else:
# logger.info(f" - {self.__class__.__name__} - (A3)")
# out = self.process_primitive(input_object)
# # apply self.post_proc_fn if it exists
# if self.post_proc_fn:
# # either you are going to get a dict or you are going to get a np.ndarray
# if isinstance(out, dict):
# if isinstance(next(iter(out.values())), dict):
# # dict of dicts kinda thingy (say when two inputs are given)
# out = {k: {_k: self.post_proc_fn(_v) for _k, _v in v.items()} for k, v in out.items()}
# else:
# out = {k: self.post_proc_fn(v) for k, v in out.items()}
# else:
# out = self.post_proc_fn(out)
# return out
# # ----- parsers for each category
# class ImageParser(BaseParser):
# def __init__(self, post_proc_fn=None, cloud_infer=False, **kwargs):
# """single unified Image parser that consumes different types of data and returns a processed numpy array
# Args:
# post_proc_fn (callable, optional): post processing function, this takes in the torch tensor and performs
# operation
# cloud_infer (bool, optional): whether the input is from cloud inference
# kwargs (dict, optional): keyword arguments to store here
# """
# super().__init__()
# self.post_proc_fn = post_proc_fn
# self.cloud_infer = cloud_infer
# for k, v in kwargs.items():
# setattr(self, k, v)
# # common operations
# # if not cloud_infer and input is int then rescale it 0, 1
# def rescale(self, x: np.ndarray):
# if not self.cloud_infer and "int" in str(x.dtype):
# x = x / 255
# return x
# def rearrange(self, x: np.ndarray):
# if len(x.shape) == 3 and x.shape[0] != 3:
# return x.transpose(2, 0, 1)
# elif len(x.shape) == 4 and x.shape[1] != 3:
# return x.transpose(0, 3, 1, 2)
# return x
# def process_primitive(self, x, target_shape=None):
# """primitive can be string, array, Image"""
# if isinstance(x, np.ndarray):
# logger.info(" - ImageParser - (C2) np.ndarray")
# # if shape == 3, unsqueeze it, numpy arrays cannot be reshaped
# out = x[None, ...] if len(x.shape) == 3 else x
# elif isinstance(x, Image.Image):
# logger.info(" - ImageParser - (C1) Image.Image object")
# img = x
# elif isinstance(x, str):
# if os.path.isfile(x):
# logger.info(" - ImageParser - (C3) string - file")
# img = Image.open(x)
# elif x.startswith("http"):
# logger.info(" - ImageParser - (C4) string - url")
# img = utils.get_image(x)
# else:
# try:
# # probably base64
# from io import BytesIO
# import base64
# logger.info(" - ImageParser - (C5) string - base64")
# img = Image.open(BytesIO(base64.b64decode(x)))
# except:
# raise Exception("Unable to parse string as Image")
# else:
# raise ValueError("Unknown primitive type: {}".format(type(x)))
# if not isinstance(x, np.ndarray):
# img = img.convert("RGB")
# # this handles the case where a primitive is sent directly but there is just one template,
# # so just use the only shape it has
# if target_shape is None and hasattr(self, "templates") and len(self.templates) == 1:
# target_shape = self.templates[next(iter(self.templates.keys()))]
# target_shape = target_shape[-2:][::-1] # [h,w] -> [w,h]
# # if a certain target shape is given, then resize it to that shape
# if target_shape is not None:
# target_shape = target_shape
# img = img.resize(target_shape)
# out = self.process_primitive(np.array(img))
# # finally perform rearrange and rescale
# return self.rescale(self.rearrange(out))
# def process_dict(self, input_object, r_depth=0):
# """takes in a dict, check if values are list, if list send to process_list
# else process_primitive"""
# out = {}
# # if hasattr(self, "templates"), then the keys in the input object should match
# if hasattr(self, "templates"):
# assert set(input_object.keys()) == set(
# self.templates.keys()
# ), f"input object keys do not match templates: {set(input_object.keys()) - set(self.templates.keys())}"
# for k, v in input_object.items():
# # if templates are given and the key is the same, then load that
# target_shape = None
# if hasattr(self, "templates"):
# target_shape = self.templates[k]
# target_shape = target_shape[-2:][::-1] # [h,w] -> [w,h]
# # call the underlying object (structure or primitive)
# if isinstance(v, list):
# out[k] = self.process_list(v, target_shape, r_depth=r_depth + 1)
# elif isinstance(v, dict):
# out[k] = self.process_dict(v, r_depth=r_depth + 1)
# else:
# out[k] = self.process_primitive(v, target_shape)
# return out
# def process_list(self, input_object, target_shape=None, r_depth=0):
# """takes in a list. This function is very tricky because the input
# can be a list or a p-list, so we first check if input is not a string, Image or dict"""
# # r_depth is the depth of the current recursion, possible depths:
# # r_depth=0 -> [URL, URL]
# # r_depth=1 -> {k:[URL, URL], l:[URL, URL]}
# # r_depth=2 -> [{k:[URL, URL], l:[URL, URL]}, {k:[URL, URL], l:[URL, URL]}]
# # thus if r_depth >= 3, raise an error
# if r_depth >= 3:
# raise RecursionError("Cannot go deeper with a list input")
# if r_depth == 0 and hasattr(self, "templates") and len(self.templates) > 1:
# raise ValueError(f"Template has more than 1 input, please input a dict with keys: {tuple(self.templates.keys())}")
# if isinstance(input_object[0], (str, Image.Image)):
# logger.info(" - ImageParser - (B1) list - (str, Image.Image)")
# out = [self.process_primitive(x, target_shape) for x in input_object]
# return np.vstack(out)
# elif isinstance(input_object[0], dict):
# logger.info(" - ImageParser - (B2) list - (dict)")
# assert all([set(input_object[0].keys()) == set(i.keys()) for i in input_object]), "All keys must be same in all dicts in list"
# out = [self.process_dict(x) for x in input_object]
# out = {k: np.vstack([x[k] for x in out]) for k in out[0].keys()}
# return out
# else:
# # check if this is a list of lists or np.ndarrays
# if isinstance(input_object[0], list):
# logger.info(" - ImageParser - (B3) list - (list)")
# # convert input_object to a np.array and check shapes - used in nbox-dply
# out = np.array(input_object)
# if len(out.shape) == 3:
# out = out[None, ...]
# return out
# else:
# logger.info(" - ImageParser - (B4) list - (primitive)")
# out = [self.process_primitive(x, target_shape) for x in input_object]
# return np.vstack(out)
# class TextParser(BaseParser):
# def __init__(self, tokenizer, max_len=None, **kwargs):
# """Unified Text parsing engine, returns tokenized dictionaries
# Args:
# tokenizer (Tokenizer): tokenizer object for the text
# max_len (int): maximum length of the text
# """
# super().__init__()
# # tokenizer is supposed to be AutoTokenizer object, check that
# self.tokenizer = tokenizer
# self.max_len = max_len
# for k, v in kwargs.items():
# setattr(self, k, v)
# def process_primitive(self, x):
# # in case of text this is quite simple because only primitive is strings
# if isinstance(x, str):
# logger.info(" - TextParser - (C1) string")
# return {
# k: np.array(v)[None, ...]
# for k, v in self.tokenizer(
# text=x,
# add_special_tokens=True,
# max_length=self.max_len,
# padding="max_length" if self.max_len is not None else False,
# ).items()
# }
# elif isinstance(x, np.ndarray):
# logger.info(" - TextParser - (C2) ndarray")
# return x
# else:
# raise ValueError(f"Unsupported type for TextParser: {type(x)}")
# def process_dict(self, input_object):
# """takes in a dict and for each key's type call that method"""
# out = {}
# for k, v in input_object.items():
# if isinstance(v, list):
# out[k] = self.process_list(v)
# elif isinstance(v, dict):
# out[k] = self.process_dict(v)
# else:
# out[k] = self.process_primitive(v)
# return out
# def process_list(self, input_object):
# """takes in and tokenises the strings"""
# assert all([isinstance(x, str) for x in input_object]), "TextParser - (B1) input must be list of strings"
# return {k: np.array(v) for k, v in self.tokenizer(input_object, padding="longest").items()}
# simple parsing
class Mux:
    """Minimal recursive mux: walks lists and dicts and applies ``primitive`` to every leaf."""

    @staticmethod
    def process_list(x):
        # type checking/
        t0 = type(x[0])
        if t0 == list:
            raise ValueError("Mux does not support nested lists")
        if any([type(x_) != t0 for x_ in x]):
            raise ValueError("Mux does not support mixed types")
        # logic/
        if t0 == dict:
            # a list of dicts becomes a dict of processed lists, keyed like the first element
            x = {k: Mux.process_list([x_[k] for x_ in x]) for k in x[0].keys()}
        else:
            x = Mux.primitive(x)
        return x

    @staticmethod
    def process_dict(x):
        for k, v in x.items():
            if isinstance(v, dict):
                x[k] = Mux.process_dict(v)
            elif isinstance(v, list):
                x[k] = Mux.process_list(v)
            else:
                x[k] = Mux.primitive(v)
        return x

    @staticmethod
    def parse(x, *a, **b):
        if isinstance(x, dict):
            return Mux.process_dict(x, *a, **b)
        elif isinstance(x, list):
            return Mux.process_list(x, *a, **b)
        else:
            return Mux.primitive(x, *a, **b)
    @staticmethod
    def primitive(x):
        # placeholder leaf handler: a no-op pass-through for now; real per-leaf
        # processing (tokenization, image decoding, etc.) would plug in here
        return x
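

# Hedged usage sketch (the nested payload is made up purely for illustration):
#
# >>> payload = {"images": ["a.png", "b.png"], "meta": {"ids": [1, 2]}}
# >>> Mux.parse(payload)
# {'images': ['a.png', 'b.png'], 'meta': {'ids': [1, 2]}}
#
# With the pass-through ``primitive`` above, the structure comes back unchanged;
# swapping in a real leaf handler is where per-element processing would happen.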
|
{"hexsha": "cb54c9458b037f16c03d89b29f04291304b49605", "size": 11001, "ext": "py", "lang": "Python", "max_stars_repo_path": "nbox/framework/parsers.py", "max_stars_repo_name": "cshubhamrao/nbox", "max_stars_repo_head_hexsha": "df32552e94c436b3d55b197263e5834bdbb8b724", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nbox/framework/parsers.py", "max_issues_repo_name": "cshubhamrao/nbox", "max_issues_repo_head_hexsha": "df32552e94c436b3d55b197263e5834bdbb8b724", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nbox/framework/parsers.py", "max_forks_repo_name": "cshubhamrao/nbox", "max_forks_repo_head_hexsha": "df32552e94c436b3d55b197263e5834bdbb8b724", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6019417476, "max_line_length": 134, "alphanum_fraction": 0.607035724, "include": true, "reason": "import numpy", "num_tokens": 2931}
|