text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import os
import shutil
from pathlib import Path
import numpy as np
from .extraction_tools import load_extractor_from_pickle, load_extractor_from_dict, \
load_extractor_from_json
def check_recordings_equal(RX1, RX2, return_scaled=True, force_dtype=None, check_times=True):
    """Assert that two recording extractors expose identical metadata and traces.

    Parameters
    ----------
    RX1, RX2 : recording extractors to compare
    return_scaled : bool
        Forwarded to every ``get_traces``/``get_snippets`` call.
    force_dtype : dtype or None
        When given, traces are cast to this dtype before comparison.
    check_times : bool
        When True, also spot-check the frame<->time conversions.
    """
    def _cast(traces):
        # Leave arrays untouched unless a comparison dtype was requested.
        return traces if force_dtype is None else traces.astype(force_dtype)

    # Basic metadata must agree.
    assert np.allclose(RX1.get_channel_ids(), RX2.get_channel_ids())
    assert np.allclose(RX1.get_num_channels(), RX2.get_num_channels())
    assert np.allclose(RX1.get_num_frames(), RX2.get_num_frames())
    assert np.allclose(RX1.get_sampling_frequency(), RX2.get_sampling_frequency())

    # Full trace comparison.
    assert np.allclose(_cast(RX1.get_traces(return_scaled=return_scaled)),
                       _cast(RX2.get_traces(return_scaled=return_scaled)))

    # Slice over the whole frame range, restricted to the first and last channels.
    edge_channels = [RX1.get_channel_ids()[0], RX1.get_channel_ids()[-1]]
    total_frames = RX1.get_num_frames()
    assert np.allclose(
        _cast(RX1.get_traces(channel_ids=edge_channels, start_frame=0,
                             end_frame=total_frames, return_scaled=return_scaled)),
        _cast(RX2.get_traces(channel_ids=edge_channels, start_frame=0,
                             end_frame=total_frames, return_scaled=return_scaled)))

    if check_times:
        # Spot-check the frame/time mappings every 10 frames.
        for frame in range(0, RX1.get_num_frames(), 10):
            assert np.isclose(RX1.frame_to_time(frame), RX2.frame_to_time(frame))
            assert np.isclose(RX1.time_to_frame(RX1.frame_to_time(frame)),
                              RX2.time_to_frame(RX2.frame_to_time(frame)))

    # Snippets: an integer length of 20 must be equivalent to the (10, 10) tuple form.
    reference_frames = [30, 50, 80]
    snippets_a = RX1.get_snippets(reference_frames=reference_frames, snippet_len=20,
                                  return_scaled=return_scaled)
    snippets_b = RX2.get_snippets(reference_frames=reference_frames, snippet_len=(10, 10),
                                  return_scaled=return_scaled)
    for snip_a, snip_b in zip(snippets_a, snippets_b):
        assert np.allclose(_cast(snip_a), _cast(snip_b))
def check_recording_properties(RX1, RX2):
    """Assert that two recordings carry identical channel properties.

    String-valued properties are compared with ``==``; everything else is
    converted to an array and compared with ``np.allclose``.
    """
    assert sorted(RX1.get_shared_channel_property_names()) == \
        sorted(RX2.get_shared_channel_property_names())
    for prop_name in RX1.get_shared_channel_property_names():
        for channel in RX1.get_channel_ids():
            value1 = RX1.get_channel_property(channel, prop_name)
            value2 = RX2.get_channel_property(channel, prop_name)
            if isinstance(value1, str):
                assert value1 == value2
            else:
                assert np.allclose(np.array(value1), np.array(value2))
def check_recording_return_types(RX):
    """Assert that a recording extractor's getters return the expected types.

    Channel/frame counts must be plain or numpy integers, the sampling
    frequency a float, traces a numpy array (or memmap), and every channel
    id an integer.
    """
    channel_ids = RX.get_channel_ids()
    assert isinstance(RX.get_num_channels(), (int, np.integer))
    assert isinstance(RX.get_num_frames(), (int, np.integer))
    # np.float (a deprecated alias of the builtin float) was removed in
    # NumPy 1.24; np.floating covers all numpy floating scalar types.
    assert isinstance(RX.get_sampling_frequency(), (float, np.floating))
    assert isinstance(RX.get_traces(start_frame=0, end_frame=10), (np.ndarray, np.memmap))
    for channel_id in channel_ids:
        assert isinstance(channel_id, (int, np.integer))
def check_sorting_return_types(SX):
    """Assert that unit ids and spike-train samples are plain or numpy integers."""
    integer_types = (int, np.integer)
    unit_ids = SX.get_unit_ids()
    assert all(isinstance(unit_id, integer_types) for unit_id in unit_ids)
    for unit_id in unit_ids:
        spike_train = SX.get_unit_spike_train(unit_id)
        assert all(isinstance(sample, integer_types) for sample in spike_train)
def check_sortings_equal(SX1, SX2):
    """Assert that two sortings have the same units and identical spike trains."""
    unit_ids1 = np.sort(np.array(SX1.get_unit_ids()))
    unit_ids2 = np.sort(np.array(SX2.get_unit_ids()))
    assert np.allclose(unit_ids1, unit_ids2)
    for unit_id in unit_ids1:
        # Sort both trains so that ordering differences do not matter.
        train1 = np.sort(SX1.get_unit_spike_train(unit_id))
        train2 = np.sort(SX2.get_unit_spike_train(unit_id))
        assert np.array_equal(train1, train2)
def check_sorting_properties_features(SX1, SX2):
    """Assert that two sortings share identical unit properties and spike features."""
    # Properties: same names, then per-unit value comparison.
    print(SX1.__class__)
    print('Properties', sorted(SX1.get_shared_unit_property_names()), sorted(SX2.get_shared_unit_property_names()))
    assert sorted(SX1.get_shared_unit_property_names()) == sorted(SX2.get_shared_unit_property_names())
    for prop_name in SX1.get_shared_unit_property_names():
        for unit in SX1.get_unit_ids():
            value1 = SX1.get_unit_property(unit, prop_name)
            value2 = SX2.get_unit_property(unit, prop_name)
            if isinstance(value1, str):
                assert value1 == value2
            else:
                assert np.allclose(np.array(value1), np.array(value2))
    # Features: same names, then per-unit numeric comparison.
    print('Features', sorted(SX1.get_shared_unit_spike_feature_names()), sorted(SX2.get_shared_unit_spike_feature_names()))
    assert sorted(SX1.get_shared_unit_spike_feature_names()) == sorted(SX2.get_shared_unit_spike_feature_names())
    for feature_name in SX1.get_shared_unit_spike_feature_names():
        for unit in SX1.get_unit_ids():
            assert np.allclose(np.array(SX1.get_unit_spike_features(unit, feature_name)),
                               np.array(SX2.get_unit_spike_features(unit, feature_name)))
def check_dumping(extractor):
    """Round-trip an extractor through dict/json/pickle dumps and verify equality.

    JSON and pickle are exercised both with the default output path and an
    explicit ``test_dumping/`` path; pickle round-trips additionally compare
    properties/features. All files created along the way are removed at the end.
    """
    # The extractor kind decides which comparison helpers apply.
    is_recording = 'Recording' in str(type(extractor))
    is_sorting = 'Sorting' in str(type(extractor))

    def _compare(loaded, with_properties=False):
        # No-op for extractors that are neither recordings nor sortings.
        if is_recording:
            check_recordings_equal(extractor, loaded)
            if with_properties:
                check_recording_properties(extractor, loaded)
        elif is_sorting:
            check_sortings_equal(extractor, loaded)
            if with_properties:
                check_sorting_properties_features(extractor, loaded)

    # dict round-trip
    _compare(load_extractor_from_dict(extractor.dump_to_dict()))

    # json round-trip, default file name
    extractor.dump_to_json()
    if is_recording:
        _compare(load_extractor_from_json('spikeinterface_recording.json'))
    elif is_sorting:
        _compare(load_extractor_from_json('spikeinterface_sorting.json'))

    # json round-trip, explicit file name
    extractor.dump_to_json(file_path='test_dumping/test.json')
    _compare(load_extractor_from_json('test_dumping/test.json'))

    # pickle round-trip, default file name (properties/features included)
    extractor.dump_to_pickle()
    if is_recording:
        _compare(load_extractor_from_pickle('spikeinterface_recording.pkl'), with_properties=True)
    elif is_sorting:
        _compare(load_extractor_from_pickle('spikeinterface_sorting.pkl'), with_properties=True)

    # pickle round-trip, explicit file name (properties/features included)
    extractor.dump_to_pickle(file_path='test_dumping/test.pkl')
    _compare(load_extractor_from_pickle('test_dumping/test.pkl'), with_properties=True)

    # clean up every artifact written above
    shutil.rmtree('test_dumping')
    for leftover in ('spikeinterface_recording.json', 'spikeinterface_sorting.json',
                     'spikeinterface_recording.pkl', 'spikeinterface_sorting.pkl'):
        if Path(leftover).is_file():
            os.remove(leftover)
|
{"hexsha": "a3b5a6dde33050bb8678bddfae7d60d2645066cd", "size": 8296, "ext": "py", "lang": "Python", "max_stars_repo_path": "spikeextractors/testing.py", "max_stars_repo_name": "sronchi/spikeextractors", "max_stars_repo_head_hexsha": "2d425239a9c78b6fda469b1054df3adf0d4ce8e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spikeextractors/testing.py", "max_issues_repo_name": "sronchi/spikeextractors", "max_issues_repo_head_hexsha": "2d425239a9c78b6fda469b1054df3adf0d4ce8e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spikeextractors/testing.py", "max_forks_repo_name": "sronchi/spikeextractors", "max_forks_repo_head_hexsha": "2d425239a9c78b6fda469b1054df3adf0d4ce8e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8342541436, "max_line_length": 123, "alphanum_fraction": 0.7069672131, "include": true, "reason": "import numpy", "num_tokens": 1921}
|
# Tests for DiffEqBayes' Stan backend: recover Lotka-Volterra parameters
# from noisy simulated data via stan_inference.
using DiffEqBayes, OrdinaryDiffEq, ParameterizedFunctions,
      RecursiveArrayTools, Distributions, Test

println("One parameter case")
# Lotka-Volterra with a single free parameter `a` (true value 1.5).
f1 = @ode_def begin
    dx = a*x - x*y
    dy = -3y + x*y
end a
u0 = [1.0,1.0]
tspan = (0.0,10.0)
p = [1.5]
prob1 = ODEProblem(f1,u0,tspan,p)
sol = solve(prob1,Tsit5())
t = collect(range(1,stop=10,length=50))
# Observations: the true solution plus small Gaussian noise.
randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
data = convert(Array,randomized)
priors = [truncated(Normal(1.5,0.1),1.0,1.8)]
bayesian_result = stan_inference(prob1,t,data,priors;num_samples=300,
                                 num_warmups=500,likelihood=Normal)
# Posterior mean of theta_1 should recover the true parameter within tolerance.
@test mean(get(bayesian_result.chains,:theta_1)[1]) ≈ 1.5 atol=3e-1

# Test norecompile
# Reusing the previously compiled Stan model must give equivalent results.
bayesian_result2 = stan_inference(prob1,t,data,priors,bayesian_result.model;
                                  num_samples=300,num_warmups=500,likelihood=Normal)
@test mean(get(bayesian_result2.chains,:theta_1)[1]) ≈ 1.5 atol=3e-1

# sample_u0=true: the two initial conditions are inferred alongside the parameter.
priors = [truncated(Normal(1.,0.01),0.5,2.0),truncated(Normal(1.,0.01),0.5,2.0),truncated(Normal(1.5,0.01),1.0,2.0)]
bayesian_result = stan_inference(prob1,t,data,priors;num_samples=300,
                                 num_warmups=500,likelihood=Normal,sample_u0=true)
@test mean(get(bayesian_result.chains,:theta_1)[1]) ≈ 1. atol=3e-1
@test mean(get(bayesian_result.chains,:theta_2)[1]) ≈ 1. atol=3e-1
@test mean(get(bayesian_result.chains,:theta_3)[1]) ≈ 1.5 atol=3e-1

# Partial observation: only the first state variable is saved/observed.
sol = solve(prob1,Tsit5(),save_idxs=[1])
randomized = VectorOfArray([(sol(t[i]) + .01 * randn(1)) for i in 1:length(t)])
data = convert(Array,randomized)
priors = [truncated(Normal(1.5,0.1),0.5,2)]
bayesian_result = stan_inference(prob1,t,data,priors;num_samples=300,
                                 num_warmups=500,likelihood=Normal,save_idxs=[1])
@test mean(get(bayesian_result.chains,:theta_1)[1]) ≈ 1.5 atol=3e-1

# Partial observation combined with sampling the initial condition.
priors = [truncated(Normal(1.,0.01),0.5,2),truncated(Normal(1.5,0.01),0.5,2)]
bayesian_result = stan_inference(prob1,t,data,priors;num_samples=300,
                                 num_warmups=500,likelihood=Normal,save_idxs=[1],sample_u0=true)
@test mean(get(bayesian_result.chains,:theta_1)[1]) ≈ 1. atol=3e-1
@test mean(get(bayesian_result.chains,:theta_2)[1]) ≈ 1.5 atol=3e-1

println("Four parameter case")
# Full Lotka-Volterra with all four parameters free (true values 1.5, 1, 3, 1).
f1 = @ode_def begin
    dx = a*x - b*x*y
    dy = -c*y + d*x*y
end a b c d
u0 = [1.0,1.0]
tspan = (0.0,10.0)
p = [1.5,1.0,3.0,1.0]
prob1 = ODEProblem(f1,u0,tspan,p)
sol = solve(prob1,Tsit5())
t = collect(range(1,stop=10,length=50))
randomized = VectorOfArray([(sol(t[i]) + .01randn(2)) for i in 1:length(t)])
data = convert(Array,randomized)
priors = [truncated(Normal(1.5,0.01),0.5,2),truncated(Normal(1.0,0.01),0.5,1.5),
          truncated(Normal(3.0,0.01),0.5,4),truncated(Normal(1.0,0.01),0.5,2)]
# Fewer samples here; also passes explicit variable declarations to Stan.
bayesian_result = stan_inference(prob1,t,data,priors;num_samples=100,num_warmups=500,vars =(DiffEqBayes.StanODEData(),InverseGamma(4,1)))
@test mean(get(bayesian_result.chains,:theta_1)[1]) ≈ 1.5 atol=1e-1
@test mean(get(bayesian_result.chains,:theta_2)[1]) ≈ 1.0 atol=1e-1
@test mean(get(bayesian_result.chains,:theta_3)[1]) ≈ 3.0 atol=1e-1
@test mean(get(bayesian_result.chains,:theta_4)[1]) ≈ 1.0 atol=1e-1
|
{"hexsha": "5cdf2ffc8a98366f5472cf05bee8b313bf0d6d0e", "size": 3176, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/stan.jl", "max_stars_repo_name": "JuliaDiffEq/BayesDiffEq.jl", "max_stars_repo_head_hexsha": "3f1da73c0e999e3198bb7e59f31ef2e1771bddf4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/stan.jl", "max_issues_repo_name": "JuliaDiffEq/BayesDiffEq.jl", "max_issues_repo_head_hexsha": "3f1da73c0e999e3198bb7e59f31ef2e1771bddf4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/stan.jl", "max_forks_repo_name": "JuliaDiffEq/BayesDiffEq.jl", "max_forks_repo_head_hexsha": "3f1da73c0e999e3198bb7e59f31ef2e1771bddf4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7894736842, "max_line_length": 137, "alphanum_fraction": 0.6772670025, "num_tokens": 1177}
|
import random
import numpy as np
class Compose:
    """Apply a sequence of transforms in order, with overall probability ``prob``.

    ``None`` entries are dropped at construction time so optional transforms
    can be passed directly.
    """

    def __init__(self, transforms, prob=1.):
        kept = []
        for candidate in transforms:
            if candidate is not None:
                kept.append(candidate)
        self.transforms = kept
        self.prob = prob

    def __call__(self, **data):
        # Skip the whole pipeline when the probability roll fails.
        if random.random() >= self.prob:
            return data
        for transform in self.transforms:
            data = transform(**data)
        return data
class OneOf:
    """Pick one transform at random, weighted by each transform's own ``prob``.

    With probability ``prob`` a single transform is chosen and applied; the
    chosen transform's ``prob`` is set to 1 so it fires unconditionally.
    """

    def __init__(self, transforms, prob=.5):
        self.transforms = transforms
        self.prob = prob
        weights = [transform.prob for transform in transforms]
        total = sum(weights)
        # Normalize so the weights form a probability distribution.
        self.transforms_probs = [weight / total for weight in weights]

    def __call__(self, **data):
        if random.random() < self.prob:
            chosen = np.random.choice(self.transforms, p=self.transforms_probs)
            # Force the selected transform to always apply.
            chosen.prob = 1.
            data = chosen(**data)
        return data
class OneOrOther:
    """Apply ``first`` with probability ``prob``, otherwise apply ``second``.

    Both transforms have their ``prob`` forced to 1 so whichever branch is
    taken always fires.
    """

    def __init__(self, first, second, prob=.5):
        first.prob = 1.
        second.prob = 1.
        self.first = first
        self.second = second
        self.prob = prob

    def __call__(self, **data):
        if random.random() < self.prob:
            return self.first(**data)
        return self.second(**data)
|
{"hexsha": "15be97b75ad5f3a6d14f41614cab2a74382b3b34", "size": 1213, "ext": "py", "lang": "Python", "max_stars_repo_path": "polus-cell-nuclei-segmentation/src/dsb2018_topcoders/victor/augmentations/composition.py", "max_stars_repo_name": "nishaq503/polus-plugins-dl", "max_stars_repo_head_hexsha": "511689e82eb29a84761538144277d1be1af7aa44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-28T12:50:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-28T12:50:38.000Z", "max_issues_repo_path": "polus-cell-nuclei-segmentation/src/dsb2018_topcoders/victor/augmentations/composition.py", "max_issues_repo_name": "nishaq503/polus-plugins-dl", "max_issues_repo_head_hexsha": "511689e82eb29a84761538144277d1be1af7aa44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-09T23:22:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-09T23:22:16.000Z", "max_forks_repo_path": "polus-cell-nuclei-segmentation/src/dsb2018_topcoders/victor/augmentations/composition.py", "max_forks_repo_name": "nishaq503/polus-plugins-dl", "max_forks_repo_head_hexsha": "511689e82eb29a84761538144277d1be1af7aa44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-06-22T13:54:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T19:23:39.000Z", "avg_line_length": 28.2093023256, "max_line_length": 90, "alphanum_fraction": 0.5638911789, "include": true, "reason": "import numpy", "num_tokens": 282}
|
import torch
import numpy as np
class DeepFool():
# max_iter=50, clip_max=0.5, clip_min=-0.5
def __init__(self, max_iter, clip_max, clip_min):
self.max_iter = max_iter
self.clip_max = clip_max
self.clip_min = clip_min
def __call__(self, model, x, y):
nx = torch.unsqueeze(x, 0)
nx.requires_grad_()
eta = torch.zeros(nx.shape)
out = model(nx+eta)
n_class = out.shape[1]
py = out.max(1)[1].item()
ny = out.max(1)[1].item()
i_iter = 0
while py == ny and i_iter < self.max_iter:
out[0, py].backward(retain_graph=True)
grad_np = nx.grad.data.clone()
value_l = np.inf
ri = None
for i in range(n_class):
if i == py:
continue
nx.grad.data.zero_()
out[0, i].backward(retain_graph=True)
grad_i = nx.grad.data.clone()
wi = grad_i - grad_np
fi = out[0, i] - out[0, py]
value_i = np.abs(fi.item()) / np.linalg.norm(wi.numpy().flatten())
if value_i < value_l:
ri = value_i/np.linalg.norm(wi.numpy().flatten()) * wi
eta += ri.clone()
nx.grad.data.zero_()
out = model(nx+eta)
py = out.max(1)[1].item()
i_iter += 1
x_adv = nx + eta
x_adv.clamp_(self.clip_min, self.clip_max)
x_adv.squeeze_(0)
return x_adv.detach()
|
{"hexsha": "54ff33959a75dcb3d75d6916bbb1b9cbbd012c43", "size": 1553, "ext": "py", "lang": "Python", "max_stars_repo_path": "plexiglass/adversarial/deepfool.py", "max_stars_repo_name": "jkartzman/plexiglass", "max_stars_repo_head_hexsha": "257e3305e31f032c26300b0a9c78260ccd251cd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plexiglass/adversarial/deepfool.py", "max_issues_repo_name": "jkartzman/plexiglass", "max_issues_repo_head_hexsha": "257e3305e31f032c26300b0a9c78260ccd251cd6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plexiglass/adversarial/deepfool.py", "max_forks_repo_name": "jkartzman/plexiglass", "max_forks_repo_head_hexsha": "257e3305e31f032c26300b0a9c78260ccd251cd6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7592592593, "max_line_length": 82, "alphanum_fraction": 0.4919510625, "include": true, "reason": "import numpy", "num_tokens": 395}
|
# Tests for PositiveFactorizations: cholesky/ldlt/eigen with the `Positive`
# tag coerce indefinite matrices into positive-definite factorizations.
using PositiveFactorizations
using LinearAlgebra, Test

@testset "PositiveFactorizations" begin
    # Exercise both the non-pivoted and pivoted code paths.
    for pivot in (Val{false}, Val{true})
        # Identity: already positive-definite, factorization reproduces it.
        A = [1 0; 0 1]
        F = cholesky(Positive, A, pivot)
        @test Matrix(F) ≈ A
        F, d = ldlt(Positive, A, pivot)
        @test Matrix(F) ≈ A
        @test d == [1,1]
        # Indefinite diagonal: the negative direction is flipped (d records signs).
        A = [1 0; 0 -1]
        F = cholesky(Positive, A, pivot)
        @test Matrix(F) ≈ Matrix(1.0I,2,2)
        F, d = ldlt(Positive, A, pivot)
        @test Matrix(F) ≈ Matrix(1.0I,2,2)
        @test d == [1,-1]
        # Indefinite 2x2: the expected result depends on the pivoting strategy.
        A = [-1 0.5; 0.5 4]
        target = pivot == Val{false} ? [1 -0.5; -0.5 4.5] : [1.125 0.5; 0.5 4]
        dtarget = pivot == Val{false} ? [-1,1] : [1,-1]
        F = cholesky(Positive, A, pivot)
        @test Matrix(F) ≈ target
        # Scaling the input scales the factorization's reconstruction.
        F = cholesky(Positive, 10*A, pivot)
        @test Matrix(F) ≈ 10*target
        F, d = ldlt(Positive, A, pivot)
        @test Matrix(F) ≈ target
        @test d == dtarget
        # Zero diagonal: both directions are treated as zero (d == [0,0]).
        A = [0 1; 1 0]
        F = cholesky(Positive, A, pivot)
        @test Matrix(F) ≈ Matrix(1.0I,2,2)
        F, d = ldlt(Positive, A, pivot)
        @test Matrix(F) ≈ Matrix(1.0I,2,2)
        @test d == [0,0]
        # A random positive-definite matrix round-trips through the factorization.
        A = rand(201,200); A = A'*A
        F = cholesky(Positive, A, pivot)
        @test Matrix(F) ≈ A
        F, d = ldlt(Positive, A, pivot)
        @test Matrix(F) ≈ A
        @test all(d .== 1)
        # factorization of (not too small) BigFloat matrices passes
        a = BigFloat.(1:15); A = a * a'
        F = cholesky(Positive, A, pivot)
        F, d = ldlt(Positive, A, pivot)
    end
    # eigen(Positive, ...) replaces negative eigenvalues by their absolute value.
    A = [1 0; 0 -2]
    F = eigen(Positive, A)
    # TODO: Use this when we drop v0.4 support
    # @test Matrix(F) ≈ abs.(A)
    absA = abs.(A)
    absA = convert(Array{Int}, absA) # v0.4 fix
    @test Matrix(F) ≈ absA
    # Zero eigenvalues are lifted so the result is positive-definite.
    A = [1 0; 0 0]
    F = eigen(Positive, A)
    @test Matrix(F) ≈ Matrix(1.0I,2,2)
    # Test whether necessary matrix operations are supported for SubArrays
    n = PositiveFactorizations.default_blocksize(Float64)
    B = rand(n+3,n+4); C = rand(size(B)...); A = B'*B - C'*C
    ldlt!(Positive, A)
end # @testset
|
{"hexsha": "801b13922a8988e0356dff60183e15a9e89be478", "size": 1900, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/PositiveFactorizations.jl-85a6dd25-e78a-55b7-8502-1745935b8125", "max_stars_repo_head_hexsha": "12fb36039fa463d4e4b2a297154baddb8b3f8ce1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/PositiveFactorizations.jl-85a6dd25-e78a-55b7-8502-1745935b8125", "max_issues_repo_head_hexsha": "12fb36039fa463d4e4b2a297154baddb8b3f8ce1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/PositiveFactorizations.jl-85a6dd25-e78a-55b7-8502-1745935b8125", "max_forks_repo_head_hexsha": "12fb36039fa463d4e4b2a297154baddb8b3f8ce1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5362318841, "max_line_length": 74, "alphanum_fraction": 0.5773684211, "num_tokens": 757}
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_basics:
#
# The basics
# ==========
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_intro:
#
# Creating figures
# ----------------
#
# ProPlot works by creating a `proplot.figure.Figure` subclass of the
# matplotlib figure class `~matplotlib.figure.Figure`, and a `proplot.axes.Axes`
# subclass of the matplotlib axes class `~matplotlib.axes.Axes`.
# All plotting in ProPlot begins by generating
# an instance of the new figure class filled with instances of the new
# axes classes using the `~proplot.ui.subplots` command, which is modeled
# after `matplotlib.pyplot.subplots`.
# ProPlot's `~proplot.ui.subplots` command can be used as follows:
#
# * Without any arguments, `~proplot.ui.subplots` returns a figure with a
# single subplot.
# * With `ncols` or `nrows`, `~proplot.ui.subplots` returns a
# figure with a simple grid of subplots.
# * With `array`, `~proplot.ui.subplots` returns an
# *arbitrarily complex* grid of subplots. This is a 2D array representing
# a "picture" of the subplot layout, where each unique integer indicates a
# `~matplotlib.gridspec.GridSpec` slot that is occupied by the corresponding
# subplot and ``0`` indicates an empty space.
#
# In the below examples, we create subplot grids with `~proplot.ui.subplots`
# and modify the axes labels. See the :ref:`formatting guide <ug_format>`
# and :ref:`subplots container <ug_container>` sections for details.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. note::
#
# ProPlot figure backgrounds are only gray when displayed by the
# `matplotlib backend <https://matplotlib.org/faq/usage_faq#what-is-a-backend>`__
# -- the default background color is white when the figure is saved. This is done
# by setting :rcraw:`figure.facecolor` to gray, in order to improve contrast
# when working with figures.
# ProPlot also makes the default saved figure background *transparent*
# by setting :rcraw:`savefig.transparent` to ``True``
# and changes the default :rcraw:`savefig.format` from PNG to PDF
# for the following reasons:
#
# #. Vector graphic formats are infinitely scalable.
# #. Vector graphic formats are preferred by academic journals.
# #. Most academic journals accept PDF figures alongside the traditional
# `EPS <https://en.wikipedia.org/wiki/Encapsulated_PostScript>`__ format.
# #. The EPS format does not support transparent graphic elements.
#
# In case you *do* need raster graphics, ProPlot sets the default
# :rcraw:`savefig.dpi` to 1200 dots per inch, which is
# `recommended by most journals <https://www.pnas.org/page/authors/format>`__
# as the minimum resolution for rasterized figures containing lines and text.
# See the :ref:`configuration section <ug_proplotrc>` for how to change
# any of these settings.
# %%
# Generate sample data
import numpy as np

# Fixed seed keeps the documentation figures reproducible across builds.
state = np.random.RandomState(51423)
data = 2 * (state.rand(100, 5) - 0.5).cumsum(axis=0)  # five random-walk series

# %%
# Single subplot
import proplot as plot

fig, ax = plot.subplots()
ax.plot(data, lw=2)
ax.format(suptitle='Single subplot', xlabel='x axis', ylabel='y axis')
# %%
# Simple subplot grid
import proplot as plot

fig, axs = plot.subplots(ncols=2)
axs[0].plot(data, lw=2)
axs[0].format(xticks=20, xtickminor=False)
# Calling format on the container applies the settings to every subplot.
axs.format(
    suptitle='Simple subplot grid', title='Title',
    xlabel='x axis', ylabel='y axis'
)

# %%
# Complex grid
import proplot as plot

array = [  # the "picture" (0 == nothing, 1 == subplot A, 2 == subplot B, etc.)
    [1, 1, 2, 2],
    [0, 3, 3, 0],
]
fig, axs = plot.subplots(array, axwidth=1.8)
axs.format(
    abc=True, abcloc='ul', suptitle='Complex subplot grid',
    xlabel='xlabel', ylabel='ylabel'
)
axs[2].plot(data, lw=2)

# %%
# Really complex grid
import proplot as plot

array = [  # the "picture" (1 == subplot A, 2 == subplot B, etc.)
    [1, 1, 2],
    [1, 1, 6],
    [3, 4, 4],
    [3, 5, 5],
]
fig, axs = plot.subplots(array, width=5, span=False)
axs.format(
    suptitle='Really complex subplot grid',
    xlabel='xlabel', ylabel='ylabel', abc=True
)
axs[0].plot(data, lw=2)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_plots:
#
# Plotting data
# -------------
#
# Matplotlib has
# `two different APIs <https://matplotlib.org/3.2.1/api/index.html>`__:
# an object-oriented API and a MATLAB-style
# `~matplotlib.pyplot` API (which uses the object-oriented API internally).
# Plotting in ProPlot is just like plotting in matplotlib with
# the *object-oriented* API. Rather than creating
# a brand new interface, ProPlot simply builds upon the existing matplotlib
# constructs of the `~matplotlib.axes.Axes` and the `~matplotlib.figure.Figure`
# by adding new commands and new options to existing commands, without changing
# the usage or syntax. This means a shallow learning curve for the average
# matplotlib user.
#
# In the below example, we create a 4-panel figure with the familiar matplotlib
# commands `~matplotlib.axes.Axes.plot`, `~matplotlib.axes.Axes.scatter`,
# `~matplotlib.axes.Axes.pcolormesh`, and `~matplotlib.axes.Axes.contourf`.
# See the :ref:`1d plotting <ug_1dplots>` and :ref:`2d plotting <ug_2dplots>`
# sections for details on the plotting features added by ProPlot.
# %%
import proplot as plot
import numpy as np

# Sample data
N = 20
state = np.random.RandomState(51423)  # fixed seed for reproducible docs
data = (state.rand(N, N) - 0.5).cumsum(axis=0).cumsum(axis=1)

# Example plots: the four familiar matplotlib commands, called on ProPlot axes.
cycle = plot.Cycle('greys', left=0.2, N=5)
fig, axs = plot.subplots(ncols=2, nrows=2, share=0, width=5)
axs[0].plot(data[:, :5], linewidth=2, linestyle='--', cycle=cycle)
axs[1].scatter(data[:, :5], marker='x', cycle=cycle)
axs[2].pcolormesh(data, cmap='greys')
axs[3].contourf(data, cmap='greys')
axs.format(abc=True, xlabel='xlabel', ylabel='ylabel', suptitle='Quick plotting demo')
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_format:
#
# Formatting plots
# ----------------
#
# Every `~matplotlib.axes.Axes` returned by `~proplot.ui.subplots` has a
# ``format`` method. This is your one-stop-shop for changing axes settings.
# Keyword arguments passed to ``format`` are interpreted as follows:
#
# 1. Any keyword matching the name of an `~proplot.config.rc` setting
# is used to update the axes. If the name has "dots", you can omit them
# (e.g. ``titleloc='left'`` to change the :rcraw:`title.loc` property).
# See the :ref:`configuration section <ug_config>` for details.
# 2. Valid keywords arguments are passed to
# `proplot.axes.CartesianAxes.format`, `proplot.axes.PolarAxes.format`, or
# `proplot.axes.GeoAxes.format`. These change settings that are
# specific to the axes type. For example:
# * To change the *x* axis bounds on a `~proplot.axes.CartesianAxes`,
# use e.g. ``xlim=(0, 5)``.
# * To change the radial bounds on a `~proplot.axes.PolarAxes`, use e.g.
# ``rlim=(0, 10)``.
# * To change the meridional bounds on a `~proplot.axes.GeoAxes`,
# use e.g. ``lonlim=(-90, 0)``.
#
# .. rst-class:: dummy-line-break-class
#
# 3. Remaining keyword arguments are passed to the base `proplot.axes.Axes.format`
# method. `~proplot.axes.Axes` is the base class for all other axes classes.
# This changes things that are the same for all axes types, like titles and
# a-b-c subplot labels (e.g. ``title='Title'``).
#
# The ``format`` methods let you use simple shorthands for changing all kinds
# of settings at once, instead of one-liner setter methods like
# ``ax.set_title()`` and ``ax.set_xlabel()``. They are also integrated with
# the `~proplot.constructor.Locator`, `~proplot.constructor.Formatter`,
# and `~proplot.constructor.Scale` constructor functions (see the
# :ref:`Cartesian axis settings <ug_cartesian>` section for details).
#
# The below example shows the many different keyword arguments accepted by
# ``format``, and demonstrates how ``format`` can be used to succinctly and
# efficiently customize your plots.
# %%
import proplot as plot
import numpy as np

fig, axs = plot.subplots(ncols=2, nrows=2, share=0, tight=True, axwidth=2)
state = np.random.RandomState(51423)  # fixed seed for reproducible docs
N = 60
x = np.linspace(1, 10, N)
y = (state.rand(N, 5) - 0.5).cumsum(axis=0)
axs[0].plot(x, y, linewidth=1.5)
# One format() call configures titles, labels, axis scales, ticks, and grids.
axs.format(
    suptitle='Format command demo',
    abc=True, abcloc='ul', abcstyle='A.',
    title='Main', ltitle='Left', rtitle='Right',  # different titles
    urtitle='Title A', lltitle='Title B', lrtitle='Title C',  # extra titles
    collabels=['Column label 1', 'Column label 2'],
    rowlabels=['Row label 1', 'Row label 2'],
    xlabel='x-axis', ylabel='y-axis',
    xscale='log',
    xlim=(1, 10), xticks=1,
    ylim=(-3, 3), yticks=plot.arange(-3, 3),
    yticklabels=('a', 'bb', 'c', 'dd', 'e', 'ff', 'g'),
    ytickloc='both', yticklabelloc='both',
    xtickdir='inout', xtickminor=False, ygridminor=True,
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_rc:
#
# Changing rc settings
# --------------------
#
# A special object named `~proplot.config.rc` is created whenever you import
# ProPlot. `~proplot.config.rc` is similar to the matplotlib
# `~matplotlib.rcParams` dictionary, but can be used to change both
# `matplotlib settings <https://matplotlib.org/users/customizing.html>`__ and
# :ref:`ProPlot settings <rc_proplot>`. `~proplot.config.rc` also
# provides a ``style`` parameter that can be used to switch between
# `matplotlib stylesheets\
# <https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html>`__.
# See the :ref:`configuration section <ug_config>` for details.
#
# To modify a setting for just one subplot, you can pass it to the
# `~proplot.axes.Axes` `~proplot.axes.Axes.format` method. To temporarily
# modify setting(s) for a block of code, use
# `~proplot.config.RcConfigurator.context`. To modify setting(s) for the
# entire python session, just assign it to the `~proplot.config.rc` object or
# use `~proplot.config.RcConfigurator.update`. To reset everything to the
# default state, use `~proplot.config.RcConfigurator.reset`. See the below
# example.
# %%
import proplot as plot
import numpy as np

# Update global settings in several different ways
plot.rc.cycle = 'colorblind'
plot.rc.color = 'gray6'
plot.rc.update({'fontname': 'Source Sans Pro', 'fontsize': 11})
plot.rc['figure.facecolor'] = 'gray3'
plot.rc.axesfacecolor = 'gray4'
# plot.rc.save()  # save the current settings to ~/.proplotrc

# Apply settings to figure with context()
# Settings passed to context() are active only inside the with-block.
with plot.rc.context({'suptitle.size': 13}, toplabelcolor='gray6', linewidth=1.5):
    fig, axs = plot.subplots(ncols=2, aspect=1, width=6, span=False, sharey=2)

# Plot lines
N, M = 100, 6
state = np.random.RandomState(51423)  # fixed seed for reproducible docs
values = np.arange(1, M + 1)
for i, ax in enumerate(axs):
    data = np.cumsum(state.rand(N, M) - 0.5, axis=0)
    lines = ax.plot(data, linewidth=3, cycle='Grays')

# Apply settings to axes with format()
axs.format(
    grid=False, xlabel='x label', ylabel='y label',
    collabels=['Column label 1', 'Column label 2'],
    suptitle='Rc settings demo',
    suptitlecolor='gray7',
    abc=True, abcloc='l', abcstyle='A)',
    title='Title', titleloc='r', titlecolor='gray7'
)
# A secondary y-axis demonstrates per-axes formatting.
ay = axs[-1].twinx()
ay.format(ycolor='red', linewidth=1.5, ylabel='secondary axis')
ay.plot((state.rand(100) - 0.2).cumsum(), color='r', lw=3)

# Reset persistent modifications from head of cell
plot.rc.reset()
# %%
import proplot as plot
import numpy as np

# plot.rc.style = 'style'  # set the style everywhere

# Set up figure
styles = ('ggplot', 'seaborn', '538', 'bmh')
state = np.random.RandomState(51423)  # fixed seed for reproducible docs
data = state.rand(10, 5)
fig, axs = plot.subplots(ncols=2, nrows=2, span=False, share=False)

# Apply different styles to different axes with format()
axs.format(suptitle='Stylesheets demo')
for ax, style in zip(axs, styles):
    ax.format(style=style, xlabel='xlabel', ylabel='ylabel', title=style)
    ax.plot(data, linewidth=3)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_container:
#
# Subplots containers
# -------------------
#
# Instead of an `~numpy.ndarray` of axes, `~proplot.ui.subplots` returns a
# `~proplot.ui.SubplotsContainer` instance. This container behaves like an
# `~matplotlib.axes.Axes` object when it contains just one axes, and behaves
# like a list otherwise. It supports both 1D indexing (e.g. ``axs[1]``) and
# 2D indexing (e.g. ``axs[0, 1]``), and is row-major by default. Slicing a
# `~proplot.ui.SubplotsContainer` returns another container (e.g. ``axs[:, 0]``),
# and `~proplot.axes.Axes` methods can be called simultaneously for all axes in the
# container by calling the method from the container (e.g. ``axs.format(abc=True)``).
#
# In the below example, the `~proplot.ui.SubplotsContainer` returned by
# `~proplot.ui.subplots` is used to customize several axes at once with
# `proplot.axes.Axes.format`.
# %%
import proplot as plot
import numpy as np
state = np.random.RandomState(51423)  # fixed seed for reproducible demo data
fig, axs = plot.subplots(ncols=4, nrows=4, axwidth=1.2)
# Format every subplot at once by calling format() on the container
axs.format(
    xlabel='xlabel', ylabel='ylabel', suptitle='SubplotsContainer demo',
    grid=False, xlim=(0, 50), ylim=(-4, 4)
)
# Various ways to select subplots in the container
axs[:, 0].format(facecolor='blush', color='gray7', linewidth=1)  # first column
axs[0, :].format(facecolor='sky blue', color='gray7', linewidth=1)  # first row
axs[0].format(color='black', facecolor='gray5', linewidth=1.4)  # single subplot
axs[1:, 1:].format(facecolor='gray1')  # 2D slice of the interior subplots
for ax in axs[1:, 1:]:
    ax.plot((state.rand(50, 5) - 0.5).cumsum(axis=0), cycle='Grays', lw=2)
|
{"hexsha": "bfcdc2e0c93fb09bcaa2ed94d3130ab10d310da9", "size": 13745, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/basics.py", "max_stars_repo_name": "zmoon92/proplot", "max_stars_repo_head_hexsha": "2c6f7af8a044567bb9409d3f67d844bac05c7d14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-30T00:34:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-30T00:34:11.000Z", "max_issues_repo_path": "docs/basics.py", "max_issues_repo_name": "zmoon92/proplot", "max_issues_repo_head_hexsha": "2c6f7af8a044567bb9409d3f67d844bac05c7d14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/basics.py", "max_forks_repo_name": "zmoon92/proplot", "max_forks_repo_head_hexsha": "2c6f7af8a044567bb9409d3f67d844bac05c7d14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1486486486, "max_line_length": 86, "alphanum_fraction": 0.6926154965, "include": true, "reason": "import numpy", "num_tokens": 3953}
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import pytest
from unittest.mock import patch
from intake import open_catalog
import dask.dataframe as dd
@pytest.fixture
def local_prefix_cat():
    """Open the local-prefix test catalog that lives next to this module."""
    catalog_yaml = os.path.join(os.path.dirname(__file__), 'data', 'local.catalog.yaml')
    return open_catalog(catalog_yaml)
def test_local_prefix_catalog(local_prefix_cat):
    """The local mogreps_g manifest loads completely into a DataFrame."""
    frame = local_prefix_cat['mogreps_g_manifest'].get().read()
    assert isinstance(frame, pd.DataFrame)
    assert len(frame) == 622007
def test_s3_prefix_catalog():
    """Read the manifest through mocked S3/dask layers backed by local files.

    ``s3fs.S3FileSystem`` and ``dask.dataframe.read_csv`` are patched so that
    every ``s3://`` URL resolves into this test's local ``data`` directory.
    """
    # Resolve paths from this file's location instead of the hard-coded
    # './tests/data/' prefix, so the test passes regardless of the working
    # directory pytest is invoked from (the local-catalog test already
    # resolves its paths this way).
    path = os.path.dirname(__file__)
    data_dir = os.path.join(path, 'data')
    orig_read_csv = dd.read_csv

    def _s3_url_to_local(url):
        # Map 's3://<bucket>/<key>' onto '<data_dir>/<bucket>/<key>'.
        return url.replace('s3://', data_dir + '/')

    def read_csv(url, *args, **kwargs):
        return orig_read_csv(_s3_url_to_local(url), *args, **kwargs)

    def opener(url, mode='r'):
        return open(_s3_url_to_local(url), mode)

    with patch('s3fs.S3FileSystem') as mockopen, patch('dask.dataframe.read_csv') as mock_read_csv:
        # patch('intake_s3_manifests.s3_manifest') as s3_manifest,
        mockopen.return_value.open.side_effect = opener
        mock_read_csv.side_effect = read_csv
        s3_prefix_cat = open_catalog(os.path.join(path, 'data', 's3.catalog.yaml'))
        source = s3_prefix_cat['mogreps_g_manifest'].get()
        ds = source.read()
        assert isinstance(ds, pd.DataFrame)
        assert len(ds) == 622007
|
{"hexsha": "c467ca5bb140386c9f03a7f6e80576ceb415b2d4", "size": 1434, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_catalog.py", "max_stars_repo_name": "hamed2005/intake-azure-blob-storage", "max_stars_repo_head_hexsha": "314f67c9c46d8fab69c2deb5d643417989519f54", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-23T09:09:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T09:09:10.000Z", "max_issues_repo_path": "tests/test_catalog.py", "max_issues_repo_name": "hamed2005/intake-azure-blob-storage", "max_issues_repo_head_hexsha": "314f67c9c46d8fab69c2deb5d643417989519f54", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-03T13:48:13.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-25T15:03:24.000Z", "max_forks_repo_path": "tests/test_catalog.py", "max_forks_repo_name": "hamed2005/intake-azure-blob-storage", "max_forks_repo_head_hexsha": "314f67c9c46d8fab69c2deb5d643417989519f54", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-07-30T09:39:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T21:48:09.000Z", "avg_line_length": 29.875, "max_line_length": 99, "alphanum_fraction": 0.690376569, "include": true, "reason": "import numpy", "num_tokens": 361}
|
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interactive, interactive_output, fixed, HBox, VBox
import ipywidgets as widgets
def get_interactive_logistic_regression(X, y):
    """Interactive 2D linear decision-boundary demo.

    Scatter-plots the data and overlays the weight vector, its dashed span,
    the orthogonal decision boundary and class shading.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 2)
        Two-feature inputs; used for the scatter plot and the axis limits.
    y : array-like of shape (n_samples,)
        Labels used to color the scatter points.

    Returns
    -------
    ipywidgets interactive widget with sliders for w1, w2 and bias.
    """
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(1, 1, 1)
    # Axis limits with a 10% margin around the data.
    xmin = X.min(axis=0)
    xmax = X.max(axis=0)
    xrange_ = xmax - xmin
    lim_x = (xmin[0] - 0.1 * xrange_[0], xmax[0] + 0.1 * xrange_[0])
    ax.set_xlim(lim_x[0], lim_x[1])
    ax.set_ylim(xmin[1] - 0.1 * xrange_[1], xmax[1] + 0.1 * xrange_[1])
    ax.set_xlabel(r"$x_1$")
    ax.set_ylabel(r"$x_2$")
    ax.set_aspect("equal")
    scatter_handle = plt.scatter(X[:, 0], X[:, 1], c=y)
    w1 = 0.1
    w2 = 1.0
    # Weight vector, its dashed span, the decision boundary and the tip marker.
    projection_vector, = ax.plot([0, w1], [0, w2], color="blue")
    projection_vector2, = ax.plot([-w1, w1], [-w2, w2], linestyle="--", color="blue")
    decision_boundary, = ax.plot([0, -w2], [0, w1], color="orange")
    vector_tip, = ax.plot(w1, w2, marker="x", markersize=15, color="blue")
    ax.plot(0, 0, markersize=10, color="red", marker="o")
    xx = np.linspace(lim_x[0], lim_x[1], num=100)
    yy = -(w1/w2) * xx
    top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="purple", alpha=0.1)
    bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="yellow", alpha=0.1)

    def update(w1=0.1, w2=1.0, bias=0.0):
        """Redraw boundary, weight vector and shading for new parameters."""
        # Line2D.set_data requires sequences (scalar args were deprecated in
        # Matplotlib 3.5 and removed in 3.7).
        vector_tip.set_data([w1], [w2])
        # Nudge zero weights to keep the divisions below finite.
        if not w1:
            w1 = 0.0001
        if not w2:
            w2 = 0.0001
        w = np.array([w1, w2])
        projection_vector.set_data([0, w1], [0, w2])
        decision_boundary.set_data([lim_x[0], lim_x[1]], [- w1/w2 * lim_x[0] + bias/w2, - w1/w2 * lim_x[1] + bias/w2])
        # Boundary line y = -(w1/w2) x + bias/w2 (w2 is nonzero after the
        # nudge above, so the old `if not w2 == 0` guard was always true).
        yy = -(w1/w2) * xx + bias/w2
        # ax.collections became an immutable view in Matplotlib >= 3.7;
        # remove the stale fillers (everything after the initial scatter)
        # instead of reassigning the list.
        for filler in list(ax.collections)[1:]:
            filler.remove()
        if w2 > 0:
            top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="purple", alpha=0.1)
            bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="yellow", alpha=0.1)
        else:
            top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="yellow", alpha=0.1)
            bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="purple", alpha=0.1)
        # Rescale the dashed span to a fixed display length.
        w /= np.linalg.norm(w)
        w *= 5
        w1, w2 = w
        projection_vector2.set_data([-w1, w1], [-w2, w2])
        fig.canvas.draw_idle()

    interactive_plot = interactive(update, w1=(-2.0, 2.0), w2=(-2.0, 2.0), bias=(-3.0, 3.0))
    return interactive_plot
def get_interactive_logistic_regression_advanced(X, y, X_test=None, y_test=None):
    """Interactive connectionist-neuron demo with a separate widget panel.

    Creates a matplotlib figure plus an ipywidgets UI: sliders for the
    weights (w1, w2) and threshold, sliders moving a single test point
    (x1, x2), and checkboxes toggling the train/test scatter, the decision
    boundary and the prediction shading.  Captions show the current net
    input ``h`` and the resulting prediction.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 2)
        Training inputs; used for the scatter plot and the axis limits.
    y : array-like of shape (n_samples,)
        Training labels (color the scatter points).
    X_test, y_test : optional
        Held-out data; the "Show test data" checkbox only has an effect
        when both are provided.

    Returns
    -------
    tuple
        ``(interactive_output, ui)`` -- the output widget bound to the
        update callback and the HBox of controls to display next to it.
    """
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(1, 1, 1)
    # Initial parameters and the prediction for the initial test point.
    w1 = 0.5
    w2 = 0.5
    bias = 0.0
    x1 = 0.0
    x2 = 0.0
    h = x1*w1 + x2*w2 - bias
    y_hat = int(h >= 0)
    w1_slider = widgets.FloatSlider(
        value=0.5,
        min=-5.0,
        max=5.0,
        step=0.1,
        description="w1",
        disabled=False,
        continuous_update=False,
        readout=True,
        readout_format='.1f',
    )
    # NOTE(review): this slider starts at -1.0 while the local w2 above is
    # 0.5; the first callback run refreshes the captions, so the mismatch
    # only affects the pre-render values.
    w2_slider = widgets.FloatSlider(
        value=-1.0,
        min=-5.0,
        max=5.0,
        step=0.1,
        description="w2",
        disabled=False,
        continuous_update=False,
        readout=True,
        readout_format='.1f',
    )
    bias_slider = widgets.FloatSlider(
        value=0.0,
        min=-5.0,
        max=5.0,
        step=0.1,
        description=r'$\theta$',
        disabled=False,
        continuous_update=False,
        readout=True,
        readout_format='.1f',
    )
    x1_slider = widgets.FloatSlider(
        value=0.0,
        min=-2.0,
        max=2.0,
        step=0.1,
        description="x1",
        disabled=False,
        continuous_update=True,
        readout=True,
        readout_format='.1f',
    )
    x2_slider = widgets.FloatSlider(
        value=0.0,
        min=-2.0,
        max=2.0,
        step=0.1,
        description="x2",
        disabled=False,
        continuous_update=True,
        readout=True,
        readout_format='.1f',
    )
    show_train = widgets.Checkbox(
        value=False,
        description='Show train data',
        disabled=False
    )
    show_test = widgets.Checkbox(
        value=False,
        description='Show test data',
        disabled=False
    )
    show_boundary = widgets.Checkbox(
        value=False,
        description='Show decision boundary',
        disabled=False
    )
    show_prediction = widgets.Checkbox(
        value=False,
        description='Show prediction',
        disabled=False
    )
    # The two widgets below are created but not yet wired into the UI or
    # the callback -- kept for a planned extension.
    show_weight_vector = widgets.Checkbox(
        value=False,
        description='Show weight vector',
        disabled=False
    )
    show_h = widgets.Checkbox(
        value=False,
        description=r'Show $h$',
        disabled=False
    )
    # Captions: symbolic formula plus its current numeric instantiation.
    caption1 = widgets.Label(
        value=r"$h = w_1 \cdot x_1 + w_2 \cdot x_2 - \theta$"
    )
    caption2 = widgets.Label(
        value=f"{format(h, '.3f')} = ({w1}) * ({x1}) + ({w2}) * ({x2}) - ({bias})"
    )
    caption3 = widgets.Label(
        value=r"$\hat{y} = f(h)$"
    )
    caption4 = widgets.Label(
        value=f"{y_hat} = f({h})"
    )
    box1 = VBox(
        children=[x1_slider, x2_slider, show_train, show_test]
    )
    box2 = VBox(
        children=[w1_slider, w2_slider, bias_slider, show_boundary, show_prediction]
    )
    box3 = VBox(
        children=[caption1, caption2, caption3, caption4]
    )
    ui = HBox(
        children=[box3, box2, box1]
    )
    # Axis limits with a 10% margin around the training data.
    xmin = X.min(axis=0)
    xmax = X.max(axis=0)
    xrange_ = xmax - xmin
    lim_x = (xmin[0] - 0.1 * xrange_[0], xmax[0] + 0.1 * xrange_[0])
    ax.set_xlim(lim_x[0], lim_x[1])
    ax.set_ylim(xmin[1] - 0.1 * xrange_[1], xmax[1] + 0.1 * xrange_[1])
    ax.set_xlabel(r"$x_1$")
    ax.set_ylabel(r"$x_2$")
    ax.set_aspect("equal")
    # All artists start fully transparent; the checkboxes toggle their alpha.
    training_data_handle = plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.0, marker="x", s=15)
    has_test = X_test is not None and y_test is not None
    if has_test:
        test_data_handle = plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, alpha=0.0, marker="D", s=15)
    # Movable test point: black outline plus one colored dot per class;
    # only the dot matching the current prediction is visible.
    test_point_handle1 = plt.scatter([x1], [x2], s=150, linewidth=2, facecolors='none', edgecolors='black')
    test_point_handle2 = plt.scatter([x1], [x2], s=50, edgecolors='none', alpha=(y_hat==0), c=0.0, vmin=0.0, vmax=1.0)
    test_point_handle3 = plt.scatter([x1], [x2], s=50, edgecolors='none', alpha=(y_hat==1), c=1.0, vmin=0.0, vmax=1.0)
    decision_boundary, = ax.plot([0, -w2], [0, w1], color="red", alpha=0.0)
    xx = np.linspace(lim_x[0], lim_x[1], num=100)
    yy = -(w1/w2) * xx
    top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="purple", alpha=0.0)
    bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="yellow", alpha=0.0)

    def update(w1=0.5, w2=0.5, bias=0.0, x1=0.0, x2=0.0, show_train=False, show_test=False, show_boundary=False, show_prediction=False):
        """Redraw all artists from the current slider/checkbox state."""
        h = x1*w1 + x2*w2 - bias
        y_hat = int(h >= 0)
        # Toggle scatter visibility via alpha.
        training_data_handle.set_alpha(0.75 if show_train else 0.0)
        if has_test:
            test_data_handle.set_alpha(0.75 if show_test else 0.0)
        # Move the test point and show only the dot matching the prediction.
        test_point_handle1.set_offsets([x1, x2])
        test_point_handle2.set_offsets([x1, x2])
        test_point_handle3.set_offsets([x1, x2])
        test_point_handle2.set_alpha((y_hat==0))
        test_point_handle3.set_alpha((y_hat==1))
        caption2.value = f"{format(round(h, 3), '.3f')} = ({round(w1, 2)}) * ({round(x1, 2)}) + ({round(w2, 2)}) * ({round(x2, 2)}) - ({round(bias, 2)})"
        caption4.value = f"{y_hat} = f({round(h, 2)})"
        # Decision boundary w1*x1 + w2*x2 = bias: parameterize by x unless it
        # is vertical (w2 == 0) or undefined (w1 == w2 == 0).
        if w2:
            decision_boundary.set_data([lim_x[0], lim_x[1]], [- w1/w2 * lim_x[0] + bias/w2, - w1/w2 * lim_x[1] + bias/w2])
        else:
            if w1:
                decision_boundary.set_data([bias/w1, bias/w1], [lim_x[0], lim_x[1]])
            else:
                decision_boundary.set_data([], [])
        decision_boundary.set_alpha(1.0 if show_boundary else 0.0)
        # ax.collections became an immutable view in Matplotlib >= 3.7;
        # remove the two stale fill_between artists (always the most recently
        # added collections) instead of reassigning the list.
        for filler in list(ax.collections)[-2:]:
            filler.remove()
        alpha = 0.1 if show_prediction else 0.0
        if w2:
            yy = -(w1/w2) * xx + bias/w2
            if w2 > 0:
                top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="purple", alpha=alpha)
                bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="yellow", alpha=alpha)
            else:
                top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="yellow", alpha=alpha)
                bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="purple", alpha=alpha)
        else:
            if w1:
                # Vertical boundary at x = bias / w1.
                if w1 > 0:
                    top_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=-10, x2=bias/w1, color="purple", alpha=alpha)
                    bottom_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=bias/w1, x2=10, color="yellow", alpha=alpha)
                else:
                    top_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=-10, x2=bias/w1, color="yellow", alpha=alpha)
                    bottom_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=bias/w1, x2=10, color="purple", alpha=alpha)
            else:
                # Degenerate neuron: the whole plane gets one class, decided
                # by the sign of the threshold.
                # NOTE(review): this branch hard-codes alpha 0.1/0.0 and so
                # ignores the show_prediction checkbox -- confirm if intended.
                if bias > 0:
                    top_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=-10, x2=10, color="purple", alpha=0.1)
                    bottom_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=-10, x2=10, color="yellow", alpha=0.0)
                else:
                    top_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=-10, x2=10, color="purple", alpha=0.0)
                    bottom_filler = ax.fill_betweenx([lim_x[0], lim_x[1]], x1=-10, x2=10, color="yellow", alpha=0.1)
        fig.canvas.draw_idle()

    interactive_plot = interactive_output(
        update,
        {"w1": w1_slider,
         "w2": w2_slider,
         "bias": bias_slider,
         "x1": x1_slider,
         "x2": x2_slider,
         "show_train": show_train,
         "show_test": show_test,
         "show_boundary": show_boundary,
         "show_prediction": show_prediction
         }
    )
    return interactive_plot, ui
def stepwise(h):
    """Heaviside step activation: 1 where ``h >= 0``, else 0.

    Generalized to accept any array-like (or scalar), not just ndarrays:
    the input is passed through ``np.asarray`` first, which is a no-op for
    existing ndarray callers.

    Parameters
    ----------
    h : array-like
        Net input value(s).

    Returns
    -------
    ndarray of int32 with the same shape as ``h``.
    """
    return (np.asarray(h) >= 0).astype(np.int32)
def get_interactive_logistic_regression_univariate(x, y):
    """Interactive 1D step-activation classifier demo.

    Plots the labelled 1D data together with the step decision function
    ``stepwise(w1 * x - bias)`` and shades the two predicted regions.

    Parameters
    ----------
    x : array-like of shape (n_samples,)
        One-dimensional inputs.
    y : array-like of shape (n_samples,)
        Binary labels (0/1); also used to color the points.

    Returns
    -------
    ipywidgets interactive widget with sliders for w1 and bias.
    """
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(1, 1, 1)
    ax.set_ylim(-0.1, 1.1)
    ax.set_xlim(-2, 2)
    ax.set_xlabel(r"$x_1$")
    ax.set_ylabel(r"$y_T$")
    ax.set_yticks([0, 1])
    scatter_handle = plt.scatter(x, y, c=y)
    w1 = 1.0
    bias = 0.0
    # Dense grid on which the decision function is evaluated and drawn.
    # (The old `if w1 == 0` guard here was dead code: w1 is literally 1.0.)
    x_dfunc = np.linspace(-2, 2, num=1000)
    h_dfunc = w1 * x_dfunc - bias
    y_dfunc = stepwise(h_dfunc)
    decision_func, = ax.plot(x_dfunc, y_dfunc, color="black")
    # Shade the negative (purple) and positive (yellow) regions, split at
    # the threshold crossing x = bias / w1.
    if w1 >= 0:
        left_filler = ax.fill_betweenx([-0.1, 1.1], x1=-5, x2=bias/w1, color="purple", alpha=0.1)
        right_filler = ax.fill_betweenx([-0.1, 1.1], x1=bias/w1, x2=5, color="yellow", alpha=0.1)
    else:
        left_filler = ax.fill_betweenx([-0.1, 1.1], x1=-5, x2=bias/w1, color="yellow", alpha=0.1)
        right_filler = ax.fill_betweenx([-0.1, 1.1], x1=bias/w1, x2=5, color="purple", alpha=0.1)

    def update(w1=1.0, bias=0.0):
        """Redraw the decision function and shading for new parameters."""
        # Nudge w1 away from zero to keep bias/w1 finite.
        if w1 == 0.0:
            w1 = 0.001
        h_dfunc = w1 * x_dfunc - bias
        y_dfunc = stepwise(h_dfunc)
        decision_func.set_data(x_dfunc, y_dfunc)
        # ax.collections became an immutable view in Matplotlib >= 3.7;
        # remove the stale fillers (everything after the scatter) instead of
        # reassigning the list.
        for filler in list(ax.collections)[1:]:
            filler.remove()
        if w1 >= 0:
            left_filler = ax.fill_betweenx([-0.1, 1.1], x1=-5, x2=bias/w1, color="purple", alpha=0.1)
            right_filler = ax.fill_betweenx([-0.1, 1.1], x1=bias/w1, x2=5, color="yellow", alpha=0.1)
        else:
            left_filler = ax.fill_betweenx([-0.1, 1.1], x1=-5, x2=bias/w1, color="yellow", alpha=0.1)
            right_filler = ax.fill_betweenx([-0.1, 1.1], x1=bias/w1, x2=5, color="purple", alpha=0.1)
        fig.canvas.draw_idle()

    interactive_plot = interactive(update, w1=(-2.0, 2.0), bias=(-3.0, 3.0))
    return interactive_plot
class InteractiveConnectionistNeuron:
    """Hand-tunable 2D linear classifier wrapped in an interactive figure.

    ``fit`` does not learn anything automatically: it opens a widget whose
    sliders let the user pick the weights and bias; the chosen values are
    recorded on the instance (``w1``, ``w2``, ``bias``).
    """

    def __init__(
        self,
        w1_range=(-5.0, 5.0, 0.05),
        w2_range=(-5.0, 5.0, 0.05),
        bias_range=(-3.0, 3.0, 0.01),
        xlabel=None,
        ylabel=None
    ):
        """Store slider ranges and axis labels for the interactive plot.

        Each ``*_range`` is a ``(min, max, step)`` tuple handed to the
        corresponding slider in :meth:`fit`.
        """
        # Chosen parameters; populated by the slider callback inside fit().
        self.w1 = None
        self.w2 = None
        self.bias = None
        self.w1_range = w1_range
        self.w2_range = w2_range
        self.bias_range = bias_range
        if xlabel is None:
            self.xlabel = "price per sqft / maximum price per sqft"
        else:
            self.xlabel = xlabel
        if ylabel is None:
            self.ylabel = "elevation / maximum elevation"
        else:
            self.ylabel = ylabel

    def fit(self, X, y):
        """Open the interactive figure for manually fitting the neuron.

        Parameters
        ----------
        X : ndarray of shape (n_samples, 2)
            Two-feature training inputs.
        y : array-like of shape (n_samples,)
            Labels used to color the scatter points.

        Returns
        -------
        ipywidgets interactive widget.

        Raises
        ------
        ValueError
            If ``X`` is not a 2D array with exactly two columns.
        """
        if not X.ndim == 2:
            raise ValueError
        if not X.shape[1] == 2:
            raise ValueError("Matrix X should have only two features.")
        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(1, 1, 1)
        ax.set_ylim(-0.2, 1.2)
        ax.set_xlim(-0.2, 1.2)
        ax.set_aspect("equal")
        # Label the axes with the names configured in __init__ (these were
        # previously stored but never applied).
        ax.set_xlabel(self.xlabel)
        ax.set_ylabel(self.ylabel)
        scatter_handle = plt.scatter(X[:, 0], X[:, 1], c=y)
        w1 = 0.1
        w2 = 1.0
        projection_vector, = ax.plot([0, w1], [0, w2], color="blue")
        projection_vector2, = ax.plot([-w1, w1], [-w2, w2], linestyle="--", color="blue")
        decision_boundary, = ax.plot([0, -w2], [0, w1], color="orange", label="Entscheidungsgrenze")
        vector_tip, = ax.plot(w1, w2, marker="x", markersize=15, color="blue", label="[w1, w2]")
        ax.plot(0, 0, markersize=10, color="red", marker="o", label="[0, 0]")
        xx = np.linspace(-2, 2, num=100)
        yy = - (w1 / w2) * xx
        top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="purple", alpha=0.1)
        bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="yellow", alpha=0.1)
        handles, labels = scatter_handle.legend_elements(prop="colors", alpha=0.6)
        legend1 = ax.legend(loc="lower right")
        ax.add_artist(legend1)
        legend2 = ax.legend(handles, labels, loc="upper right", title="Labels")

        def update(w1=0.1, w2=1.0, bias=0.0):
            """Record the chosen parameters and redraw boundary and shading."""
            # Line2D.set_data requires sequences (scalar args were deprecated
            # in Matplotlib 3.5 and removed in 3.7).
            vector_tip.set_data([w1], [w2])
            self.w1 = w1
            self.w2 = w2
            self.bias = bias
            # Nudge zero weights to keep the divisions below finite.
            if not w1:
                w1 = 0.0001
            if not w2:
                w2 = 0.0001
            w = np.array([w1, w2])
            projection_vector.set_data([0, w1], [0, w2])
            decision_boundary.set_data([-2, 2], [- w1/w2 * (-2) + bias/w2, - w1/w2 * 2 + bias/w2])
            yy = - (w1 / w2) * xx + bias / w2
            # ax.collections became an immutable view in Matplotlib >= 3.7;
            # remove the stale fillers (everything after the scatter) instead
            # of reassigning the list.
            for filler in list(ax.collections)[1:]:
                filler.remove()
            if w2 > 0:
                top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="purple", alpha=0.1)
                bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="yellow", alpha=0.1)
            else:
                top_filler = ax.fill_between(xx, y1=-10, y2=yy, color="yellow", alpha=0.1)
                bottom_filler = ax.fill_between(xx, y1=yy, y2=10, color="purple", alpha=0.1)
            # Rescale the dashed span to a fixed display length.
            w /= np.linalg.norm(w)
            w *= 5
            w1, w2 = w
            projection_vector2.set_data([-w1, w1], [-w2, w2])
            fig.canvas.draw_idle()

        # Use the ranges configured in __init__ (they were previously ignored
        # in favor of hard-coded slider ranges).
        interactive_plot = interactive(
            update, w1=self.w1_range, w2=self.w2_range, bias=self.bias_range
        )
        return interactive_plot

    def predict(self, X):
        """Not implemented: prediction from the hand-tuned parameters."""
        raise NotImplementedError
|
{"hexsha": "27a1e1fb1476823036287c8e8336a27558029c7b", "size": 18511, "ext": "py", "lang": "Python", "max_stars_repo_path": "2_1_connectionist_neuron/utils_logistic.py", "max_stars_repo_name": "layerwise/training", "max_stars_repo_head_hexsha": "21ad2a5684a3712192fb13f8214bc3bb4c975f3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2_1_connectionist_neuron/utils_logistic.py", "max_issues_repo_name": "layerwise/training", "max_issues_repo_head_hexsha": "21ad2a5684a3712192fb13f8214bc3bb4c975f3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2_1_connectionist_neuron/utils_logistic.py", "max_forks_repo_name": "layerwise/training", "max_forks_repo_head_hexsha": "21ad2a5684a3712192fb13f8214bc3bb4c975f3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-20T11:38:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-20T11:38:47.000Z", "avg_line_length": 34.0902394107, "max_line_length": 153, "alphanum_fraction": 0.5393009562, "include": true, "reason": "import numpy", "num_tokens": 5981}
|
import numpy as np
import pandas as pd
# ``scipy.stats.binom_test`` was deprecated in SciPy 1.10 and removed in
# 1.12; prefer the ``binomtest`` replacement, falling back on old SciPy.
try:
    from scipy.stats import binomtest

    def _binom_pvalue(k, n, p, alternative='two-sided'):
        """Exact binomial-test p-value for k successes in n trials."""
        return binomtest(k, n=n, p=p, alternative=alternative).pvalue
except ImportError:  # SciPy < 1.7: binomtest not available yet
    from scipy.stats import binom_test as _binom_pvalue

# calculate p_value_2sided here: is 41/500 consistent with a true rate of 10%?
p_value_2sided = _binom_pvalue(41, n=500, p=0.1)
print(p_value_2sided)

# calculate p_value_1sided here: is the observed rate significantly *less*?
p_value_1sided = _binom_pvalue(41, n=500, p=0.1, alternative='less')
print(p_value_1sided)
|
{"hexsha": "4f8f77467abbfdd2746c13e9e633a0a89eb3651a", "size": 299, "ext": "py", "lang": "Python", "max_stars_repo_path": "Data Scientist Career Path/8. Hypothesis Testing/1. Testing a Sample Statistics/2. Binomial Test/11. scipy.py", "max_stars_repo_name": "myarist/Codecademy", "max_stars_repo_head_hexsha": "2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-06-06T15:35:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T06:53:42.000Z", "max_issues_repo_path": "Data Scientist Career Path/8. Hypothesis Testing/1. Testing a Sample Statistics/2. Binomial Test/11. scipy.py", "max_issues_repo_name": "shivaniverma1/Data-Scientist", "max_issues_repo_head_hexsha": "f82939a411484311171465591455880c8e354750", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Data Scientist Career Path/8. Hypothesis Testing/1. Testing a Sample Statistics/2. Binomial Test/11. scipy.py", "max_forks_repo_name": "shivaniverma1/Data-Scientist", "max_forks_repo_head_hexsha": "f82939a411484311171465591455880c8e354750", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-06-08T01:32:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T15:38:09.000Z", "avg_line_length": 27.1818181818, "max_line_length": 67, "alphanum_fraction": 0.7826086957, "include": true, "reason": "import numpy,from scipy", "num_tokens": 105}
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def eggholder(x, y):
    """Evaluate the Eggholder benchmark function (NumPy-vectorized)."""
    left = -(y + 47) * np.sin(np.sqrt(np.abs(x / 2 + y + 47)))
    right = -x * np.sin(np.sqrt(np.abs(x - y - 47)))
    return left + right
if __name__ == '__main__':
    # Sample the Eggholder surface on a 200x200 grid and save it as a PNG.
    x, y = np.meshgrid(
        np.linspace(-256, 256, 200),
        np.linspace(-256, 256, 200)
    )
    z = eggholder(x, y)
    fig = plt.figure(figsize=(16, 9))
    # Figure.gca(projection=...) was deprecated in Matplotlib 3.4 and removed
    # in 3.6; request the 3D axes from add_subplot instead.
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(x, y, z, cstride=1, rstride=1, cmap=plt.cm.Spectral)
    fig.savefig('eggholder_fun.png', bbox_inches='tight', pad_inches=0)
|
{"hexsha": "0ef0fc2aa36aa1d4706ef10e99f426ff49056194", "size": 631, "ext": "py", "lang": "Python", "max_stars_repo_path": "slides/figures/eggholder_fun.py", "max_stars_repo_name": "gcampanella/pydata-london-2018", "max_stars_repo_head_hexsha": "6be3232f3766c56d4db8053b10d1e0268809b8f5", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-04-26T21:00:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T17:24:32.000Z", "max_issues_repo_path": "slides/figures/eggholder_fun.py", "max_issues_repo_name": "gcampanella/pydata-london-2018", "max_issues_repo_head_hexsha": "6be3232f3766c56d4db8053b10d1e0268809b8f5", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slides/figures/eggholder_fun.py", "max_forks_repo_name": "gcampanella/pydata-london-2018", "max_forks_repo_head_hexsha": "6be3232f3766c56d4db8053b10d1e0268809b8f5", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-04-26T17:15:37.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-18T10:14:17.000Z", "avg_line_length": 26.2916666667, "max_line_length": 72, "alphanum_fraction": 0.6069730586, "include": true, "reason": "import numpy", "num_tokens": 206}
|
//
// Copyright 2005-2007 Adobe Systems Incorporated
// Copyright 2018 Mateusz Loskot <mateusz@loskot.net>
//
// Use, modification and distribution are subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_GIL_TYPEDEFS_HPP
#define BOOST_GIL_TYPEDEFS_HPP
#include <boost/gil/cmyk.hpp>
#include <boost/gil/device_n.hpp>
#include <boost/gil/gray.hpp>
#include <boost/gil/point.hpp>
#include <boost/gil/rgb.hpp>
#include <boost/gil/rgba.hpp>
#include <cstdint>
#include <memory>
// B - bits size/signedness, CM - channel model, CS - colour space, LAYOUT - pixel layout
// Example: B = '8', CM = 'uint8_t', CS = 'bgr, LAYOUT='bgr_layout_t'
// Expands to the family of interleaved-image type aliases for one
// (bit-depth B, channel model CM, colour space CS, pixel LAYOUT) combination:
// pixel values/references/pointers, step iterators, 2D locators, views and
// the image type, each in mutable and const ("c") variants.
// NOTE: explanatory comments must stay outside the macro body -- a '//'
// comment inside would swallow the '\' line continuation.
#define BOOST_GIL_DEFINE_BASE_TYPEDEFS_INTERNAL(B, CM, CS, LAYOUT) \
template <typename, typename> struct pixel; \
template <typename, typename> struct planar_pixel_reference; \
template <typename, typename> struct planar_pixel_iterator; \
template <typename> class memory_based_step_iterator; \
template <typename> class point; \
template <typename> class memory_based_2d_locator; \
template <typename> class image_view; \
template <typename, bool, typename> class image; \
using CS##B##_pixel_t = pixel<CM, LAYOUT>; \
using CS##B##c_pixel_t = pixel<CM, LAYOUT> const; \
using CS##B##_ref_t = pixel<CM, LAYOUT>&; \
using CS##B##c_ref_t = pixel<CM, LAYOUT> const&; \
using CS##B##_ptr_t = CS##B##_pixel_t*; \
using CS##B##c_ptr_t = CS##B##c_pixel_t*; \
using CS##B##_step_ptr_t = memory_based_step_iterator<CS##B##_ptr_t>; \
using CS##B##c_step_ptr_t = memory_based_step_iterator<CS##B##c_ptr_t>; \
using CS##B##_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##_ptr_t>>; \
using CS##B##c_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##c_ptr_t>>; \
using CS##B##_step_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##_step_ptr_t>>; \
using CS##B##c_step_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##c_step_ptr_t>>; \
using CS##B##_view_t = image_view<CS##B##_loc_t>; \
using CS##B##c_view_t = image_view<CS##B##c_loc_t>; \
using CS##B##_step_view_t = image_view<CS##B##_step_loc_t>; \
using CS##B##c_step_view_t = image_view<CS##B##c_step_loc_t>; \
using CS##B##_image_t = image<CS##B##_pixel_t, false, std::allocator<unsigned char>>;
// Example: B = '8', CM = 'uint8_t', CS = 'bgr' CS_FULL = 'rgb_t' LAYOUT='bgr_layout_t'
// Extends the base family with planar-image aliases (one memory plane per
// channel): planar references, iterators, step iterators, locators, views
// and the planar image type. CS_FULL is the colour-space tag type.
// NOTE: comments must stay outside the macro body (see the '\' continuations).
#define BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(B, CM, CS, CS_FULL, LAYOUT) \
BOOST_GIL_DEFINE_BASE_TYPEDEFS_INTERNAL(B, CM, CS, LAYOUT) \
using CS##B##_planar_ref_t = planar_pixel_reference<CM&, CS_FULL>; \
using CS##B##c_planar_ref_t = planar_pixel_reference<CM const&, CS_FULL>; \
using CS##B##_planar_ptr_t = planar_pixel_iterator<CM*, CS_FULL>; \
using CS##B##c_planar_ptr_t = planar_pixel_iterator<CM const*, CS_FULL>; \
using CS##B##_planar_step_ptr_t = memory_based_step_iterator<CS##B##_planar_ptr_t>; \
using CS##B##c_planar_step_ptr_t \
= memory_based_step_iterator<CS##B##c_planar_ptr_t>; \
using CS##B##_planar_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##_planar_ptr_t>>; \
using CS##B##c_planar_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##c_planar_ptr_t>>; \
using CS##B##_planar_step_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##_planar_step_ptr_t>>; \
using CS##B##c_planar_step_loc_t \
= memory_based_2d_locator<memory_based_step_iterator<CS##B##c_planar_step_ptr_t>>; \
using CS##B##_planar_view_t = image_view<CS##B##_planar_loc_t>; \
using CS##B##c_planar_view_t = image_view<CS##B##c_planar_loc_t>; \
using CS##B##_planar_step_view_t = image_view<CS##B##_planar_step_loc_t>; \
using CS##B##c_planar_step_view_t = image_view<CS##B##c_planar_step_loc_t>; \
using CS##B##_planar_image_t \
= image<CS##B##_pixel_t, true, std::allocator<unsigned char>>;
// Convenience wrappers that derive the layout (and colour-space tag) type
// names from the CS token, e.g. CS=rgb -> rgb_layout_t / rgb_t.
#define BOOST_GIL_DEFINE_BASE_TYPEDEFS(B, CM, CS) \
BOOST_GIL_DEFINE_BASE_TYPEDEFS_INTERNAL(B, CM, CS, CS##_layout_t)
#define BOOST_GIL_DEFINE_ALL_TYPEDEFS(B, CM, CS) \
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(B, CM, CS, CS##_t, CS##_layout_t)
namespace boost { namespace gil {
// forward declarations
template <typename B, typename Mn, typename Mx> struct scoped_channel_value;
template <typename T> struct float_point_zero;
template <typename T> struct float_point_one;
//////////////////////////////////////////////////////////////////////////////////////////
/// Built-in channel models
//////////////////////////////////////////////////////////////////////////////////////////
/// \ingroup ChannelModel
/// \brief 8-bit unsigned integral channel type (alias from uint8_t). Models ChannelValueConcept
using std::uint8_t;
/// \ingroup ChannelModel
/// \brief 16-bit unsigned integral channel type (alias from uint16_t). Models ChannelValueConcept
using std::uint16_t;
/// \ingroup ChannelModel
/// \brief 32-bit unsigned integral channel type (alias from uint32_t). Models ChannelValueConcept
using std::uint32_t;
/// \ingroup ChannelModel
/// \brief 8-bit signed integral channel type (alias from int8_t). Models ChannelValueConcept
using std::int8_t;
/// \ingroup ChannelModel
/// \brief 16-bit signed integral channel type (alias from int16_t). Models ChannelValueConcept
using std::int16_t;
/// \ingroup ChannelModel
/// \brief 32-bit signed integral channel type (alias from int32_t). Models ChannelValueConcept
using std::int32_t;
/// \ingroup ChannelModel
/// \brief 32-bit floating point channel type with range [0.0f ... 1.0f]. Models ChannelValueConcept
using float32_t = scoped_channel_value<float, float_point_zero<float>, float_point_one<float>>;
/// \ingroup ChannelModel
/// \brief 64-bit floating point channel type with range [0.0f ... 1.0f]. Models ChannelValueConcept
using float64_t = scoped_channel_value<double, float_point_zero<double>, float_point_one<double>>;
// Grayscale: interleaved-only typedefs for every built-in channel model.
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8, uint8_t, gray)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8s, int8_t, gray)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16, uint16_t, gray)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16s, int16_t, gray)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32, uint32_t, gray)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32s, int32_t, gray)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32f, float32_t, gray)
// BGR: interleaved-only typedefs.
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8, uint8_t, bgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8s, int8_t, bgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16, uint16_t, bgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16s, int16_t, bgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32, uint32_t, bgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32s, int32_t, bgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32f, float32_t, bgr)
// ARGB: interleaved-only typedefs.
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8, uint8_t, argb)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8s, int8_t, argb)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16, uint16_t, argb)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16s, int16_t, argb)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32, uint32_t, argb)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32s, int32_t, argb)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32f, float32_t, argb)
// ABGR: interleaved-only typedefs.
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8, uint8_t, abgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8s, int8_t, abgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16, uint16_t, abgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16s, int16_t, abgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32, uint32_t, abgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32s, int32_t, abgr)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32f, float32_t, abgr)
// BGRA: interleaved-only typedefs.
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8, uint8_t, bgra)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(8s, int8_t, bgra)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16, uint16_t, bgra)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(16s, int16_t, bgra)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32, uint32_t, bgra)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32s, int32_t, bgra)
BOOST_GIL_DEFINE_BASE_TYPEDEFS(32f, float32_t, bgra)
// RGB: interleaved and planar typedefs.
BOOST_GIL_DEFINE_ALL_TYPEDEFS(8, uint8_t, rgb)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(8s, int8_t, rgb)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(16, uint16_t, rgb)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(16s, int16_t, rgb)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32, uint32_t, rgb)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32s, int32_t, rgb)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32f, float32_t, rgb)
// RGBA: interleaved and planar typedefs.
BOOST_GIL_DEFINE_ALL_TYPEDEFS(8, uint8_t, rgba)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(8s, int8_t, rgba)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(16, uint16_t, rgba)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(16s, int16_t, rgba)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32, uint32_t, rgba)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32s, int32_t, rgba)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32f, float32_t, rgba)
// CMYK: interleaved and planar typedefs.
BOOST_GIL_DEFINE_ALL_TYPEDEFS(8, uint8_t, cmyk)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(8s, int8_t, cmyk)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(16, uint16_t, cmyk)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(16s, int16_t, cmyk)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32, uint32_t, cmyk)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32s, int32_t, cmyk)
BOOST_GIL_DEFINE_ALL_TYPEDEFS(32f, float32_t, cmyk)
// Generic N-channel ("device-N") color spaces, 2 to 5 channels.
template <int N> struct devicen_t;
template <int N> struct devicen_layout_t;
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8, uint8_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8s, int8_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16, uint16_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16s, int16_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32, uint32_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32s, int32_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32f, float32_t, dev2n, devicen_t<2>, devicen_layout_t<2>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8, uint8_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8s, int8_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16, uint16_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16s, int16_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32, uint32_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32s, int32_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32f, float32_t, dev3n, devicen_t<3>, devicen_layout_t<3>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8, uint8_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8s, int8_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16, uint16_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16s, int16_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32, uint32_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32s, int32_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32f, float32_t, dev4n, devicen_t<4>, devicen_layout_t<4>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8, uint8_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(8s, int8_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16, uint16_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(16s, int16_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32, uint32_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32s, int32_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
BOOST_GIL_DEFINE_ALL_TYPEDEFS_INTERNAL(32f, float32_t, dev5n, devicen_t<5>, devicen_layout_t<5>)
}} // namespace boost::gil
#endif
|
{"hexsha": "4ff10d52d72e81e7ee133f6e9871349e5dff374f", "size": 13415, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/gil/typedefs.hpp", "max_stars_repo_name": "Harshitha91/Tmdb-react-native-node", "max_stars_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 153.0, "max_stars_repo_stars_event_min_datetime": "2015-02-03T06:03:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T15:06:34.000Z", "max_issues_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/gil/typedefs.hpp", "max_issues_repo_name": "Harshitha91/Tmdb-react-native-node", "max_issues_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 429.0, "max_issues_repo_issues_event_min_datetime": "2015-03-22T09:49:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T08:32:08.000Z", "max_forks_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/gil/typedefs.hpp", "max_forks_repo_name": "Harshitha91/Tmdb-react-native-node", "max_forks_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-03-15T09:20:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T12:40:07.000Z", "avg_line_length": 57.8232758621, "max_line_length": 100, "alphanum_fraction": 0.6755870294, "num_tokens": 3854}
|
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from common import *
from color_space import color_select, draw_sub_plots, stack_binary_images
# MODIFY THIS FUNCTION TO GENERATE OUTPUT
# THAT LOOKS LIKE THE IMAGE ABOVE
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort a chessboard image and warp it to a top-down view.

    Parameters
    ----------
    img : distorted BGR chessboard image.
    nx, ny : number of inside corners along x and y.
    mtx, dist : camera matrix and distortion coefficients from
        cv2.calibrateCamera().

    Returns
    -------
    warped : top-down (bird's-eye) view of the board.
    M : the 3x3 perspective transform matrix used.

    Raises
    ------
    ValueError : if no chessboard corners are found.  Previously the code
        fell through and crashed on ``corners[0]`` with a TypeError.
    """
    img = cv2.undistort(img, mtx, dist, None, mtx)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if not ret:
        raise ValueError('chessboard corners not found for a %dx%d pattern' % (nx, ny))
    cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    # Ideal board coordinates (column, row), 1-based so the outer corners sit
    # one square in from the image border after scaling.
    objp = np.zeros((ny * nx, 2), np.float32)
    objp[:, :2] = np.mgrid[1:nx + 1, 1:ny + 1].T.reshape(-1, 2)
    img_size = (img.shape[1], img.shape[0])
    square_px = img_size[0] / (nx + 1)  # pixel width of one board square
    # Outer four board corners, generalized from the previous hard-coded
    # indices (0, 7, 40, 47) that were only valid for an 8x6 board.
    dest = np.float32([objp[0], objp[nx - 1], objp[-nx], objp[-1]])
    # Push each corner half a square outwards so full outer squares stay visible.
    dest = dest + np.float32([[-.5, -.5], [.5, -.5], [-.5, .5], [.5, .5]])
    # Matching detected corners: top-left, top-right, bottom-left, bottom-right.
    src = np.float32([corners[0], corners[nx - 1], corners[-nx], corners[-1]])
    dst1 = np.float32(dest) * square_px
    M = cv2.getPerspectiveTransform(src, dst1)
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    return warped, M
# Define a function that takes an image, number of x and y points,
# camera matrix and distortion coefficients
def corners_unwarp1(img, nx, ny, mtx, dist):
    """Reference implementation: undistort, detect chessboard corners, and
    warp the board to fill the frame (with a 100 px margin).

    Returns (warped, M) when the corners are found; returns None otherwise.
    Shows the undistorted image in a matplotlib window as a visual check.
    """
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    gray = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
    # Visual sanity check of the undistortion step.
    plt.imshow(undistorted)
    plt.show()
    found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if found:
        # Overlay the detected corners on the undistorted image.
        cv2.drawChessboardCorners(undistorted, (nx, ny), corners, found)
        # Margin keeping the warped board away from the image edges; an
        # approximation that presents the result at a reasonable aspect ratio.
        margin = 100
        size = (gray.shape[1], gray.shape[0])
        # Outer four detected corners: top-left, top-right, bottom-right, bottom-left.
        source_pts = np.float32([corners[0], corners[nx - 1], corners[-1], corners[-nx]])
        # Destination rectangle inset by the margin on every side.
        target_pts = np.float32([
            [margin, margin],
            [size[0] - margin, margin],
            [size[0] - margin, size[1] - margin],
            [margin, size[1] - margin],
        ])
        transform = cv2.getPerspectiveTransform(source_pts, target_pts)
        return cv2.warpPerspective(undistorted, transform, size), transform
def main():
    """Warp the module-level calibration image and show before/after panels.

    Reads the globals img, nx, ny, mtx, dist set up in the __main__ block.
    """
    top_down, _ = corners_unwarp(img, nx, ny, mtx, dist)
    fig, (left, right) = plt.subplots(1, 2, figsize=(24, 9))
    fig.tight_layout()
    left.imshow(img)
    left.set_title('Original Image', fontsize=50)
    right.imshow(top_down)
    right.set_title('Undistorted and Warped Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.show()
def get_source_dist_points_test(img):
    """Draw the source and destination quadrilaterals on a copy of `img`.

    Returns a stacked color image (via stack_binary_images) with the original
    frame and the frame annotated with both quadrilateral outlines.
    """
    img = np.copy(img)
    background = np.copy(img)
    (s1, s2, s3, s4), (d1, d2, d3, d4) = get_source_dist_points(img)
    vertices = np.array([[s1, s2, s3, s4]], dtype=np.int32)
    #masked_edges = region_of_interest(img, vertices, (100,100,100))

    def quad_edges(a, b, c, d):
        # Four line segments [x1, y1, x2, y2] tracing the quadrilateral a-b-c-d.
        return [[list(np.concatenate([a, b])),
                 list(np.concatenate([b, c])),
                 list(np.concatenate([c, d])),
                 list(np.concatenate([a, d]))]]

    all_lines = quad_edges(s1, s2, s3, s4) + quad_edges(d1, d2, d3, d4)
    draw_lines(img, all_lines, color=[255, 255, 255], thickness=5)
    return stack_binary_images(np.array([]), background * 255, img * 255)
def get_source_dist_points(img, n=PRESPECTIVE_N):
    """Return (src, dst) point sets for the road perspective transform.

    Parameters
    ----------
    img : frame whose shape determines the destination rectangle width/height.
        # assumes a 1280x720 road image for the hard-coded source trapezoid -- TODO confirm
    n : margin parameter that widens/narrows both quadrilaterals
        (default PRESPECTIVE_N from the shared `common` module).

    Returns
    -------
    (src, dst) : two float32 arrays of four (x, y) points each, ordered
        bottom-left, top-left, top-right, bottom-right.

    Notes
    -----
    A dead `if 0:` branch containing an older, shape-relative point
    computation was removed, along with an unused `np.copy(img)`.
    """
    ysize = img.shape[0]
    xsize = img.shape[1]
    # Fraction of the frame width used to place the destination rectangle.
    cut = 20
    # Source trapezoid around the lane (hand-tuned pixel coordinates).
    s1 = (278 - n, 678)
    s2 = (601 - n, 446 + n)
    s3 = (680 + n, 446 + n)
    s4 = (1035 + n, 678)
    # Destination rectangle spanning most of the frame width.
    dx1 = 6 * int(xsize / cut) - n
    dx2 = int(xsize - 5 * xsize / (cut)) + n
    d1 = (dx1, int(ysize))
    d2 = (dx1, int(n))
    d3 = (dx2, int(n))
    d4 = (dx2, int(ysize))
    return np.float32([s1, s2, s3, s4]), np.float32([d1, d2, d3, d4])
def unwrap(gray, src, dst):
    """Warp `gray` with the perspective transform mapping `src` to `dst`.

    Returns the warped image at the same size as the input, using bilinear
    interpolation.
    """
    width, height = gray.shape[1], gray.shape[0]
    #Minv = cv2.getPerspectiveTransform(dst, src)
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(gray, transform, (width, height), flags=cv2.INTER_LINEAR)
# -------------------------------------
# Entry point for the script
# -------------------------------------
# Script entry point: loads the saved calibration, reads a test image, and
# warps it.  The names bound here (mtx, dist, img, nx, ny) are read as module
# globals by main()/corners_unwarp.
if __name__ == '__main__':
    # Read in the saved camera matrix and distortion coefficients
    # These are the arrays you calculated using cv2.calibrateCamera()
    # NOTE(review): pickle.load on a local calibration file -- trusted input only.
    dist_pickle = pickle.load(open("calibration_wide/wide_dist_pickle.p", "rb"))
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    # Read in an image
    img = cv2.imread('calibration_wide/GOPR0032.jpg')
    nx = 8 # the number of inside corners in x
    ny = 6 # the number of inside corners in y
    main()
    pass
|
{"hexsha": "db2699e49e0fa5a4b1336ff2d01ddb31576fca6d", "size": 7694, "ext": "py", "lang": "Python", "max_stars_repo_path": "perspective_transformation.py", "max_stars_repo_name": "mhhm2005eg/CarND-Advanced-Lane-Lines", "max_stars_repo_head_hexsha": "1f571e4714df0dcca21fbf2b09b5af73caddb8f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "perspective_transformation.py", "max_issues_repo_name": "mhhm2005eg/CarND-Advanced-Lane-Lines", "max_issues_repo_head_hexsha": "1f571e4714df0dcca21fbf2b09b5af73caddb8f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "perspective_transformation.py", "max_forks_repo_name": "mhhm2005eg/CarND-Advanced-Lane-Lines", "max_forks_repo_head_hexsha": "1f571e4714df0dcca21fbf2b09b5af73caddb8f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4564102564, "max_line_length": 152, "alphanum_fraction": 0.6143748375, "include": true, "reason": "import numpy", "num_tokens": 2317}
|
REBOL [
System: "REBOL [R3] Language Interpreter and Run-time Environment"
Title: "REBOL 3 Boot Sys: Top Context Functions"
Rights: {
Copyright 2012 REBOL Technologies
REBOL is a trademark of REBOL Technologies
}
License: {
Licensed under the Apache License, Version 2.0
See: http://www.apache.org/licenses/LICENSE-2.0
}
Context: sys
Note: {
Follows the BASE lib init that provides a basic set of functions
to be able to evaluate this code.
The boot binding of this module is SYS then LIB deep.
Any non-local words not found in those contexts WILL BE
UNBOUND and will error out at runtime!
}
]
; It is desirable to express the logic of PRINT as user code, but it is
; also desirable to use PRINT from the C code. This should likely be
; optimized as a native, but is easier to explore at the moment like this.
;
; PRINT* keeps a reference to the original PRINT under another name.
;
print*: :print
; If the host wants to know if a script or module is loaded, e.g. to print out
; a message. (Printing directly from this code would be presumptuous.)
;
; Starts blank (_); DO* below calls it only when it has been set.
;
script-pre-load-hook: _
; DO of functions, blocks, paths, and other do-able types is done directly by
; C code in REBNATIVE(do). But that code delegates to this Rebol function
; for ANY-STRING! and BINARY! types (presumably because it would be laborious
; to express as C).
;
; Overall flow: resolve TAG! sources to URLs, remember the current dir and
; script object, LOAD the source unbound, then either evaluate it as plain
; code, as a script (with directory change), or IMPORT it as a module.  The
; FINALIZER restores all saved state on every exit path, including QUIT.
;
do*: func [
    {SYS: Called by system for DO on datatypes that require special handling}
    return: [<opt> any-value!]
    source "Files, urls and modules evaluate as scripts, other strings don't"
        [file! url! text! binary! tag!]
    args "Args passed as system/script/args to a script (normally a string)"
        [<opt> any-value!]
    only "Do not catch quits...propagate them"
        [logic!]
][
    ; !!! DEMONSTRATION OF CONCEPT... this translates a tag into a URL!, but
    ; it should be using a more "official" URL instead of on individuals
    ; websites.  There should also be some kind of local caching facility.
    ;
    ; force-remote-import is defined in sys-load.r
    ;
    let old-force-remote-import: force-remote-import
    if tag? source [
        set 'force-remote-import true
        ; Convert value into a URL!
        source: switch source
            (load system/locale/library/utilities)
        else [
            fail [
                {Module} source {not in system/locale/library}
            ]
        ]
    ]
    ; Note that DO of file path evaluates in the directory of the target file.
    ;
    ; !!! There are some issues with this idea of preserving the path--one of
    ; which is that WHAT-DIR may return null.
    ;
    let original-path: try what-dir
    let original-script: _
    ; FINALIZER restores the saved script object, directory, and remote-import
    ; flag on any exit from DO*, and re-throws QUIT when /ONLY was requested.
    let finalizer: func [
        value [<opt> any-value!]
        /quit
        <with> return
    ][
        let quit_FINALIZER: quit
        quit: :lib/quit
        ; Restore system/script and the dir if they were changed
        if original-script [system/script: original-script]
        if original-path [change-dir original-path]
        if quit_FINALIZER and [only] [
            quit :value  ; "rethrow" the QUIT if DO/ONLY
        ]
        set 'force-remote-import old-force-remote-import
        return :value  ; returns from DO*, because of <with> return
    ]
    ; If a file is being mentioned as a DO location and the "current path"
    ; is a URL!, then adjust the source to be a URL! based from that path.
    ;
    if all [url? original-path | file? source] [
        source: join original-path source
    ]
    ; Load the code (do this before CHANGE-DIR so if there's an error in the
    ; LOAD it will trigger before the failure of changing the working dir)
    ; It is loaded as UNBOUND so that DO-NEEDS runs before INTERN.
    ;
    let hdr
    let code
    [code hdr]: load/type source 'unbound
    ; !!! This used to LOCK the header, but the module processing wants to
    ; do some manipulation to it.  Review.  In the meantime, in order to
    ; allow mutation of the OBJECT! we have to actually TAKE the hdr out
    ; of the returned result to avoid LOCKing it when the code array is locked
    ; because even with series not at their head, LOCK NEXT CODE will lock it.
    ;
    ensure block! code
    ensure [object! blank!] hdr: default [_]
    let is-module: 'module = select hdr 'type
    let result
    if (text? source) and [not is-module] [
        ;
        ; Return result without "script overhead" (e.g. don't change the
        ; working directory to the base of the file path supplied)
        ;
        do-needs hdr  ; Load the script requirements
        intern code   ; Bind the user script
        catch/quit [
            ;
            ; The source string may have been mutable or immutable, but the
            ; loaded code is not locked for this case.  So this works:
            ;
            ;     do "append {abc} {de}"
            ;
            result: do code  ; !!! pass args implicitly?
        ] then :finalizer/quit
    ] else [
        ; Otherwise we are in script mode.  When we run a script, the
        ; "current" directory is changed to the directory of that script.
        ; This way, relative path lookups to find dependent files will look
        ; relative to the script.
        ;
        ; We want this behavior for both FILE! and for URL!, which means
        ; that the "current" path may become a URL!.  This can be processed
        ; with change-dir commands, but it will be protocol dependent as
        ; to whether a directory listing would be possible (HTTP does not
        ; define a standard for that)
        ;
        all [
            match [file! url!] source
            let file: find-last/tail source slash
            elide change-dir copy/part source file
        ]
        ; Make the new script object
        original-script: system/script  ; and save old one
        system/script: make system/standard/script compose [
            title: try select hdr 'title
            header: hdr
            parent: :original-script
            path: what-dir
            args: (try :args)
        ]
        if set? 'script-pre-load-hook [
            script-pre-load-hook is-module hdr  ; chance to print it out
        ]
        ; Eval the block or make the module, returned
        either is-module [  ; Import the module and set the var
            catch/quit [
                import module/mixin hdr code (opt do-needs/no-user hdr)
                ; !!! It would be nice if you could modularize a script and
                ; still be able to get a result.  Until you can, make module
                ; execution return void so that it doesn't give a verbose
                ; output when you DO it (so you can see whatever the script
                ; might have PRINT-ed)
                ;
                ; https://github.com/rebol/rebol-issues/issues/2373
                ;
                result: void
            ] then :finalizer/quit
        ][
            do-needs hdr  ; Load the script requirements
            intern code   ; Bind the user script
            catch/quit [
                result: do code
            ] then :finalizer/quit
        ]
    ]
    return finalizer :result
]
export: func [
    "Low level export of values (e.g. functions) to lib."
    words [block!] "Block of words (already defined in local context)"
][
    ; Append each word together with its current value to LIB, making the
    ; definition visible outside this module's context.
    for-each word words [
        append lib reduce [word get word]
    ]
]
|
{"hexsha": "88be36150f98d532903f2d3746ad9f59e044093d", "size": 7432, "ext": "r", "lang": "R", "max_stars_repo_path": "src/mezz/sys-base.r", "max_stars_repo_name": "BlackATTR/ren-c", "max_stars_repo_head_hexsha": "533a998a30e6a74a528718df58d5ece997a43138", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mezz/sys-base.r", "max_issues_repo_name": "BlackATTR/ren-c", "max_issues_repo_head_hexsha": "533a998a30e6a74a528718df58d5ece997a43138", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mezz/sys-base.r", "max_forks_repo_name": "BlackATTR/ren-c", "max_forks_repo_head_hexsha": "533a998a30e6a74a528718df58d5ece997a43138", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3904761905, "max_line_length": 78, "alphanum_fraction": 0.6124865447, "num_tokens": 1756}
|
import os
import pickle
import matplotlib
import matplotlib.pyplot as plt
import itertools
import numpy as np
from astrodash.multilayer_convnet import convnet_variables
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ModuleNotFoundError:
import tensorflow as tf
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.RdBu, fig_dir='.',
                          name='', fontsize_labels=15, fontsize_matrix=18):
    """Save a confusion-matrix figure and CSV dumps to `fig_dir`.

    Writes confusion_matrix_raw_<name>.csv (input as given),
    confusion_matrix_<name>.csv (optionally row-normalized, with negated
    off-diagonal entries), and confusion_matrix_<name>.pdf.

    Parameters
    ----------
    cm : square 2-D confusion matrix (true rows x predicted columns).
    classes : tick labels for both axes.
    normalize : if True, each row is scaled to sum to 1.
    title, cmap, name : figure title, diverging colormap, and file-name suffix.
    fontsize_labels, fontsize_matrix : tick-label and cell-text font sizes.
    """
    np.savetxt(os.path.join(fig_dir, 'confusion_matrix_raw_%s.csv' % name), cm)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        # Copy so the in-place sign flip below no longer mutates the caller's
        # matrix (previously the input array was modified when normalize=False).
        cm = np.array(cm)
        print('Confusion matrix, without normalization')
    print(cm)
    # Negate off-diagonal cells so the diverging colormap separates correct
    # classifications from errors; cell text uses abs() to undo this.
    off_diag = ~np.eye(cm.shape[0], dtype=bool)
    cm[off_diag] *= -1
    np.savetxt(os.path.join(fig_dir, 'confusion_matrix_%s.csv' % name), cm)
    print(cm)
    plt.rcParams['text.usetex'] = True
    plt.rcParams['font.serif'] = ['Computer Modern Roman'] + plt.rcParams['font.serif']
    # 'family': 'normal' is not a valid font family and only produced
    # "font not found" warnings; setting the size alone is sufficient.
    matplotlib.rc('font', size=16)
    fig = plt.figure(figsize=(15, 12))
    plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=-1, vmax=1)
    plt.title(title)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=23)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90, fontsize=fontsize_labels)
    plt.yticks(tick_marks, classes, fontsize=fontsize_labels)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(abs(cm[i, j]), fmt), horizontalalignment="center",
                 color="white" if abs(cm[i, j]) > thresh else "black", fontsize=fontsize_matrix)
    plt.ylabel('True label', fontsize=26)
    plt.xlabel('Predicted label', fontsize=26)
    plt.tight_layout()
    plt.savefig(os.path.join(fig_dir, 'confusion_matrix_%s.pdf' % name))
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close(fig)
def get_aggregated_conf_matrix(aggregateIndexes, testLabels, predictedLabels):
    """Collapse fine-grained labels into bins and return the confusion matrix.

    `aggregateIndexes` are the bin boundaries passed to np.digitize; both the
    true and predicted labels are binned before building the matrix.  Must be
    called inside an active tensorflow session (uses .eval()).
    """
    binned_true = np.digitize(testLabels, aggregateIndexes) - 1
    binned_pred = np.digitize(predictedLabels, aggregateIndexes) - 1
    aggregated = tf.confusion_matrix(binned_true, binned_pred).eval()
    np.set_printoptions(precision=2)
    print(aggregated)
    return aggregated
def calc_model_metrics(modelFilename, testLabels, testImages, testTypeNames, typeNamesList, snTypes=None, fig_dir='.'):
    """Restore a trained checkpoint and report confusion matrices and accuracies.

    Parameters
    ----------
    modelFilename : path to the tensorflow checkpoint to restore.
    testLabels : integer class label per test spectrum (index into typeNamesList).
    testImages : network input vectors, one row per spectrum.
    testTypeNames : per-spectrum label strings of the form "Type: age"
        (or "GalType: Type: age" for three-part labels).
    typeNamesList : class-name string per output neuron.
    snTypes : broad SN type names for the age-aggregated matrix.
        NOTE(review): despite the None default, len(snTypes) is used
        unconditionally below, so None would raise -- confirm callers pass it.
    fig_dir : directory receiving the confusion-matrix CSVs and PDFs.
    """
    tf.reset_default_graph()
    nw = len(testImages[0])
    nBins = len(typeNamesList)
    imWidthReduc = 8
    imWidth = 32  # Image size and width
    # Rebuild the network graph so the checkpoint variables can be restored.
    x, y_, keep_prob, y_conv, W, b = convnet_variables(imWidth, imWidthReduc, nw, nBins)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, modelFilename)
        # Softmax scores for every test spectrum (keep_prob=1: no dropout).
        yy = y_conv.eval(feed_dict={x: testImages, keep_prob: 1.0})
        # CONFUSION MATRIX
        predictedLabels = []
        for i, name in enumerate(testTypeNames):
            predictedLabels.append(np.argmax(yy[i]))
        predictedLabels = np.array(predictedLabels)
        confMatrix = tf.confusion_matrix(testLabels, predictedLabels).eval()
        # Aggregate age conf matrix
        # Bin boundaries that collapse all age bins of one SN type together.
        aggregateAgesIndexes = np.arange(0, nBins + 1, int(nBins / len(snTypes)))
        confMatrixAggregateAges = get_aggregated_conf_matrix(aggregateAgesIndexes, testLabels, predictedLabels)
        classnames = np.copy(snTypes)
        # Drop the last name if digitize produced one fewer bin than names.
        if confMatrixAggregateAges.shape[0] < len(classnames):
            classnames = classnames[:-1]
        plot_confusion_matrix(confMatrixAggregateAges, classes=classnames, normalize=True, title='', fig_dir=fig_dir,
                              name='aggregate_ages', fontsize_labels=23, fontsize_matrix=21)
        # Aggregate age and subtypes conf matrix
        # Hard-coded boundaries in label-index space for the Ia/Ib/Ic/II split;
        # presumably tied to the 306-class layout -- TODO confirm.
        aggregateSubtypesIndexes = np.array([0, 108, 180, 234, 306])
        broadTypes = ['Ia', 'Ib', 'Ic', 'II']
        confMatrixAggregateSubtypes = get_aggregated_conf_matrix(aggregateSubtypesIndexes, testLabels, predictedLabels)
        plot_confusion_matrix(confMatrixAggregateSubtypes, classes=broadTypes, normalize=True, title='',
                              fig_dir=fig_dir, name='aggregate_subtypes', fontsize_labels=35, fontsize_matrix=35)
        # plt.show()
        np.set_printoptions(precision=2)
        print(confMatrix)
        plot_confusion_matrix(confMatrix, classes=typeNamesList, normalize=True, title='', fig_dir=fig_dir, name='all',
                              fontsize_labels=2, fontsize_matrix=1)
        # ACTUAL ACCURACY, broadTYPE ACCURACY, AGE ACCURACY
        typeAndAgeCorrect = 0
        typeCorrect = 0
        broadTypeCorrect = 0
        broadTypeAndAgeCorrect = 0
        typeAndNearAgeCorrect = 0
        broadTypeAndNearAgeCorrect = 0
        for i in range(len(testTypeNames)):
            predictedIndex = np.argmax(yy[i])
            # Labels are "Type: age" or "GalType: Type: age".
            classification = testTypeNames[i].split(': ')
            if len(classification) == 2:
                testType, testAge = classification
            else:
                testGalType, testType, testAge = classification
            actual = typeNamesList[predictedIndex].split(': ')
            if len(actual) == 2:
                actualType, actualAge = actual
            else:
                actualGalType, actualType, actualAge = actual
            # Broad type is the first two characters, except IIb counts as Ib.
            testBroadType = testType[0:2]
            actualBroadType = actualType[0:2]
            if testType[0:3] == 'IIb':
                testBroadType = 'Ib'
            if actualType[0:3] == 'IIb':
                actualBroadType = 'Ib'
            nearTestAge = testAge.split(' to ')
            if testTypeNames[i] == typeNamesList[predictedIndex]:
                typeAndAgeCorrect += 1
            if testType == actualType:  # correct type
                typeCorrect += 1
                if (nearTestAge[0] in actualAge) or (
                        nearTestAge[1] in actualAge):  # check if the age is in the neigbouring bin
                    typeAndNearAgeCorrect += 1  # all correct except nearby bin
            if testBroadType == actualBroadType:  # correct broadtype
                broadTypeCorrect += 1
                if testAge == actualAge:
                    broadTypeAndAgeCorrect += 1
                if (nearTestAge[0] in actualAge) or (
                        nearTestAge[1] in actualAge):  # check if the age is in the neigbouring bin
                    broadTypeAndNearAgeCorrect += 1  # Broadtype and nearby bin
        typeAndAgeAccuracy = float(typeAndAgeCorrect) / len(testTypeNames)
        typeAccuracy = float(typeCorrect) / len(testTypeNames)
        broadTypeAccuracy = float(broadTypeCorrect) / len(testTypeNames)
        broadTypeAndAgeAccuracy = float(broadTypeAndAgeCorrect) / len(testTypeNames)
        typeAndNearAgeAccuracy = float(typeAndNearAgeCorrect) / len(testTypeNames)
        broadTypeAndNearAgeAccuracy = float(broadTypeAndNearAgeCorrect) / len(testTypeNames)
        print("typeAndAgeAccuracy : " + str(typeAndAgeAccuracy))
        print("typeAccuracy : " + str(typeAccuracy))
        print("broadTypeAccuracy : " + str(broadTypeAccuracy))
        print("broadTypeAndAgeAccuracy: " + str(broadTypeAndAgeAccuracy))
        print("typeAndNearAgeAccuracy : " + str(typeAndNearAgeAccuracy))
        print("broadTypeAndNearAgeAccuracy : " + str(broadTypeAndNearAgeAccuracy))
def main():
    """Evaluate the zero-redshift, 80%-train split model on the held-out test set.

    Loads the saved training parameters and TensorFlow checkpoint, memory-maps
    the (large) test arrays, and computes metrics/figures for the first 50000
    test examples via calc_model_metrics.
    """
    model_dir = "/Users/danmuth/PycharmProjects/astrodash/data_files_train80_splitspectra_zeroZ/"
    model_filename = model_dir + "tensorflow_model.ckpt"
    figure_dir = os.path.join('..', 'Figures', 'zeroZ_train80_splitspectra')
    if not os.path.exists(figure_dir):
        os.makedirs(figure_dir)
    # Recover the supernova type list saved at training time.
    with open(os.path.join(model_dir, "training_params.pickle"), 'rb') as params_file:
        training_params = pickle.load(params_file)
    sn_types = training_params['typeList']
    test_dir = "/Users/danmuth/PycharmProjects/astrodash/data_files_train80_splitspectra_zeroZ/training_set/"
    # mmap so only the evaluated slice is actually read from disk.
    test_images = np.load(test_dir + 'testImages.npy', mmap_mode='r')
    test_labels = np.load(test_dir + 'testLabels.npy', mmap_mode='r')
    type_names_list = np.load(test_dir + 'typeNamesList.npy')
    test_type_names = np.load(test_dir + 'testTypeNames.npy')
    calc_model_metrics(model_filename, test_labels[:50000], test_images[:50000], test_type_names[:50000],
                       type_names_list, sn_types, fig_dir=figure_dir)
if __name__ == '__main__':
main()
|
{"hexsha": "86243b7a0bd098076cf14843a0cbe1dd637278f5", "size": 8517, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrodash/model_metrics.py", "max_stars_repo_name": "TorshaMajumder/astrodash", "max_stars_repo_head_hexsha": "6064d8e8dc9f64691bea100a033c63d132d04af5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2018-11-06T03:09:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:44:03.000Z", "max_issues_repo_path": "astrodash/model_metrics.py", "max_issues_repo_name": "TorshaMajumder/astrodash", "max_issues_repo_head_hexsha": "6064d8e8dc9f64691bea100a033c63d132d04af5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2017-01-13T00:01:52.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-18T23:31:32.000Z", "max_forks_repo_path": "astrodash/model_metrics.py", "max_forks_repo_name": "TorshaMajumder/astrodash", "max_forks_repo_head_hexsha": "6064d8e8dc9f64691bea100a033c63d132d04af5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-08-22T13:31:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T22:59:22.000Z", "avg_line_length": 41.5463414634, "max_line_length": 119, "alphanum_fraction": 0.6779382412, "include": true, "reason": "import numpy", "num_tokens": 2150}
|
[STATEMENT]
lemma i_shrink_eq_NoMsg_iAll_conv: "
0 < k \<Longrightarrow> ((s \<div>\<^sub>i k) t = \<NoMsg>) = (\<box> t1 [t * k\<dots>,k - Suc 0]. s t1 = \<NoMsg>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < k \<Longrightarrow> ((s \<div> k) t = NoMsg) = (\<box> t1 [t * k\<dots>,k - Suc 0]. s t1 = NoMsg)
[PROOF STEP]
apply (simp add: i_shrink_nth last_message_NoMsg_conv iAll_def Ball_def iIN_iff)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < k \<Longrightarrow> (\<forall>i<k. s (t * k + i) = NoMsg) = (\<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg)
[PROOF STEP]
apply (rule iffI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>0 < k; \<forall>i<k. s (t * k + i) = NoMsg\<rbrakk> \<Longrightarrow> \<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg
2. \<lbrakk>0 < k; \<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg\<rbrakk> \<Longrightarrow> \<forall>i<k. s (t * k + i) = NoMsg
[PROOF STEP]
apply (clarify, rename_tac i)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i. \<lbrakk>0 < k; \<forall>i<k. s (t * k + i) = NoMsg; t * k \<le> i; i \<le> t * k + k - Suc 0\<rbrakk> \<Longrightarrow> s i = NoMsg
2. \<lbrakk>0 < k; \<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg\<rbrakk> \<Longrightarrow> \<forall>i<k. s (t * k + i) = NoMsg
[PROOF STEP]
apply (drule_tac x="i - t * k" in spec)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i. \<lbrakk>0 < k; t * k \<le> i; i \<le> t * k + k - Suc 0; i - t * k < k \<longrightarrow> s (t * k + (i - t * k)) = NoMsg\<rbrakk> \<Longrightarrow> s i = NoMsg
2. \<lbrakk>0 < k; \<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg\<rbrakk> \<Longrightarrow> \<forall>i<k. s (t * k + i) = NoMsg
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < k; \<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg\<rbrakk> \<Longrightarrow> \<forall>i<k. s (t * k + i) = NoMsg
[PROOF STEP]
apply (clarify, rename_tac i)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i. \<lbrakk>0 < k; \<forall>x. t * k \<le> x \<and> x \<le> t * k + k - Suc 0 \<longrightarrow> s x = NoMsg; i < k\<rbrakk> \<Longrightarrow> s (t * k + i) = NoMsg
[PROOF STEP]
apply (drule_tac x="t * k + i" in spec)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i. \<lbrakk>0 < k; i < k; t * k \<le> t * k + i \<and> t * k + i \<le> t * k + k - Suc 0 \<longrightarrow> s (t * k + i) = NoMsg\<rbrakk> \<Longrightarrow> s (t * k + i) = NoMsg
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1256, "file": "AutoFocus-Stream_IL_AF_Stream", "length": 9}
|
#! /usr/bin/env python
import roslib; roslib.load_manifest('jaco2_driver')
import rospy
import sys
import numpy as np
import actionlib
import kinova_msgs.msg
import std_msgs.msg
import geometry_msgs.msg
from util.util import *
import math
import argparse
from abc import abstractmethod
def QuaternionNorm(Q_raw):
    """Return the unit quaternion for Q_raw = [qx, qy, qz, qw] (list of 4 floats)."""
    qx, qy, qz, qw = Q_raw[0:4]
    magnitude = math.sqrt(qx * qx + qy * qy + qz * qz + qw * qw)
    # Divide every component by the Euclidean norm.
    return [qx / magnitude, qy / magnitude, qz / magnitude, qw / magnitude]
def Quaternion2EulerXYZ(Q_raw):
    """Convert a quaternion [qx, qy, qz, qw] to XYZ Euler angles (radians).

    The quaternion is normalized first so the asin argument stays in range.
    """
    qx, qy, qz, qw = QuaternionNorm(Q_raw)
    roll = math.atan2(2 * qw * qx - 2 * qy * qz, qw * qw - qx * qx - qy * qy + qz * qz)
    pitch = math.asin(2 * qw * qy + 2 * qx * qz)
    yaw = math.atan2(2 * qw * qz - 2 * qx * qy, qw * qw + qx * qx - qy * qy - qz * qz)
    return [roll, pitch, yaw]
def EulerXYZ2Quaternion(EulerXYZ_):
    """Convert XYZ Euler angles [tx, ty, tz] (radians) to a quaternion [qx, qy, qz, qw]."""
    tx, ty, tz = EulerXYZ_[0:3]
    # Half-angle sines/cosines used by the product formula.
    sx, cx = math.sin(0.5 * tx), math.cos(0.5 * tx)
    sy, cy = math.sin(0.5 * ty), math.cos(0.5 * ty)
    sz, cz = math.sin(0.5 * tz), math.cos(0.5 * tz)
    return [
        sx * cy * cz + cx * sy * sz,
        -sx * cy * sz + cx * sy * cz,
        sx * sy * cz + cx * cy * sz,
        -sx * sy * sz + cx * cy * cz,
    ]
class RobotPose:
    """Abstract interface for reading and commanding a robot's Cartesian pose.

    NOTE(review): the class does not inherit from an ABC metaclass, so the
    @abstractmethod decorators are not enforced at instantiation time.
    """
    @abstractmethod
    def set_cartesian(self, pose):
        # Command the robot to the given Cartesian pose.
        pass
    @abstractmethod
    def get_cartesian(self):
        # Return the robot's current Cartesian pose.
        pass
class Jaco2Pose(RobotPose):
    """Cartesian pose interface for a Kinova Jaco2 6-DOF, 3-finger arm."""

    def __init__(self, arm, prefix="j2n6s300_"):
        """Initialize Jaco2 6-DOF 3-Finger Robot.

        arm: driver namespace of the arm (used to build topic/action names).
        prefix: joint/link name prefix used by the driver.
        """
        self.arm = arm
        self.prefix = prefix
        # [x, y, z, rx, ry, rz] of the last known Cartesian command.
        self.currentCartesianCommand = [0] * 6
        self.__get_currentCartesianCommand()

    def set_cartesian(self, pose, relative=False):
        """Command a Cartesian pose.

        pose: [x, y, z, roll_deg, pitch_deg, yaw_deg]; when relative is True,
        the values are offsets from the last known Cartesian command.
        Returns the action result, or None if the action timed out.
        """
        position = pose[:3]
        orientation = pose[3:]
        if relative:
            position_absolute = [position[i] + self.currentCartesianCommand[i] for i in range(3)]
            orientation_deg_list = list(map(math.degrees, self.currentCartesianCommand[3:]))
            orientation_deg = [orientation[i] + orientation_deg_list[i] for i in range(3)]
        else:
            position_absolute = position
            orientation_deg = orientation
        # Shared tail of both branches: degrees -> radians -> quaternion.
        orientation_rad = list(map(math.radians, orientation_deg))
        orientation_absolute = EulerXYZ2Quaternion(orientation_rad)
        result = self.__cartesian_pose_client(position_absolute, orientation_absolute)
        return result

    def get_cartesian(self):
        # NOTE(review): __get_currentCartesianCommand currently returns None
        # (the subscriber is commented out) — confirm intended return value.
        return self.__get_currentCartesianCommand()

    def __get_currentCartesianCommand(self):
        topic_address = '/' + self.arm + '_driver/out/cartesian_command'
        # Subscription is currently disabled; currentCartesianCommand keeps its
        # initial zeros until this is re-enabled.
        #rospy.Subscriber(topic_address, kinova_msgs.msg.KinovaPose, self.__set_currentCartesianCommand)
        #rospy.wait_for_message(topic_address, kinova_msgs.msg.KinovaPose)
        # print() call form works under both Python 2 and 3 (was a py2-only statement).
        print('position listener obtained message for Cartesian pose. ')

    def __set_currentCartesianCommand(self, feedback):
        # Fixed: this method was declared without `self` while using `self.*`
        # in its body, so it would raise NameError if the (commented-out)
        # subscriber callback were ever re-enabled.
        currentCartesianCommand_str_list = str(feedback).split("\n")
        for index in range(0, len(currentCartesianCommand_str_list)):
            temp_str = currentCartesianCommand_str_list[index].split(": ")
            self.currentCartesianCommand[index] = float(temp_str[1])

    def __cartesian_pose_client(self, position, orientation):
        """Send a cartesian goal to the action server and wait up to 10 s."""
        action_address = '/' + self.arm + '_driver/pose_action/tool_pose'
        client = actionlib.SimpleActionClient(action_address, kinova_msgs.msg.ArmPoseAction)
        client.wait_for_server()
        goal = kinova_msgs.msg.ArmPoseGoal()
        goal.pose.header = std_msgs.msg.Header(frame_id=(self.arm + '_link_base'))
        goal.pose.pose.position = geometry_msgs.msg.Point(
            x=position[0], y=position[1], z=position[2])
        goal.pose.pose.orientation = geometry_msgs.msg.Quaternion(
            x=orientation[0], y=orientation[1], z=orientation[2], w=orientation[3])
        # print('goal.pose in client 1: {}'.format(goal.pose.pose))  # debug
        client.send_goal(goal)
        if client.wait_for_result(rospy.Duration(10.0)):
            return client.get_result()
        else:
            client.cancel_all_goals()
            print(' the cartesian action timed-out')
            return None
|
{"hexsha": "0c5dd0ac16e526d13545890dd2aff1574eccd7cf", "size": 4843, "ext": "py", "lang": "Python", "max_stars_repo_path": "drivers/jaco2_driver/nodes/jaco2_driver/pose_action_client.py", "max_stars_repo_name": "s4480417/thesisrobotics", "max_stars_repo_head_hexsha": "dad7723ff14af92380c21a5aaccb5ae94a7d8bd2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "drivers/jaco2_driver/nodes/jaco2_driver/pose_action_client.py", "max_issues_repo_name": "s4480417/thesisrobotics", "max_issues_repo_head_hexsha": "dad7723ff14af92380c21a5aaccb5ae94a7d8bd2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drivers/jaco2_driver/nodes/jaco2_driver/pose_action_client.py", "max_forks_repo_name": "s4480417/thesisrobotics", "max_forks_repo_head_hexsha": "dad7723ff14af92380c21a5aaccb5ae94a7d8bd2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1712328767, "max_line_length": 104, "alphanum_fraction": 0.6462936197, "include": true, "reason": "import numpy", "num_tokens": 1315}
|
import numpy as np
import math
import matplotlib.pyplot as plt
import random
# Two-centroid nearest-neighbour assignment demo: each point is labelled by
# the centroid it is closest to, with equidistant points assigned randomly.
X = np.array([[2, -1], [2, 1], [0, -1], [0, 1], [1, 0.5], [1, -0.5], [1, 1], [1, -1]])
plt.scatter(X[:, 0], X[:, 1])
cent1 = np.array([[0.5, 0]])
cent2 = np.array([[1.5, 0]])
plt.scatter(cent1[0][0], cent1[0][1], label="Centroid - class1", marker='x')
plt.scatter(cent2[0][0], cent2[0][1], label="Centroid - class2", marker='x')
# Calculating distances and labelling.
arry1 = []
arry2 = []
for row in X:
    d1 = np.linalg.norm(row - cent1)
    d2 = np.linalg.norm(row - cent2)
    if d1 < d2:
        # Fixed: the original appended to class1 when d1 > d2, i.e. it
        # assigned each point to the centroid it was *farther* from.
        arry1.append(row)
    elif d2 < d1:
        arry2.append(row)
    else:
        # Equidistant from both centroids: randomly assign a class.
        if random.randint(0, 1) == 0:
            arry1.append(row)
        else:
            arry2.append(row)
arry1 = np.array(arry1)
arry2 = np.array(arry2)
plt.scatter(arry1[:, 0], arry1[:, 1], c='r', label="Class1")
plt.scatter(arry2[:, 0], arry2[:, 1], c='b', label="Class2")
plt.legend()
plt.show()
|
{"hexsha": "52ba7b03e39e4bb0d758ea20d4abf737358daf44", "size": 1012, "ext": "py", "lang": "Python", "max_stars_repo_path": "Supervised Learning/SMAI HWS/23/1.py", "max_stars_repo_name": "shailymishra/Machine-Learning-Advanced", "max_stars_repo_head_hexsha": "048e7c816f8c673c7b73ffb7555ebdfee6f0dc5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Supervised Learning/SMAI HWS/23/1.py", "max_issues_repo_name": "shailymishra/Machine-Learning-Advanced", "max_issues_repo_head_hexsha": "048e7c816f8c673c7b73ffb7555ebdfee6f0dc5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Supervised Learning/SMAI HWS/23/1.py", "max_forks_repo_name": "shailymishra/Machine-Learning-Advanced", "max_forks_repo_head_hexsha": "048e7c816f8c673c7b73ffb7555ebdfee6f0dc5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3, "max_line_length": 75, "alphanum_fraction": 0.5918972332, "include": true, "reason": "import numpy", "num_tokens": 346}
|
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from sklearn.model_selection import train_test_split
from src.predictor.dataset_extractor import DatasetExtractor
def get_dataset_split(main_folder: str, target_data_folder: str, day_interval: int = 4):
    """
    :param main_folder: Main folder of resources
    :param target_data_folder: Target folder where the dataset information is obtained like Total Deaths
    :param day_interval: Date interval where data will be given with that interval to RNN
    :return: extractor, (*dataset)
    """
    # Load and scale every file found under the target directory.
    extractor = DatasetExtractor('{0}{1}'.format(main_folder, target_data_folder))
    extractor.load_all_data_under_target_directory()
    extractor.scale_loaded_data()
    # Window the series, split 80/20, and reshape to (samples, timesteps, 1)
    # as expected by the LSTM input layer.
    X, y = extractor.get_dataset(day_interval=day_interval)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    y_train = y_train.reshape(y_train.shape[0], 1)
    y_test = y_test.reshape(y_test.shape[0], 1)
    return extractor, (X_train, X_test, y_train, y_test)
def build_model(input_dimension: int, optimizer='Adam', layer1=30, layer2=20, layer3=20, dropout=0.10) -> Sequential:
    """
    Model building functionality
    """
    # Stacked-LSTM regressor: three LSTM layers (each followed by dropout)
    # feeding a single-unit dense output.
    predictor = Sequential()
    predictor.add(LSTM(units=layer1, return_sequences=True, input_shape=(input_dimension, 1)))
    predictor.add(Dropout(dropout))
    predictor.add(LSTM(units=layer2, return_sequences=True))
    predictor.add(Dropout(dropout))
    # Last LSTM layer does not return sequences, so Dense sees a flat vector.
    predictor.add(LSTM(units=layer3))
    predictor.add(Dropout(dropout))
    predictor.add(Dense(units=1))
    predictor.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['mse', 'mae', 'mape', 'cosine'])
    return predictor
def make_prediction_with_model(_model: "Sequential", _data_extractor: "DatasetExtractor", _data: np.ndarray) -> np.ndarray:
    """
    Predictor function over the model by applying data transformations on the given data with the given extractor
    :param _model: Model which will perform prediction
    :param _data_extractor: Data processor which will scale input and inverse scale output
    :param _data: Numpy vector which is expected to be length of day_interval i.e. shape of (day_interval,)
    :return: Numeric value
    """
    # Fixed: the body previously referenced the module-level globals `model`
    # and `data_extractor` instead of the `_model`/`_data_extractor`
    # parameters, silently ignoring the arguments callers passed in.
    scaled_data = _data_extractor.scale_given_data(_data.reshape(-1, 1))
    prediction = _model.predict(scaled_data.reshape(1, -1, 1))
    return _data_extractor.inverse_scale_given_data(prediction)
def make_series_of_prediction(_model: Sequential, _data_extractor: DatasetExtractor, initial_data: np.ndarray,
                              prediction_count: int) -> np.ndarray:
    """
    Function to make series of predictions
    :param _model: Model which will perform prediction
    :param _data_extractor: Data processor which will scale input and inverse scale output
    :param initial_data: Initial data to feed model
    :param prediction_count: Count of predictions to make
    :return: Made prediction array
    """
    # Autoregressive rollout: each prediction is appended to the sliding
    # window (dropping the oldest value) and fed back into the model.
    window = initial_data
    made_predictions = []
    for _ in range(prediction_count):
        next_value = make_prediction_with_model(_model, _data_extractor, window).item(0, 0)
        made_predictions.append(next_value)
        window = np.concatenate([window[1:], np.array([next_value])])
    return np.array(made_predictions)
def create_time_series(_model: Sequential, _data_extractor: DatasetExtractor, data: np.ndarray, day_interval: int,
                       prediction_count: int, plot_title=None, x_label=None, y_label=None) -> np.ndarray:
    """
    Creating timeseries from the given data up to given prediction count and drawing function by combining given data
    and predictions on a plot
    :param _model: Model which will perform prediction
    :param _data_extractor: Data processor which will scale input and inverse scale output
    :param data: Data to be used for making predictions (Expected size if (x,) i.e. as a vector)
    :param day_interval: Day interval to structuring data
    :param prediction_count: Prediction count on how many predictions will be done
    :param plot_title: Optional title drawn on the plot
    :param x_label: Optional x-axis label
    :param y_label: Optional y-axis label
    :return: Made predictions
    :raises RuntimeError: If fewer than `day_interval` data points are given
    """
    given_data_size = data.size
    if given_data_size < day_interval:
        # Fixed: the format arguments were swapped, so the message reported the
        # given size as the expectation and the expectation as the given size.
        raise RuntimeError(
            'Not enough data is provided. Expected at least {0} number of data, but given {1}.'.format(
                day_interval, given_data_size))
    predictions = make_series_of_prediction(_model, _data_extractor, data[-day_interval:], prediction_count)
    original_x = np.arange(0, given_data_size)
    prediction_x = np.arange(given_data_size, given_data_size + prediction_count)
    plt.plot(original_x, data, 'go', label='Original data')
    plt.plot(prediction_x, predictions, 'r+', label='Predictions')
    # NOTE: the 'k' format string is overridden by the explicit color kwarg.
    plt.plot(np.concatenate([original_x, prediction_x]), np.concatenate([data, predictions]), 'k', color='grey',
             alpha=0.3)
    if plot_title is not None:
        plt.title(plot_title)
    if x_label is not None:
        plt.xlabel(x_label)
    if y_label is not None:
        plt.ylabel(y_label)
    plt.legend()
    plt.grid()
    plt.show()
    return predictions
def dump_model_and_extractor(_model: Sequential, _data_extractor: DatasetExtractor, main_folder: str,
                             name: str) -> None:
    """
    Dump model under the given main folder
    """
    import pickle
    # Two sequential pickle records in a single file: model first, extractor
    # second (read back in the same order by load_model_and_extractor).
    target_path = '{0}{1}'.format(main_folder, name)
    with open(target_path, 'wb') as handle:
        pickle.dump(_model, handle)
        pickle.dump(_data_extractor, handle)
def load_model_and_extractor(main_folder: str, name: str):
    """
    Load model from the given main folder
    """
    import pickle
    # Read back the two pickle records written by dump_model_and_extractor,
    # in the same order: model first, then extractor.
    with open('{0}{1}'.format(main_folder, name), 'rb') as handle:
        loaded_model = pickle.load(handle)
        loaded_extractor = pickle.load(handle)
    return loaded_model, loaded_extractor
if __name__ == '__main__':
    # Toggle between training a fresh model (True) and loading a previously
    # dumped one (False).
    should_built = True
    day_interval_for_rnn = 10
    resource_folder = '../../resources/'
    total_death_folder = 'Total Deaths/'
    if should_built:
        # Get dataset and create model
        data_extractor, (X_train, X_test, y_train, y_test) = get_dataset_split(resource_folder, total_death_folder,
                                                                               day_interval=day_interval_for_rnn)
        model = build_model(X_train.shape[1])
        history = model.fit(X_train, y_train, epochs=30, batch_size=20, validation_split=0.2)
        # Evaluate model and get metrics
        train_evaluation = model.evaluate(X_train, y_train, verbose=0)
        test_evaluation = model.evaluate(X_test, y_test, verbose=0)
        # Dump model
        dump_model_and_extractor(model, data_extractor, resource_folder, 'corona_rnn_model')
    else:
        # Load model
        model, data_extractor = load_model_and_extractor(resource_folder, 'corona_rnn_model')
    # Forecast 10 days beyond the Japan series and plot the combined curve.
    data = data_extractor.get_specific_country_data('japan.csv')
    predictions = create_time_series(model, data_extractor, data.reshape(-1), day_interval_for_rnn, 10, 'Total Death Count in Japan', 'Day Number', 'Total Death Count')
|
{"hexsha": "4af13eea631942d50f4ef910829fdfe43a4cdcfb", "size": 7747, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/predictor/rnn_predictor.py", "max_stars_repo_name": "yakuza8/coronavirus-timeseries-predictor", "max_stars_repo_head_hexsha": "d6ec329ec36335d2f3dc20c2158dbfa9ddad3526", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-11T20:12:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T07:47:13.000Z", "max_issues_repo_path": "src/predictor/rnn_predictor.py", "max_issues_repo_name": "yakuza8/coronavirus-timeseries-predictor", "max_issues_repo_head_hexsha": "d6ec329ec36335d2f3dc20c2158dbfa9ddad3526", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-14T15:17:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-14T15:17:09.000Z", "max_forks_repo_path": "src/predictor/rnn_predictor.py", "max_forks_repo_name": "yakuza8/coronavirus-timeseries-predictor", "max_forks_repo_head_hexsha": "d6ec329ec36335d2f3dc20c2158dbfa9ddad3526", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-08T06:27:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-03T16:40:40.000Z", "avg_line_length": 43.5224719101, "max_line_length": 172, "alphanum_fraction": 0.698722086, "include": true, "reason": "import numpy", "num_tokens": 1743}
|
r"""
Module for handling Exact Gradient corrected Screened (EGS) Potential.
Potential
*********
The exact-gradient screened (EGS) potential introduces new parameters that can be easily calculated from initial inputs.
Density gradient corrections to the free energy functional lead to the first parameter, :math:`\nu`,
.. math::
\nu = - \frac{3\lambda}{\pi^{3/2}} \frac{4\pi \bar{e}^2 \beta }{\Lambda_{e}} \frac{d}{d\eta} \mathcal I_{-1/2}(\eta),
where :math:`\lambda` is a correction factor; :math:`\lambda = 1/9` for the true gradient corrected Thomas-Fermi model
and :math:`\lambda = 1` for the traditional von Weizsaecker model, :math:`\mathcal I_{-1/2}[\eta_0]` is the
Fermi Integral of order :math:`-1/2`, and :math:`\Lambda_e` is the de Broglie wavelength of the electrons.
In the case :math:`\nu < 1` the EGS potential takes the form
.. math::
U_{ab}(r) = \frac{Z_a Z_b \bar{e}^2 }{2r}\left [ ( 1+ \alpha ) e^{-r/\lambda_-} + ( 1 - \alpha) e^{-r/\lambda_+} \right ],
with
.. math::
\lambda_\pm^2 = \frac{\nu \lambda_{\textrm{TF}}^2}{2b \pm 2b\sqrt{1 - \nu}}, \quad \alpha = \frac{b}{\sqrt{b - \nu}},
where the parameter :math:`b` arises from exchange-correlation contributions, see below.\n
On the other hand :math:`\nu > 1`, the pair potential has the form
.. math::
U_{ab}(r) = \frac{Z_a Z_b \bar{e}^2}{r}\left [ \cos(r/\gamma_-) + \alpha' \sin(r/\gamma_-) \right ] e^{-r/\gamma_+}
with
.. math::
\gamma_\pm^2 = \frac{\nu\lambda_{\textrm{TF}}^2}{\sqrt{\nu} \pm b}, \quad \alpha' = \frac{b}{\sqrt{\nu - b}}.
Neglect of exchange-correlational effects leads to :math:`b = 1` otherwise
.. math::
b = 1 - \frac{2}{8} \frac{1}{k_{\textrm{F}}^2 \lambda_{\textrm{TF}}^2 } \left [ h\left ( \Theta \right ) - 2 \Theta h'(\Theta) \right ]
where :math:`k_{\textrm{F}}` is the Fermi wavenumber and :math:`\Theta = (\beta E_{\textrm{F}})^{-1}` is the electron
degeneracy parameter calculated from the Fermi energy.
.. math::
h \left ( \Theta \right) = \frac{N(\Theta)}{D(\Theta)}\tanh \left( \Theta^{-1} \right ),
.. math::
N(\Theta) = 1 + 2.8343\Theta^2 - 0.2151\Theta^3 + 5.2759\Theta^4,
.. math::
D \left ( \Theta \right ) = 1 + 3.9431\Theta^2 + 7.9138\Theta^4.
Force Error
***********
The EGS potential is always smaller than pure Yukawa. Therefore the force error is chosen to be the same as Yukawa's
.. math::
\Delta F = \frac{q^2}{4 \pi \epsilon_0} \sqrt{\frac{2 \pi n}{\lambda_{-}}}e^{-r_c/\lambda_-}
This overestimates it, but it doesn't matter.
Potential Attributes
********************
The elements of :attr:`sarkas.potentials.core.Potential.pot_matrix` are
if :attr:`sarkas.core.Parameters.nu` less than 1:
.. code-block::
pot_matrix[0] = q_iq_j/4pi eps0
pot_matrix[1] = nu
pot_matrix[2] = 1 + alpha
pot_matrix[3] = 1 - alpha
pot_matrix[4] = 1.0 / lambda_minus
pot_matrix[5] = 1.0 / lambda_plus
else
.. code-block::
pot_matrix[0] = q_iq_j/4pi eps0
pot_matrix[1] = nu
pot_matrix[2] = 1.0
pot_matrix[3] = alpha prime
pot_matrix[4] = 1.0 / gamma_minus
pot_matrix[5] = 1.0 / gamma_plus
"""
from numba import jit
from numba.core.types import float64, UniTuple
from numpy import cos, cosh, exp, pi, sin, sqrt, tanh, zeros
from ..utilities.exceptions import AlgorithmError
from ..utilities.fdints import fdm3h
from ..utilities.maths import force_error_analytic_lcl
def update_params(potential, params):
    """
    Assign potential dependent simulation's parameters.

    Parameters
    ----------
    potential : :class:`sarkas.potentials.core.Potential`
        Class handling potential form.

    params : :class:`sarkas.core.Parameters`
        Simulation's parameters.

    Raises
    ------
    `~sarkas.utilities.exceptions.AlgorithmError`
        If the chosen algorithm is pppm.

    """
    # lambda factor : 1 = von Weizsaecker, 1/9 = Thomas-Fermi
    if not hasattr(potential, "lmbda"):
        potential.lmbda = 1.0 / 9.0

    # eq. (14) of Ref. [1]_
    # Fixed: this previously assigned to `params.nu` while the next statement
    # did `potential.nu *= ...` — the attribute being multiplied was never
    # initialized, and the rest of this module reads `potential.nu`.
    potential.nu = 3.0 / pi**1.5 * potential.electron_landau_length / potential.electron_deBroglie_wavelength
    dIdeta = -3.0 / 2.0 * fdm3h(potential.electron_dimensionless_chemical_potential)
    potential.nu *= potential.lmbda * dIdeta
    # Keep the Parameters object in sync with the final value.
    params.nu = potential.nu

    # Degeneracy Parameter
    theta = potential.electron_degeneracy_parameter
    if 0.1 <= theta <= 12:
        # Regime of validity of the following approximation Perrot et al. Phys Rev A 302619 (1984)
        # eq. (33) of Ref. [1]_
        Ntheta = 1.0 + 2.8343 * theta**2 - 0.2151 * theta**3 + 5.2759 * theta**4
        # eq. (34) of Ref. [1]_
        Dtheta = 1.0 + 3.9431 * theta**2 + 7.9138 * theta**4
        # eq. (32) of Ref. [1]_
        h = Ntheta / Dtheta * tanh(1.0 / theta)
        # grad h(x)
        gradh = -(Ntheta / Dtheta) / cosh(1 / theta) ** 2 / (theta**2) - tanh(  # derivative of tanh(1/x)
            1.0 / theta
        ) * (
            Ntheta * (7.8862 * theta + 31.6552 * theta**3) / Dtheta**2  # derivative of 1/Dtheta
            + (5.6686 * theta - 0.6453 * theta**2 + 21.1036 * theta**3) / Dtheta
        )  # derivative of Ntheta
        # eq.(31) of Ref. [1]_
        b = 1.0 - 2.0 / (8.0 * (potential.electron_Fermi_wavenumber * potential.electron_TF_wavelength) ** 2) * (
            h - 2.0 * theta * gradh
        )
    else:
        # Outside the fit's validity range: neglect exchange-correlation.
        b = 1.0

    potential.b = b

    # Monotonic decay
    if potential.nu <= 1:
        # eq. (29) of Ref. [1]_
        potential.lambda_p = potential.electron_TF_wavelength * sqrt(
            potential.nu / (2.0 * b + 2.0 * sqrt(b**2 - potential.nu))
        )
        potential.lambda_m = potential.electron_TF_wavelength * sqrt(
            potential.nu / (2.0 * b - 2.0 * sqrt(b**2 - potential.nu))
        )
        potential.alpha = b / sqrt(b - potential.nu)

    # Oscillatory behavior
    if potential.nu > 1:
        # eq. (29) of Ref. [1]_
        potential.gamma_m = potential.electron_TF_wavelength * sqrt(potential.nu / (sqrt(potential.nu) - b))
        potential.gamma_p = potential.electron_TF_wavelength * sqrt(potential.nu / (sqrt(potential.nu) + b))
        potential.alphap = b / sqrt(potential.nu - b)

    # Per-pair parameter matrix; the layout is documented in the module docstring.
    potential.matrix = zeros((7, potential.num_species, potential.num_species))
    potential.matrix[1, :, :] = potential.nu
    for i, q1 in enumerate(potential.species_charges):
        for j, q2 in enumerate(potential.species_charges):
            if potential.nu <= 1:
                # The 1/2 prefactor belongs to the monotonic (nu <= 1) form.
                potential.matrix[0, i, j] = q1 * q2 / (2.0 * potential.fourpie0)
                potential.matrix[2, i, j] = 1.0 + potential.alpha
                potential.matrix[3, i, j] = 1.0 - potential.alpha
                potential.matrix[4, i, j] = 1.0 / potential.lambda_m
                potential.matrix[5, i, j] = 1.0 / potential.lambda_p
            if potential.nu > 1:
                potential.matrix[0, i, j] = q1 * q2 / potential.fourpie0
                potential.matrix[2, i, j] = 1.0
                potential.matrix[3, i, j] = potential.alphap
                potential.matrix[4, i, j] = 1.0 / potential.gamma_m
                potential.matrix[5, i, j] = 1.0 / potential.gamma_p

    potential.matrix[6, :, :] = potential.a_rs

    if potential.method == "pppm":
        raise AlgorithmError("pppm algorithm not implemented yet.")

    potential.force = egs_force
    # EGS is always smaller than pure Yukawa.
    # Therefore the force error is chosen to be the same as Yukawa's.
    # This overestimates it, but it doesn't matter.
    # The rescaling constant is sqrt ( na^4 ) = sqrt( 3 a/(4pi) )
    potential.force_error = force_error_analytic_lcl(
        potential.type, potential.rc, potential.matrix, sqrt(3.0 * potential.a_ws / (4.0 * pi))
    )
@jit(UniTuple(float64, 2)(float64, float64[:]), nopython=True)
def egs_force(r_in, pot_matrix):
    """
    Numba'd function to calculate the potential and force between particles using the EGS Potential.

    Parameters
    ----------
    r_in : float
        Particles' distance.

    pot_matrix : array
        EGS potential parameters for the interacting pair: a 1D slice of
        length 7 of :attr:`sarkas.core.Parameters.matrix` (the code below
        indexes entries 0 through 6). \n
        Monotonic case (``pot_matrix[1] = nu <= 1``):
        ``[prefactor, nu, 1 + alpha, 1 - alpha, 1/lambda_minus, 1/lambda_plus, short-range cutoff]``.
        For ``nu > 1`` entries 2-5 hold the oscillatory parameters instead
        (see the module docstring).

    Returns
    -------
    U : float
        Potential.

    fr : float
        Force.

    Examples
    --------
    >>> from numpy import array
    >>> # Monotonic branch (nu = 0.5 <= 1) with simple parameter values.
    >>> pot_mat = array([2.0, 0.5, 1.5, 0.5, 1.0, 2.0, 1.0e-14])
    >>> u, f = egs_force(2.0, pot_mat)
    >>> round(u, 6), round(f, 6)
    (0.212161, 0.327399)

    """
    # Short-range cutoff: below rs the force is evaluated at rs itself.
    rs = pot_matrix[6]
    # Branchless programming
    r = r_in * (r_in >= rs) + rs * (r_in < rs)

    # nu = pot_matrix[1]
    if pot_matrix[1] <= 1.0:
        # Monotonic (double-exponential) form.
        temp1 = pot_matrix[2] * exp(-r * pot_matrix[4])
        temp2 = pot_matrix[3] * exp(-r * pot_matrix[5])
        # Potential
        U = (temp1 + temp2) * pot_matrix[0] / r
        # Force
        fr = U / r + pot_matrix[0] * (temp1 * pot_matrix[4] + temp2 * pot_matrix[5]) / r
    else:
        # Oscillatory (damped cosine/sine) form.
        coskr = cos(r * pot_matrix[4])
        sinkr = sin(r * pot_matrix[4])
        expkr = pot_matrix[0] * exp(-r * pot_matrix[5])
        U = (coskr + pot_matrix[3] * sinkr) * expkr / r
        fr = U / r  # derivative of 1/r
        fr += U * pot_matrix[5]  # derivative of exp
        fr += pot_matrix[4] * (sinkr - pot_matrix[3] * coskr) * expkr / r

    return U, fr
def pretty_print_info(potential):
    """
    Print potential specific parameters in a user-friendly way.

    Parameters
    ----------
    potential : :class:`sarkas.potentials.core.Potential`
        Class handling potential form.

    """
    print(f"kappa = {potential.a_ws / potential.screening_length:.4f}")
    print(f"SGA Correction factor: lmbda = {potential.lmbda:.4f}")
    print(f"nu = {potential.nu:.4f}")
    # Match the branch condition used in update_params: the exponential-decay
    # attributes (lambda_p/lambda_m/alpha) are defined for nu <= 1 and the
    # oscillatory ones (gamma_p/gamma_m/alphap) only for nu > 1.  The original
    # `nu < 1` test hit the else-branch at nu == 1 exactly and raised
    # AttributeError on the never-set gamma_p.
    if potential.nu <= 1:
        print("Exponential decay:")
        print(f"lambda_p = {potential.lambda_p:.6e} ", end="")
        print("[cm]" if potential.units == "cgs" else "[m]")
        print(f"lambda_m = {potential.lambda_m:.6e} ", end="")
        print("[cm]" if potential.units == "cgs" else "[m]")
        print(f"alpha = {potential.alpha:.4f}")
        print(f"b = {potential.b:.4f}")
    else:
        print("Oscillatory potential:")
        print(f"gamma_p = {potential.gamma_p:.6e} ", end="")
        print("[cm]" if potential.units == "cgs" else "[m]")
        print(f"gamma_m = {potential.gamma_m:.6e} ", end="")
        print("[cm]" if potential.units == "cgs" else "[m]")
        print(f"alpha = {potential.alphap:.4f}")
        print(f"b = {potential.b:.4f}")
    print(f"Gamma_eff = {potential.coupling_constant:.2f}")
|
{"hexsha": "739349473f95f62aef493a9d2af00bfae33ca620", "size": 11192, "ext": "py", "lang": "Python", "max_stars_repo_path": "sarkas/potentials/egs.py", "max_stars_repo_name": "lucianogsilvestri/sarkas", "max_stars_repo_head_hexsha": "f4ab00014d09976561fbd4349b9d0610e47a61e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sarkas/potentials/egs.py", "max_issues_repo_name": "lucianogsilvestri/sarkas", "max_issues_repo_head_hexsha": "f4ab00014d09976561fbd4349b9d0610e47a61e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sarkas/potentials/egs.py", "max_forks_repo_name": "lucianogsilvestri/sarkas", "max_forks_repo_head_hexsha": "f4ab00014d09976561fbd4349b9d0610e47a61e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5301587302, "max_line_length": 139, "alphanum_fraction": 0.6021265189, "include": true, "reason": "from numpy,from scipy,from numba", "num_tokens": 3580}
|
(*
* Copyright (c) 2017-present,
* Programming Research Laboratory (ROPAS), Seoul National University, Korea
* This software is distributed under the term of the BSD-3 clause license.
*)
(* Value equivalence across monads: an identity-monad computation x agrees
   with an access-monad computation y when x equals y's value component. *)
Definition veq T (x : MId.m T) (y : Acc.MAcc.m T) : Prop := x = Acc.get_v y.
(* bind preserves veq: binding veq-related computations with pointwise
   veq-related continuations yields veq-related results. *)
Lemma bind_veq :
  forall T (x1 : MId.m T) (x2 : Acc.MAcc.m T)
     U (f1 : T -> MId.m U) (f2 : T -> Acc.MAcc.m U)
     (Hx : veq x1 x2) (Hf : forall x, veq (f1 x) (f2 x)),
    veq (MId.bind x1 f1) (Acc.MAcc.bind x2 f2).
Proof.
  i; unfold MId.bind, Acc.MAcc.bind.
  rewrite Hf, Hx. destruct x2; s. destruct (f2 t); s. by auto.
Qed.
(* Tactic: reduces a veq goal over bind to its two premises (argument veq and
   pointwise continuation veq) via bind_veq; closes ret goals by reflexivity. *)
Ltac dest_veq :=
  match goal with
  | |- veq (MId.bind ?x1 ?f1) (Acc.MAcc.bind ?x2 ?f2) =>
    let Hx := fresh "Hx" in
    let Hf := fresh "Hf" in
    assert (veq x1 x2) as Hx
    ; [| assert (forall x, veq (f1 x) (f2 x)) as Hf
         ; [|eapply bind_veq; [by apply Hx|by apply Hf]] ]
  | |- veq (MId.ret ?x1) (Acc.MAcc.ret ?y1) => reflexivity
  end.
(* Join an extra memory y onto the value component of an MAcc computation,
   leaving the recorded access set unchanged. *)
Definition mem_join (x : Acc.MAcc.m Mem.t) (y : Mem.t) :=
(Mem.join (Acc.get_v x) y, Acc.get_acc x).
(* MAcc.bind is a proper morphism for the MAcc equivalences induced by the
   element equivalences teq and ueq. *)
Lemma bind_mor
T (teq : T -> T -> Prop) (Hteq : DLat.zb_equiv teq)
U (ueq : U -> U -> Prop) (Hueq : DLat.zb_equiv ueq) :
Proper (Acc.MAcc.eq Hteq
==> (teq ==> Acc.MAcc.eq Hueq)
==> Acc.MAcc.eq Hueq)
(Acc.MAcc.bind (A:=T) (B:=U)).
Proof.
intros t1 t2 Ht u1 u2 Hu. unfold Acc.MAcc.bind, Acc.MAcc.eq.
destruct t1 as [t1 a1], t2 as [t2 a2].
remember (u1 t1) as v1; destruct v1 as [v1 a1'].
remember (u2 t2) as v2; destruct v2 as [v2 a2'].
assert (Acc.MAcc.eq Hueq (v1, a1') (v2, a2')) as Heqv.
{ rewrite Heqv1, Heqv2. by apply Hu, Ht. }
split; [by apply Heqv|apply Acc.join_eq; [by apply Ht|by apply Heqv]].
Qed.
(* MAcc.ret is a proper morphism: equivalent inputs yield MAcc-equivalent
   results (the access components are both bottom, hence reflexively equal). *)
Lemma ret_mor T (teq : T -> T -> Prop) (Hteq : DLat.zb_equiv teq) :
Proper (teq ==> Acc.MAcc.eq Hteq) (Acc.MAcc.ret (A:=T)).
Proof.
intros x1 x2 Hx. unfold Acc.MAcc.ret, Acc.MAcc.eq.
split; [by auto|by apply Acc.eq_refl].
Qed.
(* Congruence for if-over-decidability: when P and Q are equivalent
   propositions, branching on their decision procedures with MAcc-equivalent
   branch expressions yields MAcc-equivalent results. *)
Lemma if_dec_mor
P (P_dec : sumbool P (~ P)) Q (Q_dec : sumbool Q (~ Q))
(HPQ1 : P -> Q) (HPQ2 : Q -> P)
T (teq : T -> T -> Prop) (Hteq : DLat.zb_equiv teq) :
forall e1 e1' (He1 : Acc.MAcc.eq Hteq e1 e1')
e2 e2' (He2 : Acc.MAcc.eq Hteq e2 e2'),
Acc.MAcc.eq Hteq (if P_dec then e1 else e2) (if Q_dec then e1' else e2').
Proof.
i. destruct P_dec; destruct Q_dec.
- by auto.
- elim f. by auto.
- elim f. by auto.
- by auto.
Qed.
(* Same congruence, but for decisions negated via Sumbool.sumbool_not. *)
Lemma if_dec_not_mor
P (P_dec : sumbool P (~ P)) Q (Q_dec : sumbool Q (~ Q))
(HPQ1 : P -> Q) (HPQ2 : Q -> P)
T (teq : T -> T -> Prop) (Hteq : DLat.zb_equiv teq) :
forall e1 e1' (He1 : Acc.MAcc.eq Hteq e1 e1')
e2 e2' (He2 : Acc.MAcc.eq Hteq e2 e2'),
Acc.MAcc.eq Hteq
(if Sumbool.sumbool_not P (~ P) P_dec then e1 else e2)
(if Sumbool.sumbool_not Q (~ Q) Q_dec then e1' else e2').
Proof.
i. unfold Sumbool.sumbool_not.
destruct P_dec; destruct Q_dec.
- by auto.
- elim f. by auto.
- elim f. by auto.
- by auto.
Qed.
(* Pointwise list equality with Leibniz eq is reflexive. *)
Lemma eqlistA_eq_refl T : forall (l : list T), SetoidList.eqlistA eq l l.
Proof. induction l; [by auto|constructor; by auto]. Qed.
(* Lifting an element equivalence to pointwise list equivalence preserves
   the zb_equiv structure (reflexivity, symmetry, transitivity). *)
Definition list_zb_eq T (teq : T -> T -> Prop) (Hteq : DLat.zb_equiv teq) :
DLat.zb_equiv (SetoidList.eqlistA teq).
Proof.
constructor.
- induction x.
+ by constructor.
+ constructor; [by apply (DLat.zb_equiv_refl Hteq)|by auto].
- induction 1.
+ by constructor.
+ constructor; [by apply (DLat.zb_equiv_sym Hteq)|by apply IHHeq].
- intros x y z Hxy.
generalize x y Hxy z; clear x y Hxy z.
induction 1; inversion 1; subst.
+ by constructor.
+ constructor; [by apply (DLat.zb_equiv_trans Hteq) with x'|by apply IHHxy].
Qed.
(* Specialization of list_zb_eq to abstract values. *)
Definition list_val_zb_eq : DLat.zb_equiv (SetoidList.eqlistA Val.eq) :=
list_zb_eq Val.zb_eq.
(* Access soundness for a unary memory transformer f: joining a memory m'
   that is disjoint from f's recorded accesses commutes with f. *)
Definition aeqm1 (f : Mem.t -> Acc.MAcc.m Mem.t) : Prop :=
forall m m' (Hm' : disjoint m' (Acc.get_acc (f m))),
Acc.MAcc.eq Mem.zb_eq (f (Mem.join m m')) (mem_join (f m) m').
(* Binary variant of aeqm1: the disjoint memory m' is joined onto both
   arguments simultaneously. *)
Definition aeqm2 (f : Mem.t -> Mem.t -> Acc.MAcc.m Mem.t) : Prop :=
forall m1 m2 m' (Hm' : disjoint m' (Acc.get_acc (f m1 m2))),
Acc.MAcc.eq Mem.zb_eq
(f (Mem.join m1 m') (Mem.join m2 m'))
(mem_join (f m1 m2) m').
(* A unary sound transformer is sound as a binary one ignoring its second
   argument... *)
Lemma aeqm_deg1 : forall f (Hf : aeqm1 f), aeqm2 (fun m _ => f m).
Proof. unfold aeqm1, aeqm2; i. by apply Hf. Qed.
(* ...or ignoring its first argument. *)
Lemma aeqm_deg2 : forall f (Hf : aeqm1 f), aeqm2 (fun _ m => f m).
Proof. unfold aeqm1, aeqm2; i. by apply Hf. Qed.
(* aeqm2 is stable under swapping the two memory arguments. *)
Lemma aeqm2_sym : forall f (Hf : aeqm2 f), aeqm2 (fun m m' => f m' m).
Proof. unfold aeqm2; i. by apply Hf. Qed.
(* Access soundness for a value computation v over memory: joining a
   disjoint memory m' does not change v's result (up to t_eq). *)
Definition aeqv T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq)
(f : Mem.t -> Acc.MAcc.m T) : Prop :=
forall m m' (Hm' : disjoint m' (Acc.get_acc (f m))),
Acc.MAcc.eq Ht (f (Mem.join m m')) (f m).
(* Binding a memory-independent continuation f after a sound value
   computation v preserves aeqv. *)
Lemma aeqv_1
T (t_eq : T -> T -> Prop) (Hteq : DLat.zb_equiv t_eq)
U (u_eq : U -> U -> Prop) (Hueq : DLat.zb_equiv u_eq) :
forall (f : T -> Acc.MAcc.m U)
(Hf_mor : Proper (t_eq ==> Acc.MAcc.eq Hueq) f)
v (Hv : aeqv Hteq v),
aeqv Hueq (fun m => Acc.MAcc.bind (v m) f).
Proof. {
unfold aeqv; i.
specialize (Hv m m').
remember (v m) as v1; destruct v1 as [v1 a1].
remember (v (Mem.join m m')) as v2; destruct v2 as [v2 a2].
simpl in *.
remember (f v1) as fv1; destruct fv1 as [fv1 fa1].
remember (f v2) as fv2; destruct fv2 as [fv2 fa2].
exploit Hv
; [eapply disjoint_mono; [|by apply Hm']; s; by apply Acc.join_left|].
intros [Hv21 Ha12].
assert (Acc.MAcc.eq Hueq (fv1, fa1) (fv2, fa2)) as Hf.
{ rewrite Heqfv1, Heqfv2. by apply Hf_mor, (DLat.zb_equiv_sym Hteq). }
constructor.
- by apply (DLat.zb_equiv_sym Hueq), Hf.
- apply Acc.join_eq; [by auto|by apply Acc.eq_sym, Hf].
} Qed.
(* Variant of aeqv_1 where the continuation f also reads the memory; f
   must itself be access-sound in its memory argument. *)
Lemma aeqv_2
T (t_eq : T -> T -> Prop) (Hteq : DLat.zb_equiv t_eq)
U (u_eq : U -> U -> Prop) (Hueq : DLat.zb_equiv u_eq) :
forall (f : Mem.t -> T -> Acc.MAcc.m U)
(Hf : forall x, aeqv Hueq (fun m => f m x))
(Hf_mor : Proper (Mem.eq ==> t_eq ==> Acc.MAcc.eq Hueq) f)
v (Hv : aeqv Hteq v),
aeqv Hueq (fun m => Acc.MAcc.bind (v m) (f m)).
Proof. {
unfold aeqv; i.
specialize (Hv m m').
remember (v m) as v1; destruct v1 as [v1 a1].
remember (v (Mem.join m m')) as v2; destruct v2 as [v2 a2].
simpl in *.
remember (f m v1) as fv1; destruct fv1 as [fv1 fa1].
remember (f (Mem.join m m') v2) as fv2; destruct fv2 as [fv2 fa2].
exploit Hv
; [eapply disjoint_mono; [|by apply Hm']; s; by apply Acc.join_left|].
intros [Hv21 Ha12].
assert (Acc.MAcc.eq Hueq (fv2, fa2) (fv1, fa1)) as Hf'.
{ rewrite Heqfv1, Heqfv2.
apply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Hueq))
with (f (Mem.join m m') v1); [apply Hf_mor; [by apply Mem.eq_refl|by auto]|].
apply Hf.
rewrite <- Heqfv1; eapply disjoint_right; by apply Hm'. }
constructor.
- by apply Hf'.
- apply Acc.join_eq; [by auto|by apply Hf'].
} Qed.
(* Constant computations are trivially access-sound. *)
Lemma aeqv_3 T (t_eq : T -> T -> Prop) (Hteq : DLat.zb_equiv t_eq) :
forall x, aeqv Hteq (fun _ => x).
Proof.
unfold aeqv; i.
by apply (DLat.zb_equiv_refl (Acc.MAcc.eq_equiv Hteq)).
Qed.
(* Access soundness for a transformer on monadic memories: a disjoint m'
   joined onto the input value component commutes with f. *)
Definition aeqmm (f : Acc.MAcc.m Mem.t -> Acc.MAcc.m Mem.t) : Prop :=
forall m m' (Hm' : disjoint m' (Acc.get_acc (f m))),
Acc.MAcc.eq Mem.zb_eq (f (mem_join m m')) (mem_join (f m) m').
(* Access soundness for a function of a memory and a monadic value: joining
   a disjoint m' onto the memory leaves the result unchanged. *)
Definition aeqmv T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq)
(f : Mem.t -> Acc.MAcc.m T -> Acc.MAcc.m T) : Prop :=
forall m m' v (Hm' : disjoint m' (Acc.get_acc (f m v))),
Acc.MAcc.eq Ht (f (Mem.join m m') v) (f m v).
(* Fixing the monadic-value argument of an aeqmv-sound function yields an
   aeqv-sound computation. *)
Lemma aeqmv_1 T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq) :
forall (f : Mem.t -> Acc.MAcc.m T -> Acc.MAcc.m T) (Hf : aeqmv Ht f) v,
aeqv Ht (fun m => f m v).
Proof. unfold aeqmv, aeqv; i. by apply Hf, Hm'. Qed.
(* Composing an aeqmm-sound f after a g that commutes (definitionally) with
   joins produces an aeqm1-sound transformer. *)
Lemma aeqm1_1 :
forall (f : Acc.MAcc.m Mem.t -> Acc.MAcc.m Mem.t) (Hf : aeqmm f)
g (Hg : forall m m', g (Mem.join m m') = mem_join (g m) m'),
aeqm1 (fun m => f (g m)).
Proof. {
unfold aeqmm, aeqm1; i.
eapply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Mem.zb_eq))
; [|by apply Hf].
rewrite Hg; apply (DLat.zb_equiv_refl (Acc.MAcc.eq_equiv Mem.zb_eq)).
} Qed.
(* Binding two aeqm1-sound memory transformers (f after v) is aeqm1-sound;
   f must additionally be a morphism for memory equivalence. *)
Lemma bind_mem1 :
forall f (Hf : aeqm1 f)
(Hf_mor : Proper (Mem.eq ==> Acc.MAcc.eq Mem.zb_eq) f)
v (Hv : aeqm1 v),
aeqm1 (fun m => Acc.MAcc.bind (v m) f).
Proof. {
unfold aeqm1; i.
unfold Acc.MAcc.bind.
remember (v (Mem.join m m')) as va1; destruct va1 as [v1 a1].
remember (f v1) as va2; destruct va2 as [v2 a2].
remember (v m) as va1'; destruct va1' as [v1' a1'].
remember (f v1') as va2'; destruct va2' as [v2' a2'].
s in Hm'. rewrite <- Heqva2' in Hm'. s in Hm'.
assert (Acc.MAcc.eq Mem.zb_eq (v1, a1) (mem_join (v1', a1') m')) as Heq1.
{ rewrite Heqva1, Heqva1'. apply Hv.
rewrite <- Heqva1'. s.
eapply disjoint_left; by apply Hm'. }
destruct Heq1 as [Heq11 Heq12]. simpl in Heq11, Heq12.
assert (Acc.MAcc.eq Mem.zb_eq (v2, a2) (mem_join (v2', a2') m')) as Heq2.
{ rewrite Heqva2, Heqva2'.
apply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Mem.zb_eq))
with (f (Mem.join v1' m')).
- by apply Hf_mor.
- apply Hf.
rewrite <- Heqva2'. s.
eapply disjoint_right; by apply Hm'. }
destruct Heq2 as [Heq21 Heq22]; simpl in Heq21, Heq22.
unfold mem_join. s. split.
- by auto.
- by apply Acc.join_eq.
} Qed.
(* Like bind_mem1, but the continuation is a binary transformer that also
   receives the initial memory m. *)
Lemma bind_mem2 :
forall f (Hf : aeqm2 f)
(Hf_mor : Proper (Mem.eq ==> Mem.eq ==> Acc.MAcc.eq Mem.zb_eq) f)
v (Hv : aeqm1 v),
aeqm1 (fun m => Acc.MAcc.bind (v m) (fun m' => f m' m)).
Proof. {
unfold aeqm1; i.
unfold Acc.MAcc.bind.
remember (v (Mem.join m m')) as va1; destruct va1 as [v1 a1].
remember (f v1 (Mem.join m m')) as va2; destruct va2 as [v2 a2].
remember (v m) as va1'; destruct va1' as [v1' a1'].
remember (f v1' m) as va2'; destruct va2' as [v2' a2'].
s in Hm'. rewrite <- Heqva2' in Hm'. s in Hm'.
assert (Acc.MAcc.eq Mem.zb_eq (v1, a1) (mem_join (v1', a1') m')) as Heq1.
{ rewrite Heqva1, Heqva1'. apply Hv.
rewrite <- Heqva1'. s.
eapply disjoint_left; by apply Hm'. }
destruct Heq1 as [Heq11 Heq12]. simpl in Heq11, Heq12.
assert (Acc.MAcc.eq Mem.zb_eq (v2, a2) (mem_join (v2', a2') m')) as Heq2.
{ rewrite Heqva2, Heqva2'.
apply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Mem.zb_eq))
with (f (Mem.join v1' m') (Mem.join m m')).
- apply Hf_mor; [by auto|by apply Mem.eq_refl].
- apply Hf.
rewrite <- Heqva2'. s.
eapply disjoint_right; by apply Hm'. }
destruct Heq2 as [Heq21 Heq22]; simpl in Heq21, Heq22.
unfold mem_join. s. split.
- by auto.
- by apply Acc.join_eq.
} Qed.
(* Binding an aeqm1-sound v (on the first memory) with an aeqm2-sound f
   (applied to the second memory and v's result) is aeqm2-sound. *)
Lemma bind_mem3 :
forall (v : Mem.t -> Acc.MAcc.m Mem.t) (f : Mem.t -> Mem.t -> Acc.MAcc.m Mem.t)
(Hv : aeqm1 v) (Hf : aeqm2 f)
(Hf_mor : Proper (Mem.eq ==> Mem.eq ==> Acc.MAcc.eq Mem.zb_eq) f),
aeqm2 (fun m1 m2 => Acc.MAcc.bind (v m1) (fun m3 => f m2 m3)).
Proof.
unfold aeqm1, aeqm2; i.
exploit (Hv m1 m').
{ destruct (v m1). simpl in *.
destruct (f m2 t). simpl in Hm'.
eapply disjoint_left. by apply Hm'. }
clear Hv; intro Hv.
remember (v m1). destruct m as [v1 a1].
remember (v (Mem.join m1 m')). destruct m as [v1' a1'].
simpl in *. destruct Hv as [Hvm' Hva'].
exploit (Hf m2 v1 m').
{ destruct (f m2 v1). simpl in *.
eapply disjoint_right. by apply Hm'. }
clear Hf; intro Hf.
assert (Acc.MAcc.eq Mem.zb_eq (f (Mem.join m2 m') v1') (mem_join (f m2 v1) m'))
as Hf'.
{ eapply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Mem.zb_eq)); [|by apply Hf].
apply Hf_mor; [by apply Mem.eq_refl|by auto]. }
remember (f m2 v1). destruct m as [v2 a2].
remember (f (Mem.join m2 m') v1'). destruct m as [v2' a2'].
simpl in *. destruct Hf' as [Hfm' Hfa'].
split; [by auto|by apply Acc.join_eq].
Qed.
(* Binding an aeqv-sound value computation v with a family of aeqm1-sound
   memory transformers f yields an aeqm1-sound transformer. *)
Lemma bind_val1 T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq):
forall f (Hf : forall v, aeqm1 (f v))
(Hf_mor : Proper (t_eq ==> Mem.eq ==> Acc.MAcc.eq Mem.zb_eq) f)
(v : Mem.t -> Acc.MAcc.m T) (Hv : aeqv Ht v),
aeqm1 (fun m => Acc.MAcc.bind (v m) (fun v' => f v' m)).
Proof. {
i. unfold aeqm1, Acc.MAcc.eq. i.
remember (v (Mem.join m m')) as va1; destruct va1 as [v1 a1].
remember (v m) as va2; destruct va2 as [v2 a2].
simpl in Hm'. unfold Acc.MAcc.bind at 1.
remember (f v1 (Mem.join m m')) as ma3; destruct ma3 as [m3 a3].
remember (f v2 m) as ma4; destruct ma4 as [m4 a4].
simpl in Hm'. unfold Acc.MAcc.bind at 1.
unfold mem_join. rewrite <- Heqma4. s.
assert (Acc.MAcc.eq Ht (v1, a1) (v2, a2)) as Heq1.
{ rewrite Heqva1, Heqva2. apply Hv.
rewrite <- Heqva2. s. eapply disjoint_left; by apply Hm'. }
destruct Heq1 as [Heq_v12 Heq_a12].
remember (f v1 m) as ma5; destruct ma5 as [m5 a5].
assert (Acc.MAcc.eq Mem.zb_eq (m5, a5) (m4, a4)) as Heq4.
{ rewrite Heqma5, Heqma4. apply Hf_mor; [by auto|by apply Mem.eq_refl]. }
destruct Heq4 as [Heq_m54 Heq_a54].
assert (Acc.MAcc.eq Mem.zb_eq (m3, a3) (mem_join (m5, a5) m')) as Heq3.
{ rewrite Heqma3, Heqma5. apply Hf.
rewrite <- Heqma5. s.
eapply disjoint_mor
; [by apply Mem.eq_refl|by apply Acc.eq_sym, Heq_a54|].
eapply disjoint_right; by apply Hm'. }
destruct Heq3 as [Heq_m35 Heq_a35]; simpl in Heq_m35, Heq_a35.
split.
- eapply Mem.eq_trans; [by apply Heq_m35|].
apply Mem.join_eq; [by auto|by apply Mem.eq_refl].
- apply Acc.join_eq; [by auto|].
eapply Acc.eq_trans; [by apply Heq_a35|by auto].
} Qed.
(* Binary analogue of bind_val1 for aeqm2-sound continuations. *)
Lemma bind_val2 T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq):
forall f (Hf : forall v, aeqm2 (f v))
(Hf_mor : Proper (t_eq ==> Mem.eq ==> Mem.eq ==> Acc.MAcc.eq Mem.zb_eq) f)
(v : Mem.t -> Acc.MAcc.m T) (Hv : aeqv Ht v),
aeqm2 (fun m m' : Mem.t => Acc.MAcc.bind (v m) (fun v' => f v' m m')).
Proof. {
i. unfold aeqm2, Acc.MAcc.eq. i.
remember (v (Mem.join m1 m')) as va1; destruct va1 as [v1 a1].
remember (v m1) as va2; destruct va2 as [v2 a2].
simpl in Hm'. unfold Acc.MAcc.bind.
remember (f v1 (Mem.join m1 m') (Mem.join m2 m')) as ma3; destruct ma3 as [m3 a3].
remember (f v2 m1 m2) as ma4; destruct ma4 as [m4 a4].
simpl in Hm'.
unfold mem_join. s.
assert (Acc.MAcc.eq Ht (v1, a1) (v2, a2)) as Heq1.
{ rewrite Heqva1, Heqva2. apply Hv.
rewrite <- Heqva2. s. eapply disjoint_left; by apply Hm'. }
destruct Heq1 as [Heq_v12 Heq_a12].
remember (f v1 m1 m2) as ma5; destruct ma5 as [m5 a5].
assert (Acc.MAcc.eq Mem.zb_eq (m5, a5) (m4, a4)) as Heq4.
{ rewrite Heqma5, Heqma4.
apply Hf_mor; [by auto|by apply Mem.eq_refl|by apply Mem.eq_refl]. }
destruct Heq4 as [Heq_m54 Heq_a54].
assert (Acc.MAcc.eq Mem.zb_eq (m3, a3) (mem_join (m5, a5) m')) as Heq3.
{ rewrite Heqma3, Heqma5. apply Hf.
rewrite <- Heqma5. s.
eapply disjoint_mor
; [by apply Mem.eq_refl|by apply Acc.eq_sym, Heq_a54|].
eapply disjoint_right; by apply Hm'. }
destruct Heq3 as [Heq_m35 Heq_a35]; simpl in Heq_m35, Heq_a35.
split.
- eapply Mem.eq_trans; [by apply Heq_m35|].
apply Mem.join_eq; [by auto|by apply Mem.eq_refl].
- apply Acc.join_eq; [by auto|].
eapply Acc.eq_trans; [by apply Heq_a35|by auto].
} Qed.
(* Binding an aeqm1-sound f onto a monadic memory is aeqmm-sound. *)
Lemma bind_mmem :
forall f (Hf : aeqm1 f), aeqmm (fun m_a => Acc.MAcc.bind m_a f).
Proof. {
unfold aeqm1, aeqmm; i.
destruct m as [m a]; s. specialize (Hf m m'). s in Hm'.
destruct (f m) as [fm' fa']. simpl in Hf, Hm'.
remember (f (Mem.join m m')) as fmm; destruct fmm as [fmm' fma']; s.
s in Hf.
assert (disjoint m' fa') as Hdis; [eapply disjoint_right; by apply Hm'|].
specialize (Hf Hdis).
destruct Hf as [Hm Ha].
split; [by auto|apply Acc.join_eq; [by apply Acc.eq_refl|by auto]].
} Qed.
(* Binding a family of aeqv-sound continuations onto a monadic value is
   aeqmv-sound. *)
Lemma bind_mval T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq) :
forall (f : T -> Mem.t -> Acc.MAcc.m T) (Hf : forall v, aeqv Ht (f v)),
aeqmv Ht (fun m acc_a => Acc.MAcc.bind acc_a (fun v => f v m)).
Proof. {
unfold aeqmv, aeqv; i.
unfold Acc.MAcc.bind, Acc.MAcc.ret in *.
destruct v as [v1 a1]. specialize (Hf v1 m m').
remember (f v1 m) as v2; destruct v2 as [v2 a2].
remember (f v1 (Mem.join m m')) as v2'; destruct v2' as [v2' a2'].
exploit Hf; [eapply disjoint_mono; [|by apply Hm']; s; apply Acc.join_right|].
s. intros [Hv Ha]. split; [by apply Hv|].
apply Acc.join_eq; [by apply Acc.eq_refl|by auto].
} Qed.
(* Returning a pure memory function that commutes with join is aeqm1-sound. *)
Lemma ret_mem1 :
forall x (Hx : forall m m', Mem.eq (x (Mem.join m m')) (Mem.join (x m) m')),
aeqm1 (fun m => Acc.MAcc.ret (x m)).
Proof. {
unfold Acc.MAcc.ret, aeqm1. i. split; s; [by apply Hx|by apply Acc.eq_refl].
} Qed.
(* Pre-composing an aeqmm-sound f with ret is aeqm1-sound. *)
Lemma ret_mem2 :
forall (f : Acc.MAcc.m Mem.t -> Acc.MAcc.m Mem.t) (Hf : aeqmm f),
aeqm1 (fun m => f (Acc.MAcc.ret m)).
Proof. i; apply aeqm1_1; by auto. Qed.
(* Binary analogue of ret_mem1 for pure binary memory functions. *)
Lemma ret_mem3 :
forall (x : Mem.t -> Mem.t -> Mem.t)
(Hx : forall m1 m2 m', Mem.eq (x (Mem.join m1 m') (Mem.join m2 m'))
(Mem.join (x m1 m2) m')),
aeqm2 (fun m1 m2 => Acc.MAcc.ret (x m1 m2)).
Proof. {
unfold Acc.MAcc.ret, aeqm2. i. split; s; [by apply Hx|by apply Acc.eq_refl].
} Qed.
(* Folding an accumulator-extensive, per-element access-sound f over a
   location set preserves aeqmm soundness. *)
Lemma fold_access_sound :
forall f s (Hf_ext : forall e m, Acc.le (Acc.get_acc m) (Acc.get_acc (f e m)))
(Hf_access_sound : forall e (He : PowLoc.mem e s = true), aeqmm (f e))
(Hf_mor : forall e,
Proper
(Acc.MAcc.eq Mem.zb_eq ==> Acc.MAcc.eq Mem.zb_eq)
(f e)),
aeqmm (PowLoc.fold f s).
Proof.
i. unfold aeqmm. i. generalize Hm'; clear Hm'.
apply PowLoc.fold2_5 with (f1:=f) (f2:=f) (i1:=mem_join m m') (i2:=m); i
; [|by apply (DLat.zb_equiv_refl (Acc.MAcc.eq_equiv Mem.zb_eq))].
apply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Mem.zb_eq))
with (f e (mem_join t2 m')).
- apply Hf_mor, Ht.
eapply disjoint_mono; [by apply Hf_ext|by apply Hm'].
- apply Hf_access_sound; [by auto|by auto].
Qed.
(* As fold_access_sound, but for folds whose step also reads the memory
   (aeqmv soundness). *)
Lemma fold_access_sound' T (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq) :
forall f s (Hf_ext : forall m e v, Acc.le (Acc.get_acc v) (Acc.get_acc (f m e v)))
(Hf_access_sound : forall e (He : PowLoc.mem e s = true), aeqmv Ht (fun m => f m e))
(Hf_mor :
forall m e, Proper (Acc.MAcc.eq Ht ==> Acc.MAcc.eq Ht) (f m e)),
aeqmv Ht (fun m => PowLoc.fold (f m) s).
Proof.
i; unfold aeqmv; i. generalize Hm'; clear Hm'.
apply PowLoc.fold2_5 with (f1:=f (Mem.join m m')) (f2:=f m) (i1:=v) (i2:=v); i
; [|by apply (DLat.zb_equiv_refl (Acc.MAcc.eq_equiv Ht))].
apply (DLat.zb_equiv_trans (Acc.MAcc.eq_equiv Ht))
with (f (Mem.join m m') e t2).
- apply Hf_mor, Ht0.
eapply disjoint_mono; [by apply Hf_ext|by apply Hm'].
- apply Hf_access_sound; [by auto|by auto].
Qed.
(* List folds with aeqv-sound, memory-morphic steps preserve aeqv soundness
   of the accumulator computation. *)
Lemma list_fold_access_sound
(T : Type) (t_eq : T -> T -> Prop) (Ht : DLat.zb_equiv t_eq)
(U : Type) (u_eq : U -> U -> Prop) (Hu : DLat.zb_equiv u_eq) :
forall (l : list U) f (i : Mem.t -> Acc.MAcc.m T)
(Hf : forall i (Hi : aeqv Ht i)
(Hi_mor : Proper (Mem.eq ==> Acc.MAcc.eq Ht) i)
e,
aeqv Ht (fun m : Mem.t => f m e (i m)))
(Hf_mor : Proper (Mem.eq ==> u_eq ==> Acc.MAcc.eq Ht ==> Acc.MAcc.eq Ht) f)
(Hi : aeqv Ht i) (Hi_mor : Proper (Mem.eq ==> Acc.MAcc.eq Ht) i),
aeqv Ht (fun m : Mem.t => list_fold (f m) l (i m)).
Proof.
unfold list_fold; induction l; i.
- s. by auto.
- simpl List.fold_left. apply IHl.
+ i; by apply Hf.
+ by auto.
+ by apply Hf.
+ intros m1 m2 Hm; apply Hf_mor
; [by auto|by apply (DLat.zb_equiv_refl Hu)|by apply Hi_mor].
Qed.
(* Specialization of the previous lemma to aeqm1-sound step functions. *)
Lemma list_fold_access_sound1 U :
forall (l : list U) f
(Hf : forall i (Hi : aeqm1 i) e, aeqm1 (fun m : Mem.t => f m e (i m)))
(i : Mem.t -> Acc.MAcc.m Mem.t) (Hi : aeqm1 i),
aeqm1 (fun m : Mem.t => list_fold (f m) l (i m)).
Proof.
unfold list_fold; induction l; i.
- s. by auto.
- simpl List.fold_left. by apply IHl, Hf.
Qed.
|
{"author": "ropas", "repo": "zooberry", "sha": "17b1cb1a44c2a796d6b7d85c2026b142685d291b", "save_path": "github-repos/coq/ropas-zooberry", "path": "github-repos/coq/ropas-zooberry/zooberry-17b1cb1a44c2a796d6b7d85c2026b142685d291b/spec/Proof/VeqCommon.v"}
|
/*
* Stanford Whole-Body Control Framework http://stanford-wbc.sourceforge.net/
*
* Copyright (C) 2010 The Board of Trustees of The Leland Stanford Junior University. All rights reserved.
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>
*/
/**
\file sejong/wrap_eigen.hpp
\author Roland Philippsen
*/
#ifndef SEJONG_WRAP_EIGEN_HPP
#define SEJONG_WRAP_EIGEN_HPP
// Statement-like macros are wrapped in do { ... } while(0) so each expands
// to exactly one statement. The previous bare `if(p) { ... }` form was
// vulnerable to the dangling-else problem when used in an unbraced
// if/else body; the do/while form also requires the trailing semicolon,
// matching normal statement syntax.
#define SJ_SAFE_DELETE(p) do { if(p) { delete (p); (p) = NULL; } } while(0)
#define SJ_SAFE_DESTROY_WINDOW(p) do { if(p) { (p)->DestroyWindow(); delete (p); (p) = NULL; } } while(0)
#define SJ_SAFE_DELETE_AR(p) do { if(p) { delete [] (p); (p) = NULL; } } while(0)
#define SJ_SAFE_RELEASE(p) do { if(p) { (p)->Release(); (p) = NULL; } } while(0)
#define SJ_ISZERO(x) (fabs(x) < 1.e-6) // zero test for floating point numbers
#define SJ_ISEQUAL(x,y) (fabs((x) - (y)) < 1.e-6) // test for equality of float numbers
#define SJ_ROUND(x) (floor((x) + 0.5)) // floating point number rounding
#define SJ_RAND(l,u) ((double)rand() / RAND_MAX * ((u) - (l)) + (l)) // float random number from interval < l ; u >
// Arguments fully parenthesized so expressions like SJ_RAND_INT(a+1, b+2) expand correctly.
#define SJ_RAND_INT(l,u) (rand() % ((u) - (l)) + (l)) // int random number in interval [l,u) including l, excluding u
#include <Eigen/Geometry>
#include <Eigen/Dense>
#include <vector>
#include <list>
#include <string>
#include <cmath>
#include <ctime>
namespace sejong {
// Convenience aliases over Eigen types used throughout the code base.
typedef Eigen::Transform<double, 3, Eigen::Affine> Transform;
typedef Eigen::Translation3d Translation;
typedef Eigen::Quaternion<double> Quaternion;
typedef Eigen::Matrix<double,2,1> Vect2;
typedef Eigen::Matrix<double,3,1> Vect3;
typedef Eigen::Matrix<double,4,1> Vect4;
typedef Eigen::VectorXd Vector;
typedef Eigen::MatrixXd Matrix;
// Euler angle (Yaw, Pitch, Roll) to Quaternion
void convert(double yaw, double pitch, double roll, sejong::Quaternion& to);
// Quaternion to Euler ZYX
void convert(const sejong::Quaternion& from, double & yaw, double & pitch, double & roll);
// Quaternion to so(3)
void convert(sejong::Quaternion const & from, sejong::Vector & to);
// so(3) to Quaternion
void convert(sejong::Vector const & from, sejong::Quaternion & to);
// sejong::Vector to std::vector
void convert(sejong::Vector const & from, std::vector<double> & to);
// std::vector to sejong::Vector
void convert(std::vector<double> const & from, sejong::Vector & to);
// double array to sejong::Vector
void convert(double const * from, size_t length, sejong::Vector & to);
Quaternion QuatMultiply(const Quaternion & q1, const Quaternion & q2);
bool compare(sejong::Matrix const & lhs, sejong::Matrix const & rhs, double precision);
bool compare(sejong::Quaternion const & lhs, sejong::Quaternion const & rhs, double precision);
double _bind_half_pi(double);
void Copy(const sejong::Vector & sub, double* obj);
void Copy(const double* sub, double* obj, int dim);
void SetArrayZero(double* array, int dim);
double Dot(const double* a, const double * b, int dim);
// Signum returns (-1, 0, or 1) depending on the sign of the argument
template <typename T> int sgn(T val) {
return (T(0) < val) - (val < T(0)) ;
}
}
#endif
|
{"hexsha": "3d0b12fe25ebfb01dc57d68c87a8522695aec4dd", "size": 3703, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "utils/include/utils/wrap_eigen.hpp", "max_stars_repo_name": "junhyeokahn/DracoNodelet", "max_stars_repo_head_hexsha": "0f87331ceaf4fe42f9bab164954c5e9cb9c010f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2019-01-31T13:51:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-13T12:42:09.000Z", "max_issues_repo_path": "utils/include/utils/wrap_eigen.hpp", "max_issues_repo_name": "junhyeokahn/DracoNodelet", "max_issues_repo_head_hexsha": "0f87331ceaf4fe42f9bab164954c5e9cb9c010f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/include/utils/wrap_eigen.hpp", "max_forks_repo_name": "junhyeokahn/DracoNodelet", "max_forks_repo_head_hexsha": "0f87331ceaf4fe42f9bab164954c5e9cb9c010f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-12-05T04:11:49.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-05T04:11:49.000Z", "avg_line_length": 39.8172043011, "max_line_length": 115, "alphanum_fraction": 0.7005130975, "num_tokens": 1009}
|
# coding = utf-8
from copy import deepcopy
import numpy as np
from mykit.core.bandstructure import BandStructure
from mykit.core.dos import Dos
from mykit.core.kmesh import KSYMBOL_LATEX, KmeshError, kpath_decoder
from mykit.core.log import Verbose
def init(*objs, **kwargs):
    '''Convenience factory that dispatches to the appropriate visualizer.

    Args:
        objs: the objects to visualize. Currently only a single
            ``BandStructure`` is supported; a single ``Dos`` or any
            combination of two or more objects is not implemented yet.
        kwargs: keyword arguments forwarded to the visualizer constructor.

    Returns:
        BSVisualizer: when a single ``BandStructure`` is given.

    Raises:
        ValueError: if no object is given.
        NotImplementedError: for object combinations not yet supported.
    '''
    if not objs:
        raise ValueError("should parse at least one object")
    if len(objs) == 1:
        o = objs[0]
        if isinstance(o, BandStructure):
            return BSVisualizer(o, **kwargs)
        if isinstance(o, Dos):
            raise NotImplementedError
    # two or more objects (e.g. band structure + dos pair) not supported yet;
    # the dead `if len(objs) == 2: pass` branch of the original is removed
    raise NotImplementedError
class BSVisualizer(Verbose):
    '''The class for drawing band structure with the help of matplotlib.pyplot.

    Currently only supports drawing one band structure, i.e. 1 Axes in the
    figure, and plots one spin channel at a time.

    Args:
        bs (``BandStructure``): the band structure to plot
        align_vbm (bool): if True, energies are shifted so that the valence
            band maximum is the zero level
        dos (``Dos`` object or dict): density of states to draw with the band
            structure. If a dict is parsed, it will be used as the keyword
            arguments for the ``get_dos`` method of ``BandStructure``.
        proj_style ('dot', 'stripe'): the style to draw the wave projections
        ispin (int): the spin channel to show
        kwargs: keyword arguments to parse to initialize with `pyplot.subplots`

    TODO:
        simultaneous drawing with xmgrace object
    '''
    # accepted values for the proj_style constructor argument
    _SUPPORT_PROJ_STYLE = ['dot', 'stripe']
    def __init__(self, bs, align_vbm=False, dos=None, proj_style='dot', ispin=0,
                 **kwargs):
        assert isinstance(bs, BandStructure)
        # argument type check
        if not bs.isKpath:
            self.print_warn(
                "k-vectors of Band structure do not form a kpath. Plotting anyway...")
        try:
            import matplotlib.pyplot as plt
        except ModuleNotFoundError:
            raise ValueError("Matplotlib is required to use pyplot plotter")
        self._drawDos = False
        if dos is not None:
            self._drawDos = True
            if isinstance(dos, dict):
                # dict is treated as keyword arguments for BandStructure.get_dos
                self._dos = bs.get_dos(**dos)
            elif isinstance(dos, Dos):
                _check_bs_dos_consistency(bs, dos)
                self._dos = deepcopy(dos)
            else:
                raise TypeError(
                    "dos should be a number or Dos objest")
        if proj_style not in self._SUPPORT_PROJ_STYLE:
            raise ValueError("projStyle {} is not supported. {}".format(
                proj_style, self._SUPPORT_PROJ_STYLE))
        # deep copies so later mutation of the inputs does not affect the plot
        self._bs = deepcopy(bs)
        self._xs = bs._generate_kpath_x()
        self._efermi = bs.efermi
        self._nspins = bs.nspins
        self.ispin = ispin
        self.alignAtVbm = align_vbm
        self._drawnKsym = False
        self._projStyle = proj_style
        self._useLabel = False
        # initialize the Figure and Axes
        if self._drawDos:
            # these keywords are fixed by the band+dos layout below, so any
            # user-supplied values are discarded
            kwargs.pop("nrows", None)
            kwargs.pop("ncols", None)
            kwargs.pop("sharey", None)
            self._fig, (self._axbs, self._axdos) = plt.subplots(
                1, 2, gridspec_kw={"width_ratios": [2, 1]}, sharey=True, **kwargs)
            self._fig.subplots_adjust(wspace=0)
        else:
            self._fig, self._axbs = plt.subplots(**kwargs)
        # set band structure axis
        self._axbs.set_xlim([self._xs[0][0], self._xs[-1][-1]])
        # set x tick length to zero to make them invisible
        self._axbs.tick_params(axis=u'x', which=u'both', length=0)
        self._axbs.set_ylabel("Energy ({})".format(bs.unit))
        # set dos axis
        if self._drawDos:
            # hide marks and tick of x axis
            self._axdos.get_xaxis().set_ticks([])
            self._axdos.set_xlim([0.0, np.max(self._dos.dos)*1.1])
            self._draw_total_dos()
    @property
    def alignAtVbm(self):
        '''bool. whether energies are aligned so the VBM is the zero level'''
        return self._alignAtVbm
    @alignAtVbm.setter
    def alignAtVbm(self, newValue):
        if not isinstance(newValue, bool):
            raise TypeError("alignAtVbm should be bool")
        self._alignAtVbm = newValue
    @property
    def drawnSym(self):
        '''bool. If the kpoints symbols have been drawn'''
        return self._drawnKsym
    @property
    def ispin(self):
        '''int. index of spin channel to plot'''
        return self._ispin
    @ispin.setter
    def ispin(self, newValue):
        if not isinstance(newValue, int):
            raise TypeError("ispin should be int")
        elif newValue >= self._nspins:
            raise ValueError(
                "spin channel index overflow. nspins = %d" % self._nspins)
        self._ispin = newValue
    def set_title(self, title, **kwargs):
        '''Set the title of figure

        Wrapper of pyplot.Axes.set_title
        '''
        self._axbs.set_title(title, **kwargs)
    def set_elim(self, bottom, top, **kwargs):
        '''Set the energy limit to show

        Wrapper of Axes.set_ylim

        Args:
            bottom (float): the lower bound of the energy range
            top (float): the upper bound of the energy range
            kwargs: the keyword arguments to parse to Axes.set_ylim
        '''
        self._axbs.set_ylim(bottom=bottom, top=top, **kwargs)
    def _draw_total_dos(self):
        '''draw the total dos on the side Axes
        '''
        dos = self._dos
        # NOTE(review): when aligning at VBM the shift used here is
        # dos.efermi, not the band-structure VBM — confirm this is intended
        # (see also the commented alternative below)
        edos = dos.edos - dos.efermi * int(self.alignAtVbm)
        # self._bs.vbmPerSpin[self.ispin] * int(self.alignAtVbm)
        self._axdos.plot(dos.dos[:, self.ispin], edos, color="k")
        # draw Fermi level
        if self.alignAtVbm:
            self._axdos.axhline(0.0, color="k", linestyle='dashed')
        else:
            self._axdos.axhline(dos.efermi, color="k", linestyle='dashed')
    def draw(self, *bands, **kwargs):
        '''draw the selected bands

        Args:
            bands (int): the indices of bands to plot.
                All bands will be plot by default.
            kwargs: keywords to parse to Axes.plot
        '''
        # iterate for each line segments
        if 'color' not in kwargs:
            kwargs['color'] = 'k'
        bs = self._bs
        xs = self._xs
        b = bs.get_band_indices(*bands)
        for i, (stk, edk) in enumerate(bs.kLineSegs):
            for ib in b:
                # shift eigenvalues by the spin-channel VBM when aligning
                eigen = bs.eigen[self.ispin, stk:edk+1, ib] - \
                    bs.vbmPerSpin[self.ispin] * int(self.alignAtVbm)
                # bs.vbmPerSpin[self.ispin] * int(self.alignAtVbm)
                self._axbs.plot(xs[i], eigen, **kwargs)
            # draw vertical line to separate the different line segments
            if i != len(bs.kLineSegs) - 1:
                self._axbs.axvline(xs[i][-1], lw=2, color="k")
        # draw Fermi level
        if self.alignAtVbm:
            self._axbs.axhline(0.0, color="k", lw=2, linestyle='dashed')
        else:
            self._axbs.axhline(bs.efermi, color="k", linestyle='dashed')
    def mark_ksymbols(self, kpath):
        '''Mark the kpoints symbols on the plot.

        Args
            kpath (str): the kpath string.
                The string will be decoded by `kpath_decoder` to a list of
                kpoints symbols, followed by a consistency check.
                If the check fails, a warning will be prompted and no symbols
                plotted.
        '''
        try:
            ksyms = kpath_decoder(kpath)
        except KmeshError:
            # undecodable kpath string: silently skip drawing symbols
            return
        # each line segment consumes two symbols (start and end)
        if len(ksyms) / 2 != len(self._bs.kLineSegs):
            self.print_warn(
                "kpath string and extracted data are inconsistent. Skip")
            return
        locs = []
        labels = []
        # draw first and last symbol
        for i in [0, -1]:
            ksym = ksyms[i]
            s = KSYMBOL_LATEX.get(ksym, ksym)
            # coord = abs(i)
            # i is 0 or -1, so this picks the very first / very last x value
            coord = self._xs[i][i]
            # self._axes.annotate(s, xy=(coord, 0), xycoords="axes fraction", ha="center")
            locs.append(coord)
            labels.append(s)
        # draw intermediate points
        for i, x in enumerate(self._xs):
            if i == len(self._xs) - 1:
                break
            ksymLeft = ksyms[2*i+1]
            ksymRight = ksyms[2*i+2]
            if ksymLeft == ksymRight:
                s = KSYMBOL_LATEX.get(ksymLeft, ksymLeft)
            else:
                # discontinuous path: show both symbols separated by a bar
                s = KSYMBOL_LATEX.get(ksymLeft, ksymLeft) + "|" + \
                    KSYMBOL_LATEX.get(ksymRight, ksymRight)
            # coord = xs[i][-1] / xs[-1][-1]
            coord = x[-1]
            # self._axes.annotate(s, xy=(coord, 0), xycoords="axes fraction", ha="center")
            locs.append(coord)
            labels.append(s)
        self._axbs.set_xticks(locs)
        self._axbs.set_xticklabels(labels)
        self._drawnKsym = True
    def draw_proj(self, atom, proj, *bands, **kwargs):
        '''draw the wave projection on the band structure diagram

        Args:
            atom (int, str or Iterable): atom selector passed to
                ``sum_atom_proj_comp``
            proj (int, str or Iterable): projector selector passed to
                ``sum_atom_proj_comp``
            bands (int): the indices of bands to draw the projection
            kwargs: keyword argument to parse to pyplot.Axes.scatter or
                pyplot.Axes.fill_between depending on the projStyle set at
                initialization.
        '''
        bs = self._bs
        xs = self._xs
        # scale factor for marker size in 'dot' mode
        amplifier_dot = 150.0
        # use triple band gap as multiplier for stripe mode
        amplifier_stripe = bs.fundGap[self.ispin] * 3
        if not bs.hasProjection:
            raise AttributeError("no projection data is available")
        # get projection data
        pWave = bs.sum_atom_proj_comp(atom, proj, fail_one=False)
        binds = bs.get_band_indices(*bands)
        if 'color' not in kwargs:
            kwargs['color'] = 'k'
        # marker size is controlled by the projection weight, not the caller
        if 's' in kwargs:
            kwargs.pop('s')
        if 'label' in kwargs:
            self._useLabel = True
        # draw DOS
        if self._drawDos:
            dos = self._dos
            edos = dos.edos - dos.efermi * int(self.alignAtVbm)
            pDos = dos.sum_atom_proj_comp(atom, proj, fail_one=False)
            self._axdos.plot(pDos[:, self.ispin], edos, **kwargs)
        # draw band
        for i, (stk, edk) in enumerate(bs.kLineSegs):
            for _j, bi in enumerate(binds):
                eigen = bs.eigen[self.ispin, stk:edk+1, bi] - \
                    bs.vbmPerSpin[self.ispin] * int(self.alignAtVbm)
                if self._projStyle == 'dot':
                    s = pWave[self.ispin, stk:edk+1, bi] * amplifier_dot
                    self._axbs.scatter(xs[i], eigen, s=s, **kwargs)
                if self._projStyle == 'stripe':
                    self._axbs.fill_between(xs[i], eigen,
                                            eigen - pWave[self.ispin, stk:edk+1, bi] * amplifier_stripe, **kwargs)
                # pop the label keyword such that label is only added for once
                if 'label' in kwargs:
                    kwargs.pop('label')
    def show(self, legend=True, tight_layout=False):
        '''Preview the band structure diagram.

        A wrapper for pyplot.legend and pyplot.show
        '''
        import matplotlib.pyplot as plt
        # # hide the xticks if the kpoints symbols are not drawn
        # if not self._drawnKsym:
        #     self._axes.get_xaxis().set_ticks([])
        if self._useLabel and legend:
            plt.legend()
        if tight_layout:
            plt.tight_layout()
        plt.show()
    def export(self, *args, **kwargs):
        '''Wrapper to pyplot.savefig

        TODO:
            export agr file

        Args:
            args, kwargs: arguments parsed to savefig
        '''
        import matplotlib.pyplot as plt
        plt.savefig(*args, **kwargs)
def _check_bs_dos_consistency(bs, dos):
    '''Check that a BandStructure and a Dos object are consistent.

    Verified properties: spin channels, energy unit, projection flag and
    (when projections exist) atom/projector counts and lists.

    Raises:
        AssertionError: on any mismatch or wrong argument type.
    '''
    assert isinstance(bs, BandStructure)
    assert isinstance(dos, Dos)
    assert bs.nspins == dos.nspins
    assert bs.unit == dos.unit
    assert bs.hasProjection == dos.hasProjection
    if not bs.hasProjection:
        return
    assert bs.natoms == dos.natoms
    assert bs.nprojs == dos.nprojs
    for i, atom in enumerate(bs.atoms):
        assert atom == dos.atoms[i]
    for i, proj in enumerate(bs.projs):
        assert proj == dos.projs[i]
|
{"hexsha": "f49b086ad79c2392d91b50c4ac839a2072fa7f8f", "size": 12574, "ext": "py", "lang": "Python", "max_stars_repo_path": "mykit/visualizer.py", "max_stars_repo_name": "minyez/mykit", "max_stars_repo_head_hexsha": "911413120c081be2cfcaef06d62dc40b2abd2747", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-01-02T09:17:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-26T07:15:59.000Z", "max_issues_repo_path": "mykit/visualizer.py", "max_issues_repo_name": "minyez/mykit", "max_issues_repo_head_hexsha": "911413120c081be2cfcaef06d62dc40b2abd2747", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-03-06T03:16:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-14T14:36:01.000Z", "max_forks_repo_path": "mykit/visualizer.py", "max_forks_repo_name": "minyez/mykit", "max_forks_repo_head_hexsha": "911413120c081be2cfcaef06d62dc40b2abd2747", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4463768116, "max_line_length": 114, "alphanum_fraction": 0.5710195642, "include": true, "reason": "import numpy", "num_tokens": 3103}
|
import numpy as np
import random
import json
from matplotlib import pyplot as plt
#from base import activation_function, loss_function
from .base import activation_function, loss_function
class FNN():
    """
    Feedforward Neural Network.

    Attributes
    ----------
    sizes: list
        Number of nodes in the respective layers (input layer first).
    n_layers: int
        Layer count; incremented by ``add_layer`` and set to
        ``len(sizes)`` by ``init_params`` (backprop relies on the latter).
    n_inputs: int
        Number of nodes in the input layer.
    weights: list of numpy arrays
        Weight matrix of each layer.
    biases: list of numpy arrays
        Bias column vector of each layer.
    prev_update_w: list
        Previous weight updates (used by MGD/NAG momentum).
    prev_update_b: list
        Previous bias updates (used by MGD/NAG momentum).
    activation_types: list
        Activation function name of each layer.
    loss_fn: str
        Loss function name. Options: mse (mean squared error),
        ce (cross entropy), ll (log-likelihood).
    config: dict
        Training configuration collected interactively by ``compile``.
    epoch_list: numpy array
        Epoch indices 0..epochs-1, filled by ``init_params``.
    accuracy_history: list
        Per-epoch validation accuracy recorded by ``fit`` and plotted by
        ``logging``. (The original design shadowed the ``accuracy`` method
        with a list attribute of the same name, so ``logging`` could never
        work; the history now lives under its own name.)
    """

    def __init__(self, n_inputs, loss_fn):
        """
        Create the network with an input layer only.

        Parameters
        ----------
        n_inputs: int
            Number of nodes in the input layer.
        loss_fn: str
            Loss function used by the NN.
            Options:
                mse ( Mean squared error )
                ce ( Cross entropy )
                ll ( Log-likelihood )
        """
        self.sizes = [n_inputs]
        self.n_layers = 0
        self.n_inputs = n_inputs
        self.weights = list()
        self.biases = list()
        self.prev_update_w = list()
        self.prev_update_b = list()
        self.activation_types = list()
        self.loss_fn = loss_fn
        self.config = dict()
        self.epoch_list = list()
        # validation accuracy per epoch, filled by fit()
        self.accuracy_history = list()

    def weight_initializer(self, name="random"):
        """
        Initializes weights and biases.

        Parameters
        ----------
        name: str
            Type of weight initialization.
            Options:
                random ( standard normal )
                xavier ( std = 1 / sqrt(n_in) )
                he ( std = sqrt(2 / n_in) )

        Raises
        ------
        ValueError
            For an unknown initializer name (the original silently left
            the parameters uninitialized).

        Returns
        -------
        None
        """
        shapes = list(zip(self.sizes[:-1], self.sizes[1:]))
        if name == "random":
            self.weights = [np.random.randn(y, x) for x, y in shapes]
        elif name == "xavier":
            self.weights = [np.random.randn(y, x) / np.sqrt(x)
                            for x, y in shapes]
        elif name == "he":
            self.weights = [np.random.randn(y, x) * np.sqrt(2 / x)
                            for x, y in shapes]
        else:
            raise ValueError("unknown weight initializer: {}".format(name))
        # bias initialization is identical for all schemes
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]

    def init_params(self, sizes, epochs, weight_init_type=None):
        """
        Initializes parameters in the NN.

        Parameters
        ----------
        sizes: list
            List of number of nodes in the respective layers of the NN.
        epochs: int
            Number of maximum epochs.
        weight_init_type: str
            Type of weight initialization, passed to ``weight_initializer``.
            Default: None (keep existing weights, e.g. before ``load``).

        Returns
        ------
        None
        """
        # backprop() iterates range(2, n_layers), which requires the count
        # including the input layer
        self.n_layers = len(sizes)
        if weight_init_type:
            self.weight_initializer(weight_init_type)
        self.prev_update_w = [np.zeros(w.shape) for w in self.weights]
        self.prev_update_b = [np.zeros(b.shape) for b in self.biases]
        self.epoch_list = np.arange(0, epochs)

    def get_params(self):
        """
        Return weights and biases of the NN.

        Returns
        -------
        list: [weights, biases]
        """
        return [self.weights, self.biases]

    def add_layer(self, n_nodes, activation_type):
        """
        Adds a layer to the NN.

        Parameters
        ----------
        n_nodes: int
            Number of nodes in the layer.
        activation_type: str
            Activation function used in the layer.
            Options: identity, sigmoid, softmax, tanh, relu

        Returns
        -------
        None
        """
        self.activation_types.append(activation_type)
        self.sizes.append(n_nodes)
        self.n_layers += 1

    def feedforward(self, a):
        """
        Return the output of NN if input is a.

        Parameters
        ----------
        a: numpy array
            Input column vector (shape (n_inputs, 1) -- TODO confirm
            against the callers).

        Returns
        -------
        a: numpy array
            Output of the final layer.
        """
        for layer, (b, w) in enumerate(zip(self.biases, self.weights)):
            a = activation_function(self.activation_types[layer],
                                    np.dot(w, a) + b)
        return a

    def accuracy(self, data):
        """
        Percentage of examples in ``data`` predicted correctly.

        Parameters
        ----------
        data: list
            List of tuples (x, y).

        Returns
        -------
        float: accuracy percentage, or -1 for an unknown task.
        """
        task = self.config.get("task")
        if task == "classification":
            correct = sum(int(np.argmax(self.feedforward(x)) == y)
                          for (x, y) in data)
        elif task == "regression":
            # feedforward returns an array, so `==` would be element-wise
            # and int() would fail for multi-output nets; compare with
            # allclose instead
            correct = sum(int(np.allclose(self.feedforward(x), y))
                          for (x, y) in data)
        else:
            return -1
        return correct / len(data) * 100

    def backprop(self, x, y, weights=None, biases=None):
        """
        Returns the gradients of weights and biases for a given example.
        Backpropagates the error.

        Parameters
        ----------
        x: numpy array
            Input column vector.
        y: numpy array
            Target.
        weights: list
            If given, replaces self.weights before the pass (used by NAG
            for its look-ahead evaluation). Default: None
        biases: list
            Same as ``weights`` for the biases. Default: None

        Returns
        -------
        (gradient_w, gradient_b): tuple of lists of numpy arrays
        """
        if weights: self.weights = weights
        if biases: self.biases = biases
        gradient_w = [np.zeros(w.shape) for w in self.weights]
        gradient_b = [np.zeros(b.shape) for b in self.biases]
        activation = x
        # all activations and weighted inputs, layer by layer
        activations = [x]
        zs = []
        # forward pass, remembering the intermediate values
        for c, (b, w) in enumerate(zip(self.biases, self.weights)):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = activation_function(self.activation_types[c], z)
            activations.append(activation)
        loss_grad = loss_function(self.loss_fn, y, activations[-1], True)
        # delta: error of the output layer
        if self.loss_fn == "mse":
            delta = loss_grad * activation_function(self.activation_types[-1],
                                                    zs[-1], True)
        elif self.loss_fn == "ll":
            if self.activation_types[-1] in ("sigmoid", "softmax"):
                # derivative out*(1-out) cancels against the loss gradient
                delta = activations[-1]
            else:
                delta = loss_grad * activation_function(
                    self.activation_types[-1], zs[-1], True)
        elif self.loss_fn == "ce":
            if self.activation_types[-1] in ("sigmoid", "softmax"):
                # numerator and denominator cancel
                delta = loss_grad
            else:
                az = activation_function(
                    self.activation_types[-1], zs[-1], False)
                delta = loss_grad * (activation_function(
                    self.activation_types[-1], zs[-1], True) /
                    (az * (1 - az)))
        else:
            raise ValueError("unknown loss function: {}".format(self.loss_fn))
        gradient_w[-1] = np.dot(delta, activations[-2].transpose())
        gradient_b[-1] = delta
        # backward pass through the hidden layers
        for l in range(2, self.n_layers):
            z = zs[-l]
            d = activation_function(self.activation_types[-l], z, True)
            # delta becomes the error of layer n_layers - l
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * d
            gradient_b[-l] = delta
            gradient_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (gradient_w, gradient_b)

    def get_batch_size(self, training_data, mode, batch_size):
        """
        Returns the batch size given mode.

        Parameters
        ----------
        training_data: list
            List of tuples (x, y)
        mode: str
            Options: online, mini_batch, batch
        batch_size: int
            Size of the mini_batch (only used for mode "mini_batch").

        Raises
        ------
        ValueError
            For an unknown mode (the original returned None, which then
            crashed later inside fit()).

        Returns
        -------
        int: the effective batch size
        """
        if mode == "online":
            return 1
        elif mode == "mini_batch":
            return batch_size
        elif mode == "batch":
            return len(training_data)
        raise ValueError("unknown learning mode: {}".format(mode))

    def update_GD(self, mini_batch, eta):
        """
        Updates parameters using GD.

        Updates weights and biases after applying Gradient Descent (GD)
        on the mini batch.

        Parameters
        ----------
        mini_batch: list
            List of tuples (x, y)
        eta: float
            Learning rate

        Returns
        -------
        None
        """
        gradient_b = [np.zeros(b.shape) for b in self.biases]
        gradient_w = [np.zeros(w.shape) for w in self.weights]
        # accumulate per-example gradients over the mini batch
        for x, y in mini_batch:
            delta_gradient_w, delta_gradient_b = self.backprop(x, y)
            gradient_b = [gb + dgb
                          for gb, dgb in zip(gradient_b, delta_gradient_b)]
            gradient_w = [gw + dgw
                          for gw, dgw in zip(gradient_w, delta_gradient_w)]
        # step by the mean gradient
        self.weights = [w - (eta / len(mini_batch)) * gw
                        for w, gw in zip(self.weights, gradient_w)]
        self.biases = [b - (eta / len(mini_batch)) * gb
                       for b, gb in zip(self.biases, gradient_b)]

    def update_MGD(self, mini_batch, eta, gamma):
        """
        Updates parameters using MGD.

        Updates weights and biases after applying Momentum based
        Gradient Descent (MGD) on the mini batch.

        Parameters
        ----------
        mini_batch: list
            List of tuples (x, y)
        eta: float
            Learning rate
        gamma: float
            Momentum value

        Returns
        -------
        None
        """
        gradient_b = [np.zeros(b.shape) for b in self.biases]
        gradient_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_gradient_w, delta_gradient_b = self.backprop(x, y)
            gradient_b = [gb + dgb
                          for gb, dgb in zip(gradient_b, delta_gradient_b)]
            gradient_w = [gw + dgw
                          for gw, dgw in zip(gradient_w, delta_gradient_w)]
        # update = gamma * previous_update + eta * gradient
        update_w = [gamma * puw + eta * gw
                    for puw, gw in zip(self.prev_update_w, gradient_w)]
        update_b = [gamma * pub + eta * gb
                    for pub, gb in zip(self.prev_update_b, gradient_b)]
        self.weights = [w - uw for w, uw in zip(self.weights, update_w)]
        self.biases = [b - ub for b, ub in zip(self.biases, update_b)]
        self.prev_update_w = update_w
        self.prev_update_b = update_b

    def update_NAG(self, mini_batch, eta, gamma):
        """
        Updates parameters using NAG.

        Updates weights and biases after applying Nesterov accelerated
        Gradient Descent (NAG) on the mini batch: the gradient is
        evaluated at the momentum look-ahead point.

        Parameters
        ----------
        mini_batch: list
            List of tuples (x, y)
        eta: float
            Learning rate
        gamma: float
            Momentum value

        Returns
        -------
        None
        """
        gradient_w = [np.zeros(w.shape) for w in self.weights]
        gradient_b = [np.zeros(b.shape) for b in self.biases]
        # partial (look-ahead) update: only the momentum term, since the
        # gradients are still zero at this point
        update_w = [gamma * puw for puw in self.prev_update_w]
        update_b = [gamma * pub for pub in self.prev_update_b]
        # backprop() rebinds self.weights/biases when they are passed
        # explicitly, so remember the real parameters first (the original
        # tried `self.weights - update_w` on plain lists -> TypeError)
        orig_w, orig_b = self.weights, self.biases
        look_w = [w - uw for w, uw in zip(orig_w, update_w)]
        look_b = [b - ub for b, ub in zip(orig_b, update_b)]
        for x, y in mini_batch:
            delta_gradient_w, delta_gradient_b = self.backprop(
                x, y, look_w, look_b)
            gradient_w = [gw + dgw
                          for gw, dgw in zip(gradient_w, delta_gradient_w)]
            gradient_b = [gb + dgb
                          for gb, dgb in zip(gradient_b, delta_gradient_b)]
        # full update, applied to the original parameters
        update_w = [gamma * puw + eta * gw
                    for puw, gw in zip(self.prev_update_w, gradient_w)]
        update_b = [gamma * pub + eta * gb
                    for pub, gb in zip(self.prev_update_b, gradient_b)]
        self.weights = [w - uw for w, uw in zip(orig_w, update_w)]
        self.biases = [b - ub for b, ub in zip(orig_b, update_b)]
        self.prev_update_w = update_w
        self.prev_update_b = update_b

    def compile(self):
        """
        Compiles the NN.

        Interactively collects the training configuration into
        ``self.config`` and initializes (or loads) the parameters.

        Returns
        -------
        None
        """
        self.config["epochs"] = int(input("Number of epochs: "))
        pretrain = input("Load Neural Network(Yes/No): ")
        if pretrain == "Yes":
            self.init_params(self.sizes, self.config["epochs"])
            filename = input("Enter the filename: ")
            self.load(filename)
        else:
            print("Weight initialization methods available:")
            print("random (Random initialization)")
            print("xavier (Xavier initialization)")
            print("he (He initialization)")
            weight_init_type = input("Weight initialization: ")
            self.init_params(self.sizes, self.config["epochs"], weight_init_type)
        print("Optimizer types available:")
        print("GD (Gradient Desecent)")
        print("MGD (Momentum based Gradient Desecent)")
        print("NAG (Nesterov accerelated Gradient Desecent)")
        self.config["optimizer"] = input("Optimizer: ")
        if (self.config["optimizer"] == "MGD" or self.config["optimizer"] == "NAG"):
            self.config["gamma"] = float(input("gamma (momentum): "))
        else:
            self.config["gamma"] = None
        self.config["eta"] = float(input("Learning rate: "))
        self.config["mode"] = input("learning mode (online/mini_batch/batch): ")
        if self.config["mode"] == "mini_batch":
            self.config["batch_size"] = int(input("Mini-batch size: "))
        else:
            self.config["batch_size"] = None
        # bool("False") is True, so parse the answer explicitly instead of
        # the original bool(input(...))
        shuffle_ans = input("Random shuffle training data (True/False): ")
        self.config["shuffle"] = shuffle_ans.strip().lower() in ("true", "yes", "y", "1")
        self.config["task"] = input("task (classification/regression): ")

    def fit(self, training_data, validation_data=None):
        """
        Runs the configured optimizer on the training data.

        ``compile`` must be called first to populate ``self.config``
        (epochs, optimizer, eta, gamma, mode, batch_size, shuffle, task).

        Parameters
        ----------
        training_data: list
            List of tuples (x, y).
        validation_data: list
            List of tuples (x, y). When given, accuracy is reported each
            epoch and the best-performing parameters are kept.
            Default: None

        Returns
        -------
        None
        """
        n = len(training_data)
        batch_size = self.get_batch_size(training_data, self.config["mode"],
                                         self.config["batch_size"])
        # reset the momentum history for both momentum-based optimizers
        # (the original only reset it for MGD)
        if self.config["optimizer"] in ("MGD", "NAG"):
            self.prev_update_w = [np.zeros(w.shape) for w in self.weights]
            self.prev_update_b = [np.zeros(b.shape) for b in self.biases]
        print("---------------Status---------------")
        best_accuracy = 0
        best_weights = list()
        best_biases = list()
        self.accuracy_history = list()
        for e in range(self.config["epochs"]):
            if self.config["shuffle"]:
                random.shuffle(training_data)
            mini_batches = [training_data[k:k + batch_size]
                            for k in range(0, n, batch_size)]
            for mini_batch in mini_batches:
                if self.config["optimizer"] == "GD":
                    self.update_GD(mini_batch, self.config["eta"])
                elif self.config["optimizer"] == "MGD":
                    # original read self.conifg (typo) here
                    self.update_MGD(mini_batch, self.config["eta"],
                                    self.config["gamma"])
                elif self.config["optimizer"] == "NAG":
                    # original mistakenly dispatched NAG to update_MGD
                    self.update_NAG(mini_batch, self.config["eta"],
                                    self.config["gamma"])
            if validation_data:
                acc = self.accuracy(validation_data)
                self.accuracy_history.append(acc)
                if acc > best_accuracy:
                    best_accuracy = acc
                    best_weights = self.weights
                    best_biases = self.biases
                print("Epoch: ", e, "Accuracy: ", acc)
                if e == self.config["epochs"] - 1:
                    print("Max accuracy achieved on validation data: ",
                          best_accuracy)
                    # restore the best parameters seen during training
                    self.weights = best_weights
                    self.biases = best_biases
            else:
                print("Epoch {0} complete".format(e))
        save_option = input("Save Neural Network(Yes/No): ")
        if save_option == "Yes":
            filename = input("Enter the filename: ")
            self.save(filename)

    def predict(self, new_data):
        """
        Gives NN predictions on the new data.

        Parameters
        ----------
        new_data: iterable
            Iterable of input vectors.

        Returns
        -------
        list of numpy arrays, one prediction per input
        """
        # the original passed a generator object straight into
        # feedforward, which cannot work; feed each example individually
        return [self.feedforward(x) for x in new_data]

    def logging(self, test_data=None):
        """
        Given test data it plots Epoch vs Error graph.

        Uses the validation accuracies recorded by ``fit`` in
        ``self.accuracy_history``.

        Parameters
        ----------
        test_data: list
            List of tuples (x, y); only used as an on/off switch.

        Returns
        -------
        None
        """
        if test_data:
            # self.accuracy is a method; the recorded history lives in
            # accuracy_history (the original iterated over the method)
            error = [(100 - a) for a in self.accuracy_history]
            plt.plot(range(len(error)), error)
            plt.title("Epoch vs Error")
            plt.xlabel("Epoch")
            plt.ylabel("Error")
            plt.show()

    def save(self, filename):
        """
        Save the NN to the file ``filename`` as JSON.

        Parameters
        ----------
        filename: str

        Returns
        -------
        None
        """
        config = {"sizes": self.sizes,
                  "weights": [w.tolist() for w in self.weights],
                  "biases": [b.tolist() for b in self.biases],
                  "activation_fns": self.activation_types,
                  "loss": self.loss_fn}
        # context manager guarantees the file handle is closed
        with open(filename, "w") as fhand:
            json.dump(config, fhand)

    def load(self, filename):
        """
        Load a neural network from the file ``filename``.

        Parameters
        ----------
        filename: str

        Returns
        -------
        None (the parameters are loaded in place)
        """
        with open(filename, "r") as fhand:
            config = json.load(fhand)
        self.sizes = config["sizes"]
        self.weights = [np.array(w) for w in config["weights"]]
        self.biases = [np.array(b) for b in config["biases"]]
        self.activation_types = config["activation_fns"]
        self.loss_fn = config["loss"]
|
{"hexsha": "186a7f27844782d5266fd80751c6ddbddda5a1ed", "size": 24475, "ext": "py", "lang": "Python", "max_stars_repo_path": "customdl/fnn.py", "max_stars_repo_name": "Taarak9/Custom-Neural-Networks", "max_stars_repo_head_hexsha": "cda83294ed825159d5bd168264f143ea51f056c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "customdl/fnn.py", "max_issues_repo_name": "Taarak9/Custom-Neural-Networks", "max_issues_repo_head_hexsha": "cda83294ed825159d5bd168264f143ea51f056c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "customdl/fnn.py", "max_forks_repo_name": "Taarak9/Custom-Neural-Networks", "max_forks_repo_head_hexsha": "cda83294ed825159d5bd168264f143ea51f056c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2579821201, "max_line_length": 91, "alphanum_fraction": 0.5032890705, "include": true, "reason": "import numpy", "num_tokens": 5225}
|
import math
import numpy as np
import torch
class MinMaxStats(object):
    """A class that holds the min-max values of the tree.

    Used to normalize node values into [0, 1] during UCB scoring.
    """

    def __init__(self, min_value_bound=None, max_value_bound=None):
        """
        Args:
            min_value_bound: known lower bound on values, if any.
            max_value_bound: known upper bound on values, if any.
        """
        # Compare against None explicitly: a bound of 0.0 is falsy and the
        # original truthiness test silently discarded it. The original
        # also fed the *min* bound into `maximum` and the *max* bound into
        # `minimum` (swapped).
        self.maximum = max_value_bound if max_value_bound is not None else -float('inf')
        self.minimum = min_value_bound if min_value_bound is not None else float('inf')

    def update(self, value: float):
        """Fold a newly observed value into the running bounds."""
        self.maximum = max(self.maximum, value)
        self.minimum = min(self.minimum, value)

    def normalize(self, value: float) -> float:
        """Scale value into [0, 1] once both bounds have been observed."""
        if self.maximum > self.minimum:
            # We normalize only when we have set the maximum and minimum values.
            return (value - self.minimum) / (self.maximum - self.minimum)
        return value
class Node(object):
    """A single search-tree node: visit statistics, prior, and children."""

    def __init__(self, prior: float):
        self.visit_count = 0
        self.to_play = -1
        self.prior = prior
        self.value_sum = 0
        self.children = {}
        self.hidden_state = None
        self.reward = 0

    def expanded(self) -> bool:
        """Whether this node has already been expanded with children."""
        return bool(self.children)

    def value(self) -> float:
        """Mean backed-up value of the node; 0 before any visit."""
        if not self.visit_count:
            return 0
        return self.value_sum / self.visit_count

    def expand(self, to_play, actions, network_output):
        """Create one child per action with softmax-normalised priors."""
        self.to_play = to_play
        self.hidden_state = network_output.hidden_state
        self.reward = network_output.reward
        logits = network_output.policy_logits
        # softmax over the policy logits restricted to the given actions
        exps = {action: math.exp(logits[0][action.index]) for action in actions}
        total = sum(exps.values())
        for action, weight in exps.items():
            self.children[action] = Node(weight / total)

    def add_exploration_noise(self, dirichlet_alpha, exploration_fraction):
        """Mix Dirichlet noise into the child priors (root exploration)."""
        moves = list(self.children.keys())
        noise = np.random.dirichlet([dirichlet_alpha] * len(moves))
        for move, eps in zip(moves, noise):
            child = self.children[move]
            child.prior = (child.prior * (1 - exploration_fraction)
                           + eps * exploration_fraction)
class MCTS(object):
    """Monte-Carlo tree search driven by a learned dynamics model."""

    def __init__(self, config):
        self.config = config

    def run(self, root, action_history, model):
        """Run config.num_simulations simulations from root, in place."""
        min_max_stats = MinMaxStats()
        for _ in range(self.config.num_simulations):
            history = action_history.clone()
            node = root
            search_path = [node]
            # descend until an unexpanded node is reached
            while node.expanded():
                action, node = self.select_child(node, min_max_stats)
                history.add_action(action)
                search_path.append(node)
            # Inside the search tree we use the dynamics function to obtain
            # the next hidden state given an action and the previous hidden
            # state of the leaf's parent.
            parent = search_path[-2]
            last_action = torch.tensor([[history.last_action().index]],
                                       device=parent.hidden_state.device)
            network_output = model.recurrent_inference(parent.hidden_state,
                                                       last_action)
            node.expand(history.to_play(), history.action_space(), network_output)
            self.backpropagate(search_path, network_output.value.item(),
                               history.to_play(), min_max_stats)

    def select_child(self, node, min_max_stats):
        """Pick the child with the highest UCB score."""
        # NOTE(review): on a score tie the tuple comparison falls through
        # to the action/child objects, which must then be orderable
        scored = ((self.ucb_score(node, child, min_max_stats), action, child)
                  for action, child in node.children.items())
        _, action, child = max(scored)
        return action, child

    def ucb_score(self, parent, child, min_max_stats) -> float:
        """pUCT score: prior-weighted exploration plus normalised value."""
        pb_c = math.log((parent.visit_count + self.config.pb_c_base + 1)
                        / self.config.pb_c_base)
        pb_c += self.config.pb_c_init
        pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
        prior_score = pb_c * child.prior
        if child.visit_count > 0:
            value_score = (child.reward + self.config.discount
                           * min_max_stats.normalize(child.value()))
        else:
            value_score = 0
        return prior_score + value_score

    def backpropagate(self, search_path, value, to_play, min_max_stats):
        """Propagate the leaf value up the path, discounting through rewards."""
        for node in reversed(search_path):
            node.value_sum += value if node.to_play == to_play else -value
            node.visit_count += 1
            min_max_stats.update(node.value())
            value = node.reward + self.config.discount * value
|
{"hexsha": "bc466654857ffbc2aee782f19edc27e8fa331a9c", "size": 4325, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/mcts.py", "max_stars_repo_name": "bAmpT/muzero-pytorch", "max_stars_repo_head_hexsha": "481c02af04245d49eb545b7313a3f19c20f1cb51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/mcts.py", "max_issues_repo_name": "bAmpT/muzero-pytorch", "max_issues_repo_head_hexsha": "481c02af04245d49eb545b7313a3f19c20f1cb51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/mcts.py", "max_forks_repo_name": "bAmpT/muzero-pytorch", "max_forks_repo_head_hexsha": "481c02af04245d49eb545b7313a3f19c20f1cb51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9385964912, "max_line_length": 109, "alphanum_fraction": 0.6157225434, "include": true, "reason": "import numpy", "num_tokens": 920}
|
import pySALESetup as pss
import numpy as np
# Build a mesh with the same triangular grain placed in two test areas
# (A and B) at different rotations, then fill, assign plate velocities,
# and save the iSALE input file.
mesh1 = pss.Mesh(X=100, Y=500, cellsize=.5e-5)

# Two test areas A and B; in each test area one type of grain is tested in
# a different orientation. Each grain is independent of all the others.
x = .25e-3
yA = 1.875e-3
yB = 0.625e-3

# rotation of 0.5*pi for grain 1 (grain 2 gets three times that) and
# apex offset of the triangle
r = 0.5
r *= np.pi
off = 0.

# isosceles triangle vertices; the apex y-coordinate is shifted by `off`
R = [[-1., -1.],
     [1., -1.],
     [0., 1.]]
R[2][1] -= off

grain1 = pss.Grain(shape='polygon', poly_params=R, eqr=20., rot=r)
grain2 = pss.Grain(shape='polygon', poly_params=R, eqr=20., rot=r * 3)
grain1.place(x, yA, 1, mesh1)
grain2.place(x, yB, 2, mesh1)

fill = mesh1.calcVol([1, 2])
vfrac = fill / float(mesh1.Ncells)
# parenthesised print works under both Python 2 and 3 (the original used
# the Python-2-only print statement)
print("Total volume fraction of particles is: {:3.3f} %".format(vfrac * 100.))

# fill the remaining cells with matrix material 3, give the two halves
# opposing velocities, carve thin plates at both ends, set porosity
mesh1.fillAll(3)
mesh1.plateVel(0., 1.25e-3, 1500., axis=1)
mesh1.plateVel(1.25e-3, 2.5e-3, -1500., axis=1)
mesh1.fillPlate(-1, 2.49e-3, 2.5e-3)
mesh1.fillPlate(-1, 0., 0.01e-3)
mesh1.matrixPorosity(3, 50.)
mesh1.viewMats()
mesh1.save(fname='triangle_isoceles_offset{:1.2f}.iSALE'.format(off))
|
{"hexsha": "d309c0a7f71e8832b784cbb23b82b261b4e624d9", "size": 1118, "ext": "py", "lang": "Python", "max_stars_repo_path": "regolith_tests/triangle_tester.py", "max_stars_repo_name": "jgd10/RegolithSetupRoutines", "max_stars_repo_head_hexsha": "4b814ec45292aa7226e1b094aaf6c1472b0ada3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "regolith_tests/triangle_tester.py", "max_issues_repo_name": "jgd10/RegolithSetupRoutines", "max_issues_repo_head_hexsha": "4b814ec45292aa7226e1b094aaf6c1472b0ada3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "regolith_tests/triangle_tester.py", "max_forks_repo_name": "jgd10/RegolithSetupRoutines", "max_forks_repo_head_hexsha": "4b814ec45292aa7226e1b094aaf6c1472b0ada3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8163265306, "max_line_length": 98, "alphanum_fraction": 0.6842576029, "include": true, "reason": "import numpy", "num_tokens": 451}
|
from unittest.case import TestCase
import numpy as np
from tensorflow.keras import initializers
import tensorflow as tf
from mdrnn import MDRNN
class TwoStepRNNTests(TestCase):
    """Unit tests exercising MDRNN over short 1-D sequences."""

    def test_feeding_layer_created_with_default_initializer(self):
        # smoke test: calling with default initializers must not raise
        # NOTE(review): zeros * 0.5 is still all zeros; np.ones may have
        # been intended -- confirm with the author
        rnn = MDRNN(units=2, input_shape=(None, 1))
        batch = np.zeros((1, 1, 1)) * 0.5
        rnn.call(batch)

    def test_for_two_step_sequence(self):
        zero_init = initializers.Zeros()
        rnn = MDRNN(units=2, input_shape=(None, 1),
                    kernel_initializer=zero_init,
                    recurrent_initializer=zero_init,
                    bias_initializer=initializers.Constant(5),
                    return_sequences=True)
        batch = np.zeros((1, 3, 1))
        output = rnn.call(batch)
        # with zero weights only the bias reaches the activation, so every
        # step yields the same value (~0.9999, presumably tanh(5) -- confirm
        # MDRNN's default activation)
        expected = np.ones((1, 3, 2)) * 0.9999
        np.testing.assert_almost_equal(expected, output.numpy(), 4)

    def test_1d_rnn_produces_correct_output_for_2_steps(self):
        identity_init = initializers.identity()
        bias = 3
        rnn = MDRNN(units=3, input_shape=(None, 3),
                    kernel_initializer=identity_init,
                    recurrent_initializer=identity_init,
                    bias_initializer=initializers.Constant(bias),
                    activation=None,
                    return_sequences=True)
        step1 = np.array([1, 2, 4])
        step2 = np.array([9, 8, 6])
        batch = np.array([step1, step2]).reshape((1, 2, -1))
        output = rnn.call(batch)
        # with identity weights and no activation the state accumulates the
        # inputs plus one bias per step
        expected = np.array([[step1 + bias, step1 + step2 + 2 * bias]])
        np.testing.assert_almost_equal(expected, output.numpy(), 8)
class CheckingMDRNNOutputAgainstKerasSimpleRNN(TestCase):
    """Checks MDRNN output against tf.keras.layers.SimpleRNN on 1-D input."""

    def setUp(self):
        seed = 1
        # identical, seeded configuration for both implementations
        self.kwargs = dict(
            units=3, input_shape=(None, 5),
            kernel_initializer=initializers.glorot_uniform(seed),
            recurrent_initializer=initializers.he_normal(seed),
            bias_initializer=initializers.Constant(2),
            return_sequences=True,
            activation='relu',
        )

    def _assert_outputs_match(self, return_sequences):
        # build both layers from the same kwargs and feed the same batch
        self.kwargs.update(dict(return_sequences=return_sequences))
        custom = MDRNN(**self.kwargs)
        reference = tf.keras.layers.SimpleRNN(**self.kwargs)
        batch = tf.constant(np.random.rand(3, 4, 5), dtype=tf.float32)
        np.testing.assert_almost_equal(custom(batch).numpy(),
                                       reference(batch).numpy(), 6)

    def test_output_sequences_match(self):
        self._assert_outputs_match(return_sequences=True)

    def test_hidden_states_match(self):
        self._assert_outputs_match(return_sequences=False)
|
{"hexsha": "f9ba381438e6c4db5053559d46f582081ae59280", "size": 3048, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/one_dimensional_RNN/test_two_step_predictions.py", "max_stars_repo_name": "X-rayLaser/multi-directional-mdrnn", "max_stars_repo_head_hexsha": "70b0e1c2e07b5f476c264c6700e8d34d41a2ce10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-02-27T08:34:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T12:22:32.000Z", "max_issues_repo_path": "tests/unit/one_dimensional_RNN/test_two_step_predictions.py", "max_issues_repo_name": "X-rayLaser/multi-directional-mdrnn", "max_issues_repo_head_hexsha": "70b0e1c2e07b5f476c264c6700e8d34d41a2ce10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-23T16:29:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-07T15:05:54.000Z", "max_forks_repo_path": "tests/unit/one_dimensional_RNN/test_two_step_predictions.py", "max_forks_repo_name": "X-rayLaser/multi-directional-mdrnn", "max_forks_repo_head_hexsha": "70b0e1c2e07b5f476c264c6700e8d34d41a2ce10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-31T15:44:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T15:26:35.000Z", "avg_line_length": 35.4418604651, "max_line_length": 79, "alphanum_fraction": 0.6125328084, "include": true, "reason": "import numpy", "num_tokens": 695}
|
# Best-effort read of an integer counter file: returns the parsed value,
# or 0 when the file is missing, unreadable, or not a valid integer.
function _read(valfn)
    parsed = 0
    try
        parsed = parse(Int, read(valfn, String))
    catch
        # swallow the error deliberately: fall back to 0
    end
    return parsed
end
let
    # Race test: spawn many independent Julia worker processes that all
    # increment a shared counter file, then check the final count.
    # File names and the per-worker increment count must match race_script.jl.
    @info("Running race tests")
    # must match the values on race_script.jl
    lkfn = joinpath(@__DIR__, "lock")      # lock file shared with the workers
    valfn = joinpath(@__DIR__, "sum.txt")  # shared counter file
    logfn = joinpath(@__DIR__, "log.txt")  # worker log, printed on failure
    try
        script = joinpath(@__DIR__, "race_script.jl")
        @assert isfile(script)
        # start from a clean slate: no stale lock/log, counter at 0
        rm(valfn; force = true)
        rm(lkfn; force = true)
        rm(logfn; force = true)
        write(valfn, "0")
        currptoj = Base.current_project(@__DIR__)
        nprocs = 20
        @info("Spawning $(nprocs) competing processes")
        for t in 1:nprocs
            # run this for debug
            # julia_cmd = strip(string(Base.julia_cmd()), ['`'])
            # plogfn = joinpath(@__DIR__, "log$(t).txt")
            # jlsrc = "$(julia_cmd) --project=$(currptoj) --startup-file=no $(script) 2>&1 > $(plogfn)"
            # run(`bash -c $(jlsrc)`; wait = false)
            julia_cmd = Base.julia_cmd()
            # launch asynchronously (wait = false) so all workers compete
            run(`$(julia_cmd) --project=$(currptoj) --startup-file=no $(script)`; wait = false)
            sleep(0.1)
        end
        # wait
        mt0 = -1   # last observed mtime of the counter file
        val = 0    # last value read from the counter file
        # N assumes 10 increments per worker (see race_script.jl)
        N = nprocs * 10 # must match the values on race_script.jl
        @info("Reading")
        print(val, "/", N, "\r")
        for _ in 1:nprocs
            # poll until the counter file goes quiescent: a nonzero value
            # whose mtime did not change between two consecutive polls
            while true
                sleep(5.0)
                val = _read(valfn)
                mt = mtime(valfn)
                iszero(mt) && continue            # file missing / not created yet
                val != 0 && (mt == mt0) && break  # value present and file stable
                mt0 = mt
                print(val, "/", N, "\r")
            end
            (val == N) && break
            println(val, "/", N)
            println("waiting...")
        end
        println(val, "/", N)
        # tolerate a small shortfall: more than 99% of increments must land
        ok_res = val >= N * 0.99 # > 99% success
        @test ok_res
        # deb info
        if !ok_res && isfile(logfn)
            println("\n\n", "-"^60)
            println("LOG", "\n")
            println(read(logfn, String))
            println("\n\n", "-"^60)
        end
    finally
        # always clean up the shared files, pass or fail
        rm(lkfn; force = true)
        rm(valfn; force = true)
        rm(logfn; force = true)
    end
end
|
{"hexsha": "a46bd32bb54f77beb7d2ece7e08a564e8cedd83a", "size": 2283, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/race_test.jl", "max_stars_repo_name": "josePereiro/SimpleLockFiles.jl", "max_stars_repo_head_hexsha": "4276512bf160a563380556a929965671d4b93e95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/race_test.jl", "max_issues_repo_name": "josePereiro/SimpleLockFiles.jl", "max_issues_repo_head_hexsha": "4276512bf160a563380556a929965671d4b93e95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/race_test.jl", "max_forks_repo_name": "josePereiro/SimpleLockFiles.jl", "max_forks_repo_head_hexsha": "4276512bf160a563380556a929965671d4b93e95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5375, "max_line_length": 103, "alphanum_fraction": 0.4607971967, "num_tokens": 617}
|
##################################################################
# Deprecations
##################################################################
# Old callers of `bicgstab!` are forwarded to `bicgstab` with a deprecation warning.
@deprecate bicgstab! bicgstab
|
{"hexsha": "d7acf14d000b367f2f47f43ab3c21a40220f6ecf", "size": 181, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/deprecated.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/NumericalMethodsforEngineers.jl-00e1d38a-71a9-5665-8612-32ae585a75a3", "max_stars_repo_head_hexsha": "e230c3045d98da0cf789e4a6acdccfbfb21ef49e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-07-23T18:12:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-25T03:32:45.000Z", "max_issues_repo_path": "src/deprecated.jl", "max_issues_repo_name": "OVGULIU/NumericalMethodsforEngineers.jl", "max_issues_repo_head_hexsha": "7ca0b79965a7abd58af29d8dfd1870a954fb3aec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-07-23T21:46:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-27T23:14:46.000Z", "max_forks_repo_path": "src/deprecated.jl", "max_forks_repo_name": "OVGULIU/NumericalMethodsforEngineers.jl", "max_forks_repo_head_hexsha": "7ca0b79965a7abd58af29d8dfd1870a954fb3aec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-27T14:13:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T18:54:06.000Z", "avg_line_length": 25.8571428571, "max_line_length": 66, "alphanum_fraction": 0.2044198895, "num_tokens": 21}
|
using QuantumAlgebra
using QuantumAlgebra: δ, prodtuples, prodtuple, sumtuple, distribute_indices!
using Test
@testset "QuantumAlgebra.jl" begin
    # Run the whole suite twice: once with σ± as the elementary two-level
    # operators and once without, since normal ordering differs between modes.
    for with_σpm in (false,true)
        QuantumAlgebra.use_σpm(with_σpm)
        @test QuantumAlgebra.using_σpm == with_σpm
        @test QuantumAlgebra.SpatialIndex(QuantumAlgebra.x) == QuantumAlgebra.x
        # all equal numbers should be equal scalars (ignore type)
        @test scal(0) == scal(0.0)
        @test scal(0) == scal(0im)
        @test scal(1.5) == scal(3//2)
        # test params and their complex conjugation
        @test param(:ω) == param(:ω,()) == param(:ω,'n') == param(:ω,'n',())
        @test param(:ω,'n',(:i)) == param(:ω,:i)
        @test param(:ω,'n',(:i,:j,2)) == param(:ω,:i,:j,2)
        @test_throws ErrorException param(:ω,'g')
        @test_throws MethodError param(:ω,2,:i,"a")
        @test_throws MethodError a("a")
        # 'r' = real (self-adjoint), 'n' = plain, 'c' = conjugated
        @test adjoint(param(:g,'r')) == param(:g,'r')
        @test adjoint(param(:g,'n')) == param(:g,'c')
        @test adjoint(param(:g,:i)) == param(:g,'c',:i)
        @test adjoint(param(:g,'c',(:i,1,:m))) == param(:g,:i,1,:m)
        # products of params commute and sort into canonical order
        tmp1 = param(:g,2)*param(:g,1)*param(:g,'c',3)*param(:b)*param(:a)*param(:d,'c',3)*param(:f,'r',1,:i)
        tmpc = param(:g,'c',2)*param(:g,'c',1)*param(:g,'n',3)*param(:b,'c')*param(:a,'c')*param(:d,'n',3)*param(:f,'r',(1,:i))
        tmp2 = param(:a)*param(:b)*param(:g,1)*param(:g,2)*param(:g,'c',3)*param(:d,'c',3)*param(:f,'r',(1,:i))
        @test tmp1 == tmp2
        @test tmp1' == tmpc
        # scalar arithmetic with operators
        @test a()*5 == 5*a() == scal(5)*a()
        @test a()+5 == 5+a() == scal(5)+a()
        @test 5-a() == scal(5)+scal(-1)*a()
        @test a()-5 == scal(-5)+a()
        # Pauli-operator identities and index handling
        @test_throws MethodError σx(:i,"a")
        @test σx(:i,:b) == σx((:i,:b))
        @test σx(:i,:j) != σx(:i,:b)
        @test σx() == σp() + σm()
        @test σx(:i) == σp(:i) + σm(:i)
        @test σx(:i,:j) == σp((:i,:j)) + σm((:i,:j))
        @test σy(:i) == scal(-1im)*(σp(:i) - σm(:i))
        @test σz(:i) == scal(2)*σp(:i)*σm(:i) - scal(1)
        for s=(σx,σy,σz)
            @test s(:i)*s(:i) == scal(1)
            @test s(:i)' == s(:i)
        end
        @test σx(:i)*σy(:i) == scal(1im)*σz(:i)
        @test σy(:i)*σx(:i) == scal(-1im)*σz(:i)
        @test σy()*σz() == scal(1im)*σx()
        @test σx(:i,:j)*σz(:i,:j) == scal(-1im)*σy(:i,:j)
        @test σx(:i,:j)*σz(:i,:j) != scal(-1im)*σy(:i,:k)
        # bosonic (a/adag) and fermionic (f/fdag) (anti)commutation
        @test a()' == adag()
        @test a(:m)*adag(:m) == adag(:m)*a(:m) + scal(1)
        @test (a(:m)*a(1))' == adag(:m)*adag(1)
        @test (a(1,2,:k)*a(:m))' == adag(1,2,:k)*adag(:m)
        @test fdag(:a)*f(:b) + f(:b)*fdag(:a) == δ(:a,:b)
        @test f(:a)*f(:b) + f(:b)*f(:a) == scal(0)
        @test fdag(:a)*fdag(:b) + fdag(:b)*fdag(:a) == scal(0)
        @test f()' == fdag()
        @test fdag()' == f()
        @test (fdag()*f())' == fdag()*f()
        @test comm(fdag(),f()) == -comm(f(),fdag())
        # total ordering used for normal-ordering terms
        @test scal(1+1im) < scal(2-1im)
        @test scal(1+1im) > scal(1-1im)
        @test a(1) < σx(1)
        @test adag(1) < σx(1)
        @test !(a(1) < adag(1))
        @test adag(1) < a(1)
        @test a(5) < a(:i)
        @test !(adag(:m) < adag(2))
        @test fdag() < f()
        @test adag() < fdag()
        @test fdag(:i,:j) < fdag(:k)
        @test a(1) * (σy(1) * a(1))' == a(1) * (adag(1) * σy(1)) == adag(1)*a(1)*σy(1) + σy(1)
        # analytic sums over indices (∑ / OpSumAnalytic)
        @test ∑(:j,a(:j))*∑(:i,a(:i)) == ∑(:i,∑(:j,a(:i)*a(:j)))
        tmp = OpSumAnalytic(:i,adag(:i)*a(:i))
        @test tmp' == tmp
        @test a(:i)*OpSumAnalytic(:i,a(:i)) == OpSumAnalytic(:i_1,a(:i_1)*a(:i))
        @test adag(:n)*tmp == OpSumAnalytic(:i,adag(:n)*adag(:i)*a(:i))
        @test a(:n) *tmp == OpSumAnalytic(:i,adag(:i)*a(:n)*a(:i)) + a(:n)
        @test param(:g,:i)*OpSumAnalytic(:i,a(:i)) == OpSumAnalytic(:i_1,param(:g,:i)*a(:i_1))
        @test param(:g,:i_1)*a(:i)*OpSumAnalytic(:i,a(:i)) == OpSumAnalytic(:i_2,param(:g,:i_1)*a(:i)*a(:i_2))
        @test param(:g,:n)*OpSumAnalytic(:i,a(:i)) == ∑(:i,param(:g,:n)*a(:i))
        # string-macro constructors for parameters
        @test Pr"gz_i,mu" == param(:gz,'r',(:i,:mu))
        @test Pc"gz_i,mu" == param(:gz,'n',(:i,:mu))
        @test Pc"gz_i,mu"' == param(:gz,'c',(:i,:mu))
        @test prodtuple(a(5)) == (a(5),)
        # tuples come out ordered!
        @test prodtuple(a(5)*a(4)) == (a(4),a(5))
        @test prodtuple(a(5)*a(4)) != (a(5),a(4))
        @test sumtuple(a(5)) == (a(5),)
        # tuples come out ordered!
        @test sumtuple(a(5)+a(4)) == (a(4),a(5))
        @test sumtuple(a(5)+a(4)) != (a(5),a(4))
        @test_throws ErrorException prodtuples(a(5)+a(4))
        # tuples come out ordered!
        if QuantumAlgebra.using_σpm
            tmp1 = scal(3)*param(:ω)*param(:g)*ExpVal(σp(:k))*σp(:k)*adag(5)*a(5)
            tmp2 = ( (scal(3),param(:g),param(:ω)), (ExpVal(σp(:k)),), (adag(5),a(5),σp(:k)) )
            @test prodtuples(tmp1) == tmp2
        else
            tmp1 = scal(3)*param(:ω)*param(:g)*ExpVal(σz(:k))*σz(:k)*adag(5)*a(5)
            tmp2 = ( (scal(3),param(:g),param(:ω)), (ExpVal(σz(:k)),), (adag(5),a(5),σz(:k)) )
            @test prodtuples(tmp1) == tmp2
        end
        # Kronecker-δ simplification rules
        @test δ(:i,:k)*a(:k) == a(:i)*δ(:k,:i)
        @test δ(:i,:k)*a(:k,:i) == a(:i,:i)*δ(:k,:i)
        @test δ(:i,:k)*δ(:i,:j) == δ(:k,:i)*δ(:j,:k)
        # k cannot be equal to 1 and 3 at the same time
        @test δ(1,:k)*δ(:k,3)*σx(:k) == scal(0)
        @test δ(1,:k)*δ(:k,1)*σx(:k) == δ(1,:k)*σx(1)
        # commutators
        @test comm(σx(5),σy(3)) == scal(0)
        @test comm(σx(5),σx(5)) == scal(0)
        @test comm(σx(1),σz(1)) == scal(-2im)*σy(1)
        @test comm(σx(:mu),σy(:muuu)) == scal(2im)*δ(:mu,:muuu)*σz(:mu)
        @test scal(1//2im)*comm(σx(:m),σy(:m)) == σz(:m)
        @test σx(:a)*σy(:a)*σz(:a) == scal(1im)
        @test comm(param(:g),a(5)+a(3)) == scal(0)
        @test comm(param(:g),a(5)*a(3)) == scal(0)
        @test comm(a(5)+a(3),param(:g)) == scal(0)
        @test comm(a(5)*a(3),param(:g)) == scal(0)
        @test comm(a(5)+a(3),adag(5)) == comm(a(5),adag(5))+ comm(a(3),adag(5))
        @test comm(a(5)+a(3),adag(5)*a(3)) == comm(a(5),adag(5)*a(3))+ comm(a(3),adag(5)*a(3))
        @test adag(2)*σy(:i) - scal(14)*param(:ω) == scal(-14)*param(:ω) + σy(:i)*adag(2)
        @test adag(2)*σy(:i) == σy(:i)*adag(2)
        @test σp(1) * σm(1) == scal(1//2)*σz(1) + scal(1//2)
        @test σm(1)*σp(1) == scal(1) - σp(1)*σm(1)
        @test σm(1)*σp(1) == σp(1)*σm(1) + comm(σm(1),σp(1))
        @test σz(1) == σp(1)*σm(1) - σm(1)*σp(1)
        @test comm(σp(1),σp(1)) == scal(0)
        @test comm(σp(:n),σm(:n)) == σz(:n)
        @test comm(a(1), adag(1)*a(1)) == a(1)
        @test comm(adag(1),adag(1)*a(1)) == -adag(1)
        # expectation values, correlators, and cumulant expansion (ascorr)
        @test ExpVal(scal(3)) == scal(3)
        @test Corr(scal(3)) == scal(3)
        @test Corr(scal(3)+adag(2)) == scal(3) + Corr(adag(2))
        @test ascorr(scal(3)) == scal(3)
        @test ascorr(a(2)) == ExpVal(a(2))
        @test ascorr(scal(3)*param(:g)) == scal(3)*param(:g)
        @test ascorr(scal(3)*a(2)) == scal(3)*ExpVal(a(2))
        @test ascorr(scal(3)+adag(2)) == scal(3) + ExpVal(adag(2))
        @test ascorr(a(2)*a(2)) == Corr(a(2)*a(2)) + ExpVal(a(2))*ExpVal(a(2))
        @test ascorr(a(2)*a(:m))' == Corr(adag(2)*adag(:m)) + ExpVal(adag(:m))*ExpVal(adag(2))
        tmpas = a.(1:3)
        @test *(tmpas...) == a(1)*a(2)*a(3)
        tmpEVs = ExpVal.(tmpas)
        @test ascorr(*(tmpas...)) == Corr(*(tmpas...)) + *(tmpEVs...) + tmpEVs[1]*Corr(tmpas[2]*tmpas[3]) + tmpEVs[2]*Corr(tmpas[1]*tmpas[3]) + tmpEVs[3]*Corr(tmpas[1]*tmpas[2])
        @test a(1) < ascorr(a(1)*a(2)*a(3)*a(4))
        @test_throws ErrorException ascorr(a(1)*a(2)*a(3)*a(4)*a(5))
        if QuantumAlgebra.using_σpm
            @test ascorr(scal(-1)*param(:g,'r',1)*σp(1)) == -param(:g,'r',1)*ExpVal(σp(1))
            @test ascorr(OpSumAnalytic(:i,σp(:i)*σm(:n))) == OpSumAnalytic(:i,Corr(σp(:i)*σm(:n))) + OpSumAnalytic(:i,ExpVal(σp(:i))*ExpVal(σm(:n)))
        else
            @test ascorr(scal(-1)*param(:g,'r',1)*σz(1)) == -param(:g,'r',1)*ExpVal(σz(1))
            @test ascorr(OpSumAnalytic(:i,σy(:i)*σy(:n))) == OpSumAnalytic(:i,Corr(σy(:i)*σy(:n))) + OpSumAnalytic(:i,ExpVal(σy(:i))*ExpVal(σy(:n))) - ExpVal(σy(:n))*ExpVal(σy(:n))
        end
        @test CorrOrExp(a(5)) == ExpVal(a(5))
        @test CorrOrExp(a(5)*a(:i)) == Corr(a(5)*a(:i))
        # Heisenberg equations of motion with a harmonic-oscillator Hamiltonian
        H = ∑(:i,param(:ω,'r',:i)*adag(:i)*a(:i))
        @test comm(a(:i),H) == param(:ω,'r',:i)*a(:i)
        @test comm(a(:n),H) == param(:ω,'r',:n)*a(:n)
        @test comm(H,a(:n)) == -param(:ω,'r',:n)*a(:n)
        @test comm(adag(:n),H) == -param(:ω,'r',:n)*adag(:n)
        @test comm(adag(:n)*a(:m),H) == (param(:ω,'r',:m)-param(:ω,'r',:n))*adag(:n)*a(:m)
        @test a()*H == ∑(:i,param(:ω,'r',:i)*adag(:i)*a(:i)*a())
        @test a(:k)*H == param(:ω,'r',:k)*a(:k) + ∑(:i,param(:ω,'r',:i)*adag(:i)*a(:i)*a(:k))
        HH = ∑(:i,param(:ω,'r',:i,:i)*adag(:i,:i)*a(:i,:i))
        @test a(:k,:k)*HH == param(:ω,'r',:k,:k)*a(:k,:k) + ∑(:i,param(:ω,'r',:i,:i)*adag(:i,:i)*a(:i,:i)*a(:k,:k))
        # vacuum projections: Avac applies to the ket, vacA to the bra
        @test Avac(H) == scal(0)
        @test vacA(H) == scal(0)
        @test vacA(adag(3)*σp(1)*σm(1)) == scal(0)
        @test vacA(fdag(:n)) == scal(0)
        @test vacA(f(:n)) == f(:n)
        @test vacA(a(:n)) == a(:n)
        @test Avac(fdag(:n)) == fdag(:n)
        @test Avac(f(:n)) == scal(0)
        @test Avac(a(3)*σp(1)*σm(1)) == scal(0)
        @test Avac(σm(1)*σp(1)) == scal(1)
        @test Avac(σp(1)*σm(1)) == scal(0)
        @test Avac(σp(1)) == σp(1)
        @test vacA(σm(1)) == σm(1)
        if QuantumAlgebra.using_σpm
            @test Avac(σm(1)) == scal(0)
            @test vacA(σp(1)) == scal(0)
        else
            @test Avac(σx(1)) == vacA(σx(1)) == σx(1)
        end
        @test vacExpVal(σx(1)) == scal(0)
        @test vacExpVal(σp(1)) == scal(0)
        @test vacExpVal(OpSumAnalytic(:i,σp(:i))) == scal(0)
        @test vacExpVal(σp(:i)*σm(:k)) == scal(0)
        @test a(:n)*adag(:n)*a(:n)*adag(:n) == scal(1) + scal(3)*adag(:n)*a(:n) + adag(:n)*adag(:n)*a(:n)*a(:n)
        # normalized state S = (|3ₙ⟩ + |1ₘ⟩)/√2 for vacuum expectation values
        S = scal(1/√(2*6))*adag(:n)*adag(:n)*adag(:n) + scal(1/√2)*adag(:m)
        for (A,val) in [(scal(1),scal(1)),
                        (adag(:n)*a(:n),scal(1.5) + 0.5 * δ(:n,:m)),
                        (adag(:n)*adag(:n)*a(:n)*a(:n),scal(3))]
            @test vacExpVal(A,S) ≈ val
        end
        # LaTeX and plain-string output
        tmp = scal(1+2im)*OpSumAnalytic(:i,a(:i)*adag(:i)*ascorr(adag(:n)*a(:m)))
        @test latex(scal(1+2im)) == "(1+2i)"
        @test latex(scal(1+2im//5)) == "\\left(1+\\frac{2}{5}i\\right)"
        tmp = OpSumAnalytic(:i,ascorr(adag(:n)*a(:i)))
        tmplatex = "\\sum_{i}\\langle a_{n}^\\dagger a_{i} \\rangle_{c} + \\sum_{i}\\langle a_{n}^\\dagger \\rangle \\langle a_{i} \\rangle"
        @test latex(tmp) == tmplatex
        @test ascorr(tmp) == tmp
        @test sprint(show,"text/latex",tmp) == "\$$(tmplatex)\$"
        if QuantumAlgebra.using_σpm
            @test latex(σp()) == "\\sigma^+"
        else
            @test latex(σz()) == "\\sigma_{z}"
        end
        # automatic distribution of fresh symbolic indices
        inds = [:a,:b,:c,:d,:e,:f,:g,:h,:i,:j,:k,:l,:m,:n]
        tmp1 = param(:ω,:y)*a(1)*adag(1)*a(3)*adag(4)*ExpVal(a(5))*Corr(adag(5)*a(9))
        tmp2 = param(:ω,:a)*ExpVal(a(:b))*Corr(adag(:c)*a(:d))*adag(:e)*a(:f) + param(:ω,:g)*ExpVal(a(:h))*Corr(adag(:i)*a(:j))*adag(:k)*adag(:l)*a(:m)*a(:n)
        @test distribute_indices!(copy(inds),tmp1) == tmp2
        if QuantumAlgebra.using_σpm
            tmp1 = a(1,:n)*adag()*σm(1,:n)*σp()
            @test distribute_indices!(copy(inds),tmp1) == adag()*a(:a,:b)*σp()*σm(:c,:d)
        else
            tmp1 = a(1,:n)*adag()*σz(1,:n)*σy()
            @test distribute_indices!(copy(inds),tmp1) == adag()*a(:a,:b)*σy()*σz(:c,:d)
        end
        @test_throws MethodError distribute_indices!(copy(inds),OpSumAnalytic(:i,a(:i)))
        @test_throws ArgumentError distribute_indices!([:a,:b],tmp1)
        @test QuantumAlgebra.exchange_inds(adag(:j)*a(:k),:k,:j) == adag(:k)*a(:j)
        @test QuantumAlgebra.extindices(∑(:i,adag(:i)*a(:k))) == [:k]
        @test QuantumAlgebra.symmetric_index_nums(adag(:i)*adag(:j)*a(:k)*a(:l)) == [2,2]
        @test string(OpSumAnalytic(:i,a(:i)) * adag(:n)) == "1 + ∑_i a†(n) a(i)"
        if QuantumAlgebra.using_σpm
            @test string(a(5)*adag(5)*σp(3)*ascorr(adag(5,:i)*a(5))) == "⟨a†(5i)⟩ ⟨a(5)⟩ σ+(3) + ⟨a†(5i) a(5)⟩c σ+(3) + ⟨a†(5i)⟩ ⟨a(5)⟩ a†(5) a(5) σ+(3) + ⟨a†(5i) a(5)⟩c a†(5) a(5) σ+(3)"
        else
            @test string(a(5)*adag(5)*σz(3)*ascorr(adag(5,:i)*a(5))) == "⟨a†(5i)⟩ ⟨a(5)⟩ σz(3) + ⟨a†(5i) a(5)⟩c σz(3) + ⟨a†(5i)⟩ ⟨a(5)⟩ a†(5) a(5) σz(3) + ⟨a†(5i) a(5)⟩c a†(5) a(5) σz(3)"
        end
    end
end
|
{"hexsha": "44794a76327244c8e5d8e9d1b34e39952e3c32f0", "size": 12741, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/QuantumAlgebra.jl-1b9008d5-2a9d-4901-8cfb-44bb87795b64", "max_stars_repo_head_hexsha": "92804d2cda60a1117cb449f745fb489ce9e0950b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/QuantumAlgebra.jl-1b9008d5-2a9d-4901-8cfb-44bb87795b64", "max_issues_repo_head_hexsha": "92804d2cda60a1117cb449f745fb489ce9e0950b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/QuantumAlgebra.jl-1b9008d5-2a9d-4901-8cfb-44bb87795b64", "max_forks_repo_head_hexsha": "92804d2cda60a1117cb449f745fb489ce9e0950b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1808510638, "max_line_length": 187, "alphanum_fraction": 0.4625225649, "num_tokens": 5501}
|
import logging
import numpy as np
from scipy.optimize import linear_sum_assignment
from ...utils.time import timeit
from .centroid import TargetCentroid
logger = logging.getLogger(__name__)
class AdaptiveKmeans:
    """Adaptive Kmeans clustering algorithm to cluster tracked targets.

    Centroids are ``TargetCentroid`` objects stored in ``self.centroids``,
    keyed by a track ID (tid) drawn from the class-level counter
    ``NEXT_TID``.  The cluster count adapts over time: ``fit`` spawns new
    clusters when ``n_clusters`` exceeds the current count, centroids whose
    ``is_dead()`` becomes true are dropped, and clusters that drift close
    together are merged after every fit.
    """
    # Class-level counter so tids stay unique for the process lifetime.
    NEXT_TID = 0

    def __init__(self):
        # Mapping: tid (int) -> TargetCentroid
        self.centroids = {}

    def __str__(self):
        lines = ["[TID:{}:{}]".format(tid, tc)
                 for tid, tc in self.centroids.items()]
        return ", ".join(lines)

    def __repr__(self):
        return str(self)

    def miss(self):
        """Perform miss action on all tracked centroids, then drop dead ones."""
        # Bug fix: the original unpacked ``tid, tc`` while iterating
        # ``self.centroids.values()``, which raises TypeError at runtime;
        # values() yields the centroids themselves.
        for tc in self.centroids.values():
            tc.miss()
        self._remove_dead()

    def predict(self, points):
        """Predict the track ID for each data point

        Arguments:
            points (ndarray): 2D ndarray representing embeddings from one video

        Returns:
            a list of track ids representing the label of each points

        NOTE:
            `points` should only contain the embeddings from one video. As the
            labels(track IDs) are not determined by the minimum distance between
            points and clusters. They are determined by the result of linear
            assignment algorithm. Each unique label (track ID) will only
            associate with one point.

            The number of centroids should always larger than the number of
            points.
        """
        centroids = np.array([tc.embedding for tc in self.centroids.values()])
        label2tid = {label: tid
                     for label, tid in enumerate(self.centroids.keys())}
        # linear_sum_assignment returns row indices in ascending order, so
        # the assigned column (centroid) indices align with `points` order.
        distances = self._pdist(points, centroids)
        _, cindices = linear_sum_assignment(distances)
        tids = np.array([label2tid[cidx] for cidx in cindices])
        return tids

    @timeit(logger)
    def fit(self, group_points, n_clusters):
        """Perform adaptive kmeans clustering

        Arguments:
            group_points (list): list of ndarrays, where each element in the
                list representing the embeddings of targets in specific frame.
            n_clusters (int): the ideal number of clusters in current state
        """
        # Flatten group_points
        points = np.concatenate(group_points)

        # Initialize clusters on first fit
        if len(self.centroids) == 0:
            self._init_centroids(points, n_clusters)
            return

        # Common setup
        centroids = np.array([tc.embedding for tc in self.centroids.values()])
        label2tid = {label: tid
                     for label, tid in enumerate(self.centroids.keys())}

        # Dynamically add new clusters
        if len(self.centroids) < n_clusters:
            group_points = self._split_anomalies(group_points, centroids,
                                                 n_clusters)

        # Assign centroid to each point in each group
        hit_cindices = set()
        group_labels = {}
        for gidx, gpoints in enumerate(group_points):
            distances = self._pdist(gpoints, centroids)
            pindices, cindices = linear_sum_assignment(distances)
            hit_cindices |= set(cindices)
            group_labels[gidx] = list(zip(pindices, cindices))

        # Compute new centroids as the mean of their assigned points; a
        # centroid with no assigned point keeps its old embedding.
        new_centroids = []
        for target_cidx, c in enumerate(centroids):
            members = [group_points[gidx][pidx]
                       for gidx, matches in group_labels.items()
                       for pidx, cidx in matches
                       if cidx == target_cidx]
            new_centroids.append(np.array(members).mean(axis=0)
                                 if members else c)
        new_centroids = np.array(new_centroids)

        # Replace cluster embeddings
        for label, c in enumerate(new_centroids):
            self.centroids[label2tid[label]].embedding = c

        # Update hit/miss state of clusters (the original's no-op
        # `hit_cindices = hit_cindices` self-assignment was removed).
        miss_cindices = set(range(len(centroids))) - hit_cindices
        for hidx in hit_cindices:
            self.centroids[label2tid[hidx]].hit()
        for midx in miss_cindices:
            self.centroids[label2tid[midx]].miss()

        # Cleanup outdated clusters, then merge clusters that are too close
        self._remove_dead()
        self._merge_cluster()

    def _split_anomalies(self, group_points, centroids, n_clusters):
        """Extract anomaly points and spawn new clusters for them.

        Within each frame the point->centroid mapping must be one-to-one, so
        any point whose nearest centroid is already claimed by another point
        of the same frame is treated as an anomaly.  New clusters are
        initialized from the anomaly points; the remaining (normal) points
        per group are returned for the regular update step.
        """
        normal_group_points, anomaly_points = [], []
        for gpoints in group_points:
            # Per-point centroid indices sorted by increasing distance
            distances = self._pdist(gpoints, centroids)
            sorted_labels = np.argsort(distances)
            normal_points = []
            unique_cindices = set()
            for pidx, cindices in enumerate(sorted_labels):
                cidx = cindices[0]
                if cidx in unique_cindices:
                    anomaly_points.append(gpoints[pidx])
                else:
                    normal_points.append(gpoints[pidx])
                    unique_cindices.add(cidx)
            normal_group_points.append(np.array(normal_points))
        # Add new clusters to fit anomaly points
        new_clusters = n_clusters - len(self.centroids)
        self._init_centroids(np.array(anomaly_points), new_clusters)
        return normal_group_points

    def _remove_dead(self):
        """Delete all centroids whose ``is_dead()`` reports True."""
        for tid in list(self.centroids.keys()):
            if self.centroids[tid].is_dead():
                del self.centroids[tid]

    def _init_centroids(self, points, n_clusters):
        """Initialize clusters that fit the specified points

        Arguments:
            points (ndarray): 2D ndarray data for clustering
            n_clusters (int): number of clusters to initialize
        """
        # Nothing to initialize from (guards against an empty anomaly set)
        if n_clusters <= 0 or len(points) == 0:
            return
        # Random select centroids from current data points
        candidates = points.copy()
        np.random.shuffle(candidates)
        candidates = candidates[:n_clusters]
        # Fine-tune centroids that best fit data points
        candidates = self._fit(points, candidates)
        for c in candidates:
            self.centroids[AdaptiveKmeans.NEXT_TID] = TargetCentroid(embedding=c)
            AdaptiveKmeans.NEXT_TID += 1

    def _pdist(self, points, centroids):
        """Compute pair-wise distance between data points and centroids

        Arguments:
            points (ndarray): 2D ndarray representing data points with N rows
            centroids (ndarray): 2D ndarray representing centroids with M rows

        Returns:
            A NxM 2D ndarray representing the euclidean distances between data
            points and centroids
        """
        return np.sqrt(((points[:, np.newaxis, :]-centroids)**2).sum(axis=2))

    def _fit(self, points, centroids, n_iter=10, threshold=1e-3):
        """Perform kmeans algorithm to fit the centroids to the data points

        Arguments:
            points (ndarray): 2D ndarray representing data points
            centroids (ndarray): 2D ndarray representing centroids
            n_iter (int): maximum number of Lloyd iterations
            threshold (float): total centroid movement below which we stop

        Returns:
            A 2D ndarray representing the fine-tuned centroids
        """
        # Bug fix: initialize so we never return an unbound name when
        # n_iter <= 0.
        new_centroids = centroids
        for _ in range(n_iter):
            # Find closest centroid to each point
            distances = self._pdist(points, centroids)
            labels = np.argmin(distances, axis=1)
            # Compute new centroids; empty clusters keep their old position
            new_centroids = np.array([points[labels == label].mean(axis=0)
                                      if np.sum(labels == label) > 0 else c
                                      for label, c in enumerate(centroids)])
            # Stop when converged
            diff = np.sum(np.sqrt(((centroids - new_centroids)**2).sum(axis=1)))
            if diff <= threshold:
                break
            centroids = new_centroids
        return new_centroids

    def _merge_cluster(self, merge_dist=0.4):
        """Merge clusters lying within ``merge_dist`` of each other.

        The surviving cluster keeps the smallest tid of its group and takes
        the mean embedding of all merged members.
        """
        # Nothing to merge with fewer than two clusters (also avoids _pdist
        # failing on an empty centroid array).
        if len(self.centroids) < 2:
            return
        centroids = np.array([tc.embedding for tc in self.centroids.values()])
        label2tid = {label: tid
                     for label, tid in enumerate(self.centroids.keys())}
        # Find unique clusters:
        # [ {1, 2}, {3}, {4} ] means there are three unique clusters, and
        # {1, 2} clusters are considered as same cluster.
        unique_clusters = []
        distances = self._pdist(centroids, centroids)
        for cidx, distance in enumerate(distances):
            # Clusters closer than `merge_dist` count as the same cluster
            same_clusters = set(np.argwhere(distance < merge_dist).reshape(-1).tolist())
            # Try to merge `same_clusters` into an existing unique cluster
            merge_flag = False
            for i, unique_cluster in enumerate(unique_clusters):
                if unique_cluster & same_clusters:
                    unique_clusters[i] = unique_cluster | same_clusters
                    merge_flag = True
                    break
            # Form a new unique cluster from `same_clusters`
            if not merge_flag:
                unique_clusters.append(same_clusters)
        # Merge clusters
        for clusters in unique_clusters:
            if len(clusters) == 1:
                continue
            tids = sorted([label2tid[cidx] for cidx in clusters])
            embeddings = np.array([self.centroids[tid].embedding
                                   for tid in tids])
            self.centroids[tids[0]].embedding = np.mean(embeddings, axis=0)
            for tid in tids[1:]:
                del self.centroids[tid]
|
{"hexsha": "3f3bf596a202b39d3b072ddf53be2084feeadafd", "size": 10462, "ext": "py", "lang": "Python", "max_stars_repo_path": "worker/mtmcworker/cluster/__init__.py", "max_stars_repo_name": "johnnylord/mtmc-testbed", "max_stars_repo_head_hexsha": "e3d331505181baa076162e1f5835e566e8f70167", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-25T08:46:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-25T08:46:19.000Z", "max_issues_repo_path": "worker/mtmcworker/cluster/__init__.py", "max_issues_repo_name": "johnnylord/mtmc-testbed", "max_issues_repo_head_hexsha": "e3d331505181baa076162e1f5835e566e8f70167", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "worker/mtmcworker/cluster/__init__.py", "max_forks_repo_name": "johnnylord/mtmc-testbed", "max_forks_repo_head_hexsha": "e3d331505181baa076162e1f5835e566e8f70167", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-18T01:33:45.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-18T01:33:45.000Z", "avg_line_length": 38.3223443223, "max_line_length": 87, "alphanum_fraction": 0.5893710572, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2178}
|
from __future__ import division
import torch
import random
import numpy as np
from sklearn.cluster import DBSCAN, KMeans
import cv2
def set_seed(seed=0):
    """Seed every RNG the project relies on (python `random`, numpy, torch
    CPU and CUDA) and force cuDNN into deterministic, non-benchmarking mode
    for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def _init_fn():
    """Worker init hook: pin numpy's RNG to seed 0 in each worker process."""
    np.random.seed(0)
def clustering(features, num_cluster=20, method='kmeans', eps=0.6):
    """Cluster feature vectors with DBSCAN or KMeans.

    Arguments:
        features: 2D array-like, one row per sample.
        num_cluster (int): number of clusters (used by 'kmeans' only).
        method (str): 'kmeans' or 'DBSCAN'.
        eps (float): neighborhood radius (used by 'DBSCAN' only).

    Returns:
        1D array of cluster labels, one per sample.

    Raises:
        ValueError: if `method` is unknown (the original fell through and
            hit an UnboundLocalError instead).
    """
    if method == 'DBSCAN':
        cluster = DBSCAN(eps, min_samples=5, metric='euclidean', n_jobs=-1)
    elif method == 'kmeans':
        cluster = KMeans(num_cluster)
    else:
        raise ValueError('unknown clustering method: {!r}'.format(method))
    return cluster.fit_predict(features)
def pretty_dict_string(d, indent=0):
    """Render a (possibly nested) dict as an indented, tab-separated string.

    Each key is printed at `indent` tabs.  Scalar values follow on the same
    line after `indent + 1` tabs; nested dicts recurse one level deeper on
    the following lines.
    """
    string = ''
    for key, value in d.items():
        string += '\t' * indent + str(key)
        if isinstance(value, dict):
            # Bug fix: start the nested block on its own line -- the
            # original glued the first nested key onto the parent key's line.
            string += '\n' + pretty_dict_string(value, indent + 1)
        else:
            string += ('\t' * (indent + 1) + str(value) + '\n')
    return string
def generate_confident_target(pred, confidence_threshold=0):
    """Turn per-class predictions into hard labels along dim 1, replacing
    labels whose confidence falls below `confidence_threshold` with the
    ignore index 255."""
    confidence, labels = pred.max(dim=1)
    labels[confidence < confidence_threshold] = 255
    return labels
def generate_ignore_region_cutmix(mask):
    """Mark a band around cutmix-mask borders as the ignore region.

    The band is the morphological boundary (dilation minus erosion, 5x5
    kernel) of the inverted mask's first channel; pixels on the boundary
    get 0, everything else 1.  Returns a (B, H, W) tensor.
    """
    kernel = np.ones((5, 5), np.uint8)
    inverted = 1 - mask.cpu().numpy()
    ignore_masks = []
    for sample in inverted:
        eroded = cv2.erode(sample[0], kernel, iterations=1)
        dilated = cv2.dilate(sample[0], kernel, iterations=1)
        boundary = dilated - eroded
        ignore_masks.append(torch.from_numpy(1 - boundary)[None, ...])
    return torch.cat(ignore_masks, dim=0)
|
{"hexsha": "71b946d74f532f693fc2ce6d4b8b6445e2211bf4", "size": 1708, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/misc.py", "max_stars_repo_name": "SIAAAAAA/C3-SemiSeg", "max_stars_repo_head_hexsha": "b2e2489b828661660d9842785e831293bab5cd47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-12T11:21:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T01:58:12.000Z", "max_issues_repo_path": "utils/misc.py", "max_issues_repo_name": "SIAAAAAA/C3-SemiSeg", "max_issues_repo_head_hexsha": "b2e2489b828661660d9842785e831293bab5cd47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-12T08:06:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T08:06:56.000Z", "max_forks_repo_path": "utils/misc.py", "max_forks_repo_name": "SIAAAAAA/C3-SemiSeg", "max_forks_repo_head_hexsha": "b2e2489b828661660d9842785e831293bab5cd47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9491525424, "max_line_length": 75, "alphanum_fraction": 0.6533957845, "include": true, "reason": "import numpy", "num_tokens": 437}
|
import tensorflow as tf # noqa
import copy
import os
# import cPickle as pickle
import dill as pickle
import numpy as np
import hashlib
from ..data import helpers as helpers
from ..utils import misc as misc
from ..data import batch_fetcher as bfetchers
from ..experiments import experiment
from ..experiments import config as econfig
from ..model import conditionals as conds
from ..model import transforms as trans # noqa
from ..model import likelihoods as likes # noqa
from datetime import datetime
from functools import reduce
# Hyperparameters.
# Default hyperparameters shared by every configuration below; entries in
# the per-model ARG_CHOICES dicts override these per trial.
DEF_ARGS = {
    'train_iters': 30000,
    'hold_iters': 100,
    'hold_interval': 2500,
    'ncomps': 40,
    'decay_interval': 5000,
    'dropout_keeprate_val': None,
    'optimizer_class': tf.train.AdamOptimizer,
    'momentum': None,
    'momentum_iter': 5000,
    'max_grad_norm': 1.0,
    'trans_alpha': None,
    'rescale_init_constant': 1.0,
    'trans_state_activation': tf.nn.tanh,
    'cond_param_irange': 1e-6,
    'first_do_linear_map': True,
    'standardize': True,
    'base_distribution': 'gaussian',
    'sort_dims': None
}
# Base configs for different transformations.  Each value is a tuple/list of
# alternatives; misc.make_arguments expands their cross product into trials.
BASE_ARG_CHOICES = {
    'lr_decay': (0.5, 0.1),
    'init_lr': (0.005, ),
    'first_trainable_A': (True, False),
    'trans_funcs': [
        None,
        [trans.additive_coupling, trans.reverse, trans.additive_coupling,
         trans.reverse, trans.additive_coupling, trans.reverse,
         trans.additive_coupling, trans.log_rescale],  # NICE Type
        [trans.rnn_coupling, trans.reverse, trans.rnn_coupling, trans.reverse,
         trans.rnn_coupling, trans.reverse, trans.rnn_coupling,
         trans.log_rescale],  # 4xRNN Coup
    ],
}
# Get configs for standard Gaussian conditional model.
ARG_CHOICES_STDGAU = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_STDGAU['single_marginal'] = (True,)
ARG_CHOICES_STDGAU['standard'] = (True,)
ARG_CHOICES_STDGAU['ncomps'] = (1, )
ARG_CHOICES_STDGAU['cond_func'] = (conds.independent_model,)
ARG_LIST_STDGAU = misc.make_arguments(ARG_CHOICES_STDGAU)
# Materialize as a list: on Python 3 `filter` returns a one-shot iterator
# that would be silently exhausted after its first traversal.
ARG_LIST_STDGAU = list(filter(
    lambda conf: conf['first_trainable_A'] or conf['trans_funcs'] is not None,
    ARG_LIST_STDGAU))  # Avoid models that have no variables to optimize.
# Get configs for independent GMMs
ARG_CHOICES_IND = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_IND['single_marginal'] = (False,)
ARG_CHOICES_IND['standard'] = (False,)
ARG_CHOICES_IND['cond_func'] = (conds.independent_model,)
ARG_LIST_IND = misc.make_arguments(ARG_CHOICES_IND)
# Get config for Tied conditional model.
ARG_CHOICES_TIED = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_TIED['cond_tied_model'] = (True,)
ARG_CHOICES_TIED['param_nlayers'] = (2,)
ARG_CHOICES_TIED['cond_func'] = (conds.cond_model,)
ARG_LIST_TIED = misc.make_arguments(ARG_CHOICES_TIED)
# Get config for Untied conditional model.
ARG_CHOICES_UNTIED = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_UNTIED['cond_tied_model'] = (False,)
ARG_CHOICES_UNTIED['param_nlayers'] = (2,)
ARG_CHOICES_UNTIED['cond_func'] = (conds.cond_model,)
ARG_LIST_UNTIED = misc.make_arguments(ARG_CHOICES_UNTIED)
# Get config for RNN conditional model.
ARG_CHOICES_RNN = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_RNN['param_nlayers'] = (None, 2)
ARG_CHOICES_RNN['cond_func'] = (conds.rnn_model,)
ARG_LIST_RNN = misc.make_arguments(ARG_CHOICES_RNN)
# Get config for RNN conditional model with fully-connected output layers.
ARG_CHOICES_RNN_FC = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_RNN_FC['param_nlayers'] = (2, )
ARG_CHOICES_RNN_FC['cond_func'] = (conds.rnn_model,)
ARG_LIST_RNN_FC = misc.make_arguments(ARG_CHOICES_RNN_FC)
# Make the default be RNN conditional models.
ARG_LIST = misc.make_arguments(ARG_CHOICES_RNN)
def shorten(obj):
    """Return str(obj), truncated to a filename-safe length.

    Strings of 255+ characters are reduced to their first and last 50
    characters joined by '...', with an md5 digest appended so distinct
    long strings never collide after truncation.
    """
    text = str(obj)
    if len(text) < 255:
        return text
    digest = hashlib.md5(text.encode('utf-8')).hexdigest()
    return '{}...{}_{}'.format(text[:50], text[-50:], digest)
def print_value(value):
    """Render functions, lists, and dictionaries as short strings for
    filenames and printouts.

    Strings pass through; dict items are joined with '-', other iterables
    with ','; functions render as their name; anything else as str(value).
    """
    if isinstance(value, str):
        return value
    try:
        try:
            string = reduce(lambda x, y: x+'-'+y,
                            [print_value(v) for v in value.items()])
        except AttributeError:  # Not dictionary
            string = reduce(
                lambda x, y: x+','+y, [print_value(v) for v in value])
    except TypeError:  # Not iterable
        try:
            # Fix: `func_name` was Python-2-only; Python 3 uses `__name__`,
            # so functions now render deterministically by name instead of
            # falling through to '<function f at 0x...>'.
            string = value.__name__
        except AttributeError:  # Not function
            string = str(value)
    return string
def get_exp_name(args):
    """Build a short, deterministic experiment directory name from `args`.

    Keys are sorted so equivalent arg dicts always map to the same name;
    each key/value pair becomes one path component, shortened for safety.
    """
    sorted_keys = np.sort(list(args.keys()))
    parts = ['{}--{}/'.format(key, shorten(print_value(args[key])))
             for key in sorted_keys]
    return shorten(''.join(parts))
def make_trainer(dataset, base_save_path, base_log_path,
                 nepochs=None, exp_class=experiment.Experiment,
                 fetcher_class=bfetchers.DatasetFetchers,
                 save_path=None, **kwargs):
    """Build a closure that trains one model configuration per call.

    Args:
        dataset: dict with 'train'/'valid'/'test' splits (optionally with
            matching '*_labels' entries), or a path to a pickle of one.
        base_save_path: root directory for per-trial model output.
        base_log_path: root directory for per-trial logs; None disables
            separate logging (results then go under the save path).
        nepochs: if given, 'train_iters' is derived from the train-set size
            and batch size instead of being taken from the trial args.
        exp_class: experiment class instantiated per trial.
        fetcher_class: class wrapping the data splits for the experiment.
        save_path: currently unused — shadowed inside `main` (see TODO).
        **kwargs: default trial arguments; per-call `args` override them.

    Returns:
        main(args, return_exp=False): trains a single trial, saves its
        config and results, and returns the result dicts (plus the
        experiment object when return_exp=True).
    """
    # Options.
    # Load data.
    # TODO: general data load
    if isinstance(dataset, str):
        print('Loading {}...'.format(dataset))
        dataset = pickle.load(open(dataset, 'rb'))
        print('Loaded.')
    # Make the data fetchers.
    if 'train_labels' in dataset and 'valid_labels' in dataset and \
            'test_labels' in dataset:
        # Labeled data.
        fetchers = fetcher_class(
            (dataset['train'], dataset['train_labels']),
            (dataset['valid'], dataset['valid_labels']),
            (dataset['test'], dataset['test_labels']))
    else:
        fetchers = fetcher_class(
            (dataset['train'],), (dataset['valid'],), (dataset['test'],))
    def main(args, return_exp=False):
        """Train one trial with `args` overriding the default kwargs."""
        # Make config for trial with default and given arguments.
        trial_args = copy.copy(kwargs)
        for ind in args:
            trial_args[ind] = args[ind]
        # Data preprocessing
        standardize = misc.get_default(trial_args, 'standardize', False)
        cov_func = misc.get_default(trial_args, 'cov_func', None)
        trial_args['first_do_linear_map'] = misc.get_default(
            trial_args, 'first_do_linear_map', False)
        # Get initial linear map parameters.
        if trial_args['first_do_linear_map']:
            try:
                (imp, ib, ip) = helpers.get_initmap(
                    dataset['train'], standardize=standardize,
                    cov_func=cov_func)
                trial_args['first_init_mat_params'] = imp
                trial_args['first_init_b'] = ib
                trial_args['first_perm'] = ip
            except (TypeError, ValueError) as error:
                # Best-effort: fall back to training without the initial map.
                print('No initial linear parameters due to error:\n{}'.format(
                    error))
        # Determine the number of iterations to run nepochs
        trial_args['batch_size'] = misc.get_default(
            trial_args, 'batch_size', 256)
        if nepochs is not None:
            N, d = dataset['train'].shape
            iters_per_epoch = N/float(trial_args['batch_size'])
            trial_args['train_iters'] = int(nepochs*iters_per_epoch)
        config = econfig.RedConfig(**trial_args)
        # Make directories specific to experiment trial.
        # TODO: don't overwrite save path if given
        # if save_path is None:
        # NOTE(review): this assignment shadows the outer `save_path` kwarg,
        # so the kwarg is effectively ignored (matches the TODO above).
        save_path = os.path.join(base_save_path, get_exp_name(args))
        misc.make_path(save_path)
        if base_log_path is not None:
            log_path = os.path.join(base_log_path, get_exp_name(args))
            misc.make_path(log_path)
        else:
            log_path = None
        # Save config for easy model loading.
        try:
            pickle.dump(
                trial_args, open(os.path.join(save_path, 'trial_args.p'), 'wb'))
        except TypeError:
            # Some trial args (e.g. function objects) are not picklable.
            print('Could not save trial arg pickle file.')
        # Set up trial and train.
        exp = exp_class(config, log_path, save_path, fetchers)
        res_dicts = exp.main()
        # Save results.
        if log_path is not None:
            pickle.dump(
                res_dicts, open(os.path.join(log_path, 'result.p'), 'wb'))
        else:
            pickle.dump(
                res_dicts, open(os.path.join(save_path, 'result.p'), 'wb'))
        if return_exp:
            return res_dicts, exp
        return res_dicts
    return main
def invalid_result(result):
    """A trial result is invalid when it is missing or its loss is NaN."""
    if result is None:
        return True
    return np.isnan(result['loss'])
def run_experiment(data, arg_list=ARG_LIST, def_args=DEF_ARGS,
                   exp_class=experiment.Experiment,
                   fetcher_class=bfetchers.DatasetFetchers,
                   estimator='TAN', retries=1,
                   log_path=None, save_path=None, experiments_name=None,
                   home=None, no_log=False, restore_best=False,
                   trial_iter_ratio=0.25):
    """Run a hyperparameter sweep over `arg_list` on `data`.

    Each config is trained via the closure from `make_trainer`, with up to
    `retries` retries on invalid (None/NaN-loss) results.  Tracks the trial
    with the lowest 'loss'.  When `restore_best` is set, every config is
    first trained for `trial_iter_ratio` of its iterations, then only the
    best one is continued to completion.  Returns (best, results) where
    `best` is a dict (loss/results/args/paths) or None.
    """
    assert not restore_best \
        or (trial_iter_ratio < 1 and trial_iter_ratio > 0), \
        "Cannot run partial trials with the given trial_iter_ratio."
    # Set up paths.
    if log_path is None or save_path is None:
        home = os.path.expanduser('~') if home is None else home
        data_name = os.path.basename(data)
        experiments_name = \
            experiments_name if experiments_name is not None else \
            datetime.now().strftime('%Y_%m_%d_%H:%M:%S.%f')
        log_path = log_path if log_path is not None else \
            os.path.join(
                home, 'de_logs', estimator, data_name, experiments_name)
        save_path = save_path if save_path is not None else \
            os.path.join(
                home, 'de_models', estimator, data_name, experiments_name)
    if no_log:
        log_path = None
    else:
        misc.make_path(log_path)
    misc.make_path(save_path)
    print('log path: {}\nsave path: {}'.format(log_path, save_path))
    # Get results for all hyperparameter choices
    raw_main = make_trainer(data, save_path, log_path, exp_class=exp_class,
                            fetcher_class=fetcher_class, **def_args)
    def main(args, return_exp=False):
        # Wrap raw_main so TF numerical blow-ups count as a failed trial
        # (None result) instead of aborting the whole sweep.
        try:
            output = raw_main(args, return_exp=return_exp)
        except tf.errors.InvalidArgumentError as e:
            print('\n\n\n\n########')
            print(e)
            print('########\n\n\n\n')
            output = None if not return_exp else [None, None]
        return output
    if restore_best:
        # Adjust how far we're going to train each initialization before
        # comparing to others
        train_iters = []
        for args in arg_list:
            train_iters.append(args['train_iters'])
        # NOTE(review): `is` compares identity, not equality — this relies
        # on CPython int interning/object reuse; should arguably be `==`.
        assert all([ti is train_iters[0] for ti in train_iters])
        total_train_iters = train_iters[0]
        trial_train_iters = int(trial_iter_ratio * total_train_iters)
        for ai in range(len(arg_list)):
            arg_list[ai]['train_iters'] = trial_train_iters
    if no_log:
        # With logging disabled, trial summaries are dumped into save_path.
        log_path = save_path
    results = []
    best = None
    print("Restore best: {}".format(restore_best))
    for ai in range(len(arg_list)):
        args = arg_list[ai]
        retries_left = retries
        print('RUNNING {}'.format(experiments_name))
        print('[{}/{}] {}'.format(ai+1, len(arg_list), args))
        main_output = main(args, return_exp=restore_best)
        if restore_best:
            # NOTE(review): this local `experiment` shadows the imported
            # `experiment` module for the rest of this function.
            results_ai, experiment = main_output
        else:
            results_ai = main_output
        results.append(results_ai)
        # Retry invalid trials up to `retries` times.
        while invalid_result(results[-1]) and retries_left > 0:
            print('[{}/{}] Retrying {}'.format(ai+1, len(arg_list), args))
            retries_left -= 1
            main_output = main(args, return_exp=restore_best)
            if restore_best:
                results_ai, experiment = main_output
            else:
                results_ai = main_output
            results[-1] = results_ai
        better_result = not invalid_result(results[-1]) and (
            invalid_result(best) or best['loss'] > results[-1]['loss']
        )
        if better_result:
            print(" Run is better")
            best = {}
            best['loss'] = results[-1]['loss']
            best['results'] = results[-1]
            best['args'] = args
            if restore_best:
                bi = ai
                best_experiment = experiment
        # Checkpoint the sweep state after every trial.
        pickle.dump(
            {'best': best, 'trial_results': results,
             'trial_args': arg_list[:ai+1]},
            open(os.path.join(log_path, experiments_name+'_all_trials.p'),
                 'wb'))
    if restore_best:
        best = None
        # Intended to test some different models for a few iterations and then
        # run the best of the bunch through completion
        # NOTE(review): if no trial produced a valid result, `bi` and
        # `best_experiment` are unbound here and this raises NameError.
        args = arg_list[bi]
        args['train_iters'] = total_train_iters - trial_train_iters
        best_experiment.config.train_iters = total_train_iters \
            - trial_train_iters
        retries_left = retries
        print('CONTINUING BEST {}'.format(experiments_name))
        print('[{}/{}] {}'.format(ai+1, len(arg_list), args))
        # Continue from where we left off
        results = [best_experiment.main()]  # destroy previous runs
        while invalid_result(results[-1]) and retries_left > 0:
            print('[{}/{}] Retrying {}'.format(ai+1, len(arg_list), args))
            retries_left -= 1
            results[-1] = best_experiment.main()
        if not invalid_result(results[0]):
            best = {}
            best['loss'] = results[-1]['loss']
            best['results'] = results[-1]
            best['args'] = args
        else:
            # This means we can train to a point but that we regularly approach
            # some instability before finishing
            # TODO:
            # Better Exception type
            raise Exception("Best initialization training succeeded. Full "
                            + "training failed.")
    if best is not None:
        best['save_path'] = save_path
        best['log_path'] = log_path
        best['def_args'] = def_args
        best_file = os.path.join(save_path, experiments_name + '_best_trial.p')
        with open(best_file, 'wb') as file:
            pickle.dump(best, file)
    return best, results
|
{"hexsha": "508654db4cba9a3c8a938c391a0b49867a93025a", "size": 14424, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/runner.py", "max_stars_repo_name": "lupalab/flowscan", "max_stars_repo_head_hexsha": "a0fc8fd25cb62c9eeb6583c3d7505d54b969f88c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-10-09T09:03:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-08T16:59:48.000Z", "max_issues_repo_path": "experiments/runner.py", "max_issues_repo_name": "lupalab/flowscan", "max_issues_repo_head_hexsha": "a0fc8fd25cb62c9eeb6583c3d7505d54b969f88c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/runner.py", "max_forks_repo_name": "lupalab/flowscan", "max_forks_repo_head_hexsha": "a0fc8fd25cb62c9eeb6583c3d7505d54b969f88c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7591623037, "max_line_length": 80, "alphanum_fraction": 0.6223655019, "include": true, "reason": "import numpy", "num_tokens": 3326}
|
import tensorflow as tf
import pickle
import pandas as pd
import numpy as np
from pathlib import Path
from absl import app
from konlpy.tag import Mecab
from model.data import Corpus
from tqdm import tqdm
from model.net import SenCNN
from configs import FLAGS
def main(argv):
    """Evaluate a trained SenCNN sentiment classifier on the test split.

    Loads the vocabulary pickle and the latest checkpoint from
    ./data_out/tf_ckpts, streams ./data_in/test.txt in batches, and prints
    the aggregate test loss and accuracy.

    Args:
        argv: unused; required by absl.app.run.
    """
    test_data = Path.cwd() / 'data_in' / 'test.txt'
    with open(Path.cwd() / 'data_in' / 'vocab.pkl', mode='rb') as io:
        vocab = pickle.load(io)
    test = tf.data.TextLineDataset(str(test_data)).batch(batch_size=FLAGS.batch_size)
    tokenized = Mecab()
    processing = Corpus(vocab=vocab, tokenizer=tokenized)
    # init params actually used below (the unused length/epochs/batch_size
    # locals from the training script were removed; batching above reads
    # FLAGS.batch_size directly)
    classes = FLAGS.classes
    learning_rate = FLAGS.learning_rate
    # create model
    sen_cnn = SenCNN(vocab=vocab, classes=classes)
    # create optimizer & loss_fn (optimizer is only needed so the
    # checkpoint, which includes optimizer state, can be restored)
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy()
    test_loss_metric = tf.keras.metrics.Mean(name='val_loss')
    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')
    # restore the latest checkpoint (weights + optimizer state)
    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=sen_cnn)
    manager = tf.train.CheckpointManager(ckpt, './data_out/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")
    tf.keras.backend.set_learning_phase(0)  # inference mode (disable dropout)
    test_loss_metric.reset_states()
    test_acc_metric.reset_states()
    for val in test:
        data, label = processing.token2idex(val)
        logits = sen_cnn(data)
        val_loss = loss_fn(label, logits)
        test_loss_metric.update_state(val_loss)
        test_acc_metric.update_state(label, logits)
    test_loss = test_loss_metric.result()
    tqdm.write(
        'epoch : {}, tr_acc : {:.3f}%, tr_loss : {:.3f}, '.format(1, test_acc_metric.result() * 100, test_loss))
if __name__ == '__main__':
    app.run(main)
|
{"hexsha": "d93bf1040192f529d64e8ba73ec96189e5578299", "size": 2149, "ext": "py", "lang": "Python", "max_stars_repo_path": "jmkim/CNN_SC/prediction.py", "max_stars_repo_name": "modudeepnlp/NLP_Tensorflow2.0", "max_stars_repo_head_hexsha": "b0fcc5a1521e865b0a7b06042324c0b0d6844d06", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-05-18T06:40:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-24T11:08:16.000Z", "max_issues_repo_path": "jmkim/CNN_SC/prediction.py", "max_issues_repo_name": "modudeepnlp/NLP_Tensorflow2.0", "max_issues_repo_head_hexsha": "b0fcc5a1521e865b0a7b06042324c0b0d6844d06", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jmkim/CNN_SC/prediction.py", "max_forks_repo_name": "modudeepnlp/NLP_Tensorflow2.0", "max_forks_repo_head_hexsha": "b0fcc5a1521e865b0a7b06042324c0b0d6844d06", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-22T15:49:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-03T13:50:21.000Z", "avg_line_length": 29.8472222222, "max_line_length": 112, "alphanum_fraction": 0.701721731, "include": true, "reason": "import numpy", "num_tokens": 524}
|
# %%
# Setup
## Packages
import pandas as pd
import numpy as np
import torch
from transformers import RobertaForSequenceClassification, TrainingArguments, Trainer, RobertaTokenizer, RobertaConfig
from datasets import load_metric, load_dataset
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, classification_report
from tqdm import tqdm
## Cuda
# Pick GPU when available; n_gpu drives the epoch/batch-size choices below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
####### Model Config ############
## Model name
model_to_use = "roberta-base"
trained_model_name = "ManiBERT_v2"
## Max sequence length
max_lengh_parameter = 512
## Number of labels
label_count = 56
## Number of epochs
# Full training only on multi-GPU machines; a single GPU gets a short run.
if n_gpu > 1 :
    epoch_count = 5
else:
    epoch_count = 1
## Batch size
if n_gpu > 1 :
    batch_size = 16
else:
    batch_size = 4
## warmup_steps
warmup_ratio_parameter = 0.05
## weight_decay
weight_decay_parameter = 0.1
## learning_rate
learning_rate_parameter = 5e-05
## Log file
log_name = '01_Report/log_manibert.json'
## Report
# NOTE(review): "validatipon" is a typo, but the name is used below —
# renaming it is out of scope for a comment-only pass.
validatipon_report_name = '01_Report/validation_report_manibert.txt'
test_report_name = '01_Report/test_report_manibert.txt'
####### Data Config ############
## Train data
train_data = "00_Data_intern/01_data/trainingsdaten_manibert_27022022.csv"
## Validation data
valid_data = "00_Data_intern/01_data/validierungsdaten_manibert_27022022.csv"
## Test data
test_data = "00_Data_intern/01_data/testdaten_manibert_27022022.csv"
## CSV delimiter
delimeter_char = ","
## Label Names
## Label Names
# Canonical ordered list of the 56 manifesto category names; the position
# of each name in this list is its numeric class id.
label_names = [
    "Foreign Special Relationships: Positive",
    "Foreign Special Relationships: Negative",
    "Anti-Imperialism",
    "Military: Positive",
    "Military: Negative",
    "Peace",
    "Internationalism: Positive",
    "European Community/Union or Latin America Integration: Positive",
    "Internationalism: Negative",
    "European Community/Union or Latin America Integration: Negative",
    "Freedom and Human Rights",
    "Democracy",
    "Constitutionalism: Positive",
    "Constitutionalism: Negative",
    "Decentralisation: Positive",
    "Centralisation: Positive",
    "Governmental and Administrative Efficiency",
    "Political Corruption",
    "Political Authority",
    "Free Market Economy",
    "Incentives: Positive",
    "Market Regulation",
    "Economic Planning",
    "Corporatism/ Mixed Economy",
    "Protectionism: Positive",
    "Protectionism: Negative",
    "Economic Goals",
    "Keynesian Demand Management",
    "Economic Growth: Positive",
    "Technology and Infrastructure: Positive",
    "Controlled Economy",
    "Nationalisation",
    "Economic Orthodoxy",
    "Marxist Analysis: Positive",
    "Anti-Growth Economy and Sustainability",
    "Environmental Protection",
    "Culture: Positive",
    "Equality: Positive",
    "Welfare State Expansion",
    "Welfare State Limitation",
    "Education Expansion",
    "Education Limitation",
    "National Way of Life: Positive",
    "National Way of Life: Negative",
    "Traditional Morality: Positive",
    "Traditional Morality: Negative",
    "Law and Order",
    "Civic Mindedness: Positive",
    "Multiculturalism: Positive",
    "Multiculturalism: Negative",
    "Labour Groups: Positive",
    "Labour Groups: Negative",
    "Agriculture and Farmers",
    "Middle Class and Professional Groups",
    "Underprivileged Minority Groups",
    "Non-economic Demographic Groups"
]
## Config Dicts
# Derive id2label / label2id from label_names instead of hand-writing both
# dicts: the values are identical to the previous hard-coded literals, but
# the three structures can no longer drift apart.  id2label keys are
# strings (matching the JSON-style config the hard-coded dict used);
# label2id values are ints.
id2label_parameter = {str(i): name for i, name in enumerate(label_names)}
label2id_parameter = {name: i for i, name in enumerate(label_names)}
####### Functions ############
def tokenize_function(examples):
    """Tokenize a batched `datasets` mapping with a "text" column.

    Pads every sequence to the model max length and truncates longer ones.
    Relies on the module-level `tokenizer` created further down the script.
    """
    return tokenizer(examples["text"], padding="max_length", truncation=True)
## Neue Metrics function: https://huggingface.co/transformers/v3.0.2/training.html#trainer
## New metrics function: https://huggingface.co/transformers/v3.0.2/training.html#trainer
def compute_metrics(pred):
    """Compute accuracy and micro/macro/weighted F1 for a Trainer EvalPrediction.

    `pred.label_ids` holds the gold labels; `pred.predictions` the raw
    logits, reduced to class ids via argmax over the last axis.
    """
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # Only the micro-averaged precision/recall are reported; the macro and
    # weighted calls are made just for their F1 component.
    precision, recall, f1_micro, _ = precision_recall_fscore_support(labels, preds, average='micro')
    _, _, f1_macro, _ = precision_recall_fscore_support(labels, preds, average='macro')
    _, _, f1_weighted, _ = precision_recall_fscore_support(labels, preds, average='weighted')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1-micro': f1_micro,
        'f1-macro': f1_macro,
        'f1-weighted': f1_weighted,
        'precision': precision,
        'recall': recall
    }
# %%
# Load the train/validation/test CSVs into one DatasetDict.
raw_datasets = load_dataset('csv',data_files={'train':[train_data],'validation':[valid_data],'test': [test_data]},delimiter=delimeter_char)
# %%
# Tokenizer
# NOTE(review): the tokenizer is instantiated twice — once only to save a
# copy into the output dir and once for actual use; a single instance
# saved afterwards would do the same job.
RobertaTokenizer.from_pretrained(
    model_to_use,
    model_max_length=max_lengh_parameter
).save_pretrained(trained_model_name)
tokenizer = RobertaTokenizer.from_pretrained(
    model_to_use,
    model_max_length=max_lengh_parameter
)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# %%
# Trainer arguments
training_args = TrainingArguments(
    output_dir=trained_model_name,
    warmup_ratio=warmup_ratio_parameter,
    weight_decay=weight_decay_parameter,
    learning_rate=learning_rate_parameter,
    fp16 = True,  # mixed precision (requires a CUDA device)
    evaluation_strategy="epoch",
    num_train_epochs=epoch_count,
    per_device_train_batch_size=batch_size,
    overwrite_output_dir=True,
    per_device_eval_batch_size=batch_size,
    save_strategy="no",  # no intermediate checkpoints; final save below
    logging_dir='logs',
    logging_strategy= 'steps',
    logging_steps=10,
    push_to_hub=True,  # upload to the Hugging Face Hub when training ends
    hub_strategy="end")
# %%
# Load the pretrained backbone with a fresh 56-way classification head.
model = RobertaForSequenceClassification.from_pretrained(
    model_to_use,
    num_labels=label_count,
    id2label=id2label_parameter,
    label2id=label2id_parameter
)
# %%
# Define the trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    compute_metrics=compute_metrics,
)
# %%
# Train
trainer.train()
# %%
# Evaluate for Classification Report
## Validation
predictions, labels, _ = trainer.predict(tokenized_datasets["validation"])
predictions = np.argmax(predictions, axis=1)
with open(validatipon_report_name,'w',encoding='utf-8') as f:
    f.truncate(0)  # clear the file first
    f.write(classification_report(y_pred=predictions,y_true=labels,target_names=label_names))
# %%
# Evaluate for Classification Report
## Test
predictions, labels, _ = trainer.predict(tokenized_datasets["test"])
predictions = np.argmax(predictions, axis=1)
with open(test_report_name,'w',encoding='utf-8') as f:
    f.truncate(0)  # clear the file first
    f.write(classification_report(y_pred=predictions,y_true=labels,target_names=label_names))
# %%
# Save artifacts
## Save the training log (one dict per logged step)
with open(log_name, 'w',encoding='utf-8') as f:
    f.truncate(0)  # clear the file first
    for obj in trainer.state.log_history:
        f.write(str(obj)+'\n')
## Save the model and tokenizer (tokenizer also pushed to the Hub)
trainer.save_model(trained_model_name)
tokenizer.save_pretrained(trained_model_name, push_to_hub=True)
# %%
|
{"hexsha": "01e07ad8588df02b2b29517992a57bb9371ff315", "size": 10984, "ext": "py", "lang": "Python", "max_stars_repo_path": "training.py", "max_stars_repo_name": "NiksMer/ManiBERT", "max_stars_repo_head_hexsha": "00e726ccd3d1b465c614c72b0b79c5286d0e68b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "training.py", "max_issues_repo_name": "NiksMer/ManiBERT", "max_issues_repo_head_hexsha": "00e726ccd3d1b465c614c72b0b79c5286d0e68b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training.py", "max_forks_repo_name": "NiksMer/ManiBERT", "max_forks_repo_head_hexsha": "00e726ccd3d1b465c614c72b0b79c5286d0e68b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3425414365, "max_line_length": 140, "alphanum_fraction": 0.7113073562, "include": true, "reason": "import numpy", "num_tokens": 2782}
|
#!/usr/bin/env julia
# This attempts to implement call/cc feature in julia
# Wrapper thrown to smuggle a value out of a computation; the `name`
# parameter tags each call/cc site so nested uses don't catch each other.
# NOTE(review): `type` is pre-1.0 Julia syntax (`mutable struct` on 1.x).
type Continuation_box{name}
    value
end
# Emulate call/cc via throw/catch: inside `content`, calling `name(v)`
# aborts the block and makes the whole expression evaluate to `v`.
# A per-invocation random symbol tags the box so nested @call_cc blocks
# only catch their own escapes.
macro call_cc(name::Symbol, content)
    quote
        u = Symbol(randstring())
        # Fix: the original threw the undefined type `Continuation_thrown`,
        # so invoking the escape function always raised UndefVarError.
        $name = v -> throw(Continuation_box{u}(v))
        try
            $content
        catch e
            if e isa Continuation_box{u}
                e.value
            else
                rethrow(e)  # don't silently swallow unrelated exceptions
            end
        end
    end
end
|
{"hexsha": "35a0f1be0039930194120fa79d53b2e13da0216c", "size": 410, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia-continuation/call_cc.jl", "max_stars_repo_name": "sclereid/collection", "max_stars_repo_head_hexsha": "61782f45314db560657b3081e2fa8e1032fd8dbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia-continuation/call_cc.jl", "max_issues_repo_name": "sclereid/collection", "max_issues_repo_head_hexsha": "61782f45314db560657b3081e2fa8e1032fd8dbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-01-21T02:02:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-06T05:34:19.000Z", "max_forks_repo_path": "julia-continuation/call_cc.jl", "max_forks_repo_name": "sclereid/collection", "max_forks_repo_head_hexsha": "61782f45314db560657b3081e2fa8e1032fd8dbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0833333333, "max_line_length": 53, "alphanum_fraction": 0.556097561, "num_tokens": 102}
|
# -*- coding:utf-8 -*-
import unittest
from ddt import ddt, data
import math
import ctypes
import datetime
from ctypes import *
import numpy as np
from numba import cuda
import numpy as np
import os
# Restrict CUDA to the first GPU so the test runs on a single device.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
@ddt
class TestQuantImpl(unittest.TestCase):
    """Compare the GFPQ GPU quantize/dequantize reference implementation
    against nnieqat's fake_quantize over several value distributions.

    Requires a CUDA device, libgfpq_gpu.so, and libcublas.so; each dataN
    class attribute below becomes one ddt-parametrized test case.
    """
    # Dense ramps just above 1.04 plus outliers around the 512/513
    # saturation threshold, their negative mirrors, and simple ranges.
    max_thres = 512
    data0 = np.array([0])
    data1 = np.array([v / 25600 + 1.04
                      for v in range(25600)] + [100, max_thres])
    data2 = np.array([v / 25600 + 1.04
                      for v in range(25600)] + [100, max_thres])
    # NOTE(review): data2 is immediately reassigned, so the positive version
    # above is dead — presumably the negative mirror was intended all along.
    data2 = np.array([-v / 25600 - 1.04
                      for v in range(25600)] + [-100, -max_thres])
    data3 = np.array(
        [0, 1, 2, 2.03992188, 2.03996094, 3, 4, 5, 10, 100, max_thres])
    max_thres = 513
    data4 = np.array([v / 25600 + 1.04
                      for v in range(25600)] + [100, max_thres])
    data5 = np.array([v / 25600 + 1.04
                      for v in range(25600)] + [100, max_thres])
    data6 = np.array([-v / 25600 - 1.04
                      for v in range(25600)] + [-100, -max_thres])
    data7 = np.array(
        [0, 1, 2, 2.03992188, 2.03996094, 3, 4, 5, 10, 100, max_thres])
    data8 = np.array([
        0, -1, -2, -2.03992188, -2.03996094, -3, -4, -5, -10, -100, -max_thres
    ])
    data9 = np.array(range(1234))
    data10 = np.array([-v for v in range(1234)])
    @data(data0, data1, data2, data3, data4, data5, data6, data7, data8, data9,
          data10)
    def test(self, data):
        # Run the vendor GFPQ quantize/dequantize on the GPU, then the
        # Python fake_quantize on the same input, and print the max diff.
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        # load library
        dl = ctypes.cdll.LoadLibrary
        quant_lib = dl("nnieqat/gpu/lib/libgfpq_gpu.so")
        _libcublas = ctypes.cdll.LoadLibrary("libcublas.so")
        # struct GFPQ_PARAM_ST in gfpq.hpp
        class GFPQ_PARAM_ST(ctypes.Structure):
            _fields_ = [("mode", ctypes.c_int), ("buf", ctypes.c_byte * 16)]
        class _types:
            """Some alias types."""
            handle = ctypes.c_void_p
            stream = ctypes.c_void_p
        # Keep the original values; `data` itself is overwritten in place
        # with the GFPQ round-trip result below.
        data_origin = data.copy()
        print(
            "----------------------------------------------------------------------"
        )
        print("\n\nOriginal data:")
        print(data)
        data = data.astype(np.float32)
        stream = cuda.stream()
        _libcublas.cublasCreate_v2.restype = int
        _libcublas.cublasCreate_v2.argtypes = [ctypes.c_void_p]
        cublas_handle = _types.handle()
        _libcublas.cublasCreate_v2(ctypes.byref(cublas_handle))
        data_gpu = cuda.to_device(data, stream=stream)
        data_p = data_gpu.device_ctypes_pointer
        bit_width = 8
        param = GFPQ_PARAM_ST()
        # init or update param first
        param.mode = 0
        ret = quant_lib.HI_GFPQ_QuantAndDeQuant_GPU_PY(data_p, data.size,
                                                       bit_width,
                                                       ctypes.byref(param),
                                                       stream.handle,
                                                       cublas_handle)
        if ret != 0:
            print("HI_GFPQ_QuantAndDeQuant failed(%d)\n" % (ret)),
        # use apply param
        param.mode = 2
        ret = quant_lib.HI_GFPQ_QuantAndDeQuant_GPU_PY(data_p, data.size,
                                                       bit_width,
                                                       ctypes.byref(param),
                                                       stream.handle,
                                                       cublas_handle)
        if ret != 0:
            print("HI_GFPQ_QuantAndDeQuant failed(%d)" % (ret)),
        data_gpu.copy_to_host(data, stream=stream)
        # data may not be available
        stream.synchronize()
        _libcublas.cublasDestroy_v2(cublas_handle)
        # Now run the Python implementation under test on the same values.
        import nnieqat
        from quant_impl import fake_quantize
        import torch
        tensor = torch.Tensor(data_origin).cuda()
        tensor.data = fake_quantize(tensor.data.detach(), 8)
        diff = abs(tensor.cpu().numpy() - data)
        # diff_thres = np.max(abs(data)) * 0.001
        # print("\nDIFF > 0.1%: ")
        # print("idx: ", np.where(diff > diff_thres))
        # print("Original data:", data_origin[np.where(diff > diff_thres)])
        # print("GFPQ result:", data[np.where(diff > diff_thres)])
        # print("Impl result:", tensor.cpu().numpy()[np.where(diff > diff_thres)])
        # NOTE(review): no assertion is made on diff_max — the test only
        # prints the discrepancy, so it cannot fail on a mismatch.
        diff_max = np.max(diff)
        print("\nDIFF MAX: " + str(diff_max))
        print("\nDIFF RATIO: " +
              str(diff_max / max(np.max(abs(data)), pow(10, -18))))
if __name__ == "__main__":
    # Run the quantization test via an explicit suite.
    # NOTE(review): ddt replaces the decorated `test` method with generated
    # per-datum methods (test_1_..., etc.) — confirm TestQuantImpl("test")
    # still resolves; unittest.main() would discover the generated names.
    suite = unittest.TestSuite()
    suite.addTest(TestQuantImpl("test"))
    runner = unittest.TextTestRunner()
    runner.run(suite)
|
{"hexsha": "72bb5ff5a47ccb7536fe27e3ee40d73528073ee9", "size": 4786, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_quant_impl.py", "max_stars_repo_name": "Yerniyaz/nnieqat-pytorch", "max_stars_repo_head_hexsha": "470f5d94a68ce7c55cc384d76054e9546d6ec299", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 213, "max_stars_repo_stars_event_min_datetime": "2020-08-16T13:31:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T17:33:01.000Z", "max_issues_repo_path": "tests/test_quant_impl.py", "max_issues_repo_name": "Yerniyaz/nnieqat-pytorch", "max_issues_repo_head_hexsha": "470f5d94a68ce7c55cc384d76054e9546d6ec299", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-18T02:36:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T07:43:39.000Z", "max_forks_repo_path": "tests/test_quant_impl.py", "max_forks_repo_name": "Yerniyaz/nnieqat-pytorch", "max_forks_repo_head_hexsha": "470f5d94a68ce7c55cc384d76054e9546d6ec299", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2020-08-17T08:43:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T07:57:46.000Z", "avg_line_length": 37.1007751938, "max_line_length": 84, "alphanum_fraction": 0.5246552445, "include": true, "reason": "import numpy,from numba", "num_tokens": 1274}
|
import csv
import pandas as pd
import numpy as np
from datetime import datetime, date, time, timedelta
import glob
from lmatools.io.LMA_h5_file import LMAh5File
from lmatools.coordinateSystems import RadarCoordinateSystem, GeographicSystem, TangentPlaneCartesianSystem, MapProjection
def search_files(dir, time_start, time_end):
    """Return the LMA .h5 files in directory `dir` within a time interval.

    Parameters
    ----------
    dir : str
        Directory to scan; concatenated directly with '*.h5', so it must
        end with a path separator.  (Name shadows the `dir` builtin, but
        renaming would break keyword callers.)
    time_start, time_end : datetime.datetime
        Start times matched by equality against each file's
        `start_times[0]`.

    Notes
    -----
    NOTE(review): the slice `[ind_s:ind_e]` excludes the file matching
    `time_end`, and when a time is not found the defaults (0 / len) apply
    silently — confirm both are intended.  Each candidate file is opened
    twice (one scan per endpoint).
    """
    lmafiles = sorted(glob.glob(dir + '*.h5'))
    # Start
    ind_s = 0
    ind_e = len(lmafiles)
    for i in np.arange(len(lmafiles)):
        lma = LMAh5File(lmafiles[i])
        if (lma.start_times[0] == time_start):
            ind_s = i
            break
    # End
    for i in np.arange(len(lmafiles)):
        lma = LMAh5File(lmafiles[i])
        if (lma.start_times[0] == time_end):
            ind_e = i
            break
    lmafiles = lmafiles[ind_s:ind_e]
    return lmafiles
import math
def closest_lma_rhi_time(lma_df, dt, radar_time):
    """
    Return the rows of ``lma_df`` whose flashes fall within ``radar_time``
    +/- ``dt`` seconds.

    Parameters
    ----------
    lma_df : pandas.DataFrame
        LMA sources with at least ``time`` (seconds of day) and ``flash_id``
        columns.
    dt : number
        Time threshold in seconds around ``radar_time``.
    radar_time : datetime.datetime
        Time of the radar scan; its date is used to build absolute times.

    Returns
    -------
    pandas.DataFrame
        Subset of ``lma_df`` with flash ids between the flash closest to
        ``radar_time - dt`` and the flash closest to ``radar_time + dt``.

    Notes
    -----
    Adds a ``datetime`` column to ``lma_df`` in place (kept for backward
    compatibility with existing callers).
    """
    # Convert VHF source times (seconds of day) to datetimes on the radar date.
    secs = lma_df.time.values
    hr = secs // 3600
    mins = (secs % 3600) // 60
    seg = (secs % 3600) % 60
    # Vectorized split into fractional/integral parts (was a per-element
    # math.modf loop).
    frac, whole = np.modf(seg)
    microsegs = frac * (10 ** 6)
    segs = whole
    lma_times = np.zeros(len(lma_df), dtype=object)
    for t in np.arange(len(lma_df)):
        lma_times[t] = datetime.combine(radar_time.date(),
                                        time(int(hr[t]), int(mins[t]), int(segs[t]), int(microsegs[t])))
    lma_df['datetime'] = lma_times  # NOTE: mutates the caller's dataframe
    # Absolute time distance of every source from the window edges.
    tmi = abs(lma_times - (radar_time - timedelta(seconds=dt)))
    tma = abs(lma_times - (radar_time + timedelta(seconds=dt)))
    # Flash ids of the sources closest to each window edge (direct positional
    # indexing instead of a one-element slice).
    flash_tmi = lma_df.flash_id.values[tmi.argmin()]
    flash_tma = lma_df.flash_id.values[tma.argmin()]
    cond = np.logical_and(lma_df.flash_id >= flash_tmi, lma_df.flash_id <= flash_tma)
    return lma_df[cond]
from radarlma2local import rcs_to_tps, geo_to_tps
from ortho_proj import rot_mat_lma
def closest_lma_rhi(lma_df, lma_ortho, dsi):
    """
    Return the rows of ``lma_df`` belonging to flashes that have at least one
    source within ``dsi`` of the RHI scan plane.

    Parameters
    ----------
    lma_df : pandas.DataFrame
        LMA sources with a ``flash_id`` column; one row per source, aligned
        row-for-row with ``lma_ortho``.
    lma_ortho : numpy.ndarray
        Per-source coordinates; column 1 is the (signed) distance from the
        RHI plane.
    dsi : number
        Distance threshold (same units as ``lma_ortho``).

    Returns
    -------
    pandas.DataFrame
        Rows of ``lma_df`` for qualifying flashes, in original order, each
        row exactly once.

    Notes
    -----
    Fixes two defects of the original implementation: ``DataFrame.append``
    (removed in pandas >= 2.0) is no longer used, and flashes with several
    close sources are no longer duplicated once per close source.
    """
    Ylma_ortho = np.abs(lma_ortho[:, 1])
    # Flash ids with at least one source closer than dsi to the scan plane.
    close_ids = np.unique(lma_df.flash_id.values[Ylma_ortho < dsi])
    # Whole flash included if any single source qualifies.
    return lma_df[lma_df.flash_id.isin(close_ids)]
def closest_lma_rhi_cs(lma_df, radar, dsi):
    """
    Given a lma dataframe and a distance threshold ``dsi`` (in km) from the
    radar RHI scan plane, return an (N, 3) array with the along-scan
    coordinate, height and time of the LMA sources within the threshold.
    """
    # Radar gates in the local tangent-plane system (X, Y unused below).
    X, Y, Z = rcs_to_tps(radar)
    # LMA source positions in the same tangent-plane system.
    Xlma,Ylma,Zlma = geo_to_tps(lma_df, radar)
    # NOTE(review): ortho_proj_lma is not imported in this module (only
    # rot_mat_lma is imported from ortho_proj); as written this raises
    # NameError -- confirm the intended import.
    lma_file_ortho = ortho_proj_lma(radar, lma_df)
    # Column 1 is presumably the distance from the RHI plane -- see
    # closest_lma_rhi above; confirm against ortho_proj_lma.
    Ylma_ortho=np.abs(lma_file_ortho[:,1])
    # Along-scan coordinate, height and time of the sources within dsi.
    lma_file_ortho_xnew = lma_file_ortho[np.where(Ylma_ortho<dsi),0][0,:]
    Zlma_new = Zlma[np.where(Ylma_ortho<dsi)]
    time_new = lma_df.time.values[np.where(Ylma_ortho<dsi)]
    # Stack the three arrays as columns: (x_along_scan, z, time).
    lma_ortho_new = np.zeros(shape=(len(lma_file_ortho_xnew), 3))
    lma_ortho_new[:,0] = lma_file_ortho_xnew
    lma_ortho_new[:,1] = Zlma_new
    lma_ortho_new[:,2] = time_new
    return lma_ortho_new
def one_flash(lma_df, flash_id):
    """Return only the VHF sources of ``lma_df`` with the given flash id."""
    mask = lma_df.flash_id == flash_id
    return lma_df[mask]
def loc_1source(lma_df, flash_id):
    """
    Average location of the first four (earliest) VHF sources of a flash.

    Returns a list ``[latitude, longitude, altitude]`` averaged over the
    first four sources of ``flash_id`` after sorting by time.
    """
    sel = lma_df[lma_df.flash_id.values == flash_id].sort_values(by=['time'])
    return [np.mean(sel.lat.values[:4]),
            np.mean(sel.lon.values[:4]),
            np.mean(sel.alt.values[:4])]
|
{"hexsha": "bd0fe121a5d8f7112f3660057e7b6dc4d37fa0f7", "size": 5084, "ext": "py", "lang": "Python", "max_stars_repo_path": "select_lma.py", "max_stars_repo_name": "jcssouza/LMAinterceptRHI", "max_stars_repo_head_hexsha": "f6877f53f383fbaecac8c42baa08680831dbe42c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "select_lma.py", "max_issues_repo_name": "jcssouza/LMAinterceptRHI", "max_issues_repo_head_hexsha": "f6877f53f383fbaecac8c42baa08680831dbe42c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "select_lma.py", "max_forks_repo_name": "jcssouza/LMAinterceptRHI", "max_forks_repo_head_hexsha": "f6877f53f383fbaecac8c42baa08680831dbe42c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1076923077, "max_line_length": 156, "alphanum_fraction": 0.6776160504, "include": true, "reason": "import numpy", "num_tokens": 1498}
|
/* Copyright 2018 Ignacio Torroba (ignaciotb@kth.se)
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EKF_LOCALIZATION_HPP
#define EKF_LOCALIZATION_HPP
#include <ros/timer.h>
#include <ros/ros.h>
#include <eigen3/Eigen/Eigen>
#include <eigen3/Eigen/Dense>
#include <Eigen/SparseCore>
#include "gazebo_msgs/GetWorldProperties.h"
#include "gazebo_msgs/GetModelState.h"
#include <queue>
#include <math.h>
#include <boost/thread/mutex.hpp>
#include <boost/assign.hpp>
#include <boost/bind.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/math/distributions/chi_squared.hpp>
#include <boost/math/distributions/inverse_chi_squared.hpp>
#include <nav_msgs/Odometry.h>
#include <geometry_msgs/PoseArray.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <geometry_msgs/TwistWithCovarianceStamped.h>
#include <geometry_msgs/Quaternion.h>
#include <geometry_msgs/TransformStamped.h>
#include <visualization_msgs/MarkerArray.h>
#include <visualization_msgs/Marker.h>
#include <geometry_msgs/PoseArray.h>
#include <tf/tf.h>
#include <tf2/transform_datatypes.h>
#include <tf2/utils.h>
#include <tf/transform_listener.h>
#include <tf/transform_broadcaster.h>
#include "ekf_slam_core/ekf_slam_core.hpp"
#include "smarc_lm_visualizer/init_map.h"
/**
* @brief The EKFSLAM class
* EKF-based localization node for LoLo
* Inputs:
* IMU, DVL and landmarks positions from measurements
* Map as a collection of landmarks with respect to world frame
* Outputs:
* nav_msgs/Odometry with an estimate of the 6DOF pose of LoLo
* updated tf transform odom --> base_link
*/
class EKFSLAM{
public:
// node_name: name of this ROS node; nh: node handle owned by the caller.
EKFSLAM(std::string node_name, ros::NodeHandle &nh);
// Periodic EKF update, fired by timer_.
void ekfLocalize(const ros::TimerEvent&);
~EKFSLAM();
// Initialize the filter:
//   sigma_diag  - diagonal of the initial state covariance
//   r_diag      - motion (process) noise diagonal
//   q_fls_diag  - measurement noise diagonal for the FLS sensor
//   q_mbes_diag - measurement noise diagonal for the MBES sensor
//   delta       - presumably a data-association confidence level; confirm in EKFCore
//   mhl_dist_fls / mhl_dist_mbes - Mahalanobis-distance gates per sensor
void init(std::vector<double> sigma_diag, std::vector<double> r_diag, std::vector<double> q_fls_diag, std::vector<double> q_mbes_diag, double delta, double mhl_dist_fls, double mhl_dist_mbes);
private:
// ROS variables
ros::NodeHandle *nh_;
std::string node_name_;
ros::Timer timer_;
// Comms: odometry + landmark observations in, pose estimate + markers out
ros::Subscriber odom_subs_;
ros::Subscriber observs_subs_;
ros::Publisher pose_pub_;
ros::Publisher vis_pub_;
ros::ServiceClient init_map_client_;
// Handlers for sensors: queues filled by the callbacks, drained by ekfLocalize
std::deque<geometry_msgs::PoseArray> measurements_t_;
std::deque<nav_msgs::Odometry> odom_queue_t_;
boost::mutex msg_lock_;  // guards the queues above
// EKF state variables: mean, covariance and the core filter implementation
Eigen::VectorXd mu_;
Eigen::MatrixXd Sigma_;
EKFCore* ekf_filter_;
// Mapping variables
int lm_num_;  // number of landmarks currently in the map/state
// Aux: bookkeeping of queue sizes
unsigned int size_odom_q_;
unsigned int size_measurements_q_;
// tf: broadcaster/listener plus cached static transforms between frames
tf::TransformBroadcaster map_bc_;
tf::TransformListener tf_listener_;
tf::StampedTransform transf_dvl_base_;
tf::StampedTransform transf_world_odom_;
tf::StampedTransform transf_map_world_;
tf::StampedTransform transf_base_sssr_;
geometry_msgs::TransformStamped msg_odom_map_;
// Frame and service names (read from parameters; see constructor)
std::string odom_frame_;
std::string fls_frame_;
std::string mbes_frame_;
std::string map_frame_;
std::string world_frame_;
std::string base_frame_;
std::string sssr_frame_;
std::string map_srv_name_;
std::string lm_srv_name_;
std::string map_srv_;
bool mbes_input_;  // whether MBES measurements are used as input
// Input callbacks
void odomCB(const nav_msgs::Odometry &odom_msg);
void observationsCB(const geometry_msgs::PoseArray &observ_msg);
/**
* @brief updateMapMarkers
* Publishes the map as an array of markers for visualization in RVIZ
*/
void updateMapMarkers(double color);
/**
* @brief EKFSLAM::sendOutput
* @param t_meas timestamp of the measurement being published
* @return true on success
* Publishes AUV odometry info and tf odom --> base_link
*/
bool sendOutput(ros::Time t_meas);
// Broadcasts the map --> odom transform for the given timestamp.
bool bcMapOdomTF(ros::Time t_meas);
};
#endif // EKF_LOCALIZATION_HPP
|
{"hexsha": "f488da2580460d2be776c62c6984175e3988beef", "size": 5225, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "auv_ekf_slam/include/ekf_slam/ekf_slam.hpp", "max_stars_repo_name": "nilsbore/smarc_navigation", "max_stars_repo_head_hexsha": "97d0a30498e72506e7472c98c5fa0d86d19f0f04", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15.0, "max_stars_repo_stars_event_min_datetime": "2020-01-24T10:24:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T10:22:41.000Z", "max_issues_repo_path": "auv_ekf_slam/include/ekf_slam/ekf_slam.hpp", "max_issues_repo_name": "nilsbore/smarc_navigation", "max_issues_repo_head_hexsha": "97d0a30498e72506e7472c98c5fa0d86d19f0f04", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 20.0, "max_issues_repo_issues_event_min_datetime": "2018-02-08T09:46:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-07T09:40:26.000Z", "max_forks_repo_path": "auv_ekf_slam/include/ekf_slam/ekf_slam.hpp", "max_forks_repo_name": "nilsbore/smarc_navigation", "max_forks_repo_head_hexsha": "97d0a30498e72506e7472c98c5fa0d86d19f0f04", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2018-01-25T14:42:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T15:18:28.000Z", "avg_line_length": 34.1503267974, "max_line_length": 758, "alphanum_fraction": 0.7511961722, "num_tokens": 1236}
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import gym
import json
import importlib
import numpy as np
import utils
import monitor
def main():
    """Load each JSON results file named on the command line and plot them."""
    results_list = []
    for path in sys.argv[1:]:
        with open(path) as fp:
            results_list.append(json.load(fp))
    # Plot results:
    utils.plot_result(results_list)
if __name__ == "__main__":
    # Execute only when run as a script (not on import).
    main()
|
{"hexsha": "f52a9cc9d86811fe5a4e7c6d36726a435d6a68bf", "size": 432, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_results.py", "max_stars_repo_name": "Lucien-MG/rl-blackjack", "max_stars_repo_head_hexsha": "f0903db4b4be14ba5994b0808d7c9c1e7ed00825", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plot_results.py", "max_issues_repo_name": "Lucien-MG/rl-blackjack", "max_issues_repo_head_hexsha": "f0903db4b4be14ba5994b0808d7c9c1e7ed00825", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_results.py", "max_forks_repo_name": "Lucien-MG/rl-blackjack", "max_forks_repo_head_hexsha": "f0903db4b4be14ba5994b0808d7c9c1e7ed00825", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.28, "max_line_length": 45, "alphanum_fraction": 0.6435185185, "include": true, "reason": "import numpy", "num_tokens": 117}
|
using Plots, LinearAlgebra, Printf
# helper functions
# av: 4-point average of the surrounding values (vertices -> cell centres)
@views av(A) = 0.25*(A[1:end-1,1:end-1].+A[2:end,1:end-1].+A[1:end-1,2:end].+A[2:end,2:end])
# av_xa / av_ya: 2-point averages of adjacent entries along x / y
@views av_xa(A) = 0.5*(A[1:end-1,:].+A[2:end,:])
@views av_ya(A) = 0.5*(A[:,1:end-1].+A[:,2:end])
# maxloc: elementwise maximum over each 3x3 neighbourhood (interior points)
@views maxloc(A) = max.(A[1:end-2,1:end-2],A[1:end-2,2:end-1],A[1:end-2,3:end],
                        A[2:end-1,1:end-2],A[2:end-1,2:end-1],A[2:end-1,3:end],
                        A[3:end ,1:end-2],A[3:end ,2:end-1],A[3:end ,3:end])
# bc2!: copy the first/last interior row and column onto the boundary in place
@views bc2!(A) = begin A[1,:] = A[2,:]; A[end,:] = A[end-1,:]; A[:,1] = A[:,2]; A[:,end] = A[:,end-1] end
# main function
# Stokes2D_vep: 2-D visco-elasto-plastic Stokes flow on a staggered grid,
# solved with an accelerated pseudo-transient (PT) iteration. A circular
# inclusion of weaker shear modulus Gi inside a matrix of modulus G0 is loaded
# by a background pure-shear rate `ebg`; plasticity uses a regularized
# Drucker-Prager yield (τ_y, sinϕ, regularization viscosity η_reg).
@views function Stokes2D_vep()
use_vep = true
# physics
lx, ly = 1.0, 1.0
radi = 0.1
η0 = 1.0
η_reg = 1.2e-2
G0 = 1.0
Gi = 0.5G0
τ_y = 1.6
sinϕ = sind(30)
ebg = 1.0
dt = η0/G0/4.0
# numerics
nx, ny = 63, 63
nt = 10
εnl = 1e-6
maxiter = 150max(nx, ny)
nchk = 2max(nx, ny)
Re = 5π
r = 1.0
CFL = 0.99/sqrt(2)
# preprocessing
dx, dy = lx/nx, ly/ny
max_lxy = max(lx, ly)
vpdτ = CFL*min(dx, dy)
xc, yc = LinRange(-(lx-dx)/2,(lx-dx)/2,nx), LinRange(-(ly-dy)/2,(ly-dy)/2,ny)
xv, yv = LinRange(-lx/2,lx/2,nx+1), LinRange(-ly/2,ly/2,ny+1)
# allocate arrays (fields on cell centres are (nx,ny); vertex fields (nx+1,ny+1))
Pr = zeros(nx ,ny )
τxx = zeros(nx ,ny )
τyy = zeros(nx ,ny )
τxy = zeros(nx+1,ny+1)
τxyc = zeros(nx ,ny )
τii = zeros(nx ,ny )
Eii = zeros(nx ,ny )
λ = zeros(nx ,ny )
F = zeros(nx ,ny )
Fchk = zeros(nx ,ny )
dQdTxx = zeros(nx ,ny )
dQdTyy = zeros(nx ,ny )
dQdTxy = zeros(nx ,ny )
# _o suffix: values at the previous physical time step (elastic memory)
τxx_o = zeros(nx ,ny )
τyy_o = zeros(nx ,ny )
τxyc_o = zeros(nx ,ny )
τxy_o = zeros(nx+1,ny+1)
# Here _r stands for real: physical stress for residual computation
τxx_r = zeros(nx ,ny )
τyy_r = zeros(nx ,ny )
τxyc_r = zeros(nx ,ny )
τxy_r = zeros(nx+1,ny+1)
Vx = zeros(nx+1,ny )
Vy = zeros(nx ,ny+1)
dVx = zeros(nx-1,ny )
dVy = zeros(nx ,ny-1)
Rx = zeros(nx-1,ny )
Ry = zeros(nx ,ny-1)
∇V = zeros(nx ,ny )
ρg = zeros(nx ,ny )
Exx = zeros(nx ,ny )
Eyy = zeros(nx ,ny )
Exyc = zeros(nx ,ny )
Exy = zeros(nx+1,ny+1)
Exx_e = zeros(nx ,ny )
Eyy_e = zeros(nx ,ny )
Exyc_e = zeros(nx ,ny )
Exy_e = zeros(nx+1,ny+1)
Exx_τ = zeros(nx ,ny )
Eyy_τ = zeros(nx ,ny )
Exy_τ = zeros(nx+1,ny+1)
Exyc_τ = zeros(nx ,ny )
η_ve_τ = zeros(nx ,ny )
η_ve_τv = zeros(nx+1,ny+1)
η_vem = zeros(nx ,ny )
η_vev = zeros(nx+1,ny+1)
η_vevm = zeros(nx+1,ny+1)
dτ_ρ = zeros(nx ,ny )
dτ_ρv = zeros(nx+1,ny+1)
Gdτ = zeros(nx ,ny )
Gdτv = zeros(nx+1,ny+1)
η_vep = zeros(nx ,ny )
η_vepv = zeros(nx+1,ny+1)
η_vec = zeros(nx ,ny )
# init: pure-shear background velocity and weak circular inclusion in G
Vx = [ ebg*x for x ∈ xv, _ ∈ yc ]
Vy = [-ebg*y for _ ∈ xc, y ∈ yv ]
η = fill(η0,nx,ny); ηv = fill(η0,nx+1,ny+1)
G = fill(G0,nx,ny); Gv = fill(G0,nx+1,ny+1)
@. G[xc^2 + yc'^2 < radi^2] = Gi
@. Gv[xv^2 + yv'^2 < radi^2] = Gi
η_e = G.*dt; η_ev = Gv.*dt
@. η_vec = 1.0/(1.0/η + 1.0/η_e)
@. η_vev = 1.0/(1.0/ηv + 1.0/η_ev)
@. η_vep = 1.0/(1.0/η + 1.0/η_e)
@. η_vepv = 1.0/(1.0/ηv + 1.0/η_ev)
# action: time loop with an inner PT iteration until the residual drops below εnl
t = 0.0; evo_t = Float64[]; evo_τxx = Float64[]; niter = 0
for it = 1:nt
τxx_o .= τxx; τyy_o .= τyy; τxy_o .= τxy; τxyc_o .= τxyc
err = 2εnl; iter = 0
while err > εnl && iter < maxiter
if !use_vep
# NOTE(review): η_ve is not defined anywhere in this function (only
# η_vec/η_vev exist); this branch would throw UndefVarError if
# use_vep were false — confirm the intended variable.
η_vem[2:end-1,2:end-1] .= maxloc(η_ve) ; bc2!(η_vem)
η_vevm[2:end-1,2:end-1] .= maxloc(η_vev); bc2!(η_vevm)
else
η_vem[2:end-1,2:end-1] .= maxloc(η_vep) ; bc2!(η_vem)
η_vevm[2:end-1,2:end-1] .= maxloc(η_vepv); bc2!(η_vevm)
end
# PT parameters derived from the (smoothed) effective viscosities
@. dτ_ρ = vpdτ*max_lxy/Re/η_vem
@. dτ_ρv = vpdτ*max_lxy/Re/η_vevm
@. Gdτ = vpdτ^2/dτ_ρ/(r+2.0)
@. Gdτv = vpdτ^2/dτ_ρv/(r+2.0)
@. η_ve_τ = 1.0/(1.0/η + 1.0/η_e + 1.0/Gdτ)
@. η_ve_τv = 1.0/(1.0/ηv + 1.0/η_ev + 1.0/Gdτv)
# pressure
∇V .= diff(Vx, dims=1)./dx .+ diff(Vy, dims=2)./dy
@. Pr -= r*Gdτ*∇V
# strain rates (deviatoric)
Exx .= diff(Vx, dims=1)./dx .- 1//3*∇V
Eyy .= diff(Vy, dims=2)./dy .- 1//3*∇V
Exy[2:end-1,2:end-1] .= 0.5.*(diff(Vx[2:end-1,:], dims=2)./dy .+ diff(Vy[:,2:end-1], dims=1)./dx)
Exyc .= av(Exy)
# viscoelastic strain rates
@. Exx_e = Exx + τxx_o /2.0/η_e
@. Eyy_e = Eyy + τyy_o /2.0/η_e
@. Exy_e = Exy + τxy_o /2.0/η_ev
@. Exyc_e = Exyc + τxyc_o/2.0/η_e
# viscoelastic pseudo-transient strain rates
@. Exx_τ = Exx_e + τxx /2.0/Gdτ
@. Eyy_τ = Eyy_e + τyy /2.0/Gdτ
@. Exy_τ = Exy_e + τxy /2.0/Gdτv
@. Exyc_τ = Exyc_e + τxyc/2.0/Gdτ
# stress update (trial, visco-elastic)
@. τxx = 2.0*η_ve_τ *Exx_τ
@. τyy = 2.0*η_ve_τ *Eyy_τ
@. τxy = 2.0*η_ve_τv*Exy_τ
@. τxyc = 2.0*η_ve_τ *Exyc_τ
# stress and strain rate invariants
@. τii = sqrt(0.5*(τxx^2 + τyy^2) + τxyc*τxyc)
@. Eii = sqrt(0.5*(Exx_τ^2 + Eyy_τ^2) + Exyc_τ*Exyc_τ)
# yield function (regularized Drucker-Prager) and plastic multiplier
@. F = τii - τ_y - Pr.*sinϕ
@. λ = max(F,0.0)/(η_ve_τ + η_reg)
@. dQdTxx = 0.5*τxx /τii
@. dQdTyy = 0.5*τyy /τii
@. dQdTxy = τxyc/τii
# plastic correction: return-map the trial stress onto the yield surface
@. τxx = 2.0*η_ve_τ *(Exx_τ - λ*dQdTxx)
@. τyy = 2.0*η_ve_τ *(Eyy_τ - λ*dQdTyy)
@. τxyc = 2.0*η_ve_τ *(Exyc_τ - 0.5*λ*dQdTxy)
τxy[2:end-1,2:end-1] .= 2.0 .* η_ve_τv[2:end-1,2:end-1].*(Exy_τ[2:end-1,2:end-1] .- 0.5 .* av(λ.*dQdTxy))
@. τii = sqrt(0.5*(τxx^2 + τyy^2) + τxyc*τxyc)
# Fchk should be <= 0 once the return map has converged
@. Fchk = τii - τ_y - Pr*sinϕ - λ*η_reg
# resolution-tuned scaling factor (see companion comment for 127x127)
@. η_vep = τii / 2.0 / Eii * 19.3 # nx, ny = 63, 63
# @. η_vep = τii / 2.0 / Eii * 19.3 * 1.99 # nx, ny = 127, 127
# NOTE(review): interior of η_vepv is filled but bc2! is applied to
# η_vep — possibly bc2!(η_vepv) was intended; confirm.
η_vepv[2:end-1,2:end-1] .= av(η_vep); bc2!(η_vep)
# velocity update (pseudo-time step on the momentum residual)
dVx .= av_xa(dτ_ρ) .* (.-diff(Pr, dims=1)./dx .+ diff(τxx, dims=1)./dx .+ diff(τxy[2:end-1,:], dims=2)./dy)
dVy .= av_ya(dτ_ρ) .* (.-diff(Pr, dims=2)./dy .+ diff(τyy, dims=2)./dy .+ diff(τxy[:,2:end-1], dims=1)./dx .+ av_ya(ρg))
@. Vx[2:end-1,:] = Vx[2:end-1,:] + dVx
@. Vy[:,2:end-1] = Vy[:,2:end-1] + dVy
# convergence check every nchk iterations, using the physical ("real") stresses
if iter % nchk == 0
@. τxx_r = 2.0*η_vec*Exx_e
@. τyy_r = 2.0*η_vec*Eyy_e
@. τxy_r = 2.0*η_vev*Exy_e
@. τxyc_r = 2.0*η_vec*Exyc_e
@. τii = sqrt(0.5*(τxx_r^2 + τyy_r^2) + τxyc_r*τxyc_r)
# yield function
@. F = τii - τ_y - Pr.*sinϕ
@. λ = max(F,0.0)/(η_vec + η_reg)
@. dQdTxx = 0.5*τxx_r /τii
@. dQdTyy = 0.5*τyy_r /τii
@. dQdTxy = τxyc_r/τii
# plastic correction
@. τxx_r = 2.0*η_vec*(Exx_e - λ*dQdTxx)
@. τyy_r = 2.0*η_vec*(Eyy_e - λ*dQdTyy)
@. τxyc_r = 2.0*η_vec*(Exyc_e - 0.5*λ*dQdTxy)
τxy_r[2:end-1,2:end-1] .= 2.0 .* η_vev[2:end-1,2:end-1].*(Exy_e[2:end-1,2:end-1] .- 0.5 .* av(λ.*dQdTxy))
Rx .= .-diff(Pr, dims=1)./dx .+ diff(τxx_r, dims=1)./dx .+ diff(τxy_r[2:end-1,:], dims=2)./dy
Ry .= .-diff(Pr, dims=2)./dy .+ diff(τyy_r, dims=2)./dy .+ diff(τxy_r[:,2:end-1], dims=1)./dx .+ av_ya(ρg)
norm_Rx = norm(Rx)/sqrt(length(Rx)); norm_Ry = norm(Ry)/sqrt(length(Ry)); norm_∇V = norm(∇V)/sqrt(length(∇V))
err = maximum([norm_Rx, norm_Ry, norm_∇V])
@printf("it = %d, iter = %d, err = %1.2e norm[Rx=%1.2e, Ry=%1.2e, ∇V=%1.2e] (Fchk=%1.2e) \n", it, iter, err, norm_Rx, norm_Ry, norm_∇V, maximum(Fchk))
end
iter += 1; niter += 1
end
println(norm(Exyc_τ))
# record stress evolution and plot the current state
t += dt; push!(evo_t, t); push!(evo_τxx, maximum(τxx))
p1 = heatmap(xc,yc,τii',aspect_ratio=1,xlims=(-lx/2,lx/2),ylims=(-ly/2,ly/2),title="τii")
# p3 = heatmap(xc,yc,η_vep',aspect_ratio=1,xlims=(-lx/2,lx/2),ylims=(-ly/2,ly/2),title="τii")
p2 = plot(evo_t, evo_τxx , legend=false, xlabel="time", ylabel="max(τxx)", linewidth=0, markershape=:circle, framestyle=:box, markersize=3)
plot!(evo_t, 2.0.*ebg.*η0.*(1.0.-exp.(.-evo_t.*G0./η0)), linewidth=2.0) # analytical solution for VE loading
plot!(evo_t, 2.0.*ebg.*η0.*ones(size(evo_t)), linewidth=2.0) # viscous flow stress
display(plot(p1,p2))
end
println(niter)
return
end
# action: run the simulation when this script is executed
Stokes2D_vep()
|
{"hexsha": "ec0f0ae1f12099b61bdd8603e8162cfe00471199", "size": 9190, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "newdamp/Stokes2D_vep_reg_IU_LR_TD.jl", "max_stars_repo_name": "PTsolvers/Stokes2D_simpleVEP", "max_stars_repo_head_hexsha": "074c3e3649c50a317e33455d18dc9526614b4b66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-15T06:27:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T05:47:19.000Z", "max_issues_repo_path": "newdamp/Stokes2D_vep_reg_IU_LR_TD.jl", "max_issues_repo_name": "PTsolvers/Stokes2D_simpleVEP", "max_issues_repo_head_hexsha": "074c3e3649c50a317e33455d18dc9526614b4b66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-27T23:45:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-27T23:45:53.000Z", "max_forks_repo_path": "newdamp/Stokes2D_vep_reg_IU_LR_TD.jl", "max_forks_repo_name": "PTsolvers/Stokes2D_simpleVEP", "max_forks_repo_head_hexsha": "074c3e3649c50a317e33455d18dc9526614b4b66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5545023697, "max_line_length": 166, "alphanum_fraction": 0.4571273123, "num_tokens": 4142}
|
/**
* \file
* \brief Laser Scanner communication (TCP Helper Class)
* Copyright (C) 2013, Osnabrück University
* Copyright (C) 2017, Ing.-Buero Dr. Michael Lehning, Hildesheim
* Copyright (C) 2017, SICK AG, Waldkirch
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Osnabrück University nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* * Neither the name of SICK AG nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission
* * Neither the name of Ing.-Buero Dr. Michael Lehning nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Last modified: 12th Dec 2017
*
* Authors:
* Michael Lehning <michael.lehning@lehning.de>
* Jochen Sprickerhof <jochen@sprickerhof.de>
* Martin Günther <mguenthe@uos.de>
*
* Based on the TiM communication example by SICK AG.
*
*/
#ifdef _MSC_VER
#pragma warning(disable: 4996)
#pragma warning(disable: 4267)
#pragma warning(disable: 4101) // C4101: "e" : Unreferenzierte lokale Variable
#define _WIN32_WINNT 0x0501
#endif
#include <sick_scan/sick_scan_common_tcp.h>
#include <sick_scan/tcp/colaa.hpp>
#include <sick_scan/tcp/colab.hpp>
#include <boost/asio.hpp>
#include <boost/lambda/lambda.hpp>
#include <algorithm>
#include <iterator>
#include <boost/lexical_cast.hpp>
#include <vector>
#include <sick_scan/sick_generic_radar.h>
#ifdef ROSSIMU
#include "sick_scan/rosconsole_simu.hpp"
#endif
// Scratch buffers for emulated sensor traffic (64 KiB each).
std::vector<unsigned char> exampleData(65536);
std::vector<unsigned char> receivedData(65536);
// Presumably the number of valid bytes in receivedData — confirm at the usage sites.
static long receivedDataLen = 0;
// Returns the diagnostic ERROR level.
// On MSVC the ERROR macro (from the Windows headers) collides with the enum
// name, so it is #undef'ed and the literal value 2 is returned instead of
// diagnostic_msgs::DiagnosticStatus::ERROR.
static int getDiagnosticErrorCode()
{
#ifdef _MSC_VER
#undef ERROR
return(2);
#else
return (diagnostic_msgs::DiagnosticStatus::ERROR);
#endif
}
namespace sick_scan
{
/*!
\brief Build a canned SOPAS reply for a request, emulating a scanner offline.
       Currently only the radar (RMS3xx) dialogue is implemented; MRS1xxx
       emulation asserts.
\param requestData raw request bytes (CoLa-A telegram)
\param requestLen number of valid bytes in requestData
\param replyVector output: emulated reply framed by STX (0x02) / ETX (0x03);
       cleared first and left empty when no known keyword matches
\return always true
*/
bool emulateReply(UINT8 *requestData, int requestLen, std::vector<unsigned char> *replyVector)
{
std::string request;
std::string reply;
// Parallel lists: keyWordList[i] in the request selects answerList[i] as reply.
std::vector<std::string> keyWordList;
std::vector<std::string> answerList;
std::string scannerType = "???";
ros::NodeHandle nhPriv;
enum
{
SIMU_RADAR = 0, SIMU_MRS_1XXX = 1, SIMU_NUM
};
int enumType = SIMU_RADAR; // Default simulation
// Switch to MRS1xxx emulation if the "scanner_type" parameter requests it.
if (true == nhPriv.getParam("scanner_type", scannerType))
{
if (scannerType.compare("sick_mrs_1xxx") == 0)
{
ROS_INFO("Simulate MRS1xxx");
enumType = SIMU_MRS_1XXX;
}
}
switch (enumType)
{
case SIMU_RADAR:
// XXX
// Canned request/response pairs for the radar start-up dialogue.
keyWordList.push_back("sMN SetAccessMode");
answerList.push_back("sAN SetAccessMode 1");
keyWordList.push_back("sWN EIHstCola");
answerList.push_back("sWA EIHstCola");
keyWordList.push_back("sRN FirmwareVersion");
answerList.push_back("sRA FirmwareVersion 8 1.0.0.0R");
keyWordList.push_back("sRN OrdNum");
answerList.push_back("sRA OrdNum 7 1234567");
keyWordList.push_back("sWN TransmitTargets 1");
answerList.push_back("sWA TransmitTargets");
keyWordList.push_back("sWN TransmitObjects 1");
answerList.push_back("sWA TransmitObjects");
keyWordList.push_back("sWN TCTrackingMode 0");
answerList.push_back("sWA TCTrackingMode");
keyWordList.push_back("sRN SCdevicestate");
answerList.push_back("sRA SCdevicestate 1");
keyWordList.push_back("sRN DItype");
answerList.push_back("sRA DItype D RMS3xx-xxxxxx");
keyWordList.push_back("sRN ODoprh");
answerList.push_back("sRA ODoprh 451");
keyWordList.push_back("sMN mSCloadappdef");
answerList.push_back("sAN mSCloadappdef");
keyWordList.push_back("sRN SerialNumber");
answerList.push_back("sRA SerialNumber 8 18020073");
keyWordList.push_back("sMN Run");
answerList.push_back("sAN Run 1s");
keyWordList.push_back("sRN ODpwrc");
answerList.push_back("sRA ODpwrc 20");
keyWordList.push_back("sRN LocationName");
answerList.push_back("sRA LocationName B not defined");
keyWordList.push_back("sEN LMDradardata 1");
answerList.push_back("sEA LMDradardata 1");
// Copy the raw request into a string for substring matching.
for (int i = 0; i < requestLen; i++)
{
request += (char) requestData[i];
}
// Last matching keyword wins; reply stays empty if nothing matches.
for (size_t i = 0; i < keyWordList.size(); i++)
{
if (request.find(keyWordList[i]) != std::string::npos)
{
reply = (char) 0x02;
reply += answerList[i];
reply += (char) 0x03;
}
}
replyVector->clear();
for (size_t i = 0; i < reply.length(); i++)
{
replyVector->push_back((unsigned char) reply[i]);
}
break;
case SIMU_MRS_1XXX:
ROS_INFO("Emulation of MRS_1xxx is not implemented.\n");
assert(0);
// XXX
break;
}
return (true);
}
// Constructor: stores the connection parameters, selects the CoLa dialect
// ('a'/'A' -> CoLa-A ASCII, 'b'/'B' -> CoLa-B binary; anything else asserts),
// resets the receive-buffer bookkeeping and arms the connection deadline timer.
SickScanCommonTcp::SickScanCommonTcp(const std::string &hostname, const std::string &port, int &timelimit,
SickGenericParser *parser, char cola_dialect_id)
:
SickScanCommon(parser),
socket_(io_service_),
deadline_(io_service_),
hostname_(hostname),
port_(port),
timelimit_(timelimit)
{
setEmulSensor(false);
if ((cola_dialect_id == 'a') || (cola_dialect_id == 'A'))
{
this->setProtocolType(CoLa_A);
}
if ((cola_dialect_id == 'b') || (cola_dialect_id == 'B'))
{
this->setProtocolType(CoLa_B);
}
assert(this->getProtocolType() != CoLa_Unknown);
m_numberOfBytesInReceiveBuffer = 0;
m_alreadyReceivedBytes = 0;
this->setReplyMode(0);
// io_service_.setReadCallbackFunction(boost::bind(&SopasDevice::readCallbackFunction, this, _1, _2));
// Set up the deadline actor to implement timeouts.
// Based on blocking TCP example on:
// http://www.boost.org/doc/libs/1_46_0/doc/html/boost_asio/example/timeouts/blocking_tcp_client.cpp
deadline_.expires_at(boost::posix_time::pos_infin);
checkDeadline();
}
// Destructor: close the TCP connection.
SickScanCommonTcp::~SickScanCommonTcp()
{
// stop_scanner();
close_device();
}
using boost::asio::ip::tcp;
using boost::lambda::var;
using boost::lambda::_1;
// Disconnect hook; intentionally left empty.
void SickScanCommonTcp::disconnectFunction()
{
}
void SickScanCommonTcp::disconnectFunctionS(void *obj)
{
if (obj != NULL)
{
((SickScanCommonTcp *) (obj))->disconnectFunction();
}
}
void SickScanCommonTcp::readCallbackFunctionS(void *obj, UINT8 *buffer, UINT32 &numOfBytes)
{
((SickScanCommonTcp *) obj)->readCallbackFunction(buffer, numOfBytes);
}
void SickScanCommonTcp::setReplyMode(int _mode)
{
m_replyMode = _mode;
}
int SickScanCommonTcp::getReplyMode()
{
return (m_replyMode);
}
#if 0
void SickScanCommonTcp::setProtocolType(char cola_dialect_id)
{
if ((cola_dialect_id == 'a') || (cola_dialect_id == 'A'))
{
this->m_protocol = CoLa_A;
}
else
{
this->m_protocol = CoLa_B;
}
}
#endif
/*!
\brief Set emulation flag (using emulation instead of "real" scanner - currently implemented for radar
\param _emulFlag: Flag to switch emulation on or off
\return
*/
void SickScanCommonTcp::setEmulSensor(bool _emulFlag)
{
m_emulSensor = _emulFlag;
}
/*!
\brief get emulation flag (using emulation instead of "real" scanner - currently implemented for radar
\param
\return bool: Flag to switch emulation on or off
*/
bool SickScanCommonTcp::getEmulSensor()
{
return (m_emulSensor);
}
//
// Look for 23-frame (STX/ETX) in receive buffer.
// Move frame to start of buffer
//
// Return: 0 : No (complete) frame found
// >0 : Frame length
//
SopasEventMessage SickScanCommonTcp::findFrameInReceiveBuffer()
{
UINT32 frameLen = 0;
UINT32 i;
// Depends on protocol...
if (getProtocolType() == CoLa_A)
{
//
// COLA-A
//
// Must start with STX (0x02)
if (m_receiveBuffer[0] != 0x02)
{
// Look for starting STX (0x02)
for (i = 1; i < m_numberOfBytesInReceiveBuffer; i++)
{
if (m_receiveBuffer[i] == 0x02)
{
break;
}
}
// Found beginning of frame?
if (i >= m_numberOfBytesInReceiveBuffer)
{
// No start found, everything can be discarded
m_numberOfBytesInReceiveBuffer = 0; // Invalidate buffer
return SopasEventMessage(); // No frame found
}
// Move frame start to index 0
UINT32 newLen = m_numberOfBytesInReceiveBuffer - i;
memmove(&(m_receiveBuffer[0]), &(m_receiveBuffer[i]), newLen);
m_numberOfBytesInReceiveBuffer = newLen;
}
// Look for ending ETX (0x03)
for (i = 1; i < m_numberOfBytesInReceiveBuffer; i++)
{
if (m_receiveBuffer[i] == 0x03)
{
break;
}
}
// Found end?
if (i >= m_numberOfBytesInReceiveBuffer)
{
// No end marker found, so it's not a complete frame (yet)
return SopasEventMessage(); // No frame found
}
// Calculate frame length in byte
frameLen = i + 1;
return SopasEventMessage(m_receiveBuffer, CoLa_A, frameLen);
}
else if (getProtocolType() == CoLa_B)
{
UINT32 magicWord;
UINT32 payloadlength;
if (m_numberOfBytesInReceiveBuffer < 4)
{
return SopasEventMessage();
}
UINT16 pos = 0;
magicWord = colab::getIntegerFromBuffer<UINT32>(m_receiveBuffer, pos);
if (magicWord != 0x02020202)
{
// Look for starting STX (0x02020202)
for (i = 1; i <= m_numberOfBytesInReceiveBuffer - 4; i++)
{
pos = i; // this is needed, as the position value is updated by getIntegerFromBuffer
magicWord = colab::getIntegerFromBuffer<UINT32>(m_receiveBuffer, pos);
if (magicWord == 0x02020202)
{
// found magic word
break;
}
}
// Found beginning of frame?
if (i > m_numberOfBytesInReceiveBuffer - 4)
{
// No start found, everything can be discarded
m_numberOfBytesInReceiveBuffer = 0; // Invalidate buffer
return SopasEventMessage(); // No frame found
}
else
{
// Move frame start to index
UINT32 bytesToMove = m_numberOfBytesInReceiveBuffer - i;
memmove(&(m_receiveBuffer[0]), &(m_receiveBuffer[i]), bytesToMove); // payload+magic+length+s+checksum
m_numberOfBytesInReceiveBuffer = bytesToMove;
}
}
// Pruefe Laenge des Pufferinhalts
if (m_numberOfBytesInReceiveBuffer < 9)
{
// Es sind nicht genug Daten fuer einen Frame
printInfoMessage("SickScanCommonNw::findFrameInReceiveBuffer: Frame cannot be decoded yet, only " +
::toString(m_numberOfBytesInReceiveBuffer) + " bytes in the buffer.", m_beVerbose);
return SopasEventMessage();
}
// Read length of payload
pos = 4;
payloadlength = colab::getIntegerFromBuffer<UINT32>(m_receiveBuffer, pos);
printInfoMessage(
"SickScanCommonNw::findFrameInReceiveBuffer: Decoded payload length is " + ::toString(payloadlength) +
" bytes.", m_beVerbose);
// Is the payload length plausible and would it fit into the buffer?
if (payloadlength > (sizeof(m_receiveBuffer) - 9))
{
// magic word + length + checksum = 9
printWarning(
"SickScanCommonNw::findFrameInReceiveBuffer: Frame too big for receive buffer. Frame discarded with length:"
+ ::toString(payloadlength) + ".");
m_numberOfBytesInReceiveBuffer = 0;
return SopasEventMessage();
}
if ((payloadlength + 9) > m_numberOfBytesInReceiveBuffer)
{
// magic word (4) + length (4) + checksum (1) = 9 bytes of framing
printInfoMessage(
"SickScanCommonNw::findFrameInReceiveBuffer: Frame not complete yet. Waiting for the rest of it (" +
::toString(payloadlength + 9 - m_numberOfBytesInReceiveBuffer) + " bytes missing).", m_beVerbose);
return SopasEventMessage(); // frame not complete
}
// Calculate the total frame length in bytes: Len = Frame (9 bytes) + Payload
frameLen = payloadlength + 9;
//
// test checksum of payload
//
UINT8 temp = 0;
UINT8 temp_xor = 0;
UINT8 checkSum;
// Read original checksum
pos = frameLen - 1;
checkSum = colab::getIntegerFromBuffer<UINT8>(m_receiveBuffer, pos);
// Compute the checksum over the payload for comparison
for (UINT16 i = 8; i < (frameLen - 1); i++)
{
pos = i;
temp = colab::getIntegerFromBuffer<UINT8>(m_receiveBuffer, pos);
temp_xor = temp_xor ^ temp;
}
// Compare the checksums
if (temp_xor != checkSum)
{
printWarning("SickScanCommonNw::findFrameInReceiveBuffer: Wrong checksum, Frame discarded.");
m_numberOfBytesInReceiveBuffer = 0;
return SopasEventMessage();
}
return SopasEventMessage(m_receiveBuffer, CoLa_B, frameLen);
}
// Return empty frame
return SopasEventMessage();
}
/**
 * Process a complete SOPAS frame extracted from the receive buffer:
 * push it, together with its receive timestamp, into the receive queue.
 */
void SickScanCommonTcp::processFrame(ros::Time timeStamp, SopasEventMessage &frame)
{
  // Log which protocol-specific handler would be responsible for this frame.
  // The actual processFrame_CoLa_A/B calls are disabled; frames are only queued.
  if (getProtocolType() == CoLa_A)
  {
    printInfoMessage(
        "SickScanCommonNw::processFrame: Calling processFrame_CoLa_A() with " + ::toString(frame.size()) + " bytes.",
        m_beVerbose);
    // processFrame_CoLa_A(frame);
  }
  else if (getProtocolType() == CoLa_B)
  {
    printInfoMessage(
        "SickScanCommonNw::processFrame: Calling processFrame_CoLa_B() with " + ::toString(frame.size()) + " bytes.",
        m_beVerbose);
    // processFrame_CoLa_B(frame);
  }
  // Copy the raw frame bytes together with the receive timestamp and push the
  // pair into recvQueue, where readWithTimeout()/get_datagram() consume it.
  DatagramWithTimeStamp dataGramWidthTimeStamp(timeStamp, std::vector<unsigned char>(frame.getRawData(),
                                                                                     frame.getRawData() +
                                                                                     frame.size()));
  // recvQueue.push(std::vector<unsigned char>(frame.getRawData(), frame.getRawData() + frame.size()));
  recvQueue.push(dataGramWidthTimeStamp);
}
void SickScanCommonTcp::readCallbackFunction(UINT8 *buffer, UINT32 &numOfBytes)
{
  // TCP read callback: append the newly received bytes to the internal receive
  // buffer, then extract and process every complete SOPAS frame found in it.
  ros::Time rcvTimeStamp = ros::Time::now(); // stamp received datagram
  bool beVerboseHere = false;
  printInfoMessage(
      "SickScanCommonNw::readCallbackFunction(): Called with " + toString(numOfBytes) + " available bytes.",
      beVerboseHere);
  ScopedLock lock(&m_receiveDataMutex); // Mutex for access to the input buffer
  UINT32 remainingSpace = sizeof(m_receiveBuffer) - m_numberOfBytesInReceiveBuffer;
  UINT32 bytesToBeTransferred = numOfBytes;
  if (remainingSpace < numOfBytes)
  {
    // Input buffer cannot hold everything: transfer what fits, drop the rest.
    bytesToBeTransferred = remainingSpace;
    // printWarning("SickScanCommonNw::readCallbackFunction(): Input buffer space is to small, transferring only " +
    //              ::toString(bytesToBeTransferred) + " of " + ::toString(numOfBytes) + " bytes.");
  }
  else
  {
    // printInfoMessage("SickScanCommonNw::readCallbackFunction(): Transferring " + ::toString(bytesToBeTransferred) +
    //                  " bytes from TCP to input buffer.", beVerboseHere);
  }
  if (bytesToBeTransferred > 0)
  {
    // Data can be transferred into our input buffer
    memcpy(&(m_receiveBuffer[m_numberOfBytesInReceiveBuffer]), buffer, bytesToBeTransferred);
    m_numberOfBytesInReceiveBuffer += bytesToBeTransferred;
    UINT32 size = 0;
    while (1)
    {
      // Now work on the input buffer until all received datasets are processed
      SopasEventMessage frame = findFrameInReceiveBuffer();
      size = frame.size();
      if (size == 0)
      {
        // Framesize = 0: There is no valid frame in the buffer. The buffer is either empty or the frame
        // is incomplete, so leave the loop
        printInfoMessage("SickScanCommonNw::readCallbackFunction(): No complete frame in input buffer, we are done.",
                         beVerboseHere);
        // Leave the loop
        break;
      }
      else
      {
        // A frame was found in the buffer, so process it now.
        printInfoMessage(
            "SickScanCommonNw::readCallbackFunction(): Processing a frame of length " + ::toString(frame.size()) +
            " bytes.", beVerboseHere);
        processFrame(rcvTimeStamp, frame);
        // Remove the consumed frame from the front of the receive buffer.
        UINT32 bytesToMove = m_numberOfBytesInReceiveBuffer - size;
        memmove(&(m_receiveBuffer[0]), &(m_receiveBuffer[size]), bytesToMove); // payload+magic+length+s+checksum
        m_numberOfBytesInReceiveBuffer = bytesToMove;
      }
    }
  }
  else
  {
    // There was input data from the TCP interface, but our input buffer was unable to hold a single byte.
    // Either we have not read data from our buffer for a long time, or something has gone wrong. To re-sync,
    // we clear the input buffer here.
    m_numberOfBytesInReceiveBuffer = 0;
  }
}
int SickScanCommonTcp::init_device()
{
int portInt;
sscanf(port_.c_str(), "%d", &portInt);
m_nw.init(hostname_, portInt, disconnectFunctionS, (void *) this);
m_nw.setReadCallbackFunction(readCallbackFunctionS, (void *) this);
if (this->getEmulSensor())
{
ROS_INFO("Sensor emulation is switched on - network traffic is switched off.");
}
else
{
m_nw.connect();
}
return ExitSuccess;
}
// Tear down the TCP connection to the scanner. Always reports success (0).
int SickScanCommonTcp::close_device()
{
  ROS_WARN("Disconnecting TCP-Connection.");
  m_nw.disconnect();
  return 0;
}
// Ask the scanner to stop streaming scan data.
// Delegates to stop_scanner(); always reports success regardless of outcome.
bool SickScanCommonTcp::stopScanData()
{
  stop_scanner();
  return (true);
}
// Completion handler for asynchronous socket reads: record the last error
// code and accumulate the total number of bytes transferred so far.
void SickScanCommonTcp::handleRead(boost::system::error_code error, size_t bytes_transfered)
{
  ec_ = error;
  bytes_transfered_ += bytes_transfered;
}
// Deadline watchdog for the boost::asio socket: when the deadline expires,
// close the socket so that any blocked I/O operation returns immediately.
void SickScanCommonTcp::checkDeadline()
{
  if (deadline_.expires_at() <= boost::asio::deadline_timer::traits_type::now())
  {
    // The reason the function is called is that the deadline expired. Close
    // the socket to return all IO operations and reset the deadline
    socket_.close();
    deadline_.expires_at(boost::posix_time::pos_infin);
  }
  // Nothing bad happened, go back to sleep.
  // NOTE(review): the wait is re-armed unconditionally, so this handler keeps
  // rescheduling itself for the lifetime of the timer.
  deadline_.async_wait(boost::bind(&SickScanCommonTcp::checkDeadline, this));
}
int SickScanCommonTcp::numberOfDatagramInInputFifo()
{
int numVal = this->recvQueue.getNumberOfEntriesInQueue();
return (numVal);
}
int SickScanCommonTcp::readWithTimeout(size_t timeout_ms, char *buffer, int buffer_size, int *bytes_read,
bool *exception_occured, bool isBinary)
{
// Set up the deadline to the proper timeout, error and delimiters
deadline_.expires_from_now(boost::posix_time::milliseconds(timeout_ms));
const char end_delim = static_cast<char>(0x03);
int dataLen = 0;
ec_ = boost::asio::error::would_block;
bytes_transfered_ = 0;
size_t to_read;
int numBytes = 0;
// Polling - should be changed to condition variable in the future
int waitingTimeInMs = 1; // try to lookup for new incoming packages
size_t i;
for (i = 0; i < timeout_ms; i += waitingTimeInMs)
{
if (false == this->recvQueue.isQueueEmpty())
{
break;
}
boost::this_thread::sleep(boost::posix_time::milliseconds(waitingTimeInMs));
}
if (i >= timeout_ms)
{
ROS_ERROR("no answer received after %zu ms. Maybe sopas mode is wrong.\n", timeout_ms);
return (ExitError);
}
boost::condition_variable cond_;
DatagramWithTimeStamp datagramWithTimeStamp = this->recvQueue.pop();
*bytes_read = datagramWithTimeStamp.datagram.size();
memcpy(buffer, &(datagramWithTimeStamp.datagram[0]), datagramWithTimeStamp.datagram.size());
return (ExitSuccess);
}
/**
 * Send a SOPAS command to the device and wait for the reply.
 *
 * request  raw command bytes (ASCII or CoLa-B binary, auto-detected below)
 * reply    optional out-parameter receiving the raw reply bytes
 * cmdLen   length of 'request' in bytes (used on the binary path)
 *
 * Returns ExitSuccess, or ExitError if no reply arrived within the timeout.
 */
int SickScanCommonTcp::sendSOPASCommand(const char *request, std::vector<unsigned char> *reply, int cmdLen)
{
#if 0
  if (!socket_.is_open()) {
    ROS_ERROR("sendSOPASCommand: socket not open");
    diagnostics_.broadcast(getDiagnosticErrorCode(), "sendSOPASCommand: socket not open.");
    return ExitError;
  }
#endif
  int sLen = 0;
  int preambelCnt = 0;
  bool cmdIsBinary = false;
  if (request != NULL)
  {
    sLen = cmdLen;
    preambelCnt = 0; // count 0x02 bytes to decide between ascii and binary command
    if (sLen >= 4)
    {
      for (int i = 0; i < 4; i++)
      {
        if (request[i] == 0x02)
        {
          preambelCnt++;
        }
      }
    }
    // Four leading STX (0x02) bytes mark a CoLa-B binary command.
    if (preambelCnt < 4)
    {
      cmdIsBinary = false;
    }
    else
    {
      cmdIsBinary = true;
    }
    int msgLen = 0;
    if (cmdIsBinary == false)
    {
      // ASCII command: NUL-terminated, length comes from strlen.
      msgLen = strlen(request);
    }
    else
    {
      // Binary command: bytes 4..7 hold the big-endian payload length.
      int dataLen = 0;
      for (int i = 4; i < 8; i++)
      {
        dataLen |= ((unsigned char) request[i] << (7 - i) * 8);
      }
      msgLen = 8 + dataLen + 1; // 8 bytes header + payload + 1 byte checksum
    }
#if 1
    if (getEmulSensor())
    {
      // Emulation: synthesize the reply locally instead of using the network.
      emulateReply((UINT8 *) request, msgLen, reply);
    }
    else
    {
      bool debugBinCmd = false;
      if (debugBinCmd)
      {
        printf("=== START HEX DUMP ===\n");
        for (int i = 0; i < msgLen; i++)
        {
          unsigned char *ptr = (UINT8 *) request;
          printf("%02x ", ptr[i]);
        }
        printf("\n=== END HEX DUMP ===\n");
      }
      m_nw.sendCommandBuffer((UINT8 *) request, msgLen);
    }
#else
    /*
     * Write a SOPAS variable read request to the device.
     */
    try
    {
      boost::asio::write(socket_, boost::asio::buffer(request, msgLen));
    }
    catch (boost::system::system_error &e)
    {
      ROS_ERROR("write error for command: %s", request);
      diagnostics_.broadcast(getDiagnosticErrorCode(), "Write error for sendSOPASCommand.");
      return ExitError;
    }
#endif
  }
  // Wait for and collect the reply from the receive queue.
  const int BUF_SIZE = 65536;
  char buffer[BUF_SIZE];
  int bytes_read;
  if (getEmulSensor())
  {
    // Emulated sensor: emulateReply() above has already filled 'reply'.
  }
  else
  {
    if (readWithTimeout(getReadTimeOutInMs(), buffer, BUF_SIZE, &bytes_read, 0, cmdIsBinary) == ExitError)
    {
      ROS_INFO_THROTTLE(1.0, "sendSOPASCommand: no full reply available for read after %d ms", getReadTimeOutInMs());
      diagnostics_.broadcast(getDiagnosticErrorCode(),
                             "sendSOPASCommand: no full reply available for read after timeout.");
      return ExitError;
    }
    if (reply)
    {
      reply->resize(bytes_read);
      std::copy(buffer, buffer + bytes_read, &(*reply)[0]);
    }
  }
  return ExitSuccess;
}
/**
 * Fetch the next scanner datagram, either from the emulation path or from the
 * receive queue filled by the TCP read callback.
 *
 * recvTimeStamp                receives the timestamp of the datagram
 * receiveBuffer / bufferSize   destination buffer and its capacity in bytes
 * actual_length                receives the number of bytes copied
 * isBinaryProtocol             unused here; framing is done by the callback
 * numberOfRemainingFifoEntries optional; receives the remaining queue depth
 *
 * Returns ExitSuccess on success, ExitError on timeout.
 */
int SickScanCommonTcp::get_datagram(ros::Time &recvTimeStamp, unsigned char *receiveBuffer, int bufferSize,
                                    int *actual_length,
                                    bool isBinaryProtocol, int *numberOfRemainingFifoEntries)
{
  if (NULL != numberOfRemainingFifoEntries)
  {
    *numberOfRemainingFifoEntries = 0;
  }
  this->setReplyMode(1);
  if (this->getEmulSensor())
  {
#ifndef ROSSIMU
    // Wait until the next 10 Hz tick so emulated datagrams arrive with a
    // realistic scan period, then generate a simulated ASCII datagram.
    ros::Time timeStamp = ros::Time::now();
    uint32_t nanoSec = timeStamp.nsec;
    double waitTime10Hz = 10.0 * (double) nanoSec / 1E9; // 10th of sec. [0..10[
    uint32_t waitTime = (int) waitTime10Hz; // round down
    double waitTimeUntilNextTime10Hz = 1 / 10.0 * (1.0 - (waitTime10Hz - waitTime));
    ros::Duration(waitTimeUntilNextTime10Hz).sleep();
    SickScanRadar radar(this);
    radar.setEmulation(true);
    radar.simulateAsciiDatagram(receiveBuffer, actual_length);
    recvTimeStamp = ros::Time::now();
#endif
  }
  else
  {
    const int maxWaitInMs = getReadTimeOutInMs();
    if (false == this->recvQueue.waitForIncomingObject(maxWaitInMs))
    {
      ROS_WARN("Timeout during waiting for new datagram");
      return ExitError;
    }
    // Take the oldest datagram from the queue and report the remaining depth.
    DatagramWithTimeStamp datagramWithTimeStamp = this->recvQueue.pop();
    if (NULL != numberOfRemainingFifoEntries)
    {
      *numberOfRemainingFifoEntries = this->recvQueue.getNumberOfEntriesInQueue();
    }
    recvTimeStamp = datagramWithTimeStamp.timeStamp;
    std::vector<unsigned char> dataBuffer = datagramWithTimeStamp.datagram;
    // Clamp the copy to the caller-supplied capacity: the original memcpy'd
    // the full datagram unconditionally, overflowing receiveBuffer for
    // datagrams larger than bufferSize.
    long size = (long) dataBuffer.size();
    if (bufferSize >= 0 && size > (long) bufferSize)
    {
      ROS_WARN("get_datagram: datagram too large for receive buffer, truncating.");
      size = bufferSize;
    }
    if (size > 0)
    {
      memcpy(receiveBuffer, &(dataBuffer[0]), size);
    }
    *actual_length = (int) size;
  }
  // NOTE(review): the original implementation contained an unreachable block
  // after this return (socket re-open handling plus a readWithTimeout() call);
  // it has been removed as dead code.
  return ExitSuccess;
}
} /* namespace sick_scan */
|
{"hexsha": "f5f63e5825a9439a748b5fbda2e9bb017936f275", "size": 29353, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "driver/src/sick_scan_common_tcp.cpp", "max_stars_repo_name": "datadancer/sick_scan_base", "max_stars_repo_head_hexsha": "2bfb4c8c6761240de6785abd6aeb5e520e415259", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18.0, "max_stars_repo_stars_event_min_datetime": "2019-12-03T02:34:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T01:36:44.000Z", "max_issues_repo_path": "driver/src/sick_scan_common_tcp.cpp", "max_issues_repo_name": "datadancer/sick_scan_base", "max_issues_repo_head_hexsha": "2bfb4c8c6761240de6785abd6aeb5e520e415259", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 17.0, "max_issues_repo_issues_event_min_datetime": "2019-09-02T07:44:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T09:40:09.000Z", "max_forks_repo_path": "driver/src/sick_scan_common_tcp.cpp", "max_forks_repo_name": "datadancer/sick_scan_base", "max_forks_repo_head_hexsha": "2bfb4c8c6761240de6785abd6aeb5e520e415259", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17.0, "max_forks_repo_forks_event_min_datetime": "2019-08-14T06:54:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T03:15:53.000Z", "avg_line_length": 31.4608788853, "max_line_length": 121, "alphanum_fraction": 0.6095799407, "num_tokens": 7243}
|
import numpy as np
from numpy import ma # useful for thresholding
# import matplotlib.pyplot as plt
from astropy.io import fits
# from skimage import exposure
# from skimage.filters import rank
# from skimage.morphology import rectangle
from scipy.ndimage.filters import convolve
from scipy import stats
from skimage.transform import AffineTransform
from skimage.morphology import dilation, opening
from skimage.morphology import disk
from tidalclassifier.utils.custom_image_utils import clip, estimate_background, scaled_plot, trimMask
# https://stackoverflow.com/questions/46046928/how-to-find-replacement-of-deprecated-function-in-scipy
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """Set elements of *a* below ``threshmin`` or above ``threshmax`` to ``newval``.

    Drop-in replacement for the deprecated ``scipy.stats.threshold``.
    Returns a masked-array copy of *a*; comparisons on masked elements are
    filled with ``False`` so they are never overwritten.
    """
    result = ma.array(a, copy=True)
    out_of_range = np.zeros(result.shape, dtype=bool)
    for bound, compare in ((threshmin, ma.less), (threshmax, ma.greater)):
        if bound is not None:
            out_of_range |= compare(result, bound).filled(False)
    result[out_of_range] = newval
    return result
def thresholder(stacked_im, color_im, pre_instruct):
    """Build a smoothed source mask from *stacked_im* and derive four products.

    Custom version of https://arxiv.org/pdf/1512.02000.pdf
    Some choices need to be made about exactly how this works. Currently:
    the mask is created by identifying pixels in the convolved background-
    subtracted image n std's above 0 (i.e. above raw img bkg - bkg), and the
    mask is filled by that convolved subtracted image.

    Args:
        stacked_im: image stack; indexed as [0, :, :] below, so presumably
            shape (1, H, W) -- TODO confirm against caller
        color_im: colour image, shape (3, H, W) per the comments below
        pre_instruct: dict with keys 'sig_n' (threshold in std devs),
            'mode' (forwarded to trimMask) and 'dilation_radius' (pixels)

    Returns:
        (filled_mask_convd, trimmed_mask, threshold_bkg, threshold_col)
    """
    flat_bkg, fake_bkg, std = estimate_background(stacked_im)
    threshold_level = pre_instruct['sig_n'] * std
    subtracted_im = stacked_im - flat_bkg
    filter_im = convolve(subtracted_im, weights=np.full((1, 3, 3), 1.0 / 9.))  # convolve the background-sub'd image
    # create mask by thresholding the convolved image
    nothing_above_threshold = threshold(filter_im, threshmax=threshold_level, newval=0)  # 0 if above value
    nothing_below_threshold = filter_im - nothing_above_threshold
    # any strictly positive pixel maps to 1 after ceil(x / max) -> binary mask
    mask = np.ceil(nothing_below_threshold / nothing_below_threshold.max())
    # https://en.wikipedia.org/wiki/Connected-component_labeling
    trimmed_mask = trimMask(mask, mode=pre_instruct['mode']).astype(float)
    # fill the mask with convolved background-subtracted image, return as 'threshold' (as before)
    filled_mask_convd = filter_im * trimmed_mask
    trimmed_mask = np.squeeze(trimmed_mask)
    # grow the mask outward so faint outskirts survive the cut
    selem = disk(pre_instruct['dilation_radius'])
    dilated_mask = dilation(trimmed_mask, selem)
    # dilated_mask = dilated_mask - trimmed_mask
    smooth_mask = np.zeros_like(stacked_im)
    smooth_mask[0, :, :] = dilated_mask
    # zoomed_mask = AffineTransform(matrix=trimmed_mask, scale=pre_instruct['mask_zoom'])
    # convolve the mask with a 15x15 average to soften its edges
    smooth_mask = convolve(smooth_mask, weights=np.full((1, 15, 15), 1.0 / 225.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # smooth_mask = convolve(smooth_mask, weights=np.full((1, 5, 5), 1.0 / 25.))
    # fill the mask with original image and convolve dynamically if desired
    filled_mask = stacked_im * smooth_mask
    # invert the mask
    smooth_inverted_mask = 1 - smooth_mask
    # fill the inverted mask with background
    bkg_for_mask = fake_bkg * smooth_inverted_mask
    # combine
    threshold_bkg = filled_mask + bkg_for_mask
    # ensure positive everywhere: linear rescale (may need to adapt this)
    threshold_bkg = threshold_bkg + pre_instruct['sig_n'] * std
    threshold_bkg = np.abs(threshold_bkg)  # neg. pixels are very rare (5 sigma deviation required) but will exist. Avoid.
    # fill the inverted mask with color image
    smooth_mask = np.squeeze(smooth_mask)  # now (256x256)
    smooth_mask_3dim = np.stack([smooth_mask, smooth_mask, smooth_mask], axis=0)  # now (3,256,256)
    threshold_col = smooth_mask_3dim * color_im  # color im is also (3,256,256)
    # TODO: add fake bkg by band
    # threshold_bkg = smooth_inverted_mask
    # take care if lowering n_sig: this could become more significant, if below 3 sig or so, bkg will increase
    # scaled_plot(im, plt, clip_q=True)
    # scaled_plot(subtracted_im, plt, clip_q=True)
    # scaled_plot(filtered_im,plt,clip_q=True)
    # scaled_plot(cut_below_threshold, plt)
    # scaled_plot(mask, plt)
    # scaled_plot(trimmed_mask, plt)
    # scaled_plot(filled_mask, plt)
    # plt.show()
    # mask = np.expand_dims(mask, axis=0)
    # print(mask.shape)
    # print('new')
    return filled_mask_convd, trimmed_mask, threshold_bkg, threshold_col
def thresholdImage(stacked_im, color_im, table_id, pre_instruct, read_dir, write_dir, alt_filename=None):
    """Run the thresholder at 5-sigma and 3-sigma and save the four products of
    each run (filled mask, binary mask, background-filled image, colour image)
    as FITS files in *write_dir*.

    This method is primary: it is called by the renamer script to generate
    masked images to be passed to metaCNN.

    Args:
        stacked_im, color_im: images forwarded to thresholder()
        table_id: identifier used in the input/output file names
        pre_instruct: thresholder config dict; its 'sig_n' key is overwritten
            here (set to 5 then 3, and left at 3 on return, as before)
        read_dir: directory holding '<table_id>_stacked.fits' template images
        write_dir: directory to write the eight output FITS files to
        alt_filename: optional explicit template FITS path overriding read_dir
    """
    table_id = str(table_id)
    # Template image whose header (WCS etc.) is reused for every product.
    if alt_filename is not None:
        base_filename = alt_filename
    else:
        base_filename = read_dir + table_id + '_stacked.fits'

    def _save_product(data, header_key, suffix):
        # Open the template, swap in the product data, flag the product type
        # in the header, and write it out. The context manager closes the HDU
        # list (the original left every opened file dangling).
        with fits.open(base_filename) as hdulist:
            hdu = hdulist[0]
            hdu.data = data
            hdu.header[header_key] = True
            # clobber is the pre-astropy-2.0 spelling of overwrite; kept for
            # compatibility with the astropy version this project uses.
            hdu.writeto(write_dir + table_id + suffix, clobber=True)

    # Identical save sequence for both significance levels (was copy-pasted).
    for sig_n in (5, 3):
        pre_instruct['sig_n'] = sig_n
        filled_mask, trimmed_mask, bkg_mask, col_mask = thresholder(stacked_im, color_im, pre_instruct)
        tag = '{}sig.fits'.format(sig_n)
        _save_product(filled_mask, 'T_FILLED', '_threshold_' + tag)
        _save_product(trimmed_mask.astype(int), 'T_MASK', '_threshold_mask_' + tag)
        _save_product(bkg_mask, 'T_BKG', '_threshold_bkg_' + tag)
        _save_product(col_mask, 'T_COL', '_threshold_color_' + tag)
|
{"hexsha": "1b8ae60a142fa77bb3d7082a1cbb257e4485271f", "size": 8947, "ext": "py", "lang": "Python", "max_stars_repo_path": "tidalclassifier/utils/thresholder.py", "max_stars_repo_name": "RustyPanda/tidal-features-classifier", "max_stars_repo_head_hexsha": "32e1aa0c074528ed04419d596c2014f78448e11b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-01T06:08:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-01T06:08:53.000Z", "max_issues_repo_path": "tidalclassifier/utils/thresholder.py", "max_issues_repo_name": "RustyPanda/tidal-features-classifier", "max_issues_repo_head_hexsha": "32e1aa0c074528ed04419d596c2014f78448e11b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tidalclassifier/utils/thresholder.py", "max_forks_repo_name": "RustyPanda/tidal-features-classifier", "max_forks_repo_head_hexsha": "32e1aa0c074528ed04419d596c2014f78448e11b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.3386243386, "max_line_length": 121, "alphanum_fraction": 0.7175589583, "include": true, "reason": "import numpy,from numpy,from scipy,from astropy", "num_tokens": 2427}
|
[STATEMENT]
lemma l2_inv4_init [iff]:
"init l2 \<subseteq> l2_inv4"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. init l2 \<subseteq> l2_inv4
[PROOF STEP]
by (auto simp add: l2_def l2_init_def l2_inv4_def)
|
{"llama_tokens": 99, "file": "Key_Agreement_Strong_Adversaries_dhlvl2", "length": 1}
|
"""
Generate code for HTML table to visualise human-object pairs
Fred Zhang <frederic.zhang@anu.edu.au>
The Australian National University
Australian Centre for Robotic Vision
"""
import argparse
import numpy as np
import pocket
def name_parser(name):
    """Turn an image file name of the form '{INDEX}.png' into a caption."""
    index, _, _ = name.partition(".")
    return "Dataset index: {}".format(index)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Generate HTML table")
parser.add_argument("--image-dir",
required=True,
type=str)
args = parser.parse_args()
table = pocket.utils.ImageHTMLTable(
4, args.image_dir,
parser=name_parser,
width="75%"
)
table()
|
{"hexsha": "ce2e786175bfa96dc6cf05d0030ca9ec8b9453e1", "size": 729, "ext": "py", "lang": "Python", "max_stars_repo_path": "hicodet/utilities/generate_html_page.py", "max_stars_repo_name": "henlein/upt", "max_stars_repo_head_hexsha": "0baa1fe7ebc16347f19bf18f10da4a6dc8fd3837", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-12-15T12:04:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T15:01:36.000Z", "max_issues_repo_path": "hicodet/utilities/generate_html_page.py", "max_issues_repo_name": "henlein/upt", "max_issues_repo_head_hexsha": "0baa1fe7ebc16347f19bf18f10da4a6dc8fd3837", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-12-10T03:13:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-26T02:15:46.000Z", "max_forks_repo_path": "hicodet/utilities/generate_html_page.py", "max_forks_repo_name": "henlein/upt", "max_forks_repo_head_hexsha": "0baa1fe7ebc16347f19bf18f10da4a6dc8fd3837", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-06T15:12:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T03:39:07.000Z", "avg_line_length": 19.7027027027, "max_line_length": 60, "alphanum_fraction": 0.6200274348, "include": true, "reason": "import numpy", "num_tokens": 158}
|
[STATEMENT]
lemma pre_post_left_isotone:
"-p \<le> -q \<Longrightarrow> -p\<stileturn>-r \<le> -q\<stileturn>-r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - p \<le> - q \<Longrightarrow> - p \<stileturn> - r \<le> - q \<stileturn> - r
[PROOF STEP]
using order_lesseq_imp pre_post_galois
[PROOF STATE]
proof (prove)
using this:
(\<forall>z\<ge>?x. ?y \<le> z) = (?y \<le> ?x)
(- ?p \<le> ?x \<guillemotleft> - ?q) = (- ?p \<stileturn> - ?q \<le> ?x)
goal (1 subgoal):
1. - p \<le> - q \<Longrightarrow> - p \<stileturn> - r \<le> - q \<stileturn> - r
[PROOF STEP]
by blast
|
{"llama_tokens": 265, "file": "Correctness_Algebras_Pre_Post", "length": 2}
|
#!/usr/bin/env python
#
# remove filters from photometry catalogs, physicsgrid, and observationgrid
# used to modify simulated data to make plots for proposals
import argparse
import numpy as np
from astropy.table import Table
import tables
from beast.physicsmodel.grid import (FileSEDGrid, SpectralGrid)
import beast.observationmodel.noisemodel.generic_noisemodel as noisemodel
def remove_filters_from_files(catfile,
                              physgrid,
                              obsgrid,
                              outbase,
                              rm_filters):
    """Remove the requested filters from a photometry catalog, a physics (SED)
    grid and an observation/noise grid.

    Writes '<outbase>_cat.fits', '<outbase>_sed.grid.hd5' and
    '<outbase>_noisemodel.grid.hd5'.

    Args:
        catfile: photometry catalog FITS file with '<filter>_rate' columns
        physgrid: physics model (SED) grid file
        obsgrid: observation/noise model grid file
        outbase: prefix for the three output files
        rm_filters: iterable of short filter names (lowercase) to remove
    """
    # remove the requested filters from the catalog file
    cat = Table.read(catfile)
    for cfilter in rm_filters:
        colname = '{}_rate'.format(cfilter)
        if colname in cat.colnames:
            cat.remove_column(colname)
        else:
            print('{} not in catalog file'.format(colname))
    cat.write('{}_cat.fits'.format(outbase), overwrite=True)
    # get the sed grid and process
    g0 = FileSEDGrid(physgrid, backend='cache')
    filters = g0.header['filters'].split(' ')
    # short name = lowercased last '_'-separated token of the full filter name
    shortfilters = [(cfilter.split('_'))[-1].lower() for cfilter in filters]
    nlamb = []
    nfilters = []
    rindxs = []
    # keep wavelength/name for surviving filters; record indices of removed ones
    for csfilter, clamb, cfilter in zip(shortfilters, g0.lamb, filters):
        if csfilter not in rm_filters:
            nlamb.append(clamb)
            nfilters.append(cfilter)
        else:
            rindxs.append(shortfilters.index(csfilter))
    # drop the removed filters' columns (axis 1) from the SED fluxes
    nseds = np.delete(g0.seds, rindxs, 1)
    print('orig filters: {}'.format(' '.join(filters)))
    print(' new filters: {}'.format(' '.join(nfilters)))
    g = SpectralGrid(np.array(nlamb), seds=nseds,
                     grid=g0.grid, backend='memory')
    g.grid.header['filters'] = ' '.join(nfilters)
    g.writeHDF('{}_sed.grid.hd5'.format(outbase))
    # get and process the observation model: drop the same columns from the
    # bias, error and completeness arrays of the noise model
    obsgrid = noisemodel.get_noisemodelcat(obsgrid)
    with tables.open_file('{}_noisemodel.grid.hd5'.format(outbase), 'w') \
            as outfile:
        outfile.create_array(outfile.root, 'bias',
                             np.delete(obsgrid.root.bias, rindxs, 1))
        outfile.create_array(outfile.root, 'error',
                             np.delete(obsgrid.root.error, rindxs, 1))
        outfile.create_array(outfile.root, 'completeness',
                             np.delete(obsgrid.root.completeness, rindxs, 1))
if __name__ == '__main__':
    # command-line interface
    parser = argparse.ArgumentParser()
    parser.add_argument("catfile",
                        help='filename of photometry catalog')
    parser.add_argument("physgrid",
                        help='filename of physics grid')
    parser.add_argument("obsgrid",
                        help='filename of observation/noise grid')
    # nargs='?' makes the default reachable: a plain positional argument is
    # always required, so the original default='lessfilters' was dead code.
    parser.add_argument("outbase", nargs='?', default='lessfilters',
                        help='filename for simulated observations')
    parser.add_argument('--rm_filters', type=str, nargs='*',
                        help='filters to remove')
    args = parser.parse_args()
    # strip the requested filters from catalog, physics grid and noise grid
    remove_filters_from_files(args.catfile,
                              args.physgrid,
                              args.obsgrid,
                              args.outbase,
                              args.rm_filters)
|
{"hexsha": "b51afa91fac7749c90eac81b1facd53af079417a", "size": 3302, "ext": "py", "lang": "Python", "max_stars_repo_path": "beast/tools/remove_filters.py", "max_stars_repo_name": "marthaboyer/beast", "max_stars_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "beast/tools/remove_filters.py", "max_issues_repo_name": "marthaboyer/beast", "max_issues_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beast/tools/remove_filters.py", "max_forks_repo_name": "marthaboyer/beast", "max_forks_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1011235955, "max_line_length": 77, "alphanum_fraction": 0.5938824955, "include": true, "reason": "import numpy,from astropy", "num_tokens": 717}
|
# Build script for the "polymesh" package: compiles two C extension modules
# (mesh and hydrostatic) against the NumPy C headers.
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating
# to setuptools would be the long-term fix.
from distutils.core import setup
from distutils.extension import Extension
import numpy as np
# -Wno-unreachable-code silences compiler warnings from the C sources.
meshModule = Extension("polymesh.mesh",
                       sources=["polymesh/mesh.c"],
                       include_dirs = [np.get_include()],
                       extra_compile_args=["-Wno-unreachable-code"])
hydrostaticModule = Extension("polymesh.hydrostatic",
                              sources=["polymesh/hydrostatic.c"],
                              include_dirs = [np.get_include()],
                              extra_compile_args=["-Wno-unreachable-code"])
setup(name="polymesh",
      version="0.1",
      description="A library for setting up simulations in OpenFOAM",
      author="Jarle A. Kramer",
      author_email="jarlekramer@gmail.com",
      license="MIT",
      packages=["polymesh"],
      install_requires=["numpy",],
      python_requires=">=3",
      include_package_data=True,
      ext_modules=[meshModule, hydrostaticModule],
      )
|
{"hexsha": "51b2fef7ed2aedbebc491580679661387630468b", "size": 832, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "aixuexisun/polymesh", "max_stars_repo_head_hexsha": "07ea7a87028f2f662fa6db92a27cc05a156de545", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-04T13:57:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-04T13:57:24.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "aixuexisun/polymesh", "max_issues_repo_head_hexsha": "07ea7a87028f2f662fa6db92a27cc05a156de545", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "aixuexisun/polymesh", "max_forks_repo_head_hexsha": "07ea7a87028f2f662fa6db92a27cc05a156de545", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-25T12:44:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T12:44:57.000Z", "avg_line_length": 30.8148148148, "max_line_length": 66, "alphanum_fraction": 0.6935096154, "include": true, "reason": "import numpy", "num_tokens": 198}
|
import numpy as np
import mbuild as mb
class AmorphousSilicaBulk(mb.Compound):
    """Bulk amorphous silica box at 2.2 g/cm^3, loaded from a bundled PDB."""

    def __init__(self):
        super().__init__()
        # Coordinates ship with the package, next to this module's file.
        mb.load('amorphous_silica_bulk.pdb', compound=self,
                relative_to_module=self.__module__)
        # Periodic in all three dimensions; box lengths of 5 per axis
        # (units follow mbuild's convention -- confirm against the PDB).
        self.periodicity = np.array([5, 5, 5])
# Smoke test: build the compound and write it out in MOL2 format.
if __name__ == "__main__":
    bulk = AmorphousSilicaBulk()
    bulk.save('bulk.mol2')
|
{"hexsha": "10b4e4faf5b32499418f80fa49e7cc13bb61c617", "size": 451, "ext": "py", "lang": "Python", "max_stars_repo_path": "mbuild/lib/bulk_materials/amorphous_silica_bulk.py", "max_stars_repo_name": "dcardenasv/mbuild", "max_stars_repo_head_hexsha": "20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mbuild/lib/bulk_materials/amorphous_silica_bulk.py", "max_issues_repo_name": "dcardenasv/mbuild", "max_issues_repo_head_hexsha": "20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-18T04:59:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-18T04:59:45.000Z", "max_forks_repo_path": "mbuild/lib/bulk_materials/amorphous_silica_bulk.py", "max_forks_repo_name": "dcardenasv/mbuild", "max_forks_repo_head_hexsha": "20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5294117647, "max_line_length": 59, "alphanum_fraction": 0.6607538803, "include": true, "reason": "import numpy", "num_tokens": 128}
|
import argparse
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pdb
import os
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Input, Dense, Conv2D, BatchNormalization, LeakyReLU, Flatten
from tensorflow.keras.layers import concatenate
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import regularizers
from tensorflow.keras import optimizers
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
def _conv_branch(input_shape, units, n_layers, dense_size, kernel, reg_weight):
    """Build one CNN input branch of the multi-modal model.

    Stack of ``max(1, n_layers)`` Conv2D -> LeakyReLU -> BatchNorm blocks
    (the original always applied at least one conv block, even for
    n_layers <= 0 -- that behavior is preserved), followed by Flatten and
    a LeakyReLU-activated Dense layer.

    Returns (input_layer, dense_output) so the caller can wire the branch
    into a functional Model.
    """
    branch_input = Input(shape=input_shape)
    x = branch_input
    for _ in range(max(1, n_layers)):
        x = Conv2D(filters=units, kernel_size=kernel,
                   kernel_regularizer=regularizers.l2(reg_weight))(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(dense_size, kernel_regularizer=regularizers.l2(reg_weight))(x)
    return branch_input, LeakyReLU(alpha=0.2)(x)


def create_model(n_classes=10, rgb_units=16, rgb_layers=2, ir_units=16, ir_layers=2, alti_units=16, alti_layers=2, dense_size=100, kernel=5, reg_weight=0.1):
    """Create the three-branch (RGB / IR / altitude) Keras CNN classifier.

    Each modality gets its own convolutional branch (see _conv_branch);
    the branch outputs are concatenated and fed to a softmax head with
    `n_classes` outputs.  The model is compiled with categorical
    cross-entropy and Adam before being returned.

    Parameters mirror the original signature: per-branch filter counts and
    depths, the shared dense size, conv kernel size and L2 weight.
    RGB input is (256, 256, 3); IR and altitude inputs are (256, 256, 1).
    """
    verbose = True
    # The three branches previously duplicated ~15 lines each; they are
    # now built by a single helper.
    rgb_input, rgb_out = _conv_branch((256, 256, 3), rgb_units, rgb_layers,
                                      dense_size, kernel, reg_weight)
    ir_input, ir_out = _conv_branch((256, 256, 1), ir_units, ir_layers,
                                    dense_size, kernel, reg_weight)
    alti_input, alti_out = _conv_branch((256, 256, 1), alti_units, alti_layers,
                                        dense_size, kernel, reg_weight)
    combined = concatenate([rgb_out, ir_out, alti_out])
    total_out = Dense(n_classes, activation='softmax', kernel_regularizer=regularizers.l2(reg_weight))(combined)
    # Define and compile the total model.
    model = Model(inputs=[rgb_input, ir_input, alti_input], outputs=total_out)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    if verbose:
        print(model.summary())
        print(f'regularizer: {reg_weight}')
    return model
# Generic data generator object for feeding data to fit_generator
# Generic data generator object for feeding data to fit_generator
def data_generator_mixed(X1, X2, X3, y, bs):
    """Endlessly yield aligned batches ([X1, X2, X3], y) of size `bs`.

    BUG FIX: the original advanced the index *before* slicing, so the
    first `bs` samples were skipped on the first pass and batch order was
    shifted.  The index is now advanced after yielding, and wrap-around
    happens before slicing so every batch is full-sized.
    """
    i = 0
    while True:
        # Restart from the beginning once a full batch no longer fits.
        if i + bs > len(X1):
            i = 0
        yield ([X1[i:i + bs], X2[i:i + bs], X3[i:i + bs]], y[i:i + bs])
        i += bs
def get_argparser():
    """Build the command-line parser for this training script."""
    parser = argparse.ArgumentParser()
    # (flag, default, help) triples for the two integer options.
    for flag, default, help_text in (
        ("--n_epochs", 25, "How many epochs?"),
        ("--gpus", -1, "Which GPUs?"),
    ):
        parser.add_argument(flag, default=default, type=int, help=help_text)
    return parser
if __name__ == '__main__':
    # Dataset selection: 10-class subset vs 20-class subset.
    top10 = True
    if top10:
        n_classes = 10
        n_samples = 13240
    else:
        n_classes = 20
        n_samples = 14363
    # NOTE(review): n_samples is never used below; the 8000/10600 split
    # indices are hard-coded instead -- confirm they match the data.
    data_amount = 'large'
    parser = get_argparser()
    args = parser.parse_args()
    # Restrict visible GPUs and quiet TF logging before any TF work runs.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpus)
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = '1'
    ########################
    # Define CNN model
    ########################
    n_epochs = args.n_epochs
    #model_name = f'CNN10-layers{layers}-units{units}-kernel{kernel}-reg{regularizer}-rgb'
    model = create_model(n_classes=n_classes, kernel=5, reg_weight=0.05,
        rgb_units=32, rgb_layers=4, ir_units=4, ir_layers=3, alti_units=16, alti_layers=3)
    ########################
    # Load and process data
    ########################
    if top10:
        Xrgb = np.load('Xrgb_top10.npy')
        Xir = np.load('Xir_top10.npy')
        Xalti = np.load('Xalti_top10.npy')
        # IR and altitude arrays get a trailing channel axis so they match
        # the model's (256, 256, 1) inputs.
        Xir = np.expand_dims(Xir, axis=3)
        Xalti = np.expand_dims(Xalti, axis=3)
        yfull = np.load('y_top10.npy')
    else:
        Xrgb = np.load('Xrgb_top20.npy')
        Xir = np.load('Xir_top20.npy')
        Xalti = np.load('Xalti_top20.npy')
        Xir = np.expand_dims(Xir, axis=3)
        Xalti = np.expand_dims(Xalti, axis=3)
        yfull = np.load('y_top20.npy')
    # Add scaling on Xalti: shift to non-negative, then scale to 0-1 range.
    Xalti = Xalti + np.abs(np.min(Xalti))
    Xalti = Xalti / np.abs(np.max(Xalti))
    yfull_cat = to_categorical(yfull)
    if data_amount == 'small':
        # Tiny fixed slices for quick debugging runs.
        # NOTE(review): this branch defines no Xval_* / yval, so the
        # validation and predict code below would raise NameError when
        # data_amount == 'small'.
        Xtrain_rgb = Xrgb[0:500]
        Xtrain_ir = Xir[0:500]
        Xtrain_alti = Xalti[0:500]
        Xtest_rgb = Xrgb[1000:1100]
        Xtest_ir = Xir[1000:1100]
        Xtest_alti = Xalti[1000:1100]
        ytrain = yfull_cat[0:500]
        ytest = yfull_cat[1000:1100]
    else:
        # Random shuffle, then a fixed 8000 / 2600 / remainder
        # train / val / test split over all three modalities + labels.
        shff_idx = np.random.permutation(Xrgb.shape[0])
        Xtrain_rgb = Xrgb[shff_idx[0:8000]]
        Xval_rgb = Xrgb[shff_idx[8000:10600]]
        Xtest_rgb = Xrgb[shff_idx[10600:]]
        Xtrain_ir = Xir[shff_idx[0:8000]]
        Xval_ir = Xir[shff_idx[8000:10600]]
        Xtest_ir = Xir[shff_idx[10600:]]
        Xtrain_alti = Xalti[shff_idx[0:8000]]
        Xval_alti = Xalti[shff_idx[8000:10600]]
        Xtest_alti = Xalti[shff_idx[10600:]]
        ytrain = yfull_cat[shff_idx[0:8000]]
        yval = yfull_cat[shff_idx[8000:10600]]
        ytest = yfull_cat[shff_idx[10600:]]
    ########################
    # Train the model
    ########################
    batch_size = 32
    epoch_steps = len(Xtrain_rgb) // batch_size
    val_steps = len(Xval_rgb) // batch_size
    train_history = model.fit_generator(
        data_generator_mixed(Xtrain_rgb, Xtrain_ir, Xtrain_alti, ytrain, batch_size),
        validation_data=data_generator_mixed(Xval_rgb, Xval_ir, Xval_alti, yval, batch_size),
        callbacks=[EarlyStopping(monitor='val_loss', patience=5, verbose=0, min_delta=0, mode='auto', restore_best_weights=True)],
        steps_per_epoch=epoch_steps,
        validation_steps=val_steps,
        epochs=n_epochs)
    ########################
    # Save and evaluate model
    ########################
    save = True
    if save:
        model_name = 'combined'
        model.save(model_name+'.h5')
        print(f'Keras model saved to {model_name+".h5"}')
        # Stack loss/accuracy curves row-wise for a CSV dump.
        loss_obj = np.vstack([train_history.history['loss'], train_history.history['val_loss'], train_history.history['accuracy'], train_history.history['val_accuracy']])
        np.savetxt(f'train-history-{model_name}.csv', loss_obj, delimiter=',', fmt='%.5f')
    # NOTE(review): model_name is only bound when save is True; the final
    # savefig below would raise NameError if save were ever set to False.
    ypred_val = model.predict([Xval_rgb, Xval_ir, Xval_alti])
    val_accuracy = np.mean(np.argmax(ypred_val, axis=1) == np.argmax(yval, axis=1))
    print(f"final val accuracy is {val_accuracy}")
    ypred = model.predict([Xtest_rgb, Xtest_ir, Xtest_alti])
    test_accuracy = np.mean(np.argmax(ypred, axis=1) == np.argmax(ytest, axis=1))
    print(f"final test accuracy is {test_accuracy}")
    # Histograms of predicted class indices: validation (top), test (bottom).
    fig, ax = plt.subplots(2, 1, figsize=(10, 8))
    ax[0].hist(np.argmax(ypred_val, axis=1))
    ax[1].hist(np.argmax(ypred, axis=1))
    if top10:
        ax[0].set_xticks([])
        ax[0].set_xticklabels([])
        ax[1].set_xticks(np.arange(10))
        ax[1].set_xticklabels(np.arange(10))
    else:
        ax[0].set_xticks([])
        ax[0].set_xticklabels([])
        ax[1].set_xticks(np.arange(20))
        ax[1].set_xticklabels(np.arange(20))
    plt.savefig(f'{model_name}-hist.pdf')
|
{"hexsha": "613c139154a79aa2a1364d5eb28e81ad16907397", "size": 8229, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_combined.py", "max_stars_repo_name": "clementfung/biodiversity-analyzer", "max_stars_repo_head_hexsha": "a798bec2d4b5450e2a554dc7b9400cbe4198c102", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_combined.py", "max_issues_repo_name": "clementfung/biodiversity-analyzer", "max_issues_repo_head_hexsha": "a798bec2d4b5450e2a554dc7b9400cbe4198c102", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_combined.py", "max_forks_repo_name": "clementfung/biodiversity-analyzer", "max_forks_repo_head_hexsha": "a798bec2d4b5450e2a554dc7b9400cbe4198c102", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1704545455, "max_line_length": 164, "alphanum_fraction": 0.722566533, "include": true, "reason": "import numpy", "num_tokens": 2548}
|
% -*- root: Main.tex -*-
\section{SVD}
$\mathbf{A} = \mathbf{U} \mathbf{D} \mathbf{V}^\top = \sum_{k=1}^{\operatorname{rank}(\mathbf{A})} d_{k,k} u_k (v_k)^\top$\\
$\mathbf{A} \in \mathbb{R}^{N \times P}, \mathbf{U} \in \mathbb{R}^{N \times N}, \mathbf{D} \in \mathbb{R}^{N \times P}, \mathbf{V} \in \mathbb{R}^{P \times P}$\\
$\mathbf{U}^\top \mathbf{U} = I = \mathbf{V}^\top \mathbf{V}$ ($\mathbf{U}, \mathbf{V}$ orthogonal)\\
$\mathbf{U}$ columns are eigenvectors of $\mathbf{A} \mathbf{A}^\top$, $\mathbf{V}$ columns are eigenvectors of $\mathbf{A}^\top \mathbf{A}$, $\mathbf{D}$ diagonal elements are singular values.\\
$(\mathbf{D}^{-1})_{i,i} = \frac{1}{\mathbf{D}_{i, i}}$ (don't forget to transpose)
1. calculate $\mathbf{A}^\top \mathbf{A}$.\\
2. calculate eigenvalues of $\mathbf{A}^\top \mathbf{A}$, the square root of them, in descending order, are the diagonal elements of $\mathbf{D}$.\\
3. calculate eigenvectors of $\mathbf{A}^\top \mathbf{A}$ using the eigenvalues resulting in the columns of $\mathbf{V}$.\\
4. calculate the missing matrix: $\mathbf{U} = \mathbf{A} \mathbf{V} \mathbf{D}^{-1}$.\\
5. normalize each column of $\mathbf{U}$ and $\mathbf{V}$.
\subsection*{Low-Rank approximation}
Using only $K$ largest eigenvalues and corresponding eigenvectors. $\tilde{\mathbf{A}}_{i, j} = \sum_{k=1}^K \mathbf{U}_{i, k} \mathbf{D}_{k,k} \mathbf{V}_{j, k} = \sum_{k=1}^K \mathbf{U}_{i, k} \mathbf{D}_{k,k} (\mathbf{V}^\top)_{k, j}$.
\subsection*{Collaborative Filtering \textendash\ Prediction}
For $\mathbf{U}, \mathbf{V}$ (Movie-/User-embedding) use $k$-rank approx (fill up dimensions). Predict user preferences: $\mathbf{U_k} \mathbf{U_k^T} b$.
\subsection*{Eckart--Young Theorem}
$\min_{rank(B)=K} ||A-B||_F^2 = ||A-A_k||_F^2 = \sum_{r=k+1}^{rank(A)} \sigma_r^2$
$\min_{rank(B)=K} ||A-B||_2 = ||A-A_k||_2 = \sigma_{k+1}$
|
{"hexsha": "e33615337f33de1120e100191e243dc0a6153920", "size": 1847, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "SVD.tex", "max_stars_repo_name": "kalsan/eth-cil-exam-summary", "max_stars_repo_head_hexsha": "ae93c61b8e7e88be725641d9580b37367c332458", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-07-31T11:12:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-02T09:35:54.000Z", "max_issues_repo_path": "SVD.tex", "max_issues_repo_name": "kalsan/eth-cil-exam-summary", "max_issues_repo_head_hexsha": "ae93c61b8e7e88be725641d9580b37367c332458", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SVD.tex", "max_forks_repo_name": "kalsan/eth-cil-exam-summary", "max_forks_repo_head_hexsha": "ae93c61b8e7e88be725641d9580b37367c332458", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-07-19T14:14:46.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-05T13:50:06.000Z", "avg_line_length": 76.9583333333, "max_line_length": 238, "alphanum_fraction": 0.639415268, "num_tokens": 753}
|
#!/usr/bin/env python
# Copyright (c) 2021, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking
# platform" software is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from matplotlib import pyplot as plt
from scipy.interpolate import RegularGridInterpolator
from scipy.io import wavfile
from scipy.signal import welch
fs = 32000  # sample rate, Hz
T = 30.0  # sample duration, seconds
freqs = [12333, 10533]  # test-tone frequencies, Hz
rng = np.random.RandomState(23)  # fixed seed so generated noise is reproducible
def get_signal(F, fs, T, rand=None):
    """Synthesize T seconds of a pure tone at F Hz mixed with clipped noise.

    The tone (weight 1.0) and a clipped Gaussian noise component
    (weight 1.5) are summed and rescaled so the mix fills the int16 range.

    Parameters:
        F: tone frequency, Hz.
        fs: sample rate, Hz.
        T: duration, seconds.
        rand: optional numpy RandomState used for the noise.  Defaults to
            the module-level seeded ``rng`` so existing callers keep their
            deterministic output; passing one explicitly makes the
            function self-contained and testable.

    Returns a 1-D int16 array of length fs * T.
    """
    if rand is None:
        rand = rng  # module-level RandomState (seed 23)
    t = np.arange(fs * T) / fs
    weight_sum = 0
    signal = np.zeros(t.shape)
    # Tone component.
    weight = 1.0
    weight_sum += weight
    signal += weight * np.sin(2 * np.pi * F * t)
    # Noise component: unit-variance/3 Gaussian, clipped to [-1, 1].
    weight = 1.5
    weight_sum += weight
    signal += weight * np.clip(1.0 / 3 * rand.normal(size=t.shape), -1, 1)
    # Normalize by the total weight so the mix spans int16 full scale.
    # NOTE(review): a sample hitting exactly +1 per component would map to
    # 32768, one past int16 max -- presumably never reached in practice.
    return (32768 / weight_sum * signal).astype("int16")
# Generate one WAV file per test frequency.
for F in freqs:
    signal = get_signal(F, fs, T)
    fname = "test_sounds/tone%s.wav" % F
    wavfile.write(fname, fs, signal)
    print("wrote %s" % fname)
# Read each file back and check its power spectral density at both
# test frequencies (each tone should peak only at its own frequency).
for F in freqs:
    fname = "test_sounds/tone%s.wav" % F
    # NOTE(review): this rebinds the module-level `fs` to the rate read
    # from the file -- harmless here since they match, but fragile.
    fs, signal = wavfile.read(fname)
    welch_F, Pxx = welch(signal, fs, nperseg=8192, average="median")
    plt.semilogy(welch_F, Pxx)
    # Interpolate the PSD so it can be sampled at the exact tone frequencies.
    interp = RegularGridInterpolator([welch_F], Pxx)
    print("PSD for %s:" % fname)
    # NOTE(review): the inner loop variable shadows the outer `F`; the
    # outer loop reassigns it each iteration, so behavior is unaffected.
    for F in freqs:
        print("  @ %s Hz: %s" % (F, interp([F])))
# Overlay plot of both PSDs, saved as a PNG.
plt.legend([str(F) for F in freqs])
plt.xlabel("Frequency (Hz)")
plt.ylabel("PSD ($V^2$ / Hz)")
fname = "tones.png"
plt.savefig(fname)
print("wrote %s" % fname)
plt.close()
|
{"hexsha": "3351cb7072163f287e02b824275e4eb7ad3a801b", "size": 2194, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrobee/simulation/acoustics_cam/src/generate_pure_tones.py", "max_stars_repo_name": "rsoussan/isaac", "max_stars_repo_head_hexsha": "c6cc7b5c0707a9b23b5fc4376f0a062052992bc6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-11-18T19:29:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T01:55:51.000Z", "max_issues_repo_path": "astrobee/simulation/acoustics_cam/src/generate_pure_tones.py", "max_issues_repo_name": "rsoussan/isaac", "max_issues_repo_head_hexsha": "c6cc7b5c0707a9b23b5fc4376f0a062052992bc6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-11-30T17:14:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T21:38:33.000Z", "max_forks_repo_path": "astrobee/simulation/acoustics_cam/src/generate_pure_tones.py", "max_forks_repo_name": "rsoussan/isaac", "max_forks_repo_head_hexsha": "c6cc7b5c0707a9b23b5fc4376f0a062052992bc6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-12-03T02:38:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T01:52:03.000Z", "avg_line_length": 28.4935064935, "max_line_length": 75, "alphanum_fraction": 0.6841385597, "include": true, "reason": "import numpy,from scipy", "num_tokens": 623}
|
from distutils.core import setup, Extension
try:
import numpy # @UnusedImport # NOQA
except ImportError:
msg = ("No module named numpy. "
"Please install numpy first, it is needed before installing PH5.")
raise ImportError(msg)
def get_extension_options():
    """Return the positional (name, sources) arguments for Extension."""
    name = "ibm2ieee_py"
    sources = [
        "ph5/core/c_dependencies/ibm2ieee_py.c",
        "ph5/core/c_dependencies/ibm2ieeewrapper_py.c",
    ]
    return (name, sources)
def install():
    """Register and build the ibm2ieee_py C extension via distutils.

    The extension compiles against the NumPy C API, hence the explicit
    include directory.
    """
    ext = Extension(*get_extension_options(),
                    include_dirs=[numpy.get_include()])
    setup(name="ibm2ieee_py",
          version="2013.121",
          ext_modules=[ext])
# Run the build/installation when executed as a script.
if __name__ == '__main__':
    install()
|
{"hexsha": "8bef8e58d325b68071ab046561d87bac588cc1b4", "size": 739, "ext": "py", "lang": "Python", "max_stars_repo_path": "ph5/core/c_dependencies/suibm2ieee_py.py", "max_stars_repo_name": "mbriggs134/PH5", "max_stars_repo_head_hexsha": "691790269c4f80c892bacb84fdcaba905b9e0d6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2016-12-07T20:09:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T22:23:57.000Z", "max_issues_repo_path": "ph5/core/c_dependencies/suibm2ieee_py.py", "max_issues_repo_name": "mbriggs134/PH5", "max_issues_repo_head_hexsha": "691790269c4f80c892bacb84fdcaba905b9e0d6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 395, "max_issues_repo_issues_event_min_datetime": "2016-11-03T03:43:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T20:54:22.000Z", "max_forks_repo_path": "ph5/core/c_dependencies/suibm2ieee_py.py", "max_forks_repo_name": "mbriggs134/PH5", "max_forks_repo_head_hexsha": "691790269c4f80c892bacb84fdcaba905b9e0d6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-10-25T22:22:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T18:19:45.000Z", "avg_line_length": 26.3928571429, "max_line_length": 77, "alphanum_fraction": 0.598105548, "include": true, "reason": "import numpy", "num_tokens": 161}
|
# Filename: geom.py
# License: LICENSES/LICENSE_UVIC_EPFL
import numpy as np
def parse_geom(geom, geom_type):
    """Split a flat per-sample geometry array into named matrices.

    geom: 2-D array, one row per sample.
    geom_type: "Homography" (9 values -> 3x3 "h") or "Calibration"
        (columns sliced into K, R, t, K_inv, q, q_inv).

    Raises NotImplementedError for any other geom_type.
    """
    if geom_type == "Homography":
        return {"h": geom.reshape((-1, 3, 3))}
    if geom_type == "Calibration":
        return {
            "K": geom[:, :9].reshape((-1, 3, 3)),
            "R": geom[:, 9:18].reshape((-1, 3, 3)),
            "t": geom[:, 18:21].reshape((-1, 3, 1)),
            # Columns 21:23 are deliberately not consumed here --
            # presumably some other field; confirm against the writer.
            "K_inv": geom[:, 23:32].reshape((-1, 3, 3)),
            "q": geom[:, 32:36].reshape([-1, 4, 1]),
            "q_inv": geom[:, 36:40].reshape([-1, 4, 1]),
        }
    raise NotImplementedError(
        "{} is not a supported geometry type!".format(geom_type)
    )
def np_skew_symmetric(v):
    """Row-wise cross-product ([v]_x) matrices, flattened to shape (N, 9)."""
    z = np.zeros_like(v[:, 0])
    components = (
        z, -v[:, 2], v[:, 1],
        v[:, 2], z, -v[:, 0],
        -v[:, 1], v[:, 0], z,
    )
    return np.stack(components, axis=1)
def np_unskew_symmetric(M):
    """Recover the vector from (N, 9) flattened skew-symmetric matrices.

    Averages the two off-diagonal entries encoding each component, so it
    is the exact inverse of np_skew_symmetric.
    """
    components = [
        0.5 * (M[:, 7] - M[:, 5])[None],
        0.5 * (M[:, 2] - M[:, 6])[None],
        0.5 * (M[:, 3] - M[:, 1])[None],
    ]
    return np.concatenate(components, axis=1)
def _np_fundamental(dR, dt, num_pts):
    """Build F = [dt]_x @ dR and tile it num_pts times as (num_pts, 3, 3).

    Shared by get_episqr / get_episym / get_sampsons, which previously
    each duplicated this construction inline.
    """
    dR = dR.reshape(1, 3, 3)
    dt = dt.reshape(1, 3)
    zero = np.zeros_like(dt[:, 0])
    # Cross-product (skew-symmetric) matrix of dt, row-major flattened.
    tx = np.stack([
        zero, -dt[:, 2], dt[:, 1],
        dt[:, 2], zero, -dt[:, 0],
        -dt[:, 1], dt[:, 0], zero,
    ], axis=1).reshape(-1, 3, 3)
    F = np.matmul(tx, dR).reshape(-1, 3, 3)
    return np.repeat(F, num_pts, axis=0)


def _np_homogeneous(x):
    """Append 1 to each 2-D point; returns (N, 3, 1) column vectors."""
    num_pts = len(x)
    return np.concatenate([x, np.ones((num_pts, 1))], axis=-1).reshape(-1, 3, 1)


def _epipolar_terms(x1, x2, dR, dt):
    """Return (x2^T F x1, F x1, F^T x2) per correspondence.

    x2Fx1 has shape (N,); Fx1 and Ftx2 have shape (N, 3).
    """
    x1 = _np_homogeneous(x1)
    x2 = _np_homogeneous(x2)
    F = _np_fundamental(dR, dt, len(x1))
    x2Fx1 = np.matmul(x2.transpose(0, 2, 1), np.matmul(F, x1)).flatten()
    Fx1 = np.matmul(F, x1).reshape(-1, 3)
    Ftx2 = np.matmul(F.transpose(0, 2, 1), x2).reshape(-1, 3)
    return x2Fx1, Fx1, Ftx2


def get_episqr(x1, x2, dR, dt):
    """Squared epipolar constraint residual (x2^T F x1)^2 per point.

    x1, x2: (N, 2) corresponding points; dR: relative rotation (3x3);
    dt: relative translation (3,).  Returns a flat (N,) array.
    """
    x2Fx1, _, _ = _epipolar_terms(x1, x2, dR, dt)
    ys = x2Fx1**2
    return ys.flatten()


def get_episym(x1, x2, dR, dt):
    """Symmetric epipolar distance per correspondence (flat (N,) array)."""
    x2Fx1, Fx1, Ftx2 = _epipolar_terms(x1, x2, dR, dt)
    ys = x2Fx1**2 * (
        1.0 / (Fx1[..., 0]**2 + Fx1[..., 1]**2) +
        1.0 / (Ftx2[..., 0]**2 + Ftx2[..., 1]**2))
    return ys.flatten()


def get_sampsons(x1, x2, dR, dt):
    """Sampson (first-order geometric) error per correspondence."""
    x2Fx1, Fx1, Ftx2 = _epipolar_terms(x1, x2, dR, dt)
    ys = x2Fx1**2 / (
        Fx1[..., 0]**2 + Fx1[..., 1]**2 + Ftx2[..., 0]**2 + Ftx2[..., 1]**2
    )
    return ys.flatten()
#
# geom.py ends here
|
{"hexsha": "8a453254242195575d402c1be0f480d686a1126c", "size": 3503, "ext": "py", "lang": "Python", "max_stars_repo_path": "geom.py", "max_stars_repo_name": "zengru001usst/acne", "max_stars_repo_head_hexsha": "ae652e814649e88034b3b506ccbe34432b1eb85a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2020-03-27T21:00:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T06:54:00.000Z", "max_issues_repo_path": "geom.py", "max_issues_repo_name": "zengru001usst/acne", "max_issues_repo_head_hexsha": "ae652e814649e88034b3b506ccbe34432b1eb85a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-06-15T17:30:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-14T12:22:33.000Z", "max_forks_repo_path": "geom.py", "max_forks_repo_name": "zengru001usst/acne", "max_forks_repo_head_hexsha": "ae652e814649e88034b3b506ccbe34432b1eb85a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-05-06T14:50:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T06:53:55.000Z", "avg_line_length": 24.3263888889, "max_line_length": 75, "alphanum_fraction": 0.5166999715, "include": true, "reason": "import numpy", "num_tokens": 1285}
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
C=======================================================================
      LOGICAL FUNCTION PLTSTC(INDX,BUFF)
C     Set a color-spectrum option for the PLT library.
C     INDX = 0 resets color state via PLTRSC.  INDX = 1 installs a color
C     spectrum chosen by BUFF(1): -1. and 0. are multi-segment rainbow
C     ramps (opposite directions); 1. through 15. ramp from black to the
C     matching PALETT column (2..16).  Returns .TRUE. on success and
C     .FALSE. on an illegal index or buffer value (which is also
C     reported through SIORPT after flushing with PLTFLU).
C
C     The COMMON blocks below mirror the shared PLT state used across
C     the library; only /COLOR/ (COLP, PALETT) is actually read or
C     written here.
      REAL DEVCAP(23)
      REAL DEFOUT(7)
      COMMON /STATUS/DEVCAP,DEFOUT
      REAL DEVP(5)
      COMMON /DEVICE/DEVP
      REAL COLP(3)
      REAL PALETT(3,16)
      COMMON /COLOR/COLP,PALETT
      REAL TEXTP(40)
      COMMON /TEXT/TEXTP
      REAL VECTP(5)
      REAL XCUR
      REAL YCUR
      COMMON /VECTRC/VECTP,XCUR,YCUR
      INTEGER IDEX(200,2)
      INTEGER NVECT(200,2)
      REAL XSIZE(200,2)
      REAL YSIZE(200,2)
      REAL X0(2300,2)
      REAL Y0(2300,2)
      REAL X1(2300,2)
      REAL Y1(2300,2)
      COMMON /FONT/IDEX,NVECT,XSIZE,YSIZE,X0,Y0,X1,Y1
      REAL GRAPHP(100)
      COMMON /GRAPH/GRAPHP
      COMMON /MAPPAR/MAPP(11)
      REAL MAPP
      COMMON /STORAG/MEMORY(1000)
      DIMENSION BUFF(*)
      CHARACTER*16 IERROR
      PLTSTC = .TRUE.
      IF (INDX.EQ.0) THEN
C        Reset all color state to library defaults.
         CALL PLTRSC
      ELSE IF (INDX.EQ.1) THEN
C        Save the old spectrum code so it can be restored on error.
         BTEMP = COLP(1)
         COLP(1) = BUFF(1)
         IF (BUFF(1).EQ.-1.) THEN
C           Rainbow ramp, violet -> red.
            CALL PLTSPC(0.,.706,0.,.706,.1875,0.,0.,1.)
            CALL PLTSPC(.1875,0.,0.,1.,.3708,0.,1.,0.)
            CALL PLTSPC(.3708,0.,1.,0.,.6208,1.,1.,0.)
            CALL PLTSPC(.6208,1.,1.,0.,.8292,1.,.659,0.)
            CALL PLTSPC(.8292,1.,.659,0.,1.,1.,0.,0.)
         ELSE IF (BUFF(1).EQ.0.) THEN
C           Rainbow ramp, red -> violet (reverse of the above).
            CALL PLTSPC(0.,1.,0.,0.,.1708,1.,.659,0.)
            CALL PLTSPC(.1708,1.,.659,0.,.3792,1.,1.,0.)
            CALL PLTSPC(.3792,1.,1.,0.,.6292,0.,1.,0.)
            CALL PLTSPC(.6292,0.,1.,0.,.8125,0.,0.,1.)
            CALL PLTSPC(.8125,0.,0.,1.,1.,.706,0.,.706)
         ELSE IF (BUFF(1).EQ.1.) THEN
C           Codes 1..15: ramp from black to palette entry BUFF(1)+1.
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,2),PALETT(2,2),
     *                  PALETT(3,2))
         ELSE IF (BUFF(1).EQ.2.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,3),PALETT(2,3),
     *                  PALETT(3,3))
         ELSE IF (BUFF(1).EQ.3.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,4),PALETT(2,4),
     *                  PALETT(3,4))
         ELSE IF (BUFF(1).EQ.4.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,5),PALETT(2,5),
     *                  PALETT(3,5))
         ELSE IF (BUFF(1).EQ.5.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,6),PALETT(2,6),
     *                  PALETT(3,6))
         ELSE IF (BUFF(1).EQ.6.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,7),PALETT(2,7),
     *                  PALETT(3,7))
         ELSE IF (BUFF(1).EQ.7.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,8),PALETT(2,8),
     *                  PALETT(3,8))
         ELSE IF (BUFF(1).EQ.8.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,9),PALETT(2,9),
     *                  PALETT(3,9))
         ELSE IF (BUFF(1).EQ.9.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,10),PALETT(2,10),
     *                  PALETT(3,10))
         ELSE IF (BUFF(1).EQ.10.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,11),PALETT(2,11),
     *                  PALETT(3,11))
         ELSE IF (BUFF(1).EQ.11.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,12),PALETT(2,12),
     *                  PALETT(3,12))
         ELSE IF (BUFF(1).EQ.12.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,13),PALETT(2,13),
     *                  PALETT(3,13))
         ELSE IF (BUFF(1).EQ.13.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,14),PALETT(2,14),
     *                  PALETT(3,14))
         ELSE IF (BUFF(1).EQ.14.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,15),PALETT(2,15),
     *                  PALETT(3,15))
         ELSE IF (BUFF(1).EQ.15.) THEN
            CALL PLTSPC(0.,0.,0.,0.,1.,PALETT(1,16),PALETT(2,16),
     *                  PALETT(3,16))
         ELSE
C           Unknown spectrum code: restore previous value and report.
            COLP(1) = BTEMP
            CALL CHRRVC(BUFF(1),IERROR,L)
            CALL PLTFLU
            CALL SIORPT('PLTSTC','Illegal buffer '//IERROR(1:L)//
     *                  ' passed to PLTSTC.',2)
            PLTSTC = .FALSE.
         END IF
      ELSE
C        Only INDX 0 and 1 are supported.
         CALL CHRIC(INDX,IERROR,L)
         CALL PLTFLU
         CALL SIORPT('PLTSTC','Illegal index '//IERROR(1:L)//'.',2)
         PLTSTC = .FALSE.
      END IF
      RETURN
      END
|
{"hexsha": "f92e3698ec23db584bedf36d8631734b8585d9ab", "size": 4503, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/libraries/plt/pltstc.f", "max_stars_repo_name": "jschueller/seacas", "max_stars_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_stars_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2016-02-04T18:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:01:49.000Z", "max_issues_repo_path": "packages/seacas/libraries/plt/pltstc.f", "max_issues_repo_name": "jschueller/seacas", "max_issues_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_issues_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2015-11-20T01:57:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:12:04.000Z", "max_forks_repo_path": "packages/seacas/libraries/plt/pltstc.f", "max_forks_repo_name": "jschueller/seacas", "max_forks_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_forks_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-01-13T22:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:25:05.000Z", "avg_line_length": 32.1642857143, "max_line_length": 72, "alphanum_fraction": 0.4683544304, "num_tokens": 1728}
|
# -*- coding: utf-8 -*-
"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% PLOTTER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Plot the results in 3D format; they can be plotted either individually or
in groups of two or four.
Save the plot in a svg file whose name is the Label of the variable
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Inputs:
V Potential distribution in the domain
E Electric field in the domain
J Current density
rho Charge density
nE Norm of E
nJ Norm of J
plotvar: List with the variables to be plotted
        Label:  List of labels for plots
        nx, ny  Number of nodes in the x and y edges if the mesh is
                structured
Outputs:
3D plots of the variables found
svg files of the plots made
Salomón Castaño
Universidad EAFIT, Sciences Department, Physics Engineering, Numeric Methods
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D #DO NOT ERREASE
import numpy as np
#Plot one variable in 3D using triangles to shape the surface
def t1_surf(X, Y, Z1, Label):
    """Plot one field as a triangulated 3D surface and save it as <Label>.svg.

    X, Y, Z1 are flat coordinate/value arrays (unstructured mesh);
    Label is used as both the z-axis label and the output file name.
    """
    fig = plt.figure(figsize=(7, 7))
    axes = fig.add_subplot(111, projection='3d')
    surface = axes.plot_trisurf(X, Y, Z1, cmap=cm.plasma, linewidth=0.0,
                                antialiased=False, edgecolor=None, alpha=1)
    plt.colorbar(surface, shrink=0.4, aspect=15)
    axes.set_xlabel('X')
    axes.set_ylabel('Y')
    axes.set_zlabel(Label)
    fig.savefig(Label + '.svg', format='svg', dpi=1200)
    plt.show()
#Plot one variable in 3D using squares to shape the surface
def q1_surf(x, y, z1, Label, nx, ny):
    """Plot one field as a 3D quad surface on a structured nx-by-ny grid.

    x, y, z1: flat arrays of length nx*ny, reshaped to (ny, nx)
        (assumes row-major grid ordering -- TODO confirm against the mesh
        generator).
    Label: z-axis label, also used as the output file name (<Label>.svg).
    nx, ny: number of nodes along the x and y edges.
    """
    X = np.reshape(x, (ny, nx))
    Y = np.reshape(y, (ny, nx))
    Z1 = np.reshape(z1, (ny, nx))
    fig = plt.figure(figsize=(7, 7))
    # BUG FIX: the original also called fig.savefig(...) right here,
    # writing an empty figure to <Label>.svg before anything was drawn
    # (immediately overwritten by the save below); the dead call is removed.
    ax = fig.add_subplot(111, projection='3d')
    surf1 = ax.plot_surface(X, Y, Z1, cmap=cm.plasma, linewidth=0.0,
                            antialiased=False, edgecolor=None, alpha=1)
    plt.colorbar(surf1, shrink=0.4, aspect=15)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel(Label)
    fig.savefig(Label + '.svg', format='svg', dpi=1200)
    plt.show()
#Plot two variables in 3D using squares to shape the surface
def q2_surf(x,y,z1,col1,z2,col2,Label1,Label1c,Label2,Label2c,nx,ny):
    """Plot two fields side by side as 3D quad surfaces, colored by separate
    scalar fields, and save the pair as <Label1><Label2>.svg.

    x, y: flat grid coordinates of length nx*ny.
    z1, z2: surface heights; col1, col2: scalar fields mapped to face colors.
    Label1/Label2: z-axis labels; Label1c/Label2c: colorbar labels.
    nx, ny: structured-grid dimensions used for reshaping.
    """
    X = np.reshape(x, (ny, nx))
    Y = np.reshape(y, (ny, nx))
    Z1 = np.reshape(z1, (ny, nx))
    Z2 = np.reshape(z2, (ny, nx))
    col1 = np.reshape(col1, (ny, nx))
    col2 = np.reshape(col2, (ny, nx))
    fig = plt.figure(figsize=(20,10))
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    # Normalize each color field by its max before mapping through plasma.
    my_col1 = cm.plasma(col1/np.amax(col1))
    my_col2 = cm.plasma(col2/np.amax(col2))
    # NOTE(review): the colorbars are built from the surface mappables
    # (Z values), while the faces are colored by col1/col2 -- the bar
    # scale may not match the face colors; confirm this is intended.
    # Left surface draws white edges (linewidth=0.5); right one draws none.
    surf1 = ax.plot_surface(X, Y, Z1, facecolors = my_col1, cmap=cm.plasma, linewidth=0.5, \
                            antialiased=False, edgecolor='w', alpha = 1)
    clb = plt.colorbar(surf1, shrink=0.4, aspect=15)
    clb.set_label(Label1c, labelpad=-40, y=1.1, rotation=0)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel(Label1)
    ax = fig.add_subplot(1, 2, 2, projection='3d')
    surf2 = ax.plot_surface(X, Y, Z2, facecolors = my_col2, cmap=cm.plasma, linewidth=0.0, \
                            antialiased=False, edgecolor=None, alpha = 1)
    clb = plt.colorbar(surf2, shrink=0.4, aspect=15)
    clb.set_label(Label2c, labelpad=-40, y=1.1, rotation=0)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel(Label2)
    fig.savefig(Label1+Label2+'.svg', format='svg', dpi=1200)
    plt.show()
#Plot four variables in 3D using triangles to shape the surface
def t4_surf(X,Y,Z1,Z2,Z3,Z4,Label1,Label2,Label3,Label4):
    """Plot four variables as triangulated 3D surfaces in a 2x2 grid and save
    the whole figure as 'Group plot.svg'.

    Args:
        X, Y: shared coordinate arrays for all four panels
        Z1..Z4: surface values for each panel
        Label1..Label4: z-axis label for each panel
    """
    fig = plt.figure(figsize=(20,10))
    # Fixed: the original called plt.show() after the second and third panels,
    # which displays (and with blocking backends, interrupts) an incomplete
    # figure before the remaining panels and savefig. Show only once at the end.
    panels = zip((Z1, Z2, Z3, Z4), (Label1, Label2, Label3, Label4))
    for idx, (Z, Label) in enumerate(panels, start=1):
        ax = fig.add_subplot(2, 2, idx, projection='3d')
        surf = ax.plot_trisurf(X, Y, Z, cmap=cm.plasma, linewidth=0.0,
                               antialiased=False, edgecolor=None, alpha = 1)
        plt.colorbar(surf, shrink=0.4, aspect=15)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel(Label)
    fig.savefig('Group plot.svg', format='svg', dpi=1200)
    plt.show()
|
{"hexsha": "1ffeb4e1d2bf4e217326f86b864becda839b9630", "size": 5175, "ext": "py", "lang": "Python", "max_stars_repo_path": "ADsolver/PM_plotter.py", "max_stars_repo_name": "salomon-castano/PM---Codigo", "max_stars_repo_head_hexsha": "86785e8fab7e5da49962f824d6c5e662efb21737", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-16T16:43:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-03T22:33:22.000Z", "max_issues_repo_path": "ADsolver/PM_plotter.py", "max_issues_repo_name": "salomon-castano/PM---Codigo", "max_issues_repo_head_hexsha": "86785e8fab7e5da49962f824d6c5e662efb21737", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ADsolver/PM_plotter.py", "max_forks_repo_name": "salomon-castano/PM---Codigo", "max_forks_repo_head_hexsha": "86785e8fab7e5da49962f824d6c5e662efb21737", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4436619718, "max_line_length": 92, "alphanum_fraction": 0.5994202899, "include": true, "reason": "import numpy", "num_tokens": 1569}
|
(**
This file is part of the Flocq formalization of floating-point
arithmetic in Coq: http://flocq.gforge.inria.fr/
Copyright (C) 2009-2018 Sylvie Boldo
#<br />#
Copyright (C) 2009-2018 Guillaume Melquiond
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
COPYING file for more details.
*)
(** Basic operations on floats: alignment, addition, multiplication *)
Require Import Raux Defs Float_prop.
Set Implicit Arguments.
Set Strongly Strict Implicit.
Section Float_ops.
(* [beta] is the radix in which all the floats of this section are expressed. *)
Variable beta : radix.
Notation bpow e := (bpow beta e).
Arguments Float {beta}.
(* Bring two floats to a common exponent: the result [(m1', m2', e)] keeps the
   smaller of the two exponents and scales the mantissa of the float with the
   larger exponent by the corresponding power of [beta]. *)
Definition Falign (f1 f2 : float beta) :=
let '(Float m1 e1) := f1 in
let '(Float m2 e2) := f2 in
if Zle_bool e1 e2
then (m1, (m2 * Zpower beta (e2 - e1))%Z, e1)
else ((m1 * Zpower beta (e1 - e2))%Z, m2, e2).
(* Alignment preserves the real value of both floats. *)
Theorem Falign_spec :
forall f1 f2 : float beta,
let '(m1, m2, e) := Falign f1 f2 in
F2R f1 = @F2R beta (Float m1 e) /\ F2R f2 = @F2R beta (Float m2 e).
Proof.
unfold Falign.
intros (m1, e1) (m2, e2).
generalize (Zle_cases e1 e2).
case (Zle_bool e1 e2) ; intros He ; split ; trivial.
now rewrite <- F2R_change_exp.
rewrite <- F2R_change_exp.
apply refl_equal.
omega.
Qed.
(* The common exponent chosen by [Falign] is the minimum of the two inputs. *)
Theorem Falign_spec_exp:
forall f1 f2 : float beta,
snd (Falign f1 f2) = Zmin (Fexp f1) (Fexp f2).
Proof.
intros (m1,e1) (m2,e2).
unfold Falign; simpl.
generalize (Zle_cases e1 e2);case (Zle_bool e1 e2); intros He.
case (Zmin_spec e1 e2); intros (H1,H2); easy.
case (Zmin_spec e1 e2); intros (H1,H2); easy.
Qed.
(* Negation: negate the mantissa, keep the exponent. *)
Definition Fopp (f1 : float beta) : float beta :=
let '(Float m1 e1) := f1 in
Float (-m1)%Z e1.
(* [Fopp] computes the real negation exactly. *)
Theorem F2R_opp :
forall f1 : float beta,
(F2R (Fopp f1) = -F2R f1)%R.
intros (m1,e1).
apply F2R_Zopp.
Qed.
(* Absolute value: take the absolute value of the mantissa. *)
Definition Fabs (f1 : float beta) : float beta :=
let '(Float m1 e1) := f1 in
Float (Zabs m1)%Z e1.
(* [Fabs] computes the real absolute value exactly. *)
Theorem F2R_abs :
forall f1 : float beta,
(F2R (Fabs f1) = Rabs (F2R f1))%R.
intros (m1,e1).
apply F2R_Zabs.
Qed.
(* Exact addition: align both operands, then add the mantissas. *)
Definition Fplus (f1 f2 : float beta) : float beta :=
let '(m1, m2 ,e) := Falign f1 f2 in
Float (m1 + m2) e.
(* [Fplus] computes the exact real sum. *)
Theorem F2R_plus :
forall f1 f2 : float beta,
F2R (Fplus f1 f2) = (F2R f1 + F2R f2)%R.
Proof.
intros f1 f2.
unfold Fplus.
generalize (Falign_spec f1 f2).
destruct (Falign f1 f2) as ((m1, m2), e).
intros (H1, H2).
rewrite H1, H2.
unfold F2R. simpl.
rewrite plus_IZR.
apply Rmult_plus_distr_r.
Qed.
(* Adding two floats sharing an exponent simply adds the mantissas. *)
Theorem Fplus_same_exp :
forall m1 m2 e,
Fplus (Float m1 e) (Float m2 e) = Float (m1 + m2) e.
Proof.
intros m1 m2 e.
unfold Fplus.
simpl.
now rewrite Zle_bool_refl, Zminus_diag, Zmult_1_r.
Qed.
(* The exponent of a sum is the minimum of the operands' exponents. *)
Theorem Fexp_Fplus :
forall f1 f2 : float beta,
Fexp (Fplus f1 f2) = Zmin (Fexp f1) (Fexp f2).
Proof.
intros f1 f2.
unfold Fplus.
rewrite <- Falign_spec_exp.
now destruct (Falign f1 f2) as ((p,q),e).
Qed.
(* Exact subtraction, defined as addition of the negation. *)
Definition Fminus (f1 f2 : float beta) :=
Fplus f1 (Fopp f2).
(* [Fminus] computes the exact real difference. *)
Theorem F2R_minus :
forall f1 f2 : float beta,
F2R (Fminus f1 f2) = (F2R f1 - F2R f2)%R.
Proof.
intros f1 f2; unfold Fminus.
rewrite F2R_plus, F2R_opp.
ring.
Qed.
(* Subtracting two floats sharing an exponent subtracts the mantissas. *)
Theorem Fminus_same_exp :
forall m1 m2 e,
Fminus (Float m1 e) (Float m2 e) = Float (m1 - m2) e.
Proof.
intros m1 m2 e.
unfold Fminus.
apply Fplus_same_exp.
Qed.
(* Exact multiplication: multiply mantissas and add exponents. *)
Definition Fmult (f1 f2 : float beta) : float beta :=
let '(Float m1 e1) := f1 in
let '(Float m2 e2) := f2 in
Float (m1 * m2) (e1 + e2).
(* [Fmult] computes the exact real product. *)
Theorem F2R_mult :
forall f1 f2 : float beta,
F2R (Fmult f1 f2) = (F2R f1 * F2R f2)%R.
Proof.
intros (m1, e1) (m2, e2).
unfold Fmult, F2R. simpl.
rewrite mult_IZR, bpow_plus.
ring.
Qed.
End Float_ops.
|
{"author": "JasonGross", "repo": "flocq", "sha": "0abac1eaa11c02f01ccffdc69846382e3a7040c5", "save_path": "github-repos/coq/JasonGross-flocq", "path": "github-repos/coq/JasonGross-flocq/flocq-0abac1eaa11c02f01ccffdc69846382e3a7040c5/src/Calc/Operations.v"}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020-2021 by SCICO Developers
# All rights reserved. BSD 3-clause License.
# This file is part of the SCICO package. Details of the copyright and
# user license can be found in the 'LICENSE' file distributed with the
# package.
"""Utility functions."""
# The timer classes in this module are copied from https://github.com/bwohlberg/sporco
from __future__ import annotations
import io
import socket
import urllib.error as urlerror
import urllib.request as urlrequest
import warnings
from functools import wraps
from timeit import default_timer as timer
from typing import Any, Callable, List, Optional, Union
import numpy as np
import jax
from jax.interpreters.batching import BatchTracer
from jax.interpreters.partial_eval import DynamicJaxprTracer
from jax.interpreters.pxla import ShardedDeviceArray
from jax.interpreters.xla import DeviceArray
import scico.blockarray
from scico.typing import Axes, JaxArray, Shape
# Module authorship metadata: authors joined into one newline-separated string.
__author__ = """\n""".join(
    [
        "Brendt Wohlberg <brendt@ieee.org>",
        "Luke Pfister <pfister@lanl.gov>",
        "Thilo Balke <thilo.balke@gmail.com>",
    ]
)
def ensure_on_device(
    *arrays: Union[np.ndarray, JaxArray, scico.blockarray.BlockArray]
) -> Union[JaxArray, scico.blockarray.BlockArray]:
    """Cast ndarrays to DeviceArrays, passing DeviceArrays, BlockArrays, and
    ShardedDeviceArrays through unchanged.

    Intended for use when initializing optimizers and functionals so that all
    arrays are either DeviceArrays, BlockArrays, or ShardedDeviceArrays.

    Args:
        *arrays: One or more input arrays (ndarray, DeviceArray, BlockArray,
            or ShardedDeviceArray).

    Returns:
        The single modified array when one argument was given, otherwise a
        list of arrays; only np.ndarrays are actually converted.

    Raises:
        TypeError: If any argument is neither ndarray, DeviceArray,
            BlockArray, nor ShardedDeviceArray.
    """
    checked = list(arrays)
    total = len(checked)
    for idx, array in enumerate(checked):
        if isinstance(array, np.ndarray):
            # Conversion is transparent but may be slow; let the caller know.
            warnings.warn(
                f"Argument {idx+1} of {total} is an np.ndarray. "
                f"Will cast it to DeviceArray. "
                f"To suppress this warning cast all np.ndarrays to DeviceArray first.",
                stacklevel=2,
            )
            checked[idx] = jax.device_put(array)
        elif not isinstance(
            array,
            (DeviceArray, scico.blockarray.BlockArray, ShardedDeviceArray),
        ):
            raise TypeError(
                f"Each item of `arrays` must be ndarray, DeviceArray, BlockArray, or ShardedDeviceArray; "
                f"Argument {idx+1} of {total} is {type(array)}."
            )
    if total == 1:
        return checked[0]
    return checked
def url_get(url: str, maxtry: int = 3, timeout: int = 10) -> io.BytesIO:  # pragma: no cover
    """Get content of a file via a URL.

    Args:
        url: URL of the file to be downloaded
        maxtry: Maximum number of download retries. Default: 3.
        timeout: Timeout in seconds for blocking operations. Default: 10

    Returns:
        Buffered I/O stream

    Raises:
        ValueError: If the maxtry parameter is not greater than zero
        urllib.error.URLError: If the file cannot be downloaded
    """
    if maxtry <= 0:
        raise ValueError("Parameter maxtry should be greater than zero")
    for ntry in range(maxtry):
        try:
            rspns = urlrequest.urlopen(url, timeout=timeout)
            cntnt = rspns.read()
            break
        except urlerror.URLError as e:
            # Only timeouts are retried; any other failure propagates at once.
            if not isinstance(e.reason, socket.timeout):
                raise
            # Fixed: if every attempt timed out, the original fell through the
            # loop with `cntnt` unbound and raised NameError below instead of
            # the URLError. Re-raise the timeout on the final attempt.
            if ntry == maxtry - 1:
                raise
    return io.BytesIO(cntnt)
def parse_axes(
    axes: Axes, shape: Optional[Shape] = None, default: Optional[List[int]] = None
) -> List[int]:
    """
    Normalize `axes` to a list and optionally ensure correctness.

    Normalize `axes` to a list and (optionally) ensure that entries refer to axes that exist
    in `shape`.

    Args:
        axes: user specification of one or more axes: int, list, tuple, or ``None``
        shape: the shape of the array of which axes are being specified.
            If not ``None``, `axes` is checked to make sure its entries refer to axes that
            exist in `shape`.
        default: default value to return if `axes` is ``None``. By default,
            `list(range(len(shape)))`.

    Returns:
        List of axes (never an int, never ``None``)

    Raises:
        ValueError: If `axes` cannot be interpreted, refers to an axis outside
            `shape`, or contains duplicate entries.
    """
    if axes is None:
        if default is None:
            if shape is None:
                raise ValueError("`axes` cannot be `None` without a default or shape specified")
            axes = list(range(len(shape)))
        else:
            axes = default
    elif isinstance(axes, (list, tuple)):
        # Normalize tuples to lists so the return type matches the annotation.
        axes = list(axes)
    elif isinstance(axes, int):
        # Fixed: a single int previously produced a tuple; return a list here too.
        axes = [axes]
    else:
        raise ValueError(f"Could not understand axes {axes} as a list of axes")
    if shape is not None and max(axes) >= len(shape):
        raise ValueError(
            f"Invalid axes {axes} specified; each axis must be less than `len(shape)`={len(shape)}"
        )
    if len(set(axes)) != len(axes):
        # Fixed: message lacked its f-prefix (axes never interpolated) and
        # misspelled "value".
        raise ValueError(f"Duplicate value in axes {axes}; each axis must be unique")
    return axes
def is_nested(x: Any) -> bool:
    """Check if input is a list/tuple containing at least one list/tuple.

    Args:
        x: Object to be tested.

    Returns:
        True if ``x`` is a list/tuple of list/tuples, False otherwise.

    Example:
        >>> is_nested([1, 2, 3])
        False
        >>> is_nested([(1,2), (3,)])
        True
        >>> is_nested([ [1, 2], 3])
        True
    """
    if not isinstance(x, (list, tuple)):
        return False
    # Nested as soon as any element is itself a list or tuple.
    return any(isinstance(item, (list, tuple)) for item in x)
def check_for_tracer(func: Callable) -> Callable:
    """Check if positional arguments to ``func`` are jax tracers.

    Intended as a decorator for functions that call external code from within
    SCICO. At present, external functions cannot be jit-ed or vmap/pmaped.
    The wrapper looks for signs of jit/vmap/pmap in the positional arguments
    and raises an appropriate exception before calling ``func``.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # jit leaves DynamicJaxprTracer instances in the arguments.
        if any(isinstance(arg, DynamicJaxprTracer) for arg in args):
            raise TypeError(
                f"DynamicJaxprTracer found in {func.__name__}; did you jit this function?"
            )
        # vmap/pmap leave BatchTracer instances in the arguments.
        if any(isinstance(arg, BatchTracer) for arg in args):
            raise TypeError(
                f"BatchTracer found in {func.__name__}; did you vmap/pmap this function?"
            )
        return func(*args, **kwargs)

    return wrapper
class Timer:
    """Timer class supporting multiple independent labeled timers.

    The timer is based on the relative time returned by
    :func:`timeit.default_timer`.
    """

    def __init__(
        self,
        labels: Optional[Union[str, List[str]]] = None,
        default_label: str = "main",
        all_label: str = "all",
    ):
        """
        Args:
            labels: Label(s) of the timer(s) to be initialised to zero.
            default_label: Default timer label to be used when methods are
                called without specifying a label.
            all_label: Label string that will be used to denote all timer
                labels.
        """
        # t0 maps label -> start time (None when that timer is stopped);
        # td maps label -> accumulated elapsed time.
        self.t0 = {}
        self.td = {}
        # Record default label and string indicating all labels
        self.default_label = default_label
        self.all_label = all_label
        # Initialise dictionary entries for labels to be created immediately
        if labels is not None:
            if not isinstance(labels, (list, tuple)):
                labels = [labels]
            for lbl in labels:
                self.td[lbl] = 0.0
                self.t0[lbl] = None

    def start(self, labels: Optional[Union[str, List[str]]] = None):
        """Start specified timer(s).

        Args:
            labels: Label(s) of the timer(s) to be started. If it is ``None``,
                start the default timer with label specified by the
                `default_label` parameter of :meth:`__init__`.
        """
        # Default label is self.default_label
        if labels is None:
            labels = self.default_label
        if not isinstance(labels, (list, tuple)):
            labels = [labels]
        # Read the clock once so all requested labels share the same start time
        t = timer()
        for lbl in labels:
            # On first call to start for a label, set its accumulator to zero
            if lbl not in self.td:
                self.td[lbl] = 0.0
                self.t0[lbl] = None
            # Record the start time only if this timer isn't already running
            if self.t0[lbl] is None:
                self.t0[lbl] = t

    def stop(self, labels: Optional[Union[str, List[str]]] = None):
        """Stop specified timer(s).

        Args:
            labels: Label(s) of the timer(s) to be stopped. If it is ``None``,
                stop the default timer with label specified by the
                `default_label` parameter of :meth:`__init__`. If it is equal
                to the string specified by the `all_label` parameter of
                :meth:`__init__`, stop all timers.

        Raises:
            KeyError: If any of the specified labels is not a known timer.
        """
        # Get current time
        t = timer()
        # Default label is self.default_label
        if labels is None:
            labels = self.default_label
        # All timers are affected if label is equal to self.all_label,
        # otherwise only the timer(s) specified by label
        if labels == self.all_label:
            labels = self.t0.keys()
        elif not isinstance(labels, (list, tuple)):
            labels = [labels]
        for lbl in labels:
            if lbl not in self.t0:
                raise KeyError(f"Unrecognized timer key {lbl}")
            # A start time of None means the timer is already stopped
            if self.t0[lbl] is not None:
                # Accumulate the elapsed time since the most recent start call
                self.td[lbl] += t - self.t0[lbl]
                self.t0[lbl] = None

    def reset(self, labels: Optional[Union[str, List[str]]] = None):
        """Reset specified timer(s) to zero and stop them.

        Args:
            labels: Label(s) of the timer(s) to be reset. If it is ``None``,
                reset the default timer with label specified by the
                `default_label` parameter of :meth:`__init__`. If it is equal
                to the string specified by the `all_label` parameter of
                :meth:`__init__`, reset all timers.

        Raises:
            KeyError: If any of the specified labels is not a known timer.
        """
        # Default label is self.default_label
        if labels is None:
            labels = self.default_label
        # All timers are affected if label is equal to self.all_label,
        # otherwise only the timer(s) specified by label
        if labels == self.all_label:
            labels = self.t0.keys()
        elif not isinstance(labels, (list, tuple)):
            labels = [labels]
        for lbl in labels:
            if lbl not in self.t0:
                raise KeyError(f"Unrecognized timer key {lbl}")
            # Stop the timer and zero its accumulator
            self.t0[lbl] = None
            self.td[lbl] = 0.0

    def elapsed(self, label: Optional[str] = None, total: bool = True) -> float:
        """Get elapsed time since timer start.

        Args:
            label: Label of the timer for which the elapsed time is required.
                If it is ``None``, the default timer with label specified by
                the `default_label` parameter of :meth:`__init__` is selected.
            total: If ``True`` return the total elapsed time since the first
                call of :meth:`start` for the selected timer, otherwise return
                the elapsed time since the most recent call of :meth:`start`
                for which there has not been a corresponding call to
                :meth:`stop`.

        Returns:
            Elapsed time. A label that has never been started (including the
            uninitialised default timer) reports 0.0.
        """
        # Get current time
        t = timer()
        # Default label is self.default_label
        if label is None:
            label = self.default_label
        # Unknown labels report 0.0. (The original also contained a second,
        # unreachable `label not in self.t0` check that raised KeyError; that
        # dead code has been removed.)
        if label not in self.t0:
            return 0.0
        # If total flag is True return sum of accumulated time from
        # previous start/stop calls and current start call, otherwise
        # return just the time since the current start call
        te = 0.0
        if self.t0[label] is not None:
            te = t - self.t0[label]
        if total:
            te += self.td[label]
        return te

    def labels(self) -> List[str]:
        """Get a list of timer labels.

        Returns:
            List of timer labels.
        """
        # Materialized as a list to match the declared return type (the
        # original returned a dict keys view).
        return list(self.t0)

    def __str__(self) -> str:
        """Return string representation of object.

        The representation consists of a table with the following columns:

        * Timer label
        * Accumulated time from past start/stop calls
        * Time since current start call, or 'Stopped' if timer is not
          currently running
        """
        # Get current time
        t = timer()
        # Length of label field, calculated from max label length
        fldlen = [len(lbl) for lbl in self.t0] + [len(self.default_label)]
        lfldln = max(fldlen) + 2
        # Header string for table of timers
        s = f"{'Label':{lfldln}s} Accum. Current\n"
        s += "-" * (lfldln + 25) + "\n"
        # Construct table of timer details
        for lbl in sorted(self.t0):
            td = self.td[lbl]
            if self.t0[lbl] is None:
                ts = " Stopped"
            else:
                # Fixed: the original applied '%'-formatting to an f-string
                # containing no '%' specifiers, raising TypeError whenever a
                # timer was running.
                ts = f" {(t - self.t0[lbl]):.2e} s"
            s += f"{lbl:{lfldln}s} {td:.2e} s {ts}\n"
        return s
class ContextTimer:
    """A wrapper class for :class:`Timer` that enables its use as a
    context manager.

    For example, instead of

    >>> t = Timer()
    >>> t.start()
    >>> do_something()
    >>> t.stop()
    >>> elapsed = t.elapsed()

    one can use

    >>> t = Timer()
    >>> with ContextTimer(t):
    ...     do_something()
    >>> elapsed = t.elapsed()
    """

    def __init__(
        self,
        timer: Optional[Timer] = None,
        label: Optional[str] = None,
        action: str = "StartStop",
    ):
        """
        Args:
            timer: Timer object to be used as a context manager. If ``None``,
                a new :class:`Timer` object is constructed.
            label: Label of the timer to be used. If it is ``None``, start the
                default timer.
            action: Actions to be taken on context entry and exit. If the
                value is 'StartStop', start the timer on entry and stop on
                exit; if it is 'StopStart', stop the timer on entry and start
                it on exit.
        """
        if action not in ("StartStop", "StopStart"):
            raise ValueError(f"Unrecognized action {action}")
        # Construct a fresh Timer when the caller did not supply one.
        self.timer = Timer() if timer is None else timer
        self.label = label
        self.action = action

    def __enter__(self):
        """Start (or, for 'StopStart', stop) the timer and return this
        ContextTimer instance.
        """
        entry = self.timer.start if self.action == "StartStop" else self.timer.stop
        entry(self.label)
        return self

    def __exit__(self, type, value, traceback):
        """Stop (or, for 'StopStart', start) the timer. Return True if no
        exception was raised within the 'with' block, otherwise False.
        """
        exit_action = self.timer.stop if self.action == "StartStop" else self.timer.start
        exit_action(self.label)
        return not type

    def elapsed(self, total: bool = True) -> float:
        """Return the elapsed time for the timer.

        Args:
            total: If ``True`` return the total elapsed time since the first
                call of :meth:`start` for the selected timer, otherwise return
                the elapsed time since the most recent call of :meth:`start`
                for which there has not been a corresponding call to
                :meth:`stop`.

        Returns:
            Elapsed time.
        """
        return self.timer.elapsed(self.label, total=total)
|
{"hexsha": "d36b5f60a4042bca3c244d8ae4e28b03afcc1ac9", "size": 17307, "ext": "py", "lang": "Python", "max_stars_repo_path": "scico/util.py", "max_stars_repo_name": "lukepfister/scico", "max_stars_repo_head_hexsha": "c849c4fa6089b99d9a4dec520c9a04cca426d2d7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scico/util.py", "max_issues_repo_name": "lukepfister/scico", "max_issues_repo_head_hexsha": "c849c4fa6089b99d9a4dec520c9a04cca426d2d7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scico/util.py", "max_forks_repo_name": "lukepfister/scico", "max_forks_repo_head_hexsha": "c849c4fa6089b99d9a4dec520c9a04cca426d2d7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6712062257, "max_line_length": 106, "alphanum_fraction": 0.5833477784, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 3982}
|
#!/usr/bin/python3
'''Copyright (c) 2018 Mozilla
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# Train a LPCNet model
import lpcnet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py
import argparse
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import matplotlib.pyplot as plt
# less verbose tensorflow logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config = tf.ConfigProto()
# use this option to reserve GPU memory, e.g. for running more than
# one thing at a time. Best to disable for GPUs with small memory
#config.gpu_options.per_process_gpu_memory_fraction = 0.44
set_session(tf.Session(config=config))
# Try reducing batch_size if you run out of memory on your GPU
batch_size = 32
# width of feature records used for training
nb_features = 55
parser = argparse.ArgumentParser(description='LPCNet training')
parser.add_argument('feature_file', help='.f32 file of float features')
parser.add_argument('packed_ulaw_file', help='file of 4 multiplexed ulaw samples per speech sample')
parser.add_argument('prefix', help='.h5 file prefix to easily identify each experiment')
parser.add_argument('--frame_size', type=int, default=160, help='frames size in samples')
parser.add_argument('--epochs', type=int, default=20, help='Number of training epochs')
parser.add_argument('--no_pitch_embedding', action='store_true', help='disable pitch embedding')
# NOTE(review): the help string below looks copy-pasted from
# --no_pitch_embedding; --load_h5 actually names a weights file to resume from
parser.add_argument('--load_h5', help='disable pitch embedding')
args = parser.parse_args()
nb_epochs = args.epochs
# Build the training-mode LPCNet model
model, _, _ = lpcnet.new_lpcnet_model(frame_size=args.frame_size, training=True)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.summary()
# Optionally resume training from previously saved weights
if args.load_h5:
    print("loading: %s" % (args.load_h5))
    model.load_weights(args.load_h5)
feature_file = args.feature_file
pcm_file = args.packed_ulaw_file
prefix = args.prefix
frame_size = model.frame_size
nb_used_features = model.nb_used_features
feature_chunk_size = 15 # time window for conv1d/receptive field
pcm_chunk_size = frame_size*feature_chunk_size
# u for unquantised, load 16 bit PCM samples and convert to mu-law
data = np.fromfile(pcm_file, dtype='uint8')
nb_frames = len(data)//(4*pcm_chunk_size)
features = np.fromfile(feature_file, dtype='float32')
# limit to discrete number of frames
data = data[:nb_frames*4*pcm_chunk_size]
features = features[:nb_frames*feature_chunk_size*nb_features]
features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
# de-multiplex the 4 interleaved ulaw streams (one byte each per sample):
# signal, prediction, input excitation, output excitation
sig = np.reshape(data[0::4], (nb_frames, pcm_chunk_size, 1))
pred = np.reshape(data[1::4], (nb_frames, pcm_chunk_size, 1))
in_exc = np.reshape(data[2::4], (nb_frames, pcm_chunk_size, 1))
out_exc = np.reshape(data[3::4], (nb_frames, pcm_chunk_size, 1))
del data
"""
# plot ulaw signals to sanity check
testf=10
print(sig.shape)
#plt.plot(sig[testf,:],label="sig")
#plt.plot(pred[testf,:],label="pred")
plt.plot(in_exc[testf,:],label="in_exc")
plt.plot(out_exc[testf,:],label="out_exc")
plt.legend()
plt.show()
"""
features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
features = features[:, :, :nb_used_features]
# 0..37 features total
# 0..17 cepstrals, 36 = pitch, 37 = pitch gain, 38 = lpc-gain
# nb_used_features=38, so 0...37, so lpc-gain not used
features[:,:,18:36] = 0 # zero out 18..35, so pitch and pitch gain being fed in, lpc gain ignored
"""
# plot features to sanity check
print(features.shape)
testf=10
plt.plot(features[testf,:,37:38])
plt.show()
"""
# pad each chunk with two feature frames taken from the neighbouring chunks
# at either end (context for the conv1d receptive field)
fpad1 = np.concatenate([features[0:1, 0:2, :], features[:-1, -2:, :]], axis=0)
fpad2 = np.concatenate([features[1:, :2, :], features[0:1, -2:, :]], axis=0)
features = np.concatenate([fpad1, features, fpad2], axis=1)
# map the pitch feature (index 36) to an integer period used by the
# embedding layer (presumably in samples — verify against the model)
periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
print(periods.shape)
if args.no_pitch_embedding:
    print("no_pitch_embedding")
    periods[:] = 0
# sanity check training data against pitch embedding range
# NOTE(review): when --no_pitch_embedding is given, periods was just zeroed
# above, so the `periods >= 40` assert below will fail — confirm intended order
assert np.all(periods >= 40), "pitch embedding < 40"
assert np.all(periods < 256), "pitch embeddeding > 255"
"""
# plot pitch to sanity check
print(features.shape, periods.shape)
plt.plot(periods.reshape(-1)[:1000])
plt.show()
"""
# network input: signal, prediction and input excitation stacked channel-wise
in_data = np.concatenate([sig, pred, in_exc], axis=-1)
del sig
del pred
del in_exc
# dump models to disk as we go
#checkpoint = ModelCheckpoint('lpcnet20h_384_10_G16_{epoch:02d}.h5')
checkpoint = ModelCheckpoint(prefix + '_{epoch:d}.h5')
# use this to reload a partially trained model
model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy')
model.fit([in_data, features, periods], out_exc, batch_size=batch_size, epochs=nb_epochs, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, (0.05, 0.05, 0.2))])
|
{"hexsha": "94ab9a8ab007c5bbdd7447729275bf4c09e4b901", "size": 6198, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train_lpcnet.py", "max_stars_repo_name": "dave-g8kbb/LPCNet", "max_stars_repo_head_hexsha": "7a74323056480ae54805ee873c0183730ffdc6e7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2019-03-26T09:21:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T13:43:16.000Z", "max_issues_repo_path": "src/train_lpcnet.py", "max_issues_repo_name": "dave-g8kbb/LPCNet", "max_issues_repo_head_hexsha": "7a74323056480ae54805ee873c0183730ffdc6e7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2019-03-26T20:05:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-11T05:16:42.000Z", "max_forks_repo_path": "src/train_lpcnet.py", "max_forks_repo_name": "dave-g8kbb/LPCNet", "max_forks_repo_head_hexsha": "7a74323056480ae54805ee873c0183730ffdc6e7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2019-04-18T11:57:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T00:52:59.000Z", "avg_line_length": 36.8928571429, "max_line_length": 167, "alphanum_fraction": 0.752178122, "include": true, "reason": "import numpy", "num_tokens": 1576}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 4 10:26:10 2018
@author: ai
"""
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
def soft_jaccard(outputs, targets):
    """Soft Jaccard index between raw logits and binary targets.

    Args:
        outputs: raw (pre-sigmoid) predictions
        targets: tensor whose entries equal to 1 mark the positive class

    Returns:
        Scalar tensor: intersection / (union - intersection + eps).
    """
    eps = 1e-15
    jaccard_target = (targets == 1).float()
    # torch.sigmoid replaces F.sigmoid, which is deprecated
    jaccard_output = torch.sigmoid(outputs)

    intersection = (jaccard_output * jaccard_target).sum()
    union = jaccard_output.sum() + jaccard_target.sum()
    return intersection / (union - intersection + eps)
class LossBinary:
    """
    Loss defined as BCE - log(soft_jaccard)

    Vladimir Iglovikov, Sergey Mushinskiy, Vladimir Osin,
    Satellite Imagery Feature Detection using Deep Convolutional Neural Network: A Kaggle Competition
    arXiv:1706.06169
    """

    def __init__(self, jaccard_weight=0):
        # BCE term operates on raw logits; jaccard_weight blends in the
        # soft-Jaccard term when non-zero.
        self.nll_loss = nn.BCEWithLogitsLoss()
        self.jaccard_weight = jaccard_weight

    def __call__(self, outputs, targets):
        bce_term = (1 - self.jaccard_weight) * self.nll_loss(outputs, targets)
        if not self.jaccard_weight:
            return bce_term
        return bce_term + self.jaccard_weight * (1 - soft_jaccard(outputs, targets))
class FocalLoss2d(nn.Module):
    """Binary focal loss on raw logits.

    Per element, computes -w * (1 - p_true)**gamma * log(p_true), where p_true
    is the predicted probability of the true class and w is an optional
    per-class weight.
    """

    def __init__(self, gamma=2, size_average=True):
        """
        Args:
            gamma: focusing exponent applied to (1 - p_true)
            size_average: if True return the mean loss, else per-element losses
        """
        super(FocalLoss2d, self).__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, logit, target, class_weight=None):
        """Compute the focal loss.

        Args:
            logit: raw predictions (any shape; flattened internally)
            target: 0/1 labels, same number of elements as logit
            class_weight: optional two-element [w_neg, w_pos]; defaults to [1, 1]
        """
        # Generalization: follow the input tensor's device instead of the
        # original hard-coded .cuda(), so the loss also works on CPU.
        device = logit.device
        target = target.view(-1, 1).long()
        if class_weight is None:
            class_weight = [1]*2  # [0.5, 0.5]

        # torch.sigmoid replaces F.sigmoid, which is deprecated
        prob = torch.sigmoid(logit)
        prob = prob.view(-1, 1)
        # Column 0: probability of class 0; column 1: probability of class 1
        prob = torch.cat((1-prob, prob), 1)
        # One-hot selector for the true class of each element
        select = torch.zeros(len(prob), 2, device=device)
        select.scatter_(1, target, 1.)

        class_weight = torch.tensor(class_weight, dtype=torch.float32, device=device).view(-1, 1)
        class_weight = torch.gather(class_weight, 0, target)

        # Probability assigned to the true class, clamped for log stability
        prob = (prob*select).sum(1).view(-1, 1)
        prob = torch.clamp(prob, 1e-8, 1-1e-8)
        batch_loss = - class_weight * (torch.pow((1-prob), self.gamma)) * prob.log()

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss

        return loss
## http://geek.csdn.net/news/detail/126833
class PseudoBCELoss2d(nn.Module):
def __init__(self):
super(PseudoBCELoss2d, self).__init__()
def forward(self, logit, truth):
z = logit.view (-1)
t = truth.view (-1)
loss = z.clamp(min=0) - z*t + torch.log(1 + torch.exp(-z.abs()))
loss = loss.sum()/len(t) #w.sum()
return loss
###############################################################################
def calc_iou(actual, pred):
    """Intersection-over-union of two masks; 0 when the union is empty."""
    overlap = np.count_nonzero(actual * pred)
    combined = np.count_nonzero(actual) + np.count_nonzero(pred) - overlap
    if combined == 0:
        return 0.
    return overlap / combined
def calc_ious(actuals, preds):
    """Per-sample IoU for paired sequences of masks, as a 1-D array."""
    return np.array([calc_iou(mask, pred) for mask, pred in zip(actuals, preds)])
def calc_precisions(thresholds, ious):
    """For each IoU value, the fraction of thresholds it strictly exceeds."""
    threshold_row = np.reshape(thresholds, (1, -1))
    iou_column = np.reshape(ious, (-1, 1))
    return (iou_column > threshold_row).mean(axis=1)
def indiv_scores(masks, preds):
    """Per-sample competition precision: mean over the 10 IoU thresholds,
    with the empty-mask adjustment (empty/empty scores 1, empty/non-empty 0).

    NOTE(review): binarization mutates `masks` and `preds` in place — callers
    that still need the originals should pass copies; preserved for compatibility.
    """
    masks[masks > 0] = 1
    preds[preds > 0] = 1
    ious = calc_ious(masks, preds)
    thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
    precisions = calc_precisions(thresholds, ious)
    ###### Adjust score for empty masks
    emptyMasks = np.count_nonzero(masks.reshape((len(masks), -1)), axis=1) == 0
    emptyPreds = np.count_nonzero(preds.reshape((len(preds), -1)), axis=1) == 0
    # np.float was removed in NumPy 1.24; the builtin float is the replacement
    adjust = (emptyMasks == emptyPreds).astype(float)
    precisions[emptyMasks] = adjust[emptyMasks]
    ###################
    return precisions
def calc_metric(masks, preds):
    """Mean of the per-sample competition scores for the whole batch."""
    scores = indiv_scores(masks, preds)
    return np.mean(scores)
def do_kaggle_metric(predict, truth, threshold=0.5):
    """Kaggle TGS-style metric over a batch of N samples.

    predict: raw scores in (-inf, +inf), any trailing shape
    truth:   ground truth in [0, 1]
    threshold: cutoff applied to `predict`

    Returns (precision, result, thresholds):
    precision: (N,) mean precision over the 10 IoU thresholds 0.50..0.95
    result:    (N, 5, 10) boolean tp/fp/fn/tn_empty/fp_empty flags per threshold
    thresholds: the (10,) IoU threshold array
    """
    EPS = 1e-12
    N = len(predict)
    predict = predict.reshape(N, -1)
    truth = truth.reshape(N, -1)
    predict = predict > threshold
    truth = truth > 0.5
    intersection = truth & predict
    union = truth | predict
    iou = intersection.sum(1) / (union.sum(1) + EPS)
    #-------------------------------------------
    result = []
    precision = []
    is_empty_truth = (truth.sum(1) == 0)
    is_empty_predict = (predict.sum(1) == 0)
    threshold = np.array([0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95])
    for t in threshold:
        # (removed a dead `p = iou >= t` that was immediately overwritten below;
        #  NOTE(review): tp keeps the original strict `iou > t` — the dead line
        #  hinted at >=, confirm against the intended competition definition)
        tp = (~is_empty_truth) & (~is_empty_predict) & (iou > t)
        fp = (~is_empty_truth) & (~is_empty_predict) & (iou <= t)
        fn = (~is_empty_truth) & (is_empty_predict)
        fp_empty = (is_empty_truth) & (~is_empty_predict)
        tn_empty = (is_empty_truth) & (is_empty_predict)
        # exactly one flag is set per sample, so the denominator is always 1
        p = (tp + tn_empty) / (tp + tn_empty + fp + fp_empty + fn)
        result.append(np.column_stack((tp, fp, fn, tn_empty, fp_empty)))
        precision.append(p)
    result = np.array(result).transpose(1, 2, 0)
    precision = np.column_stack(precision)
    precision = precision.mean(1)
    return precision, result, threshold
class RobustFocalLoss2d(nn.Module):
    """Focal loss with the focusing factor clamped to [0, 2].

    The clamp caps the weight of very-hard pixels (assumes roughly the top 10%
    are outliers / label noise) so they cannot dominate the gradient.
    """

    def __init__(self, gamma=2, size_average=True):
        super(RobustFocalLoss2d, self).__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, logit, target, class_weight=None):
        target = target.view(-1, 1).long()
        if class_weight is None:
            class_weight = [1] * 2  # uniform weighting for the two classes
        # torch.sigmoid replaces the long-deprecated F.sigmoid
        prob = torch.sigmoid(logit)
        prob = prob.view(-1, 1)
        prob = torch.cat((1 - prob, prob), 1)
        # one-hot selector for the true class; allocate on the input's device
        # instead of hard-coding .cuda() so CPU tensors work as well
        select = torch.zeros(len(prob), 2, device=logit.device)
        select.scatter_(1, target, 1.)
        class_weight = torch.tensor(class_weight, dtype=torch.float32,
                                    device=logit.device).view(-1, 1)
        class_weight = torch.gather(class_weight, 0, target)
        prob = (prob * select).sum(1).view(-1, 1)
        prob = torch.clamp(prob, 1e-8, 1 - 1e-8)
        # cap the focusing term so extreme (likely mislabeled) pixels cannot dominate
        focus = torch.pow((1 - prob), self.gamma)
        focus = torch.clamp(focus, 0, 2)
        batch_loss = -class_weight * focus * prob.log()
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss
        return loss
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse
def mean(l, ignore_nan=False, empty=0):
    """nanmean compatible with generators.

    l: iterable of numbers. ignore_nan drops NaN entries. `empty` is the value
    returned for an empty input, unless it is the string 'raise', in which
    case a ValueError is raised instead.
    """
    l = iter(l)
    if ignore_nan:
        # The original called ifilterfalse, which is a NameError on Python 3
        # (itertools only provides filterfalse there). A generator expression
        # is equally lazy and works on both Python versions.
        l = (v for v in l if not np.isnan(v))
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        # single-element input: the loop never ran, return the element itself
        return acc
    return acc / n
def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors (Alg. 1 in the
    Lovasz-Softmax paper). gt_sorted: ground truth sorted by decreasing error."""
    num = len(gt_sorted)
    total_fg = gt_sorted.sum()
    intersection = total_fg - gt_sorted.float().cumsum(0)
    union = total_fg + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if num > 1:  # cover the 1-pixel case
        jaccard[1:num] = jaccard[1:num] - jaccard[0:-1]
    return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
    """IoU (in percent) for the foreground class of binary masks.

    preds/labels use 1 = foreground, 0 = background; when per_image is False
    the whole batch is treated as one image. EMPTY is the score for an empty
    union; pixels labelled `ignore` are excluded from the union.
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        intersection = ((label == 1) & (pred == 1)).sum()
        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
        if not union:
            iou = EMPTY
        else:
            iou = float(intersection) / union
        ious.append(iou)
    # `ious` is a plain Python list, which has no .mean(); the original
    # `ious.mean()` raised AttributeError on every call
    iou = np.mean(ious)  # mean across images if per_image
    return 100 * iou
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """Array of IoU (percent) for each of the C classes, skipping `ignore`."""
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        per_class = []
        for i in range(C):
            if i != ignore:  # The ignored label is sometimes among predicted classes (ENet - CityScapes)
                intersection = ((label == i) & (pred == i)).sum()
                union = ((label == i) | ((pred == i) & (label != ignore))).sum()
                if not union:
                    per_class.append(EMPTY)
                else:
                    per_class.append(float(intersection) / union)
        ious.append(per_class)
    # materialize the per-class means: on Python 3 `map` returns an iterator
    # and np.array(<map object>) silently builds a useless 0-d object array
    ious = [mean(class_ious) for class_ious in zip(*ious)]  # mean across images if per_image
    return 100 * np.array(ious)
#--------------------------- BINARY LOSSES ---------------------------
class Binary_lovasz_dice:
    """Weighted combination of (1 - soft Jaccard) and the Lovasz hinge loss.

    Vladimir Iglovikov, Sergey Mushinskiy, Vladimir Osin,
    Satellite Imagery Feature Detection using Deep Convolutional Neural
    Network: A Kaggle Competition. arXiv:1706.06169
    """

    def __init__(self, weight=0, per_image=True, ignore=None):
        self.weight = weight
        self.per_image = per_image
        self.ignore = ignore

    def __call__(self, outputs, targets):
        jaccard_part = (1 - self.weight) * (1 - soft_jaccard(outputs, targets))
        if not self.weight:
            return jaccard_part
        hinge = lovasz_hinge(per_image=self.per_image, ignore=self.ignore)
        return jaccard_part + self.weight * hinge(outputs, targets)
class LossBinary_lovaz:
    """BCE-with-logits blended with the Lovasz hinge loss.

    Vladimir Iglovikov, Sergey Mushinskiy, Vladimir Osin,
    Satellite Imagery Feature Detection using Deep Convolutional Neural
    Network: A Kaggle Competition. arXiv:1706.06169
    """

    def __init__(self, weight=0, per_image=True, ignore=None):
        self.nll_loss = nn.BCEWithLogitsLoss()
        self.weight = weight
        # per_image/ignore were accepted but never stored, so __call__ crashed
        # on self.per_image / self.ignore whenever weight was non-zero
        self.per_image = per_image
        self.ignore = ignore

    def __call__(self, outputs, targets):
        loss = (1 - self.weight) * self.nll_loss(outputs, targets)
        # the original tested self.jaccard_weight, an attribute this class
        # never defines (copy-paste from the Jaccard variant of this loss)
        if self.weight:
            loss += self.weight * (lovasz_hinge(per_image=self.per_image,
                                                ignore=self.ignore)(outputs, targets))
        return loss
class lovasz_hinge:
    """Binary Lovasz hinge loss.

    logits: [B, H, W] per-pixel logits (in (-inf, +inf))
    labels: [B, H, W] binary ground-truth masks (0 or 1)
    per_image: average the loss per image instead of over the whole batch
    ignore: void class id excluded from the loss
    """

    def __init__(self, per_image=True, ignore=None):
        self.per_image = per_image
        self.ignore = ignore

    def __call__(self, outputs, targets):
        if not self.per_image:
            return lovasz_hinge_flat(*flatten_binary_scores(outputs, targets, self.ignore))
        per_sample = (
            lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0),
                                                     lab.unsqueeze(0), self.ignore))
            for log, lab in zip(outputs, targets)
        )
        return mean(per_sample)
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened predictions.

    logits: [P] per-prediction logits (in (-inf, +inf))
    labels: [P] binary ground-truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels: keep the graph alive but contribute zero gradient
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    hinge_errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(hinge_errors, dim=0, descending=True)
    gt_sorted = labels[perm.data]
    grad = lovasz_grad(gt_sorted)
    return torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
def flatten_binary_scores(scores, labels, ignore=None):
    """Flatten batch scores/labels and drop positions whose label equals `ignore`."""
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    keep = (labels != ignore)
    return scores[keep], labels[keep]
class StableBCELoss(torch.nn.modules.Module):
    """Numerically stable binary cross-entropy on logits (mean reduction)."""

    def __init__(self):
        super(StableBCELoss, self).__init__()

    def forward(self, input, target):
        # max(x, 0) - x*t + log(1 + exp(-|x|)) == BCE-with-logits, stable for large |x|
        stable_log_term = (1 + (-input.abs()).exp()).log()
        elementwise = input.clamp(min=0) - input * target + stable_log_term
        return elementwise.mean()
def binary_xloss(logits, labels, ignore=None):
    """Binary cross-entropy on logits, skipping pixels labelled `ignore`.

    logits: [B, H, W] per-pixel logits; labels: [B, H, W] binary masks.
    """
    flat_logits, flat_labels = flatten_binary_scores(logits, labels, ignore)
    return StableBCELoss()(flat_logits, Variable(flat_labels.float()))
# --------------------------- MULTICLASS LOSSES ---------------------------
def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None):
    """Multi-class Lovasz-Softmax loss.

    probas: [B, C, H, W] class probabilities (in [0, 1])
    labels: [B, H, W] ground-truth labels (in [0, C-1])
    only_present: average only over classes present in the ground truth
    per_image: compute the loss per image instead of per batch
    ignore: void class label
    """
    if not per_image:
        return lovasz_softmax_flat(*flatten_probas(probas, labels, ignore),
                                   only_present=only_present)
    per_sample = (
        lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
                            only_present=only_present)
        for prob, lab in zip(probas, labels)
    )
    return mean(per_sample)
def lovasz_softmax_flat(probas, labels, only_present=False):
    """
    Multi-class Lovasz-Softmax loss
    probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
    labels: [P] Tensor, ground truth labels (between 0 and C - 1)
    only_present: average only on classes present in ground truth
    """
    C = probas.size(1)
    losses = []
    for c in range(C):
        fg = (labels == c).float()  # foreground for class c
        if only_present and fg.sum() == 0:
            continue
        # absolute error between the binary foreground mask and the class probability
        errors = (Variable(fg) - probas[:, c]).abs()
        # sort errors descending; the Lovasz gradient is taken over the ground
        # truth permuted into the same order
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    # `mean` is this file's generator-safe helper; it returns its `empty`
    # default (0) when only_present filtered out every class
    return mean(losses)
def flatten_probas(probas, labels, ignore=None):
    """Flatten [B, C, H, W] probabilities to [P, C] (and labels to [P]),
    optionally dropping positions labelled `ignore`."""
    B, C, H, W = probas.size()
    flat_probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B*H*W, C = P, C
    flat_labels = labels.view(-1)
    if ignore is None:
        return flat_probas, flat_labels
    valid = (flat_labels != ignore)
    return flat_probas[valid.nonzero().squeeze()], flat_labels[valid]
def xloss(logits, labels, ignore=None):
    """Cross entropy loss; `ignore` is forwarded as ignore_index.

    The original hard-coded ignore_index=255 and silently discarded the
    `ignore` argument; 255 is kept as the default so existing callers that
    pass nothing (or None) see identical behavior.
    """
    ignore_index = 255 if ignore is None else ignore
    return F.cross_entropy(logits, Variable(labels), ignore_index=ignore_index)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# Any results you write to the current directory are saved as output.
def get_iou_vector(A, B):
    """Mean competition score for a batch: per-sample IoU swept over the
    thresholds 0.5..0.95 (step 0.05), with the usual empty-mask rules.

    A: batch of ground-truth masks; B: batch of predicted masks.
    NOTE(review): the squeeze(1) plus jaccard's .sum(dim=...) suggest the
    per-sample entries are tensors with at least two trailing dims —
    confirm against the callers before reuse.
    """
    batch_size = A.shape[0]
    metric = []
    for batch in range(batch_size):
        t, p = A[batch].squeeze(1), B[batch].squeeze(1)
        # empty ground truth: score 0 if anything was predicted, 1 if nothing was
        if np.count_nonzero(t) == 0 and np.count_nonzero(p) > 0:
            metric.append(0)
            continue
        if np.count_nonzero(t) == 0 and np.count_nonzero(p) == 0:
            metric.append(1)
            continue
        iou = jaccard(t, p)
        # fraction of thresholds the soft IoU clears
        thresholds = np.arange(0.5, 1, 0.05)
        s = []
        for thresh in thresholds:
            s.append(iou > thresh)
        metric.append(np.mean(s))
    return np.mean(metric)
def jaccard(y_true, y_pred):
    """Soft Jaccard index averaged over the batch, smoothed by a small epsilon."""
    epsilon = 1e-15
    intersection = (y_pred * y_true).sum(dim=-2).sum(dim=-1)
    total = y_true.sum(dim=-2).sum(dim=-1) + y_pred.sum(dim=-2).sum(dim=-1)
    union = total - intersection
    return ((intersection + epsilon) / (union + epsilon)).mean()
|
{"hexsha": "45e57cd85738e2270b023127874e4b34f48b7e77", "size": 16439, "ext": "py", "lang": "Python", "max_stars_repo_path": "loss_function.py", "max_stars_repo_name": "hphuongdhsp/Kaggle-TGS-Salt-Identification-Challenge", "max_stars_repo_head_hexsha": "4c1f9749f772f318473921f6d213af89cd4c0df2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-11-04T13:40:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-08T02:49:05.000Z", "max_issues_repo_path": "loss_function.py", "max_issues_repo_name": "hphuongdhsp/Kaggle-TGS-Salt-Identification-Challenge", "max_issues_repo_head_hexsha": "4c1f9749f772f318473921f6d213af89cd4c0df2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "loss_function.py", "max_forks_repo_name": "hphuongdhsp/Kaggle-TGS-Salt-Identification-Challenge", "max_forks_repo_head_hexsha": "4c1f9749f772f318473921f6d213af89cd4c0df2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-12T18:03:41.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-12T18:03:41.000Z", "avg_line_length": 31.5527831094, "max_line_length": 128, "alphanum_fraction": 0.6098302816, "include": true, "reason": "import numpy", "num_tokens": 4506}
|
[STATEMENT]
lemma lc_mult_scalar_monomial_right:
"lc (p \<odot> monomial c v) = punit.lc p * (c::'b::semiring_no_zero_divisors)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
proof (cases "c = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. c = (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
2. c \<noteq> (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
c = (0::'b)
goal (2 subgoals):
1. c = (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
2. c \<noteq> (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
c = (0::'b)
goal (1 subgoal):
1. lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lc (p \<odot> monomial c v) = punit.lc p * c
goal (1 subgoal):
1. c \<noteq> (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. c \<noteq> (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
c \<noteq> (0::'b)
goal (1 subgoal):
1. c \<noteq> (0::'b) \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
proof (cases "p = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. p = 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
2. p \<noteq> 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
p = 0
goal (2 subgoals):
1. p = 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
2. p \<noteq> 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
p = 0
goal (1 subgoal):
1. lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lc (p \<odot> monomial c v) = punit.lc p * c
goal (1 subgoal):
1. p \<noteq> 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. p \<noteq> 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
p \<noteq> 0
goal (1 subgoal):
1. p \<noteq> 0 \<Longrightarrow> lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
with \<open>c \<noteq> 0\<close>
[PROOF STATE]
proof (chain)
picking this:
c \<noteq> (0::'b)
p \<noteq> 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
c \<noteq> (0::'b)
p \<noteq> 0
goal (1 subgoal):
1. lc (p \<odot> monomial c v) = punit.lc p * c
[PROOF STEP]
by (simp add: punit.lc_def lc_def lt_mult_scalar_monomial_right lookup_mult_scalar_monomial_right_plus)
[PROOF STATE]
proof (state)
this:
lc (p \<odot> monomial c v) = punit.lc p * c
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
lc (p \<odot> monomial c v) = punit.lc p * c
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1534, "file": "Polynomials_MPoly_Type_Class_Ordered", "length": 18}
|
subroutine killop
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine performs the kill operation
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! bio_ms(:) |kg/ha |land cover/crop biomass (dry weight)
!! curyr |none |current year of simulation
!! hrupest(:) |none |pesticide use flag:
!! | 0: no pesticides used in HRU
!! | 1: pesticides used in HRU
!! icr(:) |none |sequence number of crop grown within the
!! |current year
!! ihru |none |HRU number
!! ncrops(:,:,:)|
!! npmx |none |number of different pesticides used in
!! |the simulation
!! nro(:) |none |sequence number for year in rotation
!! nyskip |none |number of years to skip output printing/
!! |summarization
!! plt_pst(:,:)|kg/ha |pesticide on plant foliage
!! sol_fon(:,:) |kg N/ha |amount of nitrogen stored in the fresh
!! |organic (residue) pool
!! sol_fop(:,:) |kg P/ha |amount of phosphorus stored in the fresh
!! |organic (residue) pool
!! sol_pst(:,:,1)|kg/ha |pesticide in first layer of soil
!! sol_rsd(:,:) |kg/ha |amount of organic matter in the soil
!! |classified as residue
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ OUTGOING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! bio_ms(:) |kg/ha |land cover/crop biomass (dry weight)
!! idorm(:) |none |dormancy status code:
!! |0 land cover growing (not dormant)
!! |1 land cover dormant
!! igro(:) |none |land cover status code:
!! |0 no land cover currently growing
!! |1 land cover growing
!! laiday(:) |m**2/m**2 |leaf area index
!! ncrops(:,:,:)|
!! phuacc(:) |none |fraction of plant heat units accumulated
!! plantn(:) |kg N/ha |amount of nitrogen in plant biomass
!! plantp(:) |kg P/ha |amount of phosphorus in plant biomass
!! plt_pst(:,:) |kg/ha |pesticide on plant foliage
!! sol_fon(:,:) |kg N/ha |amount of nitrogen stored in the fresh
!! |organic (residue) pool
!! sol_fop(:,:) |kg P/ha |amount of phosphorus stored in the fresh
!! |organic (residue) pool
!! sol_pst(:,:,1)|kg/ha |pesticide in first layer of soil
!! sol_rsd(:,:) |kg/ha |amount of organic matter in the soil
!! |classified as residue
!! strsw(:) |none |fraction of potential plant growth achieved
!! |on the day where the reduction is caused by
!! |water stress
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! j |none |HRU number
!! k |none |counter
!! resnew |
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ SUBROUTINES/FUNCTIONS CALLED ~ ~ ~
!! Intrinsic: Max
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use parm
integer :: j, k
real :: resnew
!!by zhang
!!====================
!! locals for the CENTURY-style (cswat == 2) carbon/nitrogen partitioning
real :: BLG1, BLG2, BLG3, CLG, sf
real :: sol_min_n, resnew_n, resnew_ne
real :: LMF, LSF, LSLF, LSNF,LMNF
orgc_f = 0.
BLG1 = 0.
BLG2 = 0.
BLG3 = 0.
CLG = 0.
sf = 0.
sol_min_n = 0.
resnew = 0.
resnew_n = 0.
resnew_ne = 0.
LMF = 0.
LSF = 0.
LSLF = 0.
LSNF = 0.
LMNF = 0.
!!by zhang
!!====================
j = 0
j = ihru
! if (curyr > nyskip) then
! ncrops(icr(j),j) = ncrops(icr(j),j) + 1
! endif
!! 22 January 2008
!! split the killed biomass into above-ground residue and root residue
!! using the root weight fraction rwt(j)
resnew = 0.
rtresnew = 0.
resnew = bio_ms(j) * (1. - rwt(j))
rtresnew = bio_ms(j) * rwt(j)
! rootfr fills rtfr(:) with the per-layer root fraction used below
call rootfr
!! update residue, N, P on soil surface
sol_rsd(1,j) = resnew + sol_rsd(1,j)
sol_fon(1,j) = plantn(j) * (1. - rwt(j)) + sol_fon(1,j)
sol_fop(1,j) = plantp(j) * (1. - rwt(j)) + sol_fop(1,j)
! guard against negative pools after the additions
sol_rsd(1,j) = Max(sol_rsd(1,j),0.)
sol_fon(1,j) = Max(sol_fon(1,j),0.)
sol_fop(1,j) = Max(sol_fop(1,j),0.)
!!insert new biomss by zhang
!!=================================
!! cswat == 2: partition the surface residue into metabolic (LM) and
!! structural (LS) litter pools for the CENTURY-style carbon model
if (cswat == 2) then
!!all the lignin from STD is assigned to LSL,
!!add STDL calculation
!!
!sol_LSL(k,ihru) = sol_STDL(k,ihru)
!CLG=BLG(3,JJK)*HUI(JJK)/(HUI(JJK)+EXP(BLG(1,JJK)-BLG(2,JJK)*&HUI(JJK))
! 52 BLG1 = LIGNIN FRACTION IN PLANT AT .5 MATURITY
! 53 BLG2 = LIGNIN FRACTION IN PLANT AT MATURITY
!CROPCOM.dat BLG1 = 0.01 BLG2 = 0.10
!SUBROUTINE ASCRV(X1,X2,X3,X4)
!EPIC0810
!THIS SUBPROGRAM COMPUTES S CURVE PARMS GIVEN 2 (X,Y) POINTS.
!USE PARM
!XX=LOG(X3/X1-X3)
!X2=(XX-LOG(X4/X2-X4))/(X4-X3)
!X1=XX+X3*X2
!RETURN
!END
!HUI(JJK)=HU(JJK)/XPHU
! S-curve parameters for the lignin fraction as a function of maturity
BLG1 = 0.01/0.10
BLG2 = 0.99
BLG3 = 0.10
XX = log(0.5/BLG1-0.5)
BLG2 = (XX -log(1./BLG2-1.))/(1.-0.5)
BLG1 = XX + 0.5*BLG2
! CLG: lignin fraction at the accumulated heat-unit fraction phuacc
CLG=BLG3*phuacc(j)/(phuacc(j)+EXP(BLG1-BLG2*phuacc(j)))
!if (k == 1) then
sf = 0.05
!else
!sf = 0.1
!end if
!kg/ha
sol_min_n = 0.
sol_min_n = (sol_no3(1,j)+sol_nh3(1,j))
resnew = resnew
! NOTE(review): ff1 and yieldn come from the parm module - confirm they
! are set before killop is called
resnew_n = ff1 * (plantn(j) - yieldn)
resnew_ne = resnew_n + sf * sol_min_n
!Not sure 1000 should be here or not!
!RLN = 1000*(resnew * CLG/(resnew_n+1.E-5))
RLN = (resnew * CLG/(resnew_n+1.E-5))
RLR = MIN(.8, resnew * CLG/(resnew+1.E-5))
! metabolic litter fraction, bounded to [0.01, 0.7]
LMF = 0.85 - 0.018 * RLN
if (LMF <0.01) then
LMF = 0.01
else
if (LMF >0.7) then
LMF = 0.7
end if
end if
!if ((resnew * CLG/(resnew_n+1.E-5)) < 47.22) then
! LMF = 0.85 - 0.018 * (resnew * CLG/(resnew_n+1.E-5))
!else
! LMF = 0.
!end if
LSF = 1 - LMF
sol_LM(1,j) = sol_LM(1,j) + LMF * resnew
sol_LS(1,j) = sol_LS(1,j) + LSF * resnew
!here a simplified assumption of 0.5 LSL
LSLF = 0.0
LSLF = CLG
! carbon assumed to be 42% of residue mass (0.42 factor)
sol_LSL(1,j) = sol_LSL(1,j) + RLR* LSF * resnew
sol_LSC(1,j) = sol_LSC(1,j) + 0.42*LSF * resnew
sol_LSLC(1,j) = sol_LSLC(1,j) + RLR*0.42*LSF * resnew
sol_LSLNC(1,j) = sol_LSC(1,j) - sol_LSLC(1,j)
!X3 = MIN(X6,0.42*LSF * resnew/150)
! split residue N between structural (C/N capped at 150) and metabolic pools
if (resnew_n >= (0.42 * LSF * resnew /150)) then
sol_LSN(1,j) = sol_LSN(1,j) + 0.42 * LSF * resnew / 150
sol_LMN(1,j) = sol_LMN(1,j) + resnew_n -
& (0.42 * LSF * resnew / 150) + 1.E-25
else
sol_LSN(1,j) = sol_LSN(1,j) + resnew_n
sol_LMN(1,j) = sol_LMN(1,j) + 1.E-25
end if
!LSNF = sol_LSN(1,j)/(sol_LS(1,j)+1.E-5)
sol_LMC(1,j) = sol_LMC(1,j) + 0.42 * LMF * resnew
!LMNF = sol_LMN(1,j)/(sol_LM(1,j) + 1.E-5)
!update no3 and nh3 in soil
sol_no3(1,j) = sol_no3(1,j) * (1-sf)
sol_nh3(1,j) = sol_nh3(1,j) * (1-sf)
end if
!!insert new biomss by zhang
!!===============================
!! allocate dead roots, N, P to soil layers
do l=1, sol_nly(j)
sol_rsd(l,j) = sol_rsd(l,j) + rtfr(l) * rtresnew
sol_fon(l,j) = sol_fon(l,j) + rtfr(l) * plantn(j) * rwt(j)
sol_fop(l,j) = sol_fop(l,j) + rtfr(l) * plantp(j) * rwt(j)
!!insert new biomss by zhang
!!==============================
!! same CENTURY-style partitioning as above, applied to the root residue
!! placed in each soil layer
if (cswat == 2) then
!!all the lignin from STD is assigned to LSL,
!!add STDL calculation
!!
!sol_LSL(k,ihru) = sol_STDL(k,ihru)
!CLG=BLG(3,JJK)*HUI(JJK)/(HUI(JJK)+EXP(BLG(1,JJK)-BLG(2,JJK)*&HUI(JJK))
! 52 BLG1 = LIGNIN FRACTION IN PLANT AT .5 MATURITY
! 53 BLG2 = LIGNIN FRACTION IN PLANT AT MATURITY
!CROPCOM.dat BLG1 = 0.01 BLG2 = 0.10
!SUBROUTINE ASCRV(X1,X2,X3,X4)
!EPIC0810
!THIS SUBPROGRAM COMPUTES S CURVE PARMS GIVEN 2 (X,Y) POINTS.
!USE PARM
!XX=LOG(X3/X1-X3)
!X2=(XX-LOG(X4/X2-X4))/(X4-X3)
!X1=XX+X3*X2
!RETURN
!END
!HUI(JJK)=HU(JJK)/XPHU
BLG1 = 0.01/0.10
BLG2 = 0.99
BLG3 = 0.10
XX = log(0.5/BLG1-0.5)
BLG2 = (XX -log(1./BLG2-1.))/(1.-0.5)
BLG1 = XX + 0.5*BLG2
CLG=BLG3*phuacc(j)/(phuacc(j)+EXP(BLG1-BLG2*phuacc(j)))
! smaller mineral-N contribution fraction for the top layer
if (l == 1) then
sf = 0.05
else
sf = 0.1
end if
!kg/ha
sol_min_n = 0.
sol_min_n = (sol_no3(l,j)+sol_nh3(l,j))
resnew = rtfr(l) * rtresnew
resnew_n = rtfr(l) *ff2 * (plantn(j) - yieldn)
resnew_ne = resnew_n + sf * sol_min_n
!Not sure 1000 should be here or not!
!RLN = 1000*(resnew * CLG/(resnew_n+1.E-5))
RLN = (resnew * CLG/(resnew_n+1.E-5))
RLR = MIN(.8, resnew * CLG/1000/(resnew/1000+1.E-5))
LMF = 0.85 - 0.018 * RLN
if (LMF <0.01) then
LMF = 0.01
else
if (LMF >0.7) then
LMF = 0.7
end if
end if
!if ((resnew * CLG/(resnew_n+1.E-5)) < 47.22) then
! LMF = 0.85 - 0.018 * (resnew * CLG/(resnew_n+1.E-5))
!else
! LMF = 0.
!end if
LSF = 1 - LMF
sol_LM(l,j) = sol_LM(l,j) + LMF * resnew
sol_LS(l,j) = sol_LS(l,j) + LSF * resnew
!here a simplified assumption of 0.5 LSL
!LSLF = 0.0
!LSLF = CLG
sol_LSL(l,j) = sol_LSL(l,j) + RLR*resnew
sol_LSC(l,j) = sol_LSC(l,j) + 0.42*LSF * resnew
sol_LSLC(l,j) = sol_LSLC(l,j) + RLR*0.42*resnew
sol_LSLNC(l,j) = sol_LSC(l,j) - sol_LSLC(l,j)
!X3 = MIN(X6,0.42*LSF * resnew/150)
! NOTE(review): this branch tests resnew_ne where the surface-layer code
! above tests resnew_n - confirm the asymmetry is intentional
if (resnew_ne >= (0.42 * LSF * resnew /150)) then
sol_LSN(l,j) = sol_LSN(l,j) + 0.42 * LSF * resnew / 150
sol_LMN(l,j) = sol_LMN(l,j) + resnew_ne -
& (0.42 * LSF * resnew / 150) + 1.E-25
else
sol_LSN(l,j) = sol_LSN(l,j) + resnew_ne
sol_LMN(l,j) = sol_LMN(l,j) + 1.E-25
end if
!LSNF = sol_LSN(l,j)/(sol_LS(l,j)+1.E-5)
sol_LMC(l,j) = sol_LMC(l,j) + 0.42 * LMF * resnew
!LMNF = sol_LMN(l,j)/(sol_LM(l,j) + 1.E-5)
!update no3 and nh3 in soil
sol_no3(l,j) = sol_no3(l,j) * (1-sf)
sol_nh3(l,j) = sol_nh3(l,j) * (1-sf)
end if
!!insert new biomss by zhang
!!===============================
end do
!! move any pesticide remaining on the foliage into the top soil layer
if (hrupest(j) == 1) then
do k = 1, npmx
sol_pst(k,j,1) = sol_pst(k,j,1) + plt_pst(k,j)
plt_pst(k,j) = 0.
end do
end if
! accumulate the killed biomass into the harvest total for this crop
bio_hv(icr(j),j) = bio_ms(j) + bio_hv(icr(j),j)
!bio_yrms(j) = bio_yrms(j) + bio_ms(j) / 1000.
!! reset variables
igro(j) = 0
idorm(j) = 0
bio_ms(j) = 0.
rwt(j) = 0.
plantn(j) = 0.
plantp(j) = 0.
strsw(j) = 1.
laiday(j) = 0.
hvstiadj(j) = 0.
phuacc(j) = 0.
! phubase(j) = 0.
rtfr = 0. ! Resetting roots fraction per layer array
return
end
|
{"hexsha": "bcaf4f1145d96a65313c5bd89ed74e912feb680c", "size": 13450, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "modified_source_code/killop.f", "max_stars_repo_name": "gpignotti/swat_soil_moisture_sensitivity", "max_stars_repo_head_hexsha": "d2c3f6185acabf31440d613651192cdaabfe6b6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modified_source_code/killop.f", "max_issues_repo_name": "gpignotti/swat_soil_moisture_sensitivity", "max_issues_repo_head_hexsha": "d2c3f6185acabf31440d613651192cdaabfe6b6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-02T19:37:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T19:37:29.000Z", "max_forks_repo_path": "modified_source_code/killop.f", "max_forks_repo_name": "gpignotti/swat_soil_moisture_sensitivity", "max_forks_repo_head_hexsha": "d2c3f6185acabf31440d613651192cdaabfe6b6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7486338798, "max_line_length": 82, "alphanum_fraction": 0.4034944238, "num_tokens": 4511}
|
import copy
import numpy as np
import open3d as o3d
import trimesh
if not trimesh.ray.has_embree:
    # `raise "<str>"` is itself a TypeError on Python 3 (exceptions must derive
    # from BaseException); raise a proper exception with the same message
    raise ImportError("PyEmbree engine not installed, this experiment will never end")
def outlier_rejection(
    source_points, source_normals, target_points, target_normals, max_dist=2.0
):
    """Drop correspondences whose source-to-target ray is >= max_dist long."""
    distances = np.linalg.norm(target_points - source_points, axis=1)
    keep = distances < max_dist
    return (
        source_points[keep],
        source_normals[keep],
        target_points[keep],
        target_normals[keep],
    )
def project_scan_to_mesh(tmesh, source, max_dist=2.0):
    """Project a PointCloud to the given mesh using ray to triangle
    intersections.

    Rays start at the origin and pass through each source point; hits whose
    ray length exceeds max_dist are rejected. Returns (source_cloud,
    target_cloud) as Open3D point clouds with matching correspondences.
    """
    src_points = np.asarray(source.points)
    src_normals = np.asarray(source.normals)
    directions = copy.deepcopy(src_points)
    origins = np.zeros_like(directions)
    # one ray per source point, keep only the first hit on the mesh
    hit_points, index_ray, index_tri = tmesh.ray.intersects_location(
        ray_origins=origins,
        ray_directions=directions,
        multiple_hits=False,
    )
    # keep only the source points/normals whose ray actually hit a triangle
    src_points = src_points[index_ray]
    src_normals = src_normals[index_ray]
    hit_normals = np.asarray(tmesh.face_normals[index_tri])
    src_points, src_normals, hit_points, hit_normals = outlier_rejection(
        src_points, src_normals, hit_points, hit_normals, max_dist
    )

    def _to_cloud(points, normals):
        # wrap matching point/normal arrays in a new Open3D PointCloud
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(np.asarray(points))
        cloud.normals = o3d.utility.Vector3dVector(np.asarray(normals))
        return cloud

    return _to_cloud(src_points, src_normals), _to_cloud(hit_points, hit_normals)
|
{"hexsha": "1ea3575ca0f7b0d333d510656467dc3f83cb33de", "size": 2428, "ext": "py", "lang": "Python", "max_stars_repo_path": "puma/projections/scan2mesh.py", "max_stars_repo_name": "okryush/puma", "max_stars_repo_head_hexsha": "fd3f21c5566ae64110420a26ef6c9d8da0e67dce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 239, "max_stars_repo_stars_event_min_datetime": "2021-03-30T07:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T07:14:06.000Z", "max_issues_repo_path": "puma/projections/scan2mesh.py", "max_issues_repo_name": "alualu628628/puma", "max_issues_repo_head_hexsha": "4a5980fcd302fc794f50e782e478a3bdd77f57b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-06-10T17:26:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:23:52.000Z", "max_forks_repo_path": "puma/projections/scan2mesh.py", "max_forks_repo_name": "alualu628628/puma", "max_forks_repo_head_hexsha": "4a5980fcd302fc794f50e782e478a3bdd77f57b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 46, "max_forks_repo_forks_event_min_datetime": "2021-03-30T07:18:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T04:49:34.000Z", "avg_line_length": 34.6857142857, "max_line_length": 80, "alphanum_fraction": 0.7491762768, "include": true, "reason": "import numpy", "num_tokens": 549}
|
import numpy as np
import cv2

# Live adaptive-threshold demo: grab frames from the default camera,
# binarise them with a Gaussian-weighted local threshold, and display
# the result until the user presses 'q' in the preview window.
cap = cv2.VideoCapture(0)
try:
    while True:
        ret, img = cap.read()
        # cap.read() returns ret=False (and img=None) when no frame is
        # available (camera unplugged, stream ended); the original code
        # passed None straight into cvtColor and crashed.
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 25x25 Gaussian-weighted neighbourhood mean, minus a constant 5,
        # as the per-pixel threshold.
        binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 25, 5)
        cv2.imshow('frame', binary)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release the camera and tear down the HighGUI windows even if the
    # loop is left via an exception (e.g. KeyboardInterrupt).
    cap.release()
    cv2.destroyAllWindows()
|
{"hexsha": "fa496ce229820170a4bda7fe870d3dae1b238d35", "size": 417, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/01-Adaptive Thresholding.py", "max_stars_repo_name": "gh-BumsooKim/Vision-Art", "max_stars_repo_head_hexsha": "27986705377471cac57b73f9db386a11042e8bec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/01-Adaptive Thresholding.py", "max_issues_repo_name": "gh-BumsooKim/Vision-Art", "max_issues_repo_head_hexsha": "27986705377471cac57b73f9db386a11042e8bec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/01-Adaptive Thresholding.py", "max_forks_repo_name": "gh-BumsooKim/Vision-Art", "max_forks_repo_head_hexsha": "27986705377471cac57b73f9db386a11042e8bec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-25T11:47:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-25T11:47:32.000Z", "avg_line_length": 23.1666666667, "max_line_length": 77, "alphanum_fraction": 0.6163069544, "include": true, "reason": "import numpy", "num_tokens": 120}
|
This code has been copied by mistake. Please do NOT use it
unless by invitation. If you have not been specifically requested
to perform tests with it, please destroy this file
SUBROUTINE FOCK1(F, PTOT, PA, PB)
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
INCLUDE 'SIZES/NOLIST'
DIMENSION F(*), PTOT(*), PA(*), PB(*)
*********************************************************************
*** COMPUTE THE REMAINING CONTRIBUTIONS TO THE ONE-CENTRE ELEMENTS.
*********************************************************************
COMMON /MOLKST/ NUMAT,NAT(NUMATM),NFIRST(NUMATM),NMIDLE(NUMATM),
+ NLAST(NUMATM), NORBS, NELECS,
1 NALPHA, NBETA, NCLOSE, NOPEN
COMMON /TWOELE/ GSS(54),GSP(54),GPP(54),GP2(54),HSP(54)
COMMON /GAUSS / FN1(54),FN2(54)
DIMENSION QTOT(NUMATM), QA(NUMATM), QB(NUMATM)
CALL CHRGE(PTOT,QTOT)
CALL CHRGE(PA,QA)
DO 10 I=1,NUMAT
10 QB(I)=QTOT(I)-QA(I)
DO 100 II=1,NUMAT
IA=NFIRST(II)
IB=NLAST(II)
NI=NAT(II)
IF(NI.EQ.1)THEN
SUM=0.D0
ELSE
SUM2=0.D0
SUM1=0.D0
DO 111 I=IA,IB
IM1=I-1
DO 112 J=IA,IM1
112 SUM1=SUM1+PTOT(J+(I*(I-1))/2)**2
111 SUM2=SUM2+PTOT((I*(I+1))/2)**2
SUM=SUM1*2.D0+SUM2
SUM=(SUM-QTOT(II)**2*0.25D0)
# WRITE(6,'('' ATOM'',I3,'' ANISOTROPY'',F12.6)')NI,SUM
SUM=SUM*FN1(NI)
ENDIF
# WRITE(6,'('' ATOM'',I3,'' CORRECTION'',F12.5)')II,SUM
F(S,S)
KA=(IA*(IA+1))/2
F(KA)=F(KA)+PB(KA)*GSS(NI)+(QTOT(II)-PTOT(KA))*GSP(NI)
+ -(QA(II)-PA(KA))*HSP(NI)
MODIFICATION TO ACCOUNT FOR DIPOLAR BONDS!
# +SUM
END OF MODIFICATION
IF (NI.LT.3) GO TO 100
IPLUS=IA+1
L=KA
DO 80 J=IPLUS,IB
M=L+IA
L=L+J
F(P,P)
F(L)=F(L)+PTOT(KA)*GSP(NI)-PA(KA)*HSP(NI)+
1 PB(L)*GPP(NI)+(QTOT(II)-PTOT(L)-PTOT(KA))*GP2(NI)
2 -0.5D0*(QA(II)-PA(L)-PA(KA))*(GPP(NI)-GP2(NI))
MODIFICATION TO ACCOUNT FOR DIPOLAR BONDS!
# +SUM
END OF MODIFICATION
F(S,P)
80 F(M)=F(M)+2.D0*PTOT(M)*HSP(NI)-PA(M)*(HSP(NI)+GSP(NI))
F(P,P*)
IMINUS=IB-1
DO 90 J=IPLUS,IMINUS
IC=J+1
DO 90 L=IC,IB
M=(L*(L-1))/2+J
90 F(M)=F(M)+PTOT(M)*(GPP(NI)-GP2(NI))
+ -0.5D0*PA (M)*(GPP(NI)+GP2(NI))
100 CONTINUE
RETURN
END
PROGRAM MAIN
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
INCLUDE 'SIZES/NOLIST'
COMMON /KEYWRD/ KEYWRD
COMMON /GEOVAR/ NVAR,LOC(2,MAXPAR), XPARAM(MAXPAR)
COMMON /GEOSYM/ NDEP,LOCPAR(200),IDEPFN(200),LOCDEP(200)
COMMON /GEOKST/ NATOMS,LABELS(NUMATM),
+NA(NUMATM),NB(NUMATM),NC(NUMATM)
COMMON /GMETRY/ GEO(3,NUMATM)
COMMON /GRADNT/ GRAD(MAXPAR),GNORM
COMMON /NUMCAL/ NUMCAL
COMMON /TIME / TIME0
COMMON /PATH / LATOM,LPARAM,REACT(100)
CHARACTER*80 KEYWRD
NUMCAL=1
TIME0=SECOND()
READ AND CHECK INPUT FILE, EXIT IF NECESSARY.
WRITE INPUT FILE TO UNIT 6 AS FEEDBACK TO USER
5 CALL READ
INITIALIZE CALCULATION AND WRITE CALCULATION INDEPENDENT INFO
CALL MOLDAT
CALCULATE
IF(INDEX(KEYWRD,'SADDLE') .NE. 0) THEN
CALL REACT1(FUNCT)
CALL WRITE(TIME0,FUNCT)
STOP
ENDIF
IF (LATOM .NE. 0) THEN
DO PATH
CALL PATHS
STOP
END IF
IF (INDEX(KEYWRD,'FORCE') .NE. 0 ) THEN
FORCE CALCULATION IF DESIRED
CALL FORCE
STOP
ENDIF
IF(INDEX(KEYWRD,'NLLSQ') .NE. 0) THEN
CALL NLLSQ(XPARAM, NVAR )
CALL COMPFG(XPARAM,.TRUE.,ESCF,.TRUE.,GRAD,.TRUE.)
CALL WRITE(TIME0,ESCF)
STOP
ENDIF
IF (INDEX(KEYWRD,'1SCF') .NE. 0) THEN
NVAR=0
IF(INDEX(KEYWRD,'GRAD').NE.0) THEN
NVAR=0
DO 10 I=2,NATOMS
IF(LABELS(I).EQ.99) GOTO 10
IF(I.EQ.2)ILIM=1
IF(I.EQ.3)ILIM=2
IF(I.GT.3)ILIM=3
DO 13 J=1,ILIM
NVAR=NVAR+1
LOC(1,NVAR)=I
LOC(2,NVAR)=J
3 XPARAM(NVAR)=GEO(J,I)
0 CONTINUE
ENDIF
ENDIF
IF(INDEX(KEYWRD,'SIGMA') .NE. 0) THEN
CALL POWSQ(XPARAM, NVAR, ESCF)
CALL WRITE(TIME0,ESCF)
STOP
ENDIF
ORDINARY GEOMETRY OPTIMISATION
CALL FLEPO(XPARAM, NVAR, ESCF)
CALL WRITE(TIME0,ESCF)
STOP
END
BLOCK DATA
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
COMMON /NATORB/ NATORB(54)
+ /ALPHA / ALP(54)
1 /CORE / CORE(54)
2 /MULTIP/ DD(54),QQ(54),AM(54),AD(54),AQ(54)
3 /EXPONT/ ZS(54),ZP(54),ZD(54)
4 /ONELEC/ USS(54),UPP(54),UDD(54)
5 /BETAS / BETAS(54),BETAP(54),BETAD(54)
6 /TWOELE/ GSS(54),GSP(54),GPP(54),GP2(54),HSP(54)
7 /ATOMIC/ EISOL(54),EHEAT(54)
8 /AM1REF/ AM1REF(54)
9 /VSIPS / VS(54),VP(54),VD(54)
A /ISTOPE/ AMS(54)
B /IDEAS / GUESS1(54,10),GUESS2(54,10),GUESS3(54,10)
C /GAUSS / FN1(54),FN2(54)
COMMON BLOCKS FOR MINDO/3
COMMON /ONELE3 / USS3(18),UPP3(18)
+ /TWOEL3 / F03(18)
1 /ATOMI3 / EISOL3(18),EHEAT3(18)
2 /BETA3 / BETA3(153)
3 /ALPHA3 / ALP3(153)
4 /EXPON3 / ZS3(18),ZP3(18)
END OF MINDO/3 COMMON BLOCKS
NATORB IS THE NUMBER OF ATOMIC ORBITALS PER ATOM.
DATA NATORB/2*1,8*4,8*4,2*4,10*9,6*4,2*4,10*9,6*4/
DATA AM1REF /54*1.D0/
THE ATOMIC MASSES OF THE ISOTOPES
DATA AMS(1) / 1.007825D0/
DATA AMS(4) / 9.012190D0/
DATA AMS(5) / 11.00931D0/
DATA AMS(6) / 12.00000D0/
DATA AMS(7) / 14.00307D0/
DATA AMS(8) / 15.99491D0/
DATA AMS(9) / 18.99840D0/
DATA AMS(13)/ 26.98153D0/
DATA AMS(14)/ 27.97693D0/
DATA AMS(15)/ 30.97376D0/
DATA AMS(16)/ 31.97207D0/
DATA AMS(17)/ 34.96885D0 /
DATA AMS(35)/ 79.9D0 /
DATA AMS(53)/ 126.9D0 /
ATOMIC WEIGHTS FOR LESS ABUNDANT ISOTOPES:
AMS(2) = DEUTERIUM
AMS(3) = CARBON 13
AMS(10)= CARBON 14
AMS(11)= OXYGEN 18
AMS(12)= BORON 10
DATA AMS(2) /2.0141022D0/
DATA AMS(3) /13.003354D0/
DATA AMS(10)/14.003242D0/
DATA AMS(11)/17.999160D0/
DATA AMS(12)/10.012940D0/
CORE IS THE CHARGE ON THE ATOM AS SEEN BY THE ELECTRONS
DATA CORE(1),CORE(4) /1.0D00, 2.D0/
DATA CORE(5)/3.0D00/
DATA CORE(6)/4.0D00/
DATA CORE(7)/5.0D00/
DATA CORE(8)/6.0D00/
DATA CORE(9)/7.0D00/
DATA CORE(13)/3.0D00/
DATA CORE(14)/4.0D00/
DATA CORE(15)/5.0D00/
DATA CORE(16)/6.0D00/
DATA CORE(17)/7.0D00/
DATA CORE(35)/7.D0/
DATA CORE(53)/7.D0/
ENTHALPIES OF FORMATION OF GASEOUS ATOMS ARE TAKEN FROM \ANNUAL
REPORTS,1974,71B,P 117\ THERE ARE SOME SIGNIFICANT DIFFERENCES
BETWEEN THE VALUES REPORTED THERE AND THE VALUES PREVIOUSLY IN
THE BLOCK DATA OF THIS PROGRAM. ONLY THE THIRD ROW ELEMENTS
HAVE BEEN UPDATED.
DATA EHEAT(1),EHEAT(4) /52.102D00 , 76.96D00 /
DATA EHEAT(5)/135.7D00/
DATA EHEAT(6) / 170.89D00 /
DATA EHEAT(7)/113.0D00/
DATA EHEAT(8)/59.559D00/
DATA EHEAT(9)/18.89D00/
DATA EHEAT(13)/79.49D00/
DATA EHEAT(14)/108.39D00/
DATA EHEAT(15)/75.57D00/
DATA EHEAT(16)/66.40D00/
DATA EHEAT(17)/28.99D00/
DATA EHEAT(35)/26.74D0/
DATA EHEAT(53)/25.517D0/
*** VS AND VP ARE THE VALENCE STATE IONIZATION POTENTAIL OF S AND P
ELECTRONS IN E.V. : USED IN THE EHT RESONANCE INTEGRALS.
DATA VS(1) / -13.605 /
DATA VS(5)/-15.16D00/
DATA VS(6)/-21.34D00/
DATA VS(7)/-27.51D00/
DATA VS(8)/-35.30D00/
DATA VS(9)/-43.70D00/
DATA VS(14)/-17.82D00/
DATA VS(15)/-21.10D00/
DATA VS(16)/-23.84D00/
DATA VS(17)/-25.26D00/
DATA VP(1) / 0.0D00 /
DATA VP(5)/-8.52D00/
DATA VP(6)/-11.54D00/
DATA VP(7)/-14.34D00/
DATA VP(8)/-17.91D00/
DATA VP(9)/-20.89D00/
DATA VP(14)/-8.51D00/
DATA VP(15)/-10.29D00/
DATA VP(16)/-12.41D00/
DATA VP(17)/-15.09D00/
DATA NPQ/1,1, 2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3, 4,4,4,4,4,4,4,4,
+4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5/
*** ONE CENTER REPULSION INTEGRALS
GSS ::= (SS,SS)
GPP ::= (PP,PP)
GSP ::= (SS,PP)
GP2 ::= (PP,P*P*)
HSP ::= (SP,SP)
DATA GSS(1) / 12.848D00 /
DATA GSS(4)/9.00D00/
DATA GSS(5)/10.59D00/
DATA GSS(6) / 12.23D00 /
DATA GSS(7)/13.59D00/
DATA GSS(8)/15.42D00/
DATA GSS(9)/16.92D00/
DATA GSS(13)/8.09D00/
DATA GSS(14)/9.82D00/
DATA GSS(15)/11.56D00/
DATA GSS(16)/12.88D00/
DATA GSS(17)/15.03D00/
DATA GSS(35)/15.03643948D0/
DATA GSS(53)/15.04044855D0/
DATA GPP(4)/6.97D00/
DATA GPP(5)/8.86D00/
DATA GPP(6) / 11.08D00 /
DATA GPP(7)/12.98D00/
DATA GPP(8)/14.52D00/
DATA GPP(9)/16.71D00/
DATA GPP(13)/5.98D00/
DATA GPP(14)/7.31D00/
DATA GPP(15)/8.64D00/
DATA GPP(16)/9.90D00/
DATA GPP(17)/11.30D00/
DATA GPP(35)/11.27632539D0/
DATA GPP(53)/11.14778369D0/
DATA GSP(4)/7.43D00/
DATA GSP(5)/9.56D00/
DATA GSP(6) / 11.47D00 /
DATA GSP(7)/12.66D00/
DATA GSP(8)/14.48D00/
DATA GSP(9)/17.25D00/
DATA GSP(13)/6.63D00/
DATA GSP(14)/8.36D00/
DATA GSP(15)/10.08D00/
DATA GSP(16)/11.26D00/
DATA GSP(17)/13.16D00/
DATA GSP(35)/13.03468242D0/
DATA GSP(53)/13.05655798D0/
DATA GP2(4)/6.22D00/
DATA GP2(5)/7.86D00/
DATA GP2(6) / 9.84D00 /
DATA GP2(7)/11.59D00/
DATA GP2(8)/12.98D00/
DATA GP2(9)/14.91D00/
DATA GP2(13)/5.40D00/
DATA GP2(14)/6.54D00/
DATA GP2(15)/7.68D00/
DATA GP2(16)/8.83D00/
DATA GP2(17)/9.97D00/
DATA GP2(35)/9.85442552D0/
DATA GP2(53)/9.91409071D0/
DATA HSP(4)/1.28D00/
DATA HSP(5)/1.81D00/
DATA HSP(6) / 2.43D00 /
DATA HSP(7)/3.14D00/
DATA HSP(8)/3.94D00/
DATA HSP(9)/4.83D00/
DATA HSP(13)/0.70D00/
DATA HSP(14)/1.32D00/
DATA HSP(15)/1.92D00/
DATA HSP(16)/2.26D00/
DATA HSP(17)/2.42D00/
DATA HSP(35)/2.45586832D0/
DATA HSP(53)/2.45638202D0/
THE MONOCENTRIC INTEGRALS HSP AND GSP FOR ALUMINIUM ARE ONLY
ESTIMATES. A VALUE OF G1 FOR AL IS NEEDED TO RESOLVE OLEARIS
INTEGRALS.
OPTIMIZED MNDO PARAMETERS FOR H, BE, B, C, N, O, F
CL
ESTIMATED MNDO PARAMETERS FOR AL,SI, P, S
ELEMENTS H, C, N, O WERE PARAMETERIZED BY WALTER THIEL
ELEMENTS B,SI,P,S WERE .. MICHAEL MCKEE
ELEMENTS BE,F,AL,CL WERE .. HENRY RZEPA
DATA USS(1)/-11.906276D00/
DATA USS(4)/-16.602378D00/
DATA USS(5)/-34.547130D00/
DATA USS(6)/-52.279745D00/
DATA USS(7)/ -71.932122D00/
DATA USS(8)/-99.644309D00/
DATA USS(9)/-131.071548D00/
DATA USS(13)/-23.807097D00/
DATA USS(14)/-40.568292D00/
DATA USS(15)/-56.143360D00/
DATA USS(16)/-75.239152D00/
DATA USS(17)/-100.227166D00/
DATA USS(35)/ -99.98644054D0/
DATA USS(53)/-100.00305378D0/
DATA UPP(4)/-10.703771D00/
DATA UPP(5)/-23.121690D00/
DATA UPP(6)/-39.205558D00/
DATA UPP(7)/-57.172319D00/
DATA UPP(8)/-77.797472D00/
DATA UPP(9)/-105.782137D00/
DATA UPP(13)/-17.519878D00/
DATA UPP(14)/-28.089187D00/
DATA UPP(15)/-42.851080D00/
DATA UPP(16)/-57.832013D00/
DATA UPP(17)/-77.378667D00/
DATA UPP(35)/-75.67130754D0/
DATA UPP(53)/-74.61146919D0/
OPTIMIZED ORBITAL EXPONENTS (DEFAULT = CLEMENTI + 0.3)
DATA ZS(1),ZP(1)/1.331967D00 , 0.0D00/
DATA ZS(4), ZP(4) / 1.004210D0, 1.004210D0 /
DATA ZS(5), ZP(5) / 1.506801D0, 1.506801D0 /
DATA ZS(6),ZP(6) / 1.787537D00 , 1.787537D00 /
DATA ZS(7), ZP(7) / 2.255614D0, 2.255614D0 /
DATA ZS(8), ZP(8) / 2.699905D0, 2.699905D0 /
DATA ZS(9), ZP(9) / 2.848487D0, 2.848487D0 /
DATA ZS(13), ZP(13) / 1.444161D0, 1.444161D0 /
DATA ZS(14), ZP(14) / 1.435306D0, 1.435306D0 /
DATA ZS(15), ZP(15) / 2.108720D0, 1.785810D0 /
DATA ZS(16), ZP(16) / 2.613591D0, 2.034393D0 /
DATA ZS(17), ZP(17) / 3.784645D0, 2.036263D00 /
DATA ZS(35), ZP(35) / 3.85430190D0,2.19920914D0/
DATA ZS(53), ZP(53) / 2.2729610D0, 2.16949803D0/
DATA ZD/10*0.D0,1.D0,1.D0,1.D0,1.D0,1.D0,1.D0,1.D0,1.D0,36*1.D0/
TWO CENTRE RESONANCE INTEGRALS AND CORE-CORE REPULSION FUNCTIONS
DATA BETAS(1)/ -6.989064D0 /
DATA BETAS(4)/ -4.017096D0 /
DATA BETAS(5)/ -8.252054D0 /
DATA BETAS(6)/ -18.985044D0 /
DATA BETAS(7)/ -20.495758D0 /
DATA BETAS(8)/ -32.688082D0 /
DATA BETAS(9)/ -48.290466D0 /
DATA BETAS(13)/ -2.450284D0 /
DATA BETAS(14)/ -4.256218D0 /
DATA BETAS(15)/ -6.791600D0 /
DATA BETAS(16)/ -11.142231D0 /
DATA BETAS(17)/ -14.262320D0 /
DATA BETAS(35)/ -8.917107D0 /
DATA BETAS(53)/ -7.414451D0 /
DATA BETAP(4)/ -4.017096D0 /
DATA BETAP(5)/ -8.252054D0 /
DATA BETAP(6)/ -7.934122D0 /
DATA BETAP(7)/ -20.495758D0 /
DATA BETAP(8)/ -32.688082D0 /
DATA BETAP(9)/ -36.508540D0 /
DATA BETAP(13)/ -2.670284D0 /
DATA BETAP(14)/ -4.256218D0 /
DATA BETAP(15)/ -6.791600D0 /
DATA BETAP(16)/ -11.142231D0 /
DATA BETAP(17)/ -14.623200D0 /
DATA BETAP(35)/ -9.943740D0 /
DATA BETAP(53)/ -6.196781D0 /
DATA ALP(1) / 2.5441341D00/
DATA ALP(4)/ 1.669434D0/
DATA ALP(5)/ 2.134993D0/
DATA ALP(6) / 2.546380D00 /
DATA ALP(7)/ 2.861342D0/
DATA ALP(8)/ 3.160604D0/
DATA ALP(9)/ 3.4196606D0/
DATA ALP(13)/ 1.8688394D0/
DATA ALP(14)/ 2.1961078D0/
DATA ALP(15)/ 2.4152800D0/
DATA ALP(16)/ 2.4916445D0/
DATA ALP(17)/ 2.542201D0/
DATA ALP(35)/ 2.44570512D0/
DATA ALP(53)/ 2.2073200D0/
ELECTRONIC ENERGIES OF NEUTRAL FREE ATOMS
DATA EISOL(1) / -11.906276D00 /
DATA EISOL(4)/ -24.204740D0/
DATA EISOL(5)/ -64.315950D0/
DATA EISOL(6) / -120.500606D00 /
DATA EISOL(7)/ -202.581201D0/
DATA EISOL(8)/ -317.868506D0/
DATA EISOL(9)/ -476.683781D0/
DATA EISOL(13)/ -44.4840711D0/
DATA EISOL(14)/ -90.53496D0/
DATA EISOL(15)/ -152.959960D0/
DATA EISOL(16)/ -235.45636D0/
DATA EISOL(17)/ -353.137667D0/
DATA EISOL(35)/ -346.681250D0/
DATA EISOL(53)/ -340.59836D00/
DIPOLE AND QUADRUPOLE CHARGE SEPARATIONS.
DATA DD(4), QQ(4) / 1.43732454D0, 1.21961031D0 /
DATA DD(5), QQ(5) / 0.95790730D0, 0.81281129D0 /
DATA DD(6),QQ(6) / .80746618D00 , .68515777D00 /
DATA DD(7), QQ(7) / 0.63990367D0, 0.54297627D0 /
DATA DD(8), QQ(8) / 0.53460239D0, 0.45362517D0 /
DATA DD(9), QQ(9) / 0.50671661D0, 0.42996330D0 /
DATA DD(13),QQ(13)/ 1.39923869D0, 1.15867971D0 /
DATA DD(14),QQ(14)/ 1.40787117D0, 1.16582809D0 /
DATA DD(15),QQ(15)/ 1.01296990D0, 0.93700901D0 /
DATA DD(16),QQ(16)/ 0.82315963D0, 0.82251564D0 /
DATA DD(17),QQ(17)/ 0.49868702D0, 0.82176028D0 /
DATA DD(35),QQ(35)/ 0.6051074D0, 0.9645873D0 /
DATA DD(53),QQ(53)/ 1.4253233D0, 1.1841707D0 /
ADDITIVE TERMS( STORED AS A LINEAR ARRAY FOR COMPATIBILITY)
DATA AM(1),AD(1),AQ(1)/ 0.47217935D0,0.47217935D0,0.00000000D0/
DATA AM(4),AD(4),AQ(4)/ 0.33076075D0,0.33561420D0,0.38629372D0/
DATA AM(5),AD(5),AQ(5)/ 0.38919515D0,0.49047299D0,0.55569787D0/
DATA AM(6),AD(6),AQ(6)/ 0.44946711D0,0.61494736D0,0.66858974D0/
DATA AM(7),AD(7),AQ(7)/0.49944873D0,0.78436428D0,0.81447199D0/
DATA AM(8),AD(8),AQ(8)/ 0.56670342D0,0.95925620D0,0.94959338D0/
DATA AM(9),AD(9),AQ(9)/ 0.62183021D0,1.08503007D0,1.03436433D0/
DATA AM(13),AD(13),AQ(13)/0.29731716D0,0.26355743D0,0.36735599D0/
DATA AM(14),AD(14),AQ(14)/0.36089673D0,0.34418174D0,0.39826149D0/
DATA AM(15),AD(15),AQ(15)/0.42484381D0,0.48824197D0,0.49794058D0/
DATA AM(16),AD(16),AQ(16)/0.47335538D0,0.58893950D0,0.56496234D0/
DATA AM(17),AD(17),AQ(17)/0.55237045D0,0.80612202D0,0.60686609D0/
DATA AM(35),AD(35),AQ(35)/0.5526068D0, 0.7258330D0, 0.5574589D0 /
DATA AM(53),AD(53),AQ(53)/0.5527541D0, 0.4593451D0, 0.4585376D0 /
ALL THE FOLLOWING DATA APPLY TO MINDO/3 AND NOT TO MNDO
*** F03 IS THE ONE CENTER AVERAGED REPULSION INTEGRAL FOR USE IN THE
TWO CENTER ELECTRONIC REPULSION INTEGRAL EVALUATION.
DATA F03 / 12.848D0, 0.0D0, 0.0D0, 0.0D0,
. 8.958D0, 10.833D0, 12.377D0, 13.985D0, 16.250D0,
. 0.000D0, 0.000D0, 0.000D0, 0.000D0,
. 7.57D0 , 9.00D0 , 10.20D0 , 11.73,.0D0/
*** USS AND UPP ARE THE ONE-CENTER CORE ELECTRON ATTRACTION AND KINETI
ENERGY INTEGRALS FOR S AND P ELECTRONS RESPECTIVELY IN E.V.
DATA USS3 / -12.505D0, 0.000D0, 0.000D0, 0.000D0,
. -33.61D0, -51.79D0, -66.06D0, -91.73D0 ,
. -129.86D0,
. 0.0000D0 , 0.000 D0 ,0.000D0 , 0.000D0 ,
. -39.82D0 , -56.23D0 , -73.39D0 , -98.99D0 ,.0D0/
DATA UPP3 / 0.0D0, 0.0D0, 0.0D0, 0.0D0,
. -25.11D0 , -39.18D0 , -56.40D0 , -78.80D0 , -105.93D0 ,
. 0.000D0 , 0.000D0 , 0.000D0 , 0.000D0 ,
. -29.15D0 , -42.31D0 , -57.25D0 , -76.43D0 ,.0D0/
*** EISOL3 AND EHEAT3 ARE THE GS ELECTRONIC ENERGY OF THE NEUTRAL ATOM
(IN E.V.) AND THE HEAT OF FORMATION IF THE FREE ATOM (IN KCAL/MOL)
DATA EISOL3 /-12.505D0 , 0.0D0 , 0.0D0 ,0.0D0 ,
. -61.70D0 ,-119.47D0 , -187.51D0 , -307.07D0 , -475.00D0 ,
. 0.0D0 , 0.0D0 , 0.0D0 , 0.0D0 ,
. -90.98D0 , -150.81D0 , -229.15D0 , -345.93D0 , 0.0D0/
DATA EHEAT3 / 52.102D0 , 0.0D0 , 0.0D0 , 0.0D0 ,
. 135.7 D0 , 170.89D0 , 113.0 D0 , 59.559D0 , 18.86D0 ,
. 0.0D0 , 0.0D0 , 0.0D0 , 0.0D0 ,
. 106.0D0 , 79.8D0 , 65.65D0 , 28.95D0 , 0.0D0 /
*** BETA3 AND ALP3 ARE THE BOND PARAMETERS USED IN THE
RESONANCE INTEGRAL AND THE CORE CORE REPULSION INTEGRAL RESPECTIVE
THAT IS ACCORDING TO THE FOLLOWING CONVENTION
HERE IS THE
BOND TYPE DESIGNATION
H B C N O F SI P S CL
-----------------------------------------
H 1 11 16 22 29 37 92 106 121 137
B 15 20 26 33 41
C 21 27 34 42 97 111 126 142
N 28 35 43 127 143
O 36 44 128 144
F 45 129
SI 105
P 120 151
S 136 152
CL 153
DATA BETA3(1),ALP3(1) / 0.244770D0 , 1.489450D0 /
DATA BETA3(11),ALP3(11) / 0.185347D0 , 2.090352D0 /
DATA BETA3(15),ALP3(15) / 0.151324D0 , 2.280544D0 /
DATA BETA3(16),ALP3(16) / 0.315011D0 , 1.475836D0 /
DATA BETA3(20),ALP3(20) / 0.250031D0 , 2.138291D0 /
DATA BETA3(21),ALP3(21) / 0.419907D0 , 1.371208D0 /
DATA BETA3(22),ALP3(22) / 0.360776D0 , 0.589380D0 /
DATA BETA3(26),ALP3(26) / 0.310959D0 , 1.909763D0 /
DATA BETA3(27),ALP3(27) / 0.410886D0 , 1.635259D0 /
DATA BETA3(28),ALP3(28) / 0.377342D0 , 2.029618D0 /
DATA BETA3(29),ALP3(29) / 0.417759D0 , 0.478901D0 /
DATA BETA3(33),ALP3(33) / 0.349745D0 , 2.484827D0 /
DATA BETA3(34),ALP3(34) / 0.464514D0 , 1.820975D0 /
DATA BETA3(35),ALP3(35) / 0.458110D0 , 1.873859D0 /
DATA BETA3(36),ALP3(36) / 0.659407D0 , 1.537190D0 /
DATA BETA3(37),ALP3(37) / 0.195242D0 , 3.771362D0 /
DATA BETA3(41),ALP3(41) / 0.219591D0 , 2.862183D0 /
DATA BETA3(42),ALP3(42) / 0.247494D0 , 2.725913D0 /
DATA BETA3(43),ALP3(43) / 0.205347D0 , 2.861667D0 /
DATA BETA3(44),ALP3(44) / 0.334044D0 , 2.266949D0 /
DATA BETA3(45),ALP3(45) / 0.197464D0 , 3.864997D0 /
DATA BETA3(92),ALP3(92) / 0.289647D0 , 0.940789D0 /
DATA BETA3(97),ALP3(97) / 0.411377D0 , 1.101382D0 /
DATA BETA3(105),ALP3(105) / 0.291703D0 , 0.918432D0 /
DATA BETA3(106),ALP3(106) / 0.320118D0 , 0.923170D0 /
DATA BETA3(111),ALP3(111) / 0.457816D0 , 1.029693D0 /
DATA BETA3(120),ALP3(120) / 0.311790D0 , 1.186652D0 /
DATA BETA3(121),ALP3(121) / 0.220654D0 , 1.700698D0 /
DATA BETA3(126),ALP3(126) / 0.284620D0 , 1.761370D0 /
DATA BETA3(127),ALP3(127) / 0.313170D0 , 1.878176D0/
DATA BETA3(128),ALP3(128) / 0.422890D0 , 2.077240D0 /
DATA BETA3(129),ALP3(129) / 0.000000D0 , 0.000000D0 /
DATA BETA3(136),ALP3(136) / 0.202489D0 , 1.751617D0 /
DATA BETA3(137),ALP3(137) / 0.231653D0 , 2.089404D0 /
DATA BETA3(142),ALP3(142) / 0.315480D0 , 1.676222D0 /
DATA BETA3(143),ALP3(143) / 0.302298D0 , 1.817064D0 /
DATA BETA3(144),ALP3(144) / 0.000000D0 , 0.000000D0 /
DATA BETA3(151),ALP3(151) / 0.277322D0 , 1.543720D0 /
DATA BETA3(152),ALP3(152) / 0.221764D0 , 1.950318D0 /
DATA BETA3(153),ALP3(153) / 0.258969D0 , 1.792125D0 /
*** HERE COMES THE OPTIMIZED SLATER_S EXPONENTS FOR THE EVALUATION
OF THE OVERLAP INTEGRALS AND MOLECULAR DIPOLE MOMENTS.
DATA ZS3(1),ZP3(1) / 1.3D0 , 0.0D0 /
DATA ZS3(5),ZP3(5) / 1.211156D0 , 0.972826D0 /
DATA ZS3(6),ZP3(6) / 1.739391D0 , 1.709645D0 /
DATA ZS3(7),ZP3(7) / 2.704546D0 , 1.870839D0 /
DATA ZS3(8),ZP3(8) / 3.640575D0 , 2.168448D0 /
DATA ZS3(9),ZP3(9) / 3.111270D0 , 1.41986D0 /
DATA ZS3(14),ZP3(14) / 1.629173D0 , 1.381721D0 /
DATA ZS3(15),ZP3(15) / 1.926108D0 , 1.590665D0 /
DATA ZS3(16),ZP3(16) / 1.719480D0 , 1.403205D0 /
DATA ZS3(17),ZP3(17) / 3.430887D0 , 1.627017D0 /
END OF MINDO/3 SPECIFIC DATA
DATA FOR ELEMENT 1
DATA USS ( 1)/ -11.3954710D0/
DATA BETAS ( 1)/ -6.9012090D0/
DATA ZS ( 1)/ 1.3319670D0/
DATA ALP ( 1)/ 2.6448900D0/
DATA EISOL ( 1)/ -11.3954710D0/
DATA AM ( 1)/ 0.4721793D0/
DATA AD ( 1)/ 0.4721793D0/
DATA AQ ( 1)/ 0.4721793D0/
DATA GUESS1( 1,1)/ 0.0763700D0/
DATA GUESS2( 1,1)/ 7.7536570D0/
DATA GUESS3( 1,1)/ 1.8607430D0/
DATA GUESS1( 1,2)/ -0.0219120D0/
DATA GUESS2( 1,2)/ 2.9661090D0/
DATA GUESS3( 1,2)/ 2.6653650D0/
DATA FOR ELEMENT 4
DATA USS ( 4)/ -16.6023780D0/
DATA UPP ( 4)/ -10.7037710D0/
DATA BETAS ( 4)/ -4.0170960D0/
DATA BETAP ( 4)/ -4.0170960D0/
DATA ZS ( 4)/ 1.0042100D0/
DATA ZP ( 4)/ 1.0042100D0/
DATA ZD ( 4)/ 0.2000000D0/
DATA ALP ( 4)/ 1.6694340D0/
DATA EISOL ( 4)/ -24.2047560D0/
DATA DD ( 4)/ 1.4373245D0/
DATA QQ ( 4)/ 1.2196103D0/
DATA AM ( 4)/ 0.3307607D0/
DATA AD ( 4)/ 0.3356142D0/
DATA AQ ( 4)/ 0.3846373D0/
DATA FOR ELEMENT 5
DATA USS ( 5)/ -34.5471300D0/
DATA UPP ( 5)/ -23.1216900D0/
DATA BETAS ( 5)/ -8.2520540D0/
DATA BETAP ( 5)/ -8.2520540D0/
DATA ZS ( 5)/ 1.5068010D0/
DATA ZP ( 5)/ 1.5068010D0/
DATA ZD ( 5)/ 0.2000000D0/
DATA ALP ( 5)/ 2.1349930D0/
DATA EISOL ( 5)/ -64.3159500D0/
DATA DD ( 5)/ 0.9579073D0/
DATA QQ ( 5)/ 0.8128113D0/
DATA AM ( 5)/ 0.3891951D0/
DATA AD ( 5)/ 0.4904730D0/
DATA AQ ( 5)/ 0.5556979D0/
DATA FOR ELEMENT 6
DATA USS ( 6)/ -52.2869850D0/
DATA UPP ( 6)/ -39.3957210D0/
DATA BETAS ( 6)/ -14.7893280D0/
DATA BETAP ( 6)/ -9.4013270D0/
DATA ZS ( 6)/ 1.7875370D0/
DATA ZP ( 6)/ 1.7875370D0/
DATA ZD ( 6)/ 0.2000000D0/
DATA ALP ( 6)/ 2.6361300D0/
DATA EISOL ( 6)/ -120.8954120D0/
DATA DD ( 6)/ 0.8074662D0/
DATA QQ ( 6)/ 0.6851578D0/
DATA AM ( 6)/ 0.4494671D0/
DATA AD ( 6)/ 0.6149474D0/
DATA AQ ( 6)/ 0.6685897D0/
DATA FN1 ( 6)/ 0.0356490D0/
DATA GUESS1( 6,1)/ 0.0419910D0/
DATA GUESS2( 6,1)/ 7.7536570D0/
DATA GUESS3( 6,1)/ 1.8607430D0/
DATA GUESS1( 6,2)/ -0.0006430D0/
DATA GUESS2( 6,2)/ 2.9661090D0/
DATA GUESS3( 6,2)/ 2.6653650D0/
DATA FOR ELEMENT 7
DATA USS ( 7)/ -66.7680870D0/
DATA UPP ( 7)/ -57.3605830D0/
DATA BETAS ( 7)/ -12.8274670D0/
DATA BETAP ( 7)/ -21.8979740D0/
DATA ZS ( 7)/ 2.2556140D0/
DATA ZP ( 7)/ 2.2556140D0/
DATA ZD ( 7)/ 0.2000000D0/
DATA ALP ( 7)/ 2.8749780D0/
DATA EISOL ( 7)/ -192.8029230D0/
DATA DD ( 7)/ 0.6399037D0/
DATA QQ ( 7)/ 0.5429763D0/
DATA AM ( 7)/ 0.4994487D0/
DATA AD ( 7)/ 0.7843643D0/
DATA AQ ( 7)/ 0.8126445D0/
DATA FN1 ( 7)/ 0.4664350D0/
DATA GUESS1( 7,1)/ 0.0191900D0/
DATA GUESS2( 7,1)/ 7.7536570D0/
DATA GUESS3( 7,1)/ 1.8607430D0/
DATA GUESS1( 7,2)/ 0.0105670D0/
DATA GUESS2( 7,2)/ 2.9661090D0/
DATA GUESS3( 7,2)/ 2.6653650D0/
DATA FOR ELEMENT 8
DATA USS ( 8)/ -92.6269880D0/
DATA UPP ( 8)/ -77.8228510D0/
DATA BETAS ( 8)/ -22.7022330D0/
DATA BETAP ( 8)/ -32.4538720D0/
DATA ZS ( 8)/ 2.6999050D0/
DATA ZP ( 8)/ 2.6999050D0/
DATA ZD ( 8)/ 0.2000000D0/
DATA ALP ( 8)/ 3.2543070D0/
DATA EISOL ( 8)/ -303.9353800D0/
DATA DD ( 8)/ 0.5346024D0/
DATA QQ ( 8)/ 0.4536252D0/
DATA AM ( 8)/ 0.5667034D0/
DATA AD ( 8)/ 0.9592562D0/
DATA AQ ( 8)/ 0.9495934D0/
DATA FN1 ( 8)/ 0.1647660D0/
DATA GUESS1( 8,1)/ 0.0266930D0/
DATA GUESS2( 8,1)/ 7.7536570D0/
DATA GUESS3( 8,1)/ 1.8607430D0/
DATA GUESS1( 8,2)/ -0.0044940D0/
DATA GUESS2( 8,2)/ 2.9661090D0/
DATA GUESS3( 8,2)/ 2.6653650D0/
DATA FOR ELEMENT 9
DATA USS ( 9)/ -131.0715480D0/
DATA UPP ( 9)/ -105.7821370D0/
DATA BETAS ( 9)/ -48.2904660D0/
DATA BETAP ( 9)/ -36.5085400D0/
DATA ZS ( 9)/ 2.8484870D0/
DATA ZP ( 9)/ 2.8484870D0/
DATA ZD ( 9)/ 0.2000000D0/
DATA ALP ( 9)/ 3.4196606D0/
DATA EISOL ( 9)/ -476.6837810D0/
DATA DD ( 9)/ 0.5067166D0/
DATA QQ ( 9)/ 0.4299633D0/
DATA AM ( 9)/ 0.6218302D0/
DATA AD ( 9)/ 1.0850301D0/
DATA AQ ( 9)/ 1.0343643D0/
DATA FOR ELEMENT 13
DATA USS (13)/ -23.8070970D0/
DATA UPP (13)/ -17.5198780D0/
DATA BETAS (13)/ -2.4502840D0/
DATA BETAP (13)/ -2.6702840D0/
DATA ZS (13)/ 1.4441610D0/
DATA ZP (13)/ 1.4441610D0/
DATA ZD (13)/ 1.0000000D0/
DATA ALP (13)/ 1.8688394D0/
DATA EISOL (13)/ -43.0840720D0/
DATA DD (13)/ 1.3992387D0/
DATA QQ (13)/ 1.1586797D0/
DATA AM (13)/ 0.2973172D0/
DATA AD (13)/ 0.2635574D0/
DATA AQ (13)/ 0.3673560D0/
DATA FOR ELEMENT 14
DATA USS (14)/ -40.5682920D0/
DATA UPP (14)/ -28.0891870D0/
DATA BETAS (14)/ -4.2562180D0/
DATA BETAP (14)/ -4.2562180D0/
DATA ZS (14)/ 1.4353060D0/
DATA ZP (14)/ 1.4353060D0/
DATA ZD (14)/ 1.0000000D0/
DATA ALP (14)/ 2.1961078D0/
DATA EISOL (14)/ -90.5399580D0/
DATA DD (14)/ 1.4078712D0/
DATA QQ (14)/ 1.1658281D0/
DATA AM (14)/ 0.3608967D0/
DATA AD (14)/ 0.3441817D0/
DATA AQ (14)/ 0.3999442D0/
DATA FOR ELEMENT 15
DATA USS (15)/ -56.1433600D0/
DATA UPP (15)/ -42.8510800D0/
DATA BETAS (15)/ -6.7916000D0/
DATA BETAP (15)/ -6.7916000D0/
DATA ZS (15)/ 2.1087200D0/
DATA ZP (15)/ 1.7858100D0/
DATA ZD (15)/ 1.0000000D0/
DATA ALP (15)/ 2.4152800D0/
DATA EISOL (15)/ -152.9599600D0/
DATA DD (15)/ 1.0129699D0/
DATA QQ (15)/ 0.9370090D0/
DATA AM (15)/ 0.4248438D0/
DATA AD (15)/ 0.4882420D0/
DATA AQ (15)/ 0.4979406D0/
DATA FOR ELEMENT 16
DATA USS (16)/ -75.2391520D0/
DATA UPP (16)/ -57.8320130D0/
DATA BETAS (16)/ -11.1422310D0/
DATA BETAP (16)/ -11.1422310D0/
DATA ZS (16)/ 2.6135910D0/
DATA ZP (16)/ 2.0343930D0/
DATA ZD (16)/ 1.0000000D0/
DATA ALP (16)/ 2.4916445D0/
DATA EISOL (16)/ -235.4413560D0/
DATA DD (16)/ 0.8231596D0/
DATA QQ (16)/ 0.8225156D0/
DATA AM (16)/ 0.4733554D0/
DATA AD (16)/ 0.5889395D0/
DATA AQ (16)/ 0.5632724D0/
DATA FOR ELEMENT 17
DATA USS (17)/ -100.2271660D0/
DATA UPP (17)/ -77.3786670D0/
DATA BETAS (17)/ -14.2623200D0/
DATA BETAP (17)/ -14.6232000D0/
DATA ZS (17)/ 3.7846450D0/
DATA ZP (17)/ 2.0362630D0/
DATA ZD (17)/ 1.0000000D0/
DATA ALP (17)/ 2.5422010D0/
DATA EISOL (17)/ -353.1176670D0/
DATA DD (17)/ 0.4986870D0/
DATA QQ (17)/ 0.8217603D0/
DATA AM (17)/ 0.5523705D0/
DATA AD (17)/ 0.8061220D0/
DATA AQ (17)/ 0.6053435D0/
DATA FOR ELEMENT 35
DATA USS (35)/ -99.9864405D0/
DATA UPP (35)/ -75.6713075D0/
DATA BETAS (35)/ -8.9171070D0/
DATA BETAP (35)/ -9.9437400D0/
DATA ZS (35)/ 3.8543019D0/
DATA ZP (35)/ 2.1992091D0/
DATA ZD (35)/ 1.0000000D0/
DATA ALP (35)/ 2.4457051D0/
DATA EISOL (35)/ -346.6812500D0/
DATA DD (35)/ 0.6051074D0/
DATA QQ (35)/ 0.9645873D0/
DATA AM (35)/ 0.5526068D0/
DATA AD (35)/ 0.7258330D0/
DATA AQ (35)/ 0.5574589D0/
DATA FOR ELEMENT 53
DATA USS (53)/ -100.0030538D0/
DATA UPP (53)/ -74.6114692D0/
DATA BETAS (53)/ -7.4144510D0/
DATA BETAP (53)/ -6.1967810D0/
DATA ZS (53)/ 2.2729610D0/
DATA ZP (53)/ 2.1694980D0/
DATA ZD (53)/ 1.0000000D0/
DATA ALP (53)/ 2.2073200D0/
DATA EISOL (53)/ -340.5983600D0/
DATA DD (53)/ 1.4253233D0/
DATA QQ (53)/ 1.1841707D0/
DATA AM (53)/ 0.5527541D0/
DATA AD (53)/ 0.4593451D0/
DATA AQ (53)/ 0.4585376D0/
END
SUBROUTINE MOLDAT
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
INCLUDE 'SIZES/NOLIST'
COMMON /GEOKST/ NATOMS,LABELS(NUMATM),
+ NA(NUMATM),NB(NUMATM),NC(NUMATM)
+ /MOLKST/ NUMAT,NAT(NUMATM),NFIRST(NUMATM),NMIDLE(NUMATM),
1 NLAST(NUMATM), NORBS, NELECS,NALPHA,NBETA,
+ NCLOSE,NOPEN
2 /KEYWRD/ KEYWRD
3 /NATORB/ NATORB(54)
4 /CORE / CORE(54)
5 /BETAS / BETAS(54),BETAP(54),BETAD(54)
6 /MOLORB/ USPD(MAXORB),PSPD(MAXORB)
7 /VSIPS / VS(54),VP(54),VD(54)
8 /ONELEC/ USS(54),UPP(54),UDD(54)
9 /ATHEAT/ ATHEAT
COMMON /GMETRY/ GEO(3,NUMATM)
PARAMETER (MDUMY=MAXORB*MAXORB+(NUMATM*(NUMATM+1))/2-MPACK)
COMMON /SCRACH/ RXYZ(MPACK), XDUMY(MDUMY)
COMMON BLOCKS FOR MINDO/3
COMMON /ONELE3 / USS3(18),UPP3(18)
1 /ATOMI3 / EISOL3(18),EHEAT3(18)
4 /EXPON3 / ZS3(18),ZP3(18)
END OF MINDO/3 COMMON BLOCKS
COMMON /EXPONT/ ZS(54),ZP(54),ZD(54)
COMMON /ATOMIC/ EISOL(54),EHEAT(54)
DIMENSION COORD(3,NUMATM)
CHARACTER*80 KEYWRD
LOGICAL DEBUG, UHF,EXCI, TRIP, MINDO3, BIRAD,PARAM
DEBUG = (INDEX(KEYWRD,'MOLDAT').NE.0)
MINDO3= (INDEX(KEYWRD,'MINDO3').NE.0)
UHF=(INDEX(KEYWRD,'UHF') .NE. 0)
KHARGE=0
I=INDEX(KEYWRD,'CHARGE')
IF(I.NE.0) KHARGE=READA(KEYWRD,I)
NELECS=-KHARGE
NDORBS=0
ATHEAT=0.D0
EAT=0.D0
NUMAT=0
IF( MINDO3 ) THEN
DO 10 I=1,18
USS(I)=USS3(I)
UPP(I)=UPP3(I)
EISOL(I)=EISOL3(I)
EHEAT(I)=EHEAT3(I)
ZS(I)=ZS3(I)
10 ZP(I)=ZP3(I)
ENDIF
IA=1
IB=0
DO 190 II=1,NATOMS
IF(LABELS(II).EQ.99) GOTO 190
NUMAT=NUMAT+1
NAT(NUMAT)=LABELS(II)
NFIRST(NUMAT)=IA
NI=NAT(NUMAT)
ATHEAT=ATHEAT+EHEAT(NI)
EAT =EAT +EISOL(NI)
NELECS=NELECS+NINT(CORE(NI))
IB=IA+NATORB(NI)-1
NMIDLE(NUMAT)=IB
IF(NATORB(NI).EQ.9)NDORBS=NDORBS+5
IF(NATORB(NI).EQ.9)NMIDLE(NUMAT)=IA+3
NLAST(NUMAT)=IB
USPD(IA)=USS(NI)
IF(IA.EQ.IB) GOTO 183
K=IA+1
K1=IA+3
DO 181 J=K,K1
USPD(J)=UPP(NI)
181 CONTINUE
182 IF(K1.EQ.IB)GOTO 183
K=K1+1
DO 184 J=K,IB
184 USPD(J)=UDD(NI)
183 CONTINUE
190 IA=IB+1
ATHEAT=ATHEAT-EAT*23.061D0
NORBS=NLAST(NUMAT)
TRIP=(INDEX(KEYWRD,'TRIPLET').NE.0)
EXCI=(INDEX(KEYWRD,'EXCITED').NE.0)
BIRAD=(INDEX(KEYWRD,'BIRAD').NE.0)
PARAM=(INDEX(KEYWRD,'PARAM').NE.0)
IF(INDEX(KEYWRD,'C.I.') .NE. 0) THEN
IF(TRIP) THEN
WRITE(6,'(//10X,''C.I. NOT ALLOWED WITH TRIPLET '')')
STOP
ENDIF
IF(UHF) THEN
WRITE(6,'(//10X,''C.I. NOT ALLOWED WITH UHF '')')
STOP
ENDIF
ENDIF
NOW TO WORK OUT HOW MANY ELECTRONS ARE IN EACH TYPE OF SHELL
NALPHA=0
NBETA=0
NCLOSE=0
NOPEN=0
IF( UHF ) THEN
NBETA=NELECS/2
IF( TRIP ) THEN
IF(NBETA*2 .NE. NELECS) THEN
WRITE(6,'(//10X,''TRIPLET SPECIFIED WITH ODD NUMBER'',
+ '' OF ELECTRONS, CORRECT FAULT '')')
STOP
ELSE
WRITE(6,'(//'' TRIPLET STATE CALCULATION'')')
NBETA=NBETA-1
ENDIF
ENDIF
NALPHA=NELECS-NBETA
WRITE(6,'(//10X,''UHF CALCULATION, NO. OF ALPHA ELECTRONS ='',I3,
+/27X,''NO. OF BETA ELECTRONS ='',I3)')NALPHA,NBETA
ELSE
NOW TO DETERMINE OPEN AND CLOSED SHELLS
NCLOSE=NELECS/2
NOPEN = NELECS-NCLOSE*2
IF( TRIP .OR. EXCI .OR. BIRAD ) THEN
IF(NCLOSE*2 .NE. NELECS) THEN
WRITE(6,'(//10X,''SYSTEM SPECIFIED WITH ODD NUMBER'',
+ '' OF ELECTRONS, CORRECT FAULT '')')
STOP
ELSE
WRITE(6,'(//'' SYSTEM IS A BIRADICAL'')')
IF(TRIP)WRITE(6,'(//'' TRIPLET STATE CALCULATION'')')
IF(EXCI)WRITE(6,'(//'' EXCITED STATE CALCULATION'')')
IF(.NOT. (TRIP .OR. EXCI ).AND. .NOT. PARAM)
+WRITE(6,'(//'' GROUND STATE CALCULATION'')')
NCLOSE=NCLOSE-1
NOPEN=2
ENDIF
ENDIF
IF( .NOT. PARAM)WRITE(6,'(//10X,''RHF CALCULATION, NO. OF '',
+''DOUBLY OCCUPIED LEVELS ='',I3)')NCLOSE
IF(NOPEN.NE.0)
+WRITE(6,'(/27X,''NO. OF SINGLY OCCUPIED LEVELS ='',I3)')NOPEN
NOPEN=NOPEN+NCLOSE
ENDIF
YY=FLOAT(KHARGE)/FLOAT(NORBS)
L=0
DO 191 I=1,NUMAT
NI=NAT(I)
XX=1.D0
IF(NI.GT.2) XX=0.25D0
W=CORE(NI)*XX-YY
IA=NFIRST(I)
IB=NLAST(I)
DO 360 J=IA,IB
L=L+1
360 PSPD(L)=W
191 CONTINUE
WRITE OUT THE INTERATOMIC DISTANCES
CALL GMETRY(GEO,COORD)
RMIN=100.D0
L=0
DO 17 I=1,NUMAT
DO 17 J=1,I
L=L+1
RXYZ(L)=SQRT((COORD(1,I)-COORD(1,J))**2+
+ (COORD(2,I)-COORD(2,J))**2+
1 (COORD(3,I)-COORD(3,J))**2)
IF(RMIN.GT.RXYZ(L) .AND. I .NE. J) THEN
IMINR=I
JMINR=J
RMIN=RXYZ(L)
ENDIF
17 CONTINUE
IF (INDEX(KEYWRD,'PARAM')+INDEX(KEYWRD,'NOINTER') .EQ. 0) THEN
WRITE(6,'(//10X,'' INTERATOMIC DISTANCES'')')
CALL VECPRT(RXYZ,NUMAT)
ENDIF
IF(RMIN.LT.0.8D0.AND.INDEX(KEYWRD,'GEO-OK') .EQ.0) THEN
WRITE(6,332)IMINR,JMINR,RMIN
332 FORMAT(//,' ATOMS',I3,' AND',I3,' ARE SEPARATED BY',F8.4,
+' ANGSTROMS.',/' TO CONTINUE CALCULATION SPECIFY "GEO-OK"')
STOP
ENDIF
IF(.NOT. DEBUG) RETURN
WRITE(6,1)NUMAT,NORBS,NDORBS,NATOMS
1 FORMAT(' NUMBER OF REAL ATOMS:',I4,/
+ ,' NUMBER OF ORBITALS: ',I4,/
1 ,' NUMBER OF D ORBITALS:',I4,/
2 ,' TOTAL NO. OF ATOMS: ',I4)
WRITE(6,3)(USPD(I),I=1,NORBS)
3 FORMAT(' ONE-ELECTRON DIAGONAL TERMS',/,10(/,10F8.3))
WRITE(6,5)(PSPD(I),I=1,NORBS)
5 FORMAT(' INITIAL P FOR ALL ATOMIC ORBITALS',/,10(/,10F8.3))
RETURN
END
      SUBROUTINE ROTATE (NI,NJ,XI,XJ,W,KR,E1B,E2A,ENUC,HSS)
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      DIMENSION XI(3),XJ(3),W(100),E1B(10),E2A(10)
      COMMON /NATORB/ NATORB(54)
      COMMON /BETAS / BETAS(54),DUMY(108)
      COMMON /TWOEL3/ F03(18)
      COMMON /ALPHA3/ ALP3(153)
      COMMON /ALPHA / ALP(54)
      COMMON /CORE / TORE(54)
      COMMON /IDEAS / FN1(54,10),FN2(54,10),FN3(54,10)
****************************************************************************
*
*   ROTATE CALCULATES THE TWO-PARTICLE INTERACTIONS.
*   (THE NAME "DIELRE" IN THE ORIGINAL HEADER IS INHERITED TEXT.)
*
*   ON INPUT  NI     = ATOMIC NUMBER OF FIRST ATOM.
*             NJ     = ATOMIC NUMBER OF SECOND ATOM.
*             XI     = COORDINATE OF FIRST ATOM.
*             XJ     = COORDINATE OF SECOND ATOM.
*             KR     = RUNNING INDEX INTO THE INTEGRAL ARRAYS.
*
*   ON OUTPUT W      = ARRAY OF TWO-ELECTRON REPULSION INTEGRALS.
*             E1B,E2A= ARRAY OF ELECTRON-NUCLEAR ATTRACTION INTEGRALS,
*                      E1B = ELECTRON ON ATOM NI ATTRACTING NUCLEUS OF NJ.
*             ENUC   = NUCLEAR-NUCLEAR REPULSION TERM.
*             KR     = INCREMENTED BY THE NUMBER OF INTEGRALS STORED.
*
*   NOTE (REVIEW): HSS IS ONLY REFERENCED FROM THE COMMENTED-OUT BLOCK
*   BELOW; CONFIRM IT IS STILL NEEDED IN THE ARGUMENT LIST.
****************************************************************************
      COMMON /ROTDUM/ CSS1,CSP1,CPPS1,CPPP1,CSS2,CSP2,CPPS2,CPPP2
      COMMON /ROTDU2/ X1,X2,X3,Y1,Y2,Y3,Z1,Z2,Z3
      COMMON /KEYWRD/ KEYWRD
      CHARACTER*80 KEYWRD
      DIMENSION X(3),Y(3),Z(3),RI(22),CORE(4,2), COVRAD(54)
      LOGICAL SI,SK, FIRST
      EQUIVALENCE (CORE(1,1),CSS1),(X(1),X1),(Y(1),Y1),(Z(1),Z1)
      DATA ITYPE /1/
      DATA COVRAD/54*2.4D0/
      DATA COVRAD(1) /0.9D0/
      DATA FIRST /.TRUE./
*** THIS ROUTINE COMPUTES THE REPULSION AND NUCLEAR ATTRACTION
*   INTEGRALS OVER MOLECULAR-FRAME COORDINATES.  THE INTEGRALS OVER
*   LOCAL FRAME COORDINATES ARE EVALUATED BY SUBROUTINE REPP AND STORED
*   AS FOLLOWS (WHERE P-SIGMA = O,  AND P-PI = P AND P* ) IN RI
*   (SS/SS)=1,   (SO/SS)=2,   (OO/SS)=3,   (PP/SS)=4,   (SS/OS)=5,
*   (SO/SO)=6,   (SP/SP)=7,   (OO/SO)=8,   (PP/SO)=9,   (PO/SP)=10,
*   (SS/OO)=11,  (SS/PP)=12,  (SO/OO)=13,  (SO/PP)=14,  (SP/OP)=15,
*   (OO/OO)=16,  (PP/OO)=17,  (OO/PP)=18,  (PP/PP)=19,  (PO/PO)=20,
*   (PP/P*P*)=21,  (P*P/P*P)=22.
      IF(FIRST) THEN
         FIRST=.FALSE.
      ENDIF
      CONST1=ALP(2)
      CONST2=ALP(10)
      RIJ=0.D0
      SIJ=1.D0
      NT=NI+NJ
#      IF(NT.EQ.8.OR.NT.EQ.9) THEN
#      IF(NI.EQ.7.OR.NI.EQ.8) SIJ=2.D0*HSS/(BETAS(NI)+BETAS(NJ))
#      IF(NJ.EQ.7.OR.NJ.EQ.8) SIJ=2.D0*HSS/(BETAS(NI)+BETAS(NJ))
#      ENDIF
#      WRITE(6,'(4F12.6)')SIJ,HSS,BETAS(NI),BETAS(NJ)
*   SQUARED INTERATOMIC DISTANCE AND DISPLACEMENT VECTOR X.
      DO 15 I=1,3
      X(I)=XI(I)-XJ(I)
   15 RIJ=RIJ+X(I)**2
*   ITYPE IS SET ON THE FIRST PASS: 2 = MINDO/3 BRANCH, 3 = MNDO BRANCH.
   14 GOTO (100,200,300) ITYPE
  100 CONTINUE
      IF(INDEX(KEYWRD,'MINDO3') .NE. 0) THEN
         ITYPE=2
      ELSE
         ITYPE=3
      ENDIF
      GOTO 14
*   MINDO/3 BRANCH: SINGLE TWO-CENTER INTEGRAL AND CORE-CORE TERM.
  200 CONTINUE
      SUM=14.399D0/SQRT(RIJ+(7.1995D0/F03(NI)+7.1995D0/F03(NJ))**2)
      W(1)=SUM
      KR=KR+1
      L=0
      DO 210 I=1,4
      DO 220 J=1,I
      L=L+1
      E1B(L)=0.D0
  220 E2A(L)=0.D0
      E1B(L)=-SUM*TORE(NJ)
  210 E2A(L)=-SUM*TORE(NI)
      II=MAX(NI,NJ)
      NBOND=(II*(II-1))/2+NI+NJ-II
      RIJ=SQRT(RIJ)
*   SPECIAL-CASE PAIR INDICES 22 AND 29 USE A DIFFERENT SCALING FORM.
      IF(NBOND.EQ.22 .OR. NBOND .EQ. 29) GO TO 2
      GO TO 1
    2 SCALE=ALP3(NBOND)*EXP(-RIJ)
      GO TO 10
    1 SCALE=EXP(-ALP3(NBOND)*RIJ)
   10 CONTINUE
      ENUC=TORE(NI)*TORE(NJ)*(SUM+(14.399D0/RIJ-SUM)*SCALE)
      RETURN
*   MNDO-TYPE BRANCH: LOCAL-FRAME INTEGRALS FROM REPP, THEN ROTATION.
  300 CONTINUE
      RIJ=SQRT(RIJ)
      CALL REPP(NI,NJ,RIJ,RI,CORE)
      GAM=RI(1)
*** THE REPULSION INTEGRALS OVER MOLECULAR FRAME (W) ARE STORED IN THE
*   ORDER IN WHICH THEY WILL LATER BE USED.  IE.  (I,J/K,L) WHERE
*   J.LE.I  AND  L.LE.K     AND L VARIES MOST RAPIDLY AND I LEAST
*   RAPIDLY.  (ANTI-NORMAL COMPUTER STORAGE)
*   NORMALIZE THE INTERATOMIC VECTOR AND BUILD LOCAL AXES X, Y, Z.
      A=1.D0/RIJ
      DO 11 I=1,3
   11 X(I)=X(I)*A
      Z(3)=0.D0
*   NEAR-DEGENERATE CASE: VECTOR ALMOST ALONG THE Z AXIS.
      IF(ABS(X(3)).GT.0.999999D0) GOTO 12
      Z(3)=SQRT(1.D0-X(3)**2)
      A=1.D0/Z(3)
      Y(1)=-A*X(2)*SIGN(1.D0,X(1))
      Y(2)=ABS(A*X(1))
      Y(3)=0.D0
      Z(1)=-A*X(1)*X(3)
      Z(2)=-A*X(2)*X(3)
      GOTO 13
   12 Y(1)=0.D0
      Y(2)=1.D0
      Y(3)=0.D0
      Z(1)=1.D0
      Z(2)=0.D0
   13 CONTINUE
      IB=NATORB(NI)
      JB=NATORB(NJ)
      KI=0
      DO 130 I=1,IB
      SI=I.EQ.1
      II=I-1
      DO 130 J=1,I
      JJ=J-1
      IJ=0
      IF (JJ.EQ.0) IJ=-1
      IF (SI) IJ=+1
      DO 130 K=1,JB
      KK=K-1
      SK=KK.GT.0
      DO 130 L=1,K
      KI=KI+1
      IF (SK) GO TO 50
*** INTEGRAL (I,J/K,L) IS OF THE TYPE (I,J/S,S)
      IF (IJ) 30,40,20
*     (SS/SS)
   20 W(KI)=RI(1)
      GO TO 131
*     (PS/SS)
   30 W(KI)=RI(2)*X(II)
      GO TO 131
*     (PP/SS)
   40 W(KI)=RI(3)*X(II)*X(JJ)+RI(4)*(Y(II)*Y(JJ)+Z(II)*Z(JJ))
      GO TO 131
   50 LL=L-1
      IF (LL.GT.0) GO TO 90
*** INTEGRAL (I,J/K,L) IS OF THE TYPE (I,J/P,S)
      IF (IJ) 70,80,60
*     (SS/PS)
   60 W(KI)=RI(5)*X(KK)
      GO TO 131
*     (PS/PS)
   70 W(KI)=RI(6)*X(II)*X(KK)+RI(7)*(Y(II)*Y(KK)+Z(II)*Z(KK))
      GO TO 131
*     (PP/PS)
   80 W(KI)=X(KK)*(RI(8)*X(II)*X(JJ)+RI(9)*(Y(II)*Y(JJ)+Z(II)*Z(JJ)))
     1 +RI(10)*(X(II)*(Y(JJ)*Y(KK)+Z(JJ)*Z(KK))+X(JJ)*(Y(II)*Y(KK)+Z(I
     2 I)*Z(KK)))
      GO TO 131
*** INTEGRAL (I,J/K,L) IS OF THE TYPE (I,J/P,P)
   90 IF (IJ) 110,120,101
*     (SS/PP)
  101 W(KI)=RI(11)*X(KK)*X(LL)+RI(12)*(Y(KK)*Y(LL)+Z(KK)*Z(LL))
      GO TO 131
*     (PS/PP)
  110 W(KI)=X(II)*(RI(13)*X(KK)*X(LL)+RI(14)*(Y(KK)*Y(LL)+Z(KK)*Z(LL)
     1 ))+RI(15)*(Y(II)*(Y(KK)*X(LL)+Y(LL)*X(KK))+Z(II)*(Z(KK)*X(LL)+Z
     2 (LL)*X(KK)))
      GO TO 131
*     (PP/PP)
  120 W(KI)=(RI(16)*X(II)*X(JJ)+RI(17)*(Y(II)*Y(JJ)+Z(II)*Z(JJ)))*X(K
     1 K)*X(LL)+RI(18)*X(II)*X(JJ)*(Y(KK)*Y(LL)+Z(KK)*Z(LL))+RI(19)*(Y
     2 (II)*Y(JJ)*Y(KK)*Y(LL)+Z(II)*Z(JJ)*Z(KK)*Z(LL))+RI(20)*(X(II)*(
     3 X(KK)*(Y(JJ)*Y(LL)+Z(JJ)*Z(LL))+X(LL)*(Y(JJ)*Y(KK)+Z(JJ)*Z(KK))
     4 )+X(JJ)*(X(KK)*(Y(II)*Y(LL)+Z(II)*Z(LL))+X(LL)*(Y(II)*Y(KK)+Z(I
     5 I)*Z(KK))))+RI(21)*(Y(II)*Y(JJ)*Z(KK)*Z(LL)+Z(II)*Z(JJ)*Y(KK)*Y
     6 (LL))+RI(22)*(Y(II)*Z(JJ)+Z(II)*Y(JJ))*(Y(KK)*Z(LL)+Z(KK)*Y(LL)
     7 )
  131 CONTINUE
  130 CONTINUE
  150 CONTINUE
*   ROTATED ELECTRON-NUCLEAR ATTRACTION INTEGRALS; THE 10-ELEMENT FORM
*   IS ONLY NEEDED WHEN THE ATOM CARRIES P ORBITALS (ATOMIC NUMBER > 3).
      E1B(1)=-CSS1
      IF(NI.GT.3) THEN
         E1B(2) = -CSP1 *X1
         E1B(3) = -CPPS1*X1**2-CPPP1*(Y1**2+Z1**2)
         E1B(4) = -CSP1 *X2
         E1B(5) = -CPPS1*X1*X2-CPPP1*(Y1*Y2+Z1*Z2)
         E1B(6) = -CPPS1*X2*X2-CPPP1*(Y2*Y2+Z2*Z2)
         E1B(7) = -CSP1 *X3
         E1B(8) = -CPPS1*X1*X3-CPPP1*(Y1*Y3+Z1*Z3)
         E1B(9) = -CPPS1*X2*X3-CPPP1*(Y2*Y3+Z2*Z3)
         E1B(10)= -CPPS1*X3*X3-CPPP1*(Y3*Y3+Z3*Z3)
      END IF
      E2A(1)=-CSS2
      IF(NJ.GT.3) THEN
         E2A(2) = -CSP2 *X1
         E2A(3) = -CPPS2*X1**2-CPPP2*(Y1**2+Z1**2)
         E2A(4) = -CSP2 *X2
         E2A(5) = -CPPS2*X1*X2-CPPP2*(Y1*Y2+Z1*Z2)
         E2A(6) = -CPPS2*X2*X2-CPPP2*(Y2*Y2+Z2*Z2)
         E2A(7) = -CSP2 *X3
         E2A(8) = -CPPS2*X1*X3-CPPP2*(Y1*Y3+Z1*Z3)
         E2A(9) = -CPPS2*X2*X3-CPPP2*(Y2*Y3+Z2*Z3)
         E2A(10)= -CPPS2*X3*X3-CPPP2*(Y3*Y3+Z3*Z3)
      END IF
*   CORE-CORE REPULSION: MNDO SCALING, WITH THE N-H/O-H SPECIAL CASE.
      SCALE = EXP(-ALP(NI)*RIJ)+EXP(-ALP(NJ)*RIJ)
      NT=NI+NJ
      IF(NT.EQ.8.OR.NT.EQ.9) THEN
         IF(NI.EQ.7.OR.NI.EQ.8) SCALE=SCALE+(RIJ-1.D0)*EXP(-ALP(NI)*RIJ)
         IF(NJ.EQ.7.OR.NJ.EQ.8) SCALE=SCALE+(RIJ-1.D0)*EXP(-ALP(NJ)*RIJ)
      ELSE
      ENDIF
      ENUC = TORE(NI)*TORE(NJ)*GAM
      SCALE=SCALE*ENUC
#      ENUC = ENUC +FN1(NI)*EXP(-FN2(NI)*(RIJ-FN3(NI))**2)
#     +            +FN1(NJ)*EXP(-FN2(NJ)*(RIJ-FN3(NJ))**2)
#      WRITE(6,'('' GAUSSIAN'',F12.6)')
*   AM1-STYLE GAUSSIAN CORE-CORE CORRECTIONS (UP TO 10 PER ELEMENT).
      DO 156 IG=1,10
      IF(ABS(FN1(NI,IG)).GT.0.D0)
     +SCALE=SCALE +TORE(NI)*TORE(NJ)/RIJ*
     +FN1(NI,IG)*EXP(-FN2(NI,IG)*(RIJ-FN3(NI,IG))**2)
      IF(ABS(FN1(NJ,IG)).GT.0.D0)
     +SCALE=SCALE +TORE(NI)*TORE(NJ)/RIJ*
     +FN1(NJ,IG)*EXP(-FN2(NJ,IG)*(RIJ-FN3(NJ,IG))**2)
  156 CONTINUE
      ENUC=ENUC+SCALE
*** NOW ROTATE THE NUCLEAR ATTRACTION INTEGRALS.
*** THE STORAGE OF THE NUCLEAR ATTRACTION INTEGRALS  CORE(KL/IJ) IS
*   (SS/)=1,   (SO/)=2,   (OO/)=3,   (PP/)=4
*   DEBUG PRINTING
      KR=KR+KI
      RETURN
      END
|
{"hexsha": "4415121cee6a68b627d778d0f703a2db0cb571b4", "size": 53938, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "1983_MOPAC1/am1.for", "max_stars_repo_name": "openmopac/MOPAC-archive", "max_stars_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-16T20:53:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T20:54:11.000Z", "max_issues_repo_path": "1983_MOPAC1/am1.for", "max_issues_repo_name": "openmopac/MOPAC-archive", "max_issues_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1983_MOPAC1/am1.for", "max_forks_repo_name": "openmopac/MOPAC-archive", "max_forks_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7479734709, "max_line_length": 78, "alphanum_fraction": 0.4405613853, "num_tokens": 20771}
|
[STATEMENT]
lemma (in semiring_0) pnormal_degree: "last p \<noteq> 0 \<Longrightarrow> degree p = length p - 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. last p \<noteq> (0::'a) \<Longrightarrow> degree p = length p - 1
[PROOF STEP]
using pnormalize_eq[of p]
[PROOF STATE]
proof (prove)
using this:
last p \<noteq> (0::'a) \<Longrightarrow> pnormalize p = p
goal (1 subgoal):
1. last p \<noteq> (0::'a) \<Longrightarrow> degree p = length p - 1
[PROOF STEP]
unfolding degree_def
[PROOF STATE]
proof (prove)
using this:
last p \<noteq> (0::'a) \<Longrightarrow> pnormalize p = p
goal (1 subgoal):
1. last p \<noteq> (0::'a) \<Longrightarrow> length (pnormalize p) - 1 = length p - 1
[PROOF STEP]
by simp
|
{"llama_tokens": 288, "file": null, "length": 3}
|
import symplectic
import decompose
import chp_py
import slow_sim
import utils
import imp
import numpy as np
# Force-reload the project modules so that edits made during an interactive
# session are picked up without restarting the interpreter.
imp.reload(symplectic)
imp.reload(decompose)
imp.reload(chp_py)
imp.reload(slow_sim)
imp.reload(utils)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This file contains test cases for all of the relevant functions in #
# symplectic.py, decompose.py, chp_py.py, and utils.py #
# DO NOT MODIFY (unless you have a deep understanding of the code) #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_single_symplectic(i, n):
    """Check that the i-th generated matrix of size n is symplectic,
    i.e. that it preserves the form Lambda: (M^T L M) mod 2 == L."""
    candidate = symplectic.symplectic(i, n)
    form = symplectic.get_lambda(n)
    transformed = (candidate.T @ form @ candidate) % 2
    return (transformed == form).all()
def test_symplectic():
    """Test cases for the functions in symplectic.py.

    Draws 500 random matrix indices per size and verifies each generated
    matrix is symplectic.  Returns True if passed all test cases,
    otherwise returns False.
    """
    print("Testing: symplectic.py")
    for n in range(1, 6):
        for _ in range(500):
            i = np.random.randint(symplectic.numberofsymplectic(n))
            if not test_single_symplectic(i, n):
                print("Failed: found a bad symplectic with (n,i) = "
                      + str((n, i)))
                return False
    print("Passed: symplectic.py passed all tests\n")
    return True
def test_decompose():
    """Test cases for the functions in decompose.py.

    For each size n, draws 500 random symplectic matrices, checks that
    transform_symplectic preserves the symplectic form, and checks that
    applying the decomposed gate sequence to the identity rebuilds the
    matrix.  Returns True if passed all test cases, otherwise False.
    """
    print("Testing: decompose.py")
    for n in range(1, 6):
        # Symplectic form L = [[0, I], [I, 0]] for this size
        L = np.vstack((np.hstack((np.zeros((n, n)), np.identity(n))),
                       np.hstack((np.identity(n), np.zeros((n, n))))))
        for _ in range(500):
            # Draw and transform a random symplectic
            i = np.random.randint(symplectic.numberofsymplectic(n))
            S = decompose.transform_symplectic(symplectic.symplectic(i, n))
            # The transformed matrix must still preserve L
            if not (((S.T @ L @ S) % 2) == L).all():
                print("Failed: could not transform symplectic " +
                      "matrix with (n, i)= "
                      + str((n, i)))
                return False
            # Applying the decomposed gates to the identity must give S
            gates = decompose.decompose_state(chp_py.CHP_Simulation(n, S))
            rebuilt = chp_py.CHP_Simulation(n)
            rebuilt.apply_gates(gates)
            if not (rebuilt.state == S).all():
                print("Failed: found a bad decomposition for " +
                      "symplectic matrix with (n, i)= "
                      + str((n, i)))
                return False
    print("Passed: decompose.py passed all tests\n")
    return True
def test_chp_py():
    """Test cases for the functions in chp_py.py.

    For each qubit count n, runs two simulations side by side — one applies
    decomposed gates, the other multiplies by the matching symplectic
    matrix — and requires identical final states.  Returns True if passed
    all test cases, otherwise returns False.
    """
    print("Testing: chp_py.py")
    # n = number of qubits, m = size of qubit gate
    for n in range(1, 50):
        sim1 = chp_py.CHP_Simulation(n)  # driven by decomposed gates
        sim2 = chp_py.CHP_Simulation(n)  # driven by matrix multiplication
        for m in range(1, min(n, 5)):
            for _ in range(100):
                # Random symplectic of size 2m x 2m
                i = np.random.randint(symplectic.numberofsymplectic(m))
                S = decompose.transform_symplectic(symplectic.symplectic(i, m))
                # Pick m random qubits
                qubits = np.arange(m)
                np.random.shuffle(qubits)
                qubits = qubits[:m]
                # Gate-list and matrix representations of the same S
                gates = decompose.change_gates(
                    decompose.decompose_state(chp_py.CHP_Simulation(m, S)),
                    qubits)
                M = decompose.symplectic_to_matrix(S, n, qubits)
                sim1.apply_gates(gates)
                sim2.state = (sim2.state @ M) % 2
        if not (sim1.state == sim2.state).all():
            print("Failed: found two simulations with different " +
                  "states for n = " + str(n))
            return False
    print("Passed: chp_py.py passed all tests\n")
    return True
def test_collision_probability():
    """Test cases for the collision probability algorithm.

    Compares the fast simulation's log collision probability against a
    slow reference simulation that uses exponential storage and runtime.
    Returns True if passed all test cases, otherwise returns False.
    """
    print("Testing: collision probability algorithm")
    # n = number of qubits, m = size of qubit gate
    for n in range(1, 10):
        sim1 = chp_py.CHP_Simulation(n)      # fast implementation
        sim2 = slow_sim.Slow_Simulation(n)   # exponential-cost reference
        for m in range(1, min(n, 5)):
            for _ in range(100):
                # Random symplectic of size 2m x 2m on m random qubits
                i = np.random.randint(symplectic.numberofsymplectic(m))
                S = decompose.transform_symplectic(symplectic.symplectic(i, m))
                qubits = np.arange(m)
                np.random.shuffle(qubits)
                qubits = qubits[:m]
                gates = decompose.change_gates(
                    decompose.decompose_state(chp_py.CHP_Simulation(m, S)),
                    qubits)
                decompose.apply_gates(gates, sim1)
                decompose.apply_gates(gates, sim2)
        # Compare the -log2 collision probabilities as integers
        k_1 = int(-sim1.log_collision_probability)
        k_2 = np.round(-np.log2(sim2.collision_probability))
        if k_1 != k_2:
            print("Failed: collision probability algorithm returned " +
                  "incorrect result with n = "
                  + str(n) + " and m = " + str(m))
            return False
    print("Passed: collision probability algorithm passed all tests\n")
    return True
def gates_to_coords(gates):
    """Helper for test_utils(): given a list of gates, extract the sorted
    list of qubit-coordinate pairs the gates were applied to."""
    # Collect unique coordinates, preserving first-seen order
    seen = []
    for gate in gates:
        if gate[1] not in seen:
            seen.append(gate[1])
    # Pair consecutive coordinates, ordering within each pair
    pairs = [sorted(pair) for pair in zip(seen[::2], seen[1::2])]
    # Return the pairs as sorted tuples
    return sorted((a, b) for (a, b) in pairs)
def test_utils():
    """Test cases for the functions in utils.py.

    Covers index<->coordinate conversion, grid-neighbor lookup, lattice
    gate rounds in 1D/2D/3D, and the collision-probability statistics.
    Returns True if passed all test cases, otherwise returns False.
    """
    print("Testing: utils.py")
    # Test the indexing functions with a random size 4-D array
    shape = tuple(np.random.randint(1, 11, size=4))
    grid = np.arange(np.prod(shape)).reshape(shape)
    for i in range(np.prod(shape)):
        coord = utils.index_to_coord(i, shape)
        result = (i == grid[coord])
        if not result:
            print("Failed: incorrect coordinate for index i = "
                  + str(i) + " with shape = " + str(shape))
            return False
        index = utils.coord_to_index(coord, shape)
        result = (i == index)
        if not result:
            print("Failed: incorrect index for coord = "
                  + str(coord) + " with shape = " + str(shape))
            return False
    # Test the neighbor function on a few points on an easy
    # 3D grid of shape 3 x 3 x 3
    shape = (3, 3, 3)
    test_indices = [0, 4, 13, 16, 24, 26]
    diag_answers = {0: [1, 3, 4, 9, 10, 12, 13],
                    4: [0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12,
                        13, 14, 15, 16, 17],
                    13: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
                         25, 26],
                    16: [3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 17, 21, 22, 23,
                         24, 25, 26],
                    24: [12, 13, 15, 16, 21, 22, 25],
                    26: [13, 14, 16, 17, 22, 23, 25]}
    no_diag_answers = {0: [1, 3, 9],
                       4: [1, 3, 5, 7, 13],
                       13: [4, 10, 12, 14, 16, 22],
                       16: [7, 13, 15, 17, 25],
                       24: [15, 21, 25],
                       26: [17, 23, 25]}
    for i in test_indices:
        neighbors = utils.get_neighbors_grid(i, shape, True)
        result = (sorted(neighbors) == diag_answers[i])
        if not result:
            print("Failed: incorrect neighbors (including diagonals) for i = "
                  + str(i) + " with shape = " + str(shape))
            return False
        neighbors = utils.get_neighbors_grid(i, shape, False)
        result = (sorted(neighbors) == no_diag_answers[i])
        if not result:
            print("Failed: incorrect neighbors (not including diagonals) for "
                  + "i = " + str(i) + " with shape = " + str(shape))
            return False
    # Test the get_lattice_gates function on a 1D, 2D, and 3D grid
    # 1D grid of 10 qubits
    shape = (10,)
    answers = {0: [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)],
               1: [(1, 2), (3, 4), (5, 6), (7, 8)]}
    for i in range(2):
        gates = utils.get_lattice_gates(shape, i)
        coords = gates_to_coords(gates)
        result = (coords == answers[i])
        if not result:
            print("Failed: found the wrong set of gates in round i = "
                  + str(i) + " with shape = " + str(shape))
            return False
    # 2D grid of shape 5 x 5
    shape = (5, 5)
    answers = {0: [(0, 5), (1, 6), (2, 7), (3, 8),
                   (4, 9), (10, 15), (11, 16),
                   (12, 17), (13, 18), (14, 19)],
               1: [(0, 1), (2, 3), (5, 6), (7, 8),
                   (10, 11), (12, 13), (15, 16),
                   (17, 18), (20, 21), (22, 23)],
               2: [(5, 10), (6, 11), (7, 12), (8, 13),
                   (9, 14), (15, 20), (16, 21),
                   (17, 22), (18, 23), (19, 24)],
               3: [(1, 2), (3, 4), (6, 7), (8, 9),
                   (11, 12), (13, 14), (16, 17),
                   (18, 19), (21, 22), (23, 24)]}
    for i in range(4):
        gates = utils.get_lattice_gates(shape, i)
        coords = gates_to_coords(gates)
        result = (coords == answers[i])
        if not result:
            print("Failed: found the wrong set of gates in round i = "
                  + str(i) + " with shape = " + str(shape))
            return False
    # 3D grid of shape 2 x 3 x 4
    shape = (2, 3, 4)
    answers = {0: [(0, 12), (1, 13), (2, 14), (3, 15),
                   (4, 16), (5, 17), (6, 18), (7, 19),
                   (8, 20), (9, 21), (10, 22), (11, 23)],
               1: [(0, 4), (1, 5), (2, 6), (3, 7),
                   (12, 16), (13, 17), (14, 18), (15, 19)],
               2: [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9),
                   (10, 11), (12, 13), (14, 15), (16, 17),
                   (18, 19), (20, 21), (22, 23)],
               3: [],
               4: [(4, 8), (5, 9), (6, 10), (7, 11), (16, 20),
                   (17, 21), (18, 22), (19, 23)],
               5: [(1, 2), (5, 6), (9, 10), (13, 14), (17, 18),
                   (21, 22)]}
    # BUG FIX: a 3D lattice has 6 gate rounds (answers has keys 0-5), but
    # the original loop used range(5) and never verified round 5.
    for i in range(6):
        gates = utils.get_lattice_gates(shape, i)
        coords = gates_to_coords(gates)
        result = (coords == answers[i])
        if not result:
            print("Failed: found the wrong set of gates in round i = "
                  + str(i) + " with shape = " + str(shape))
            return False
    # Test collision_probability_mean_and_std function for
    # small values of k
    for i in range(100):
        # Initialize some variables
        m, x = (25, 100)
        s = 5
        k_matrix = np.random.randint(20, size=(m, x))
        mean_ans = np.zeros(x)
        std_ans = np.zeros(x)
        mean_err_ans = np.zeros(x)
        std_err_ans = np.zeros(x)
        for j in range(x):
            # k_vec is a set of m samples
            k_vec = k_matrix[:, j]
            # Divide the m samples into m/s sets of size s
            k_sets = k_vec.reshape(m // s, s).astype(np.float64)
            # Add a small perturbation to avoid a set with
            # zero std to avoid infs and nans
            k_sets[:, -1] += 1e-3
            # Exponentiate the k_sets
            p_sets = np.power(1 / 2, k_sets)
            # mu[i] and sigma[i] are the mean and std of the
            # i^th set in p_sets
            mu = np.mean(p_sets, axis=1)
            sigma = np.std(p_sets, axis=1)
            # a is the mean of the values in mu
            # b is the std of the values in mu
            # c is the mean of the values in sigma
            # d is the std of the values in sigma
            a = np.mean(mu)
            b = np.std(mu)
            c = np.mean(sigma)
            d = np.std(sigma)
            # Convert to the log scale and use propagation of
            # error formula for the errors of the logs
            mean_ans[j] = -np.log2(a)
            std_ans[j] = -np.log2(c)
            mean_err_ans[j] = b / (a * np.log(2))
            std_err_ans[j] = d / (c * np.log(2))
        mean, mean_err, std, std_err = (
            utils.collision_probability_mean_and_std(k_matrix, s))
        # BUG FIX: abs() must wrap the difference, not the comparison —
        # the original abs(mean_err - mean_err_ans < 1e-6) applied abs to a
        # boolean array, so negative errors always passed.
        result = ((abs(mean - mean_ans) < 1e-6).all() and
                  (abs(mean_err - mean_err_ans) < 1e-6).all() and
                  (abs(std - std_ans) < 1e-6).all() and
                  (abs(std_err - std_err_ans) < 1e-6).all())
        if not result:
            print("Failed: found incorrect values for mean and standard " +
                  "deviation of the collision probability")
            return False
    print("Passed: utils.py passed all tests\n")
    return True
def test_stored_two_qubit_gates():
    """Test cases for the stored two qubit gates.

    Each stored gate list must reproduce its stored matrix, and the stored
    matrices must all be distinct (one per two-qubit symplectic).
    Returns True if passed all test cases, otherwise returns False.
    """
    print("Testing: stored two qubit gates")
    utils.store_all_two_qubit_gates()
    two_qubit_gates = utils.load_data("two_qubit_gates")
    two_qubit_matrices = utils.load_data("two_qubit_matrices")
    unique_matrices = set()
    for i in two_qubit_gates:
        M = two_qubit_matrices[i]
        # Record a hashable fingerprint of the matrix for uniqueness check
        unique_matrices.add(str(M.flatten()))
        # Applying the stored gates must reproduce the stored matrix
        sim = chp_py.CHP_Simulation(2)
        sim.apply_gates(two_qubit_gates[i])
        if not (sim.state == M).all():
            print("Failed: two qubit gates do not agree with "
                  + "two qubit matrix for i = " + str(i))
            return False
    # Exactly one matrix per two-qubit symplectic
    if len(unique_matrices) != symplectic.numberofsymplectic(2):
        print("Failed: did not find the correct number of unique "
              + "two qubit matrices")
        return False
    print("Passed: stored two qubit gates passed all tests\n")
    return True
def run_all_tests():
    """Run every test suite in this file and print an overall verdict."""
    print("")
    # Evaluate all suites eagerly (no short-circuit), matching the original
    # tuple-based call so every suite's own report is printed.
    results = [test_symplectic(), test_decompose(), test_chp_py(),
               test_collision_probability(), test_utils(),
               test_stored_two_qubit_gates()]
    if all(results):
        print("Passed: all tests")
    else:
        print("Failed: certain tests, see output above")
run_all_tests()
|
{"hexsha": "29bd95956bc47252ea43a643730ddd7f24ad672f", "size": 15848, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "matthewkhoury96/random_quantum_circuits", "max_stars_repo_head_hexsha": "20d15da79ab4bcf0c0b82c314514a6c06cc19e8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-06-13T23:30:00.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-13T23:30:00.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "matthewkhoury96/random_quantum_circuits", "max_issues_repo_head_hexsha": "20d15da79ab4bcf0c0b82c314514a6c06cc19e8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "matthewkhoury96/random_quantum_circuits", "max_forks_repo_head_hexsha": "20d15da79ab4bcf0c0b82c314514a6c06cc19e8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4541387025, "max_line_length": 79, "alphanum_fraction": 0.5146390712, "include": true, "reason": "import numpy", "num_tokens": 4442}
|
import sys
import h5py
from hdgwas.tools import Timer,Mapper, merge_genotype
import os
from hdgwas.data import Reader
import argparse
import gc
import tables
import numpy as np
# Command-line interface for the genotype merge tool.
parser = argparse.ArgumentParser(description='Script to merge genotype')
parser.add_argument("-g", "--genotype", nargs='+', type=str, help="path/paths to genotype data folder")
parser.add_argument('-mapper', type=str, help='Mapper data folder')
parser.add_argument('-mapper_name', type=str, help='Mapper name')
parser.add_argument("-o", "--out", type=str, required=True, help="path to save result folder")
parser.add_argument("-save_name", type=str, required=True, help="merge study name")
parser.add_argument('-study_name', type=str, required=True, nargs='+', help=' Name for saved genotype data, without ext')
parser.add_argument('-cluster', type=str, default='n', choices=['y','n'], help=' Is it parallel cluster job, default no')
parser.add_argument('-node', nargs='+', help='number of nodes / this node number, example: 10 2 ')
parser.add_argument('-split', type=int, help='Split size for merge genotypes')
args = parser.parse_args()
# BUG FIX: `print args` is Python-2-only syntax (SyntaxError on Python 3);
# print(args) with a single argument behaves identically on both versions.
print(args)
if __name__ == '__main__':
    # The merge itself is not implemented yet; the commented-out block
    # below this guard is kept as the intended reference implementation.
    print ('Not implemented!')
# mapper=Mapper(args.mapper_name)
# mapper.load(args.mapper)
# mapper.chunk_size=args.split
#
#
# hdf5_iter=0
# h5_name=args.save_name
# pytable_filter=tables.Filters(complevel=9, complib='zlib')
# gen=[]
# for i,j in enumerate(args.genotype):
# gen.append(Reader('genotype'))
# gen[i].start(j,hdf5=True, study_name=args.study_name[i], ID=False)
#
# RSID=[]
# SUB_ID=[]
# for i in gen:
# SUB_ID.append(i.folder._data.get_id())
# mapper.cluster=args.cluster
# mapper.node=args.node
#
# while True:
# if args.cluster=='n':
# SNPs_index, keys=mapper.get_next()
# else:
# chunk=mapper.chunk_pop()
# if chunk is None:
# SNPs_index=None
# break
# print chunk
# SNPs_index, keys=mapper.get_chunk(chunk)
#
# if SNPs_index is None:
# break
# RSID.append(keys)
#
# data=merge_genotype(gen, SNPs_index) #TODO (high) add mapper
# print data.shape
# if args.cluster=='n':
# h5_gen_file = tables.open_file(
# os.path.join(args.out,str(hdf5_iter)+'_'+h5_name+'.h5'), 'w', title=args.save_name)
# else:#TODO (high) check!
# h5_gen_file = tables.open_file(
# os.path.join(args.out,str(chunk[0])+'_' +str(chunk[1])+'_'+h5_name+'.h5'), 'w', title=args.save_name)
# hdf5_iter+=1
#
# atom = tables.Int8Atom() # TODO (low) check data format
# genotype = h5_gen_file.create_carray(h5_gen_file.root, 'genotype', atom,
# (data.shape),
# title='Genotype',
# filters=pytable_filter)
# genotype[:] = data
# h5_gen_file.close()
# genotype=None
# data=None
# gc.collect()
# print hdf5_iter
#
# RSID=np.array(RSID)
# SUB_ID=np.array(SUB_ID)
# if args.cluster=='n':
# np.save(os.path.join(args.out,'RSID.npy'),RSID)
# np.save(os.path.join(args.out,'SUB_ID.npy'),SUB_ID)
#
# else:
# np.save(os.path.join(args.out,str(args.node[1])+'_RSID.npy'),RSID)
# np.save(os.path.join(args.out,str(args.node[1])+'_SUB_ID.npy'),SUB_ID)
|
{"hexsha": "0342ad8bdd1ae93333e5d59da052b88b0d3fce0a", "size": 3154, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/merge_genotype.py", "max_stars_repo_name": "roshchupkin/hase", "max_stars_repo_head_hexsha": "c7aa36459c53ccb5bd1f884bbc38df0cfebdf208", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2016-03-25T12:22:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T11:14:00.000Z", "max_issues_repo_path": "tools/merge_genotype.py", "max_issues_repo_name": "roshchupkin/hase", "max_issues_repo_head_hexsha": "c7aa36459c53ccb5bd1f884bbc38df0cfebdf208", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2016-08-02T22:06:07.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-10T08:42:22.000Z", "max_forks_repo_path": "tools/merge_genotype.py", "max_forks_repo_name": "roshchupkin/hase", "max_forks_repo_head_hexsha": "c7aa36459c53ccb5bd1f884bbc38df0cfebdf208", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-03T12:49:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T15:11:27.000Z", "avg_line_length": 31.54, "max_line_length": 121, "alphanum_fraction": 0.6740646798, "include": true, "reason": "import numpy", "num_tokens": 953}
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from hashTable import hashTable
# Quantization bin counts used when hashing a patch; these must match the
# values used when the filters in Filters.npy were trained.
Qangle = 24
Qstrenth = 3
Qcoherence = 3
mat = cv2.imread("../train/a.jpg")
# Learned RAISR filter bank, indexed by [hash bucket, pixel type].
h = np.load("Filters.npy")
# Work on a single channel of the YCrCb-converted image.
# NOTE(review): index 2 selects the third channel (Cb), not luminance —
# confirm this matches how the filters were trained.
mat = cv2.cvtColor(mat, cv2.COLOR_BGR2YCrCb)[:,:,2]
# LR = cv2.resize(LuminanceMat,(0,0),fx=0.5,fy=0.5)
# LR = cv2.GaussianBlur(LR,(0,0),2)
# Upscaling: 2x naive resize, to be sharpened by the learned filters
LR = cv2.resize(mat,(0,0),fx=2,fy=2)
LRDirect = np.zeros((LR.shape[0],LR.shape[1]))
# Filter every interior pixel with the filter chosen by its patch hash;
# a 5-pixel border is skipped so the 11x11 patch always fits.
for xP in range(5,LR.shape[0]-6):
    for yP in range(5,LR.shape[1]-6):
        # 11x11 patch centered on (xP, yP)
        patch = LR[xP-5:xP+6,yP-5:yP+6]
        # Hash the patch into (angle, strength, coherence) bins
        [angle,strenth,coherence] = hashTable(patch,Qangle,Qstrenth,Qcoherence)
        # Flat bucket index; 9 = Qstrenth * Qcoherence
        j = angle*9+strenth*3+coherence
        A = patch.reshape(1,-1)
        # Pixel-type index (one of 4) from the parity of the coordinates
        t = xP%2*2+yP%2
        hh = np.matrix(h[j,t])
        # Filtered value = dot product of the filter and flattened patch
        LRDirect[xP][yP] = hh*A.T
print("Test is off")
# Show the result: original next to the RAISR output, saved to ../fig.png
mat = cv2.imread("../train/a.jpg")
mat = cv2.cvtColor(mat, cv2.COLOR_BGR2YCrCb)
fig, axes = plt.subplots(ncols=2,figsize=(15,10))
axes[0].imshow(cv2.cvtColor(mat, cv2.COLOR_YCrCb2RGB))
axes[0].set_title('ORIGIN')
LR = cv2.resize(mat,(0,0),fx=2,fy=2)
LRDirectImage = LR
# Replace the third channel of the upscaled image with the filtered result
LRDirectImage[:,:,2] = LRDirect
axes[1].imshow(cv2.cvtColor(LRDirectImage, cv2.COLOR_YCrCb2RGB))
axes[1].set_title('RAISR')
fig.savefig("../fig.png")
|
{"hexsha": "2c73b5fe39b62a2948aa4fa7bb558739795e4af0", "size": 1280, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/test.py", "max_stars_repo_name": "gonewithwind5883/mikuraisr", "max_stars_repo_head_hexsha": "8e4565a753aba8fb73645a644dee8173a1563ce1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/test.py", "max_issues_repo_name": "gonewithwind5883/mikuraisr", "max_issues_repo_head_hexsha": "8e4565a753aba8fb73645a644dee8173a1563ce1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/test.py", "max_forks_repo_name": "gonewithwind5883/mikuraisr", "max_forks_repo_head_hexsha": "8e4565a753aba8fb73645a644dee8173a1563ce1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0980392157, "max_line_length": 79, "alphanum_fraction": 0.64375, "include": true, "reason": "import numpy", "num_tokens": 460}
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/gradients.estimate.ipynb (unless otherwise specified).
__all__ = ['get_grads_pullback']
# Cell
import numpy as np
from sklearn.linear_model import LinearRegression
def get_grads_pullback(data,
                       embedding,
                       geom,
                       tangent_bases_M,
                       tangent_bases_N,
                       selected_points):
    """Estimate the pullback differential dF at each selected point.

    For every selected point, the displacements of its affinity-graph
    neighbors are projected onto the local tangent basis of M (data space)
    and onto the tangent space of N (embedding space); a linear regression
    between the two projections yields a d x m Jacobian estimate per point.
    Returns an array of shape (num selected points, d, m).
    """
    affinity = geom.affinity_matrix
    m = embedding.shape[1]
    d = tangent_bases_M.shape[2]
    n_sel = selected_points.shape[0]
    dF = np.zeros((n_sel, d, m))
    for idx in range(n_sel):
        point = selected_points[idx]
        neighbors = affinity[point].indices
        # Neighbor displacements around the point, in both spaces
        delta_data = data[neighbors, :] - data[point, :]
        delta_emb = embedding[neighbors, :] - embedding[point, :]
        # Coordinates of the data displacements in the tangent basis of M
        proj_M = np.einsum('b d, i b -> i d', tangent_bases_M[idx], delta_data)
        # Orthogonal projection of embedding displacements onto T_N
        outer_N = np.einsum('m d, n d -> m n',
                            tangent_bases_N[idx], tangent_bases_N[idx])
        proj_N = np.einsum('m n, i n -> i m', outer_N, delta_emb)
        # Least-squares fit: proj_N ~ proj_M @ dF[idx]
        regression = LinearRegression()
        regression.fit(proj_M, proj_N)
        # (affinity weights were used here in an earlier version)
        dF[idx] = regression.coef_.transpose()
    return dF
|
{"hexsha": "e662920f1caa44b9c1cd7af3815e37880441bbc1", "size": 1329, "ext": "py", "lang": "Python", "max_stars_repo_path": "montlake/gradients/estimate.py", "max_stars_repo_name": "sjkoelle/montlake", "max_stars_repo_head_hexsha": "b908a43e0c00763bd1cf86120eaa6bdf7d8d1196", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-11-24T19:39:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T01:30:14.000Z", "max_issues_repo_path": "montlake/gradients/estimate.py", "max_issues_repo_name": "sjkoelle/montlake", "max_issues_repo_head_hexsha": "b908a43e0c00763bd1cf86120eaa6bdf7d8d1196", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "montlake/gradients/estimate.py", "max_forks_repo_name": "sjkoelle/montlake", "max_forks_repo_head_hexsha": "b908a43e0c00763bd1cf86120eaa6bdf7d8d1196", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9736842105, "max_line_length": 102, "alphanum_fraction": 0.6365688488, "include": true, "reason": "import numpy", "num_tokens": 353}
|
# Author: Karl Stratos (me@karlstratos.com)
import argparse
import dynet as dy
import matplotlib.pyplot as plt
import numpy as np
import random
from core.information_theory import InformationTheory
def main(args):
    """Linearly interpolate between two joint distributions and plot the
    mutual information along the path, counting how many times the MI
    curve changes direction (searching for a path with args.turn turns).

    Note: the hard-coded Omega1/Omega2 below overwrite the random draws,
    so the `while not found` loop inspects the same pair every iteration.
    """
    random.seed(args.seed)
    np.random.seed(args.seed)
    info = InformationTheory()
    num_points_except_end = args.num_points - 1
    stepsize = 1.0 / num_points_except_end
    found = False
    epsilon = 1e-6  # tolerance for deciding the MI curve changed direction
    while not found:
        Omega1 = info.rand_joint(args.zsize, args.zsize)
        Omega2 = info.rand_joint(args.zsize, args.zsize)
        #Omega1 = dy.inputTensor([[1.0, 0.0],
        #                         [0.0, 0.1]]) # NOT doubly stochastic!
        #Omega2 = dy.inputTensor([[0.0, 0.1],
        #                         [0.1, 0.0]])
        # Hard-coded endpoint joints (these override the random draws).
        Omega1 = dy.inputTensor([[0.4940, 0.3006],
                                 [0.1383, 0.0671]])
        Omega2 = dy.inputTensor([[0.1513, 0.2415],
                                 [0.2545, 0.3527]])
        print
        print "Going from: "
        print Omega1.value()
        print "to"
        print Omega2.value()
        print
        alpha = 0
        point_indices = []
        mi_values = []
        increasing = False
        decreasing = False
        num_turns = 0
        for point_index in xrange(args.num_points):
            # Convex combination of the two joints at the current alpha
            Omega = (1.0 - alpha) * Omega1 + alpha * Omega2
            mi_value = info.mi_zero(Omega).value()
            point_indices.append(point_index + 1)
            mi_values.append(mi_value)
            alpha += stepsize
            if point_index == 1:
                # Second point: establish the initial direction of the curve
                print "point {0}, MI: {1} -> {2}".format(point_index + 1,
                                                         mi_value_before,
                                                         mi_value),
                if mi_value > mi_value_before:
                    increasing = True
                    decreasing = False
                if mi_value < mi_value_before:
                    increasing = False
                    decreasing = True
                if increasing:
                    print "increasing"
                if decreasing:
                    print "decreasing"
            elif point_index > 1:
                if increasing:
                    print "point {0} increasing, now MI: {1} -> {2}".format(
                        point_index + 1, mi_value_before, mi_value),
                    # A drop larger than epsilon counts as a turn
                    if mi_value < mi_value_before - epsilon:
                        increasing = False
                        decreasing = True
                        print "inc->dec",
                        num_turns += 1
                        print "TURNED {0} times".format(num_turns),
                        if num_turns == args.turn and not found:
                            print " ------ FOUND",
                            found = True
                    print
                if decreasing:
                    print "point {0} decreasing, now MI: {1} -> {2}".format(
                        point_index + 1, mi_value_before, mi_value),
                    # A rise larger than epsilon counts as a turn
                    if mi_value > mi_value_before + epsilon:
                        increasing = True
                        decreasing = False
                        print "dec->inc",
                        num_turns += 1
                        print "TURNED {0} times".format(num_turns),
                        if num_turns == args.turn and not found:
                            print " ------ FOUND",
                            found = True
                    print
            mi_value_before = mi_value
            #break
    assert len(point_indices) == args.num_points
    assert len(mi_values) == args.num_points
    plt.plot(point_indices, mi_values)
    plt.show()
if __name__ == "__main__":
    # CLI entry point: build the argument parser and hand the parsed
    # options straight to main().
    parser = argparse.ArgumentParser()
    parser.add_argument("--zsize", type=int, default=2,
                        help="number of variables: %(default)d")
    parser.add_argument("--num-points", type=int, default=100,
                        help="number of interpolated points: %(default)d")
    parser.add_argument("--turn", type=int, default=3,
                        help="number of turns: %(default)d")
    parser.add_argument("--seed", type=int, default=1024,
                        help="random seed: %(default)d")
    main(parser.parse_args())
|
{"hexsha": "76eb9b778d2af23c9a92ab3d4924fc04cc0df62f", "size": 4392, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_mi.py", "max_stars_repo_name": "karlstratos/iaan", "max_stars_repo_head_hexsha": "e76a3cd3a17807a21478ae1dc83be4da76c01d70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-06T20:21:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-30T21:39:23.000Z", "max_issues_repo_path": "plot_mi.py", "max_issues_repo_name": "karlstratos/iaan", "max_issues_repo_head_hexsha": "e76a3cd3a17807a21478ae1dc83be4da76c01d70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot_mi.py", "max_forks_repo_name": "karlstratos/iaan", "max_forks_repo_head_hexsha": "e76a3cd3a17807a21478ae1dc83be4da76c01d70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0, "max_line_length": 77, "alphanum_fraction": 0.4806466302, "include": true, "reason": "import numpy", "num_tokens": 931}
|
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm.auto import tqdm
from model import SiAudNet
def train(
    model: SiAudNet,
    train_on_gpu: bool,
    n_epochs: int,
    train_loader: torch.utils.data.DataLoader,
    valid_loader: torch.utils.data.DataLoader,
    optimizer: optim.Optimizer,
    file_name: str,
    use_scheduler: bool = False,
) -> None:
    """Train ``model`` for ``n_epochs`` epochs with per-epoch validation.

    The best weights (lowest validation loss) are checkpointed to
    ``file_name`` and reloaded into ``model`` before returning.

    Args:
        model: the siamese network to train; ``SiAudNet.criterion`` is used
            as the loss function.
        train_on_gpu: when True, targets and both halves of each input pair
            are moved to CUDA.  NOTE(review): the model itself is NOT moved
            here — the caller is expected to have done that already.
        n_epochs: number of full passes over ``train_loader``.
        train_loader: yields ``(data, target)`` where ``data`` is a pair of
            tensors (siamese inputs), as evidenced by the ``data[0]``/
            ``data[1]`` indexing below.
        valid_loader: like ``train_loader``, used for validation only.
        optimizer: optimizer already bound to ``model``'s parameters.
        file_name: checkpoint path for the best ``state_dict``.
        use_scheduler: when True, a ReduceLROnPlateau scheduler steps on the
            validation loss each epoch.
    """
    print("Training...")
    scheduler = None
    if use_scheduler:
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         "min",
                                                         verbose=True)
    valid_loss_min = np.Inf  # track change in validation loss
    for epoch in range(n_epochs):
        # per-epoch accumulators (sums weighted by batch size, averaged below)
        train_loss = 0.0
        valid_loss = 0.0
        ## train
        model.train()
        for data, target in tqdm(train_loader):
            target = target.float()  # BCELogitLoss requires float loss
            if train_on_gpu:
                target = target.cuda()
                data = (data[0].cuda(), data[1].cuda())
            optimizer.zero_grad()
            output = model(data)
            loss = SiAudNet.criterion(output, target)
            loss.backward()
            optimizer.step()
            # weight by batch size so the later division yields a true mean
            train_loss += loss.item() * data[0].size(0)
            del loss  # free the graph reference promptly
        ## validate
        model.eval()
        with torch.no_grad():
            for data, target in valid_loader:
                target = target.float()
                if train_on_gpu:
                    target = target.cuda()
                    data = (data[0].cuda(), data[1].cuda())
                output = model(data)
                loss = SiAudNet.criterion(output, target)
                # update average validation loss
                valid_loss += loss.item() * data[0].size(0)
        # calculate average losses
        train_loss = train_loss / len(train_loader.dataset)
        valid_loss = valid_loss / len(valid_loader.dataset)
        if use_scheduler:
            scheduler.step(valid_loss)
        # print training/validation statistics
        print(
            f"Epoch: {epoch} \tTraining Loss: {train_loss:.6f} \tValidation Loss: {valid_loss:.6f}"
        )
        # save model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print(
                f"Validation loss decreased ({valid_loss_min:.6f} --> {valid_loss:.6f}). Saving.."
            )
            torch.save(model.state_dict(), file_name)
            valid_loss_min = valid_loss
    # restore the best (not necessarily last) checkpoint before returning
    model.load_state_dict(torch.load(file_name))
def test(model: SiAudNet, test_on_gpu: bool,
         test_loader: torch.utils.data.DataLoader) -> None:
    """Evaluate ``model`` on ``test_loader``.

    Prints the mean test loss, per-class accuracy for the two classes
    ("not match" / "match"), and the overall accuracy.  Each ``data`` batch
    is a pair of tensors (siamese inputs); targets are binary.
    """
    print("Testing...")
    # track test loss
    test_loss = 0.0
    classes = ["not match", "match"]
    class_correct = [0, 0]
    class_total = [0, 0]
    if test_on_gpu:
        model = model.cuda()
    sigmoid = nn.Sigmoid()
    model.eval()
    with torch.no_grad():
        for data, target in tqdm(test_loader):
            target = target.float()  # BCELogitLoss requires float loss
            if test_on_gpu:
                target = target.cuda()
                data = (data[0].cuda(), data[1].cuda())
            output = model(data)
            batch_loss = SiAudNet.criterion(output, target)
            test_loss += batch_loss.item() * data[0].size(0)
            probabilities = sigmoid(output)
            # tally per-class hits: label 1 = "match", label 0 = "not match"
            for true_val, prob in zip(target, probabilities):
                label = 1 if true_val > 0.5 else 0
                guess = 1 if prob > 0.5 else 0
                class_total[label] += 1
                if guess == label:
                    class_correct[label] += 1
    # average test loss
    test_loss = test_loss / len(test_loader.dataset)
    print(f"Test Loss: {test_loss:.6f}\n")
    for i, nn_class in enumerate(classes):
        if class_total[i] > 0:
            print(
                f"Test Accuracy of {nn_class+':':11}{class_correct[i] / class_total[i]:.3%} ({np.sum(class_correct[i])}/{np.sum(class_total[i])})"
            )
        else:
            print(f"Test Accuracy of {nn_class+':':11} N/A")
    print(
        f"\nOverall Test Accuracy: {np.sum(class_correct) / np.sum(class_total):.3%} ({np.sum(class_correct)}/{np.sum(class_total)})"
    )
|
{"hexsha": "2884b598aa65793a3a4eef7630e0f1a81ae8a04f", "size": 4411, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_test.py", "max_stars_repo_name": "dim5/ossr", "max_stars_repo_head_hexsha": "bc9f98ec8559c5301a167088e92d06adb876e267", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-08-02T08:35:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-09T19:57:16.000Z", "max_issues_repo_path": "train_test.py", "max_issues_repo_name": "dim5/ossr", "max_issues_repo_head_hexsha": "bc9f98ec8559c5301a167088e92d06adb876e267", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-07-30T21:28:16.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-30T18:09:17.000Z", "max_forks_repo_path": "train_test.py", "max_forks_repo_name": "dim5/ossr", "max_forks_repo_head_hexsha": "bc9f98ec8559c5301a167088e92d06adb876e267", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8461538462, "max_line_length": 146, "alphanum_fraction": 0.5440943097, "include": true, "reason": "import numpy", "num_tokens": 1006}
|
# coding:utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import os
import time
import datetime
import json
import sys
import sklearn.metrics
from tqdm import tqdm
def to_var(x):
    """Wrap a numpy array in a torch Variable, moving it to CUDA when a GPU
    is available."""
    tensor = torch.from_numpy(x)
    if torch.cuda.is_available():
        tensor = tensor.cuda()
    return Variable(tensor)
class Accuracy(object):
    """Running accuracy counter: tallies correct predictions over a total."""
    def __init__(self):
        self.correct = 0
        self.total = 0
    def add(self, is_correct):
        """Record one prediction outcome (truthy = correct)."""
        self.total += 1
        self.correct += 1 if is_correct else 0
    def get(self):
        """Return correct/total as a float, or 0.0 when nothing recorded."""
        return float(self.correct) / self.total if self.total else 0.0
    def clear(self):
        """Reset both counters to zero."""
        self.correct = 0
        self.total = 0
class Config(object):
    """Configuration holder and train/test driver for a bag-level relation
    extraction model.

    Responsibilities visible in this class:
      * store hyper-parameters (sizes, learning rate, optimizer choice, ...),
      * load preprocessed ``.npy``/``.json`` data from ``data_path``,
      * slice the data into (bag-scoped) batches,
      * run the training loop with periodic checkpointing, and evaluate via
        precision/recall + AUC.
    """
    def __init__(self, args):
        # running accuracy trackers: NA class, non-NA classes, and overall
        self.acc_NA = Accuracy()
        self.acc_not_NA = Accuracy()
        self.acc_total = Accuracy()
        self.data_path = './data'
        self.use_bag = True
        self.use_gpu = True
        self.is_training = True
        self.max_length = 80          # max sentence length (tokens)
        self.char_num = 4
        self.path_num = 7
        self.MDP_length = 10          # max dependency-path length
        self.pos_num = 2 * self.max_length  # relative-position vocabulary size
        self.tag_num = 30
        self.dir_num = 4
        self.num_classes = 7
        self.sen_hidden_size = args.sen_hidden_size
        self.path_hidden_size = args.path_hidden_size
        self.pos_size = 5
        self.tag_size = 10
        self.dir_size = 5
        self.max_epoch = 60
        self.opt_method = 'SGD'
        self.optimizer = None
        self.learning_rate = args.learning_rate
        self.weight_decay = 1e-5
        self.drop_prob = 0.5
        self.checkpoint_dir = './checkpoint/'
        self.test_result_dir = './test_result'
        self.save_epoch = 1           # checkpoint every N epochs
        self.test_epoch = 1           # evaluate every N epochs (after epoch 20)
        self.pretrain_model = None
        self.trainModel = None
        self.testModel = None
        self.batch_size = args.batch_size
        self.word_size = 50           # word embedding dimension
        self.window_size = 3
        self.dataset = args.dataset
        self.epoch_range = None       # iterable of epochs to test, see test()
    # --- simple setters for the attributes above ---
    def set_data_path(self, data_path):
        self.data_path = data_path
    def set_max_length(self, max_length):
        # pos_num depends on max_length, so keep them in sync
        self.max_length = max_length
        self.pos_num = 2 * self.max_length
    def set_MDP_length(self, MDP_length):
        self.MDP_length = MDP_length
        # NOTE(review): recomputes pos_num from max_length here as well —
        # looks like a copy/paste from set_max_length; confirm intent.
        self.pos_num = 2 * self.max_length
    def set_dir_num(self, dir_num):
        self.dir_num = dir_num
    def set_num_classes(self, num_classes):
        self.num_classes = num_classes
    def set_hidden_size(self, hidden_size):
        # NOTE(review): sets `hidden_size`, but __init__ defines
        # sen_hidden_size / path_hidden_size — verify which one models read.
        self.hidden_size = hidden_size
    def set_window_size(self, window_size):
        self.window_size = window_size
    def set_pos_size(self, pos_size):
        self.pos_size = pos_size
    def set_word_size(self, word_size):
        self.word_size = word_size
    def set_dir_size(self, dir_size):
        self.dir_size = dir_size
    def set_max_epoch(self, max_epoch):
        self.max_epoch = max_epoch
    def set_batch_size(self, batch_size):
        self.batch_size = batch_size
    def set_opt_method(self, opt_method):
        self.opt_method = opt_method
    def set_learning_rate(self, learning_rate):
        self.learning_rate = learning_rate
    def set_weight_decay(self, weight_decay):
        self.weight_decay = weight_decay
    def set_drop_prob(self, drop_prob):
        self.drop_prob = drop_prob
    def set_checkpoint_dir(self, checkpoint_dir):
        self.checkpoint_dir = checkpoint_dir
    def set_test_epoch(self, test_epoch):
        self.test_epoch = test_epoch
    def set_save_epoch(self, save_epoch):
        self.save_epoch = save_epoch
    def set_pretrain_model(self, pretrain_model):
        self.pretrain_model = pretrain_model
    def set_is_training(self, is_training):
        self.is_training = is_training
    def set_use_bag(self, use_bag):
        self.use_bag = use_bag
    def set_use_gpu(self, use_gpu):
        self.use_gpu = use_gpu
    def set_epoch_range(self, epoch_range):
        self.epoch_range = epoch_range
    def set_char_num(self, char_num):
        self.char_num = char_num
    def set_path_num(self, path_num):
        self.path_num = path_num
    def load_train_data(self):
        """Load all preprocessed training arrays from ``data_path`` and
        compute the batch count / shuffle order."""
        print("Reading training data...")
        # embedding tables shared between train and test
        self.data_word_vec = np.load(os.path.join(self.data_path, 'vec.npy'))
        self.data_char_vec = np.load(os.path.join(self.data_path, 'char_vec.npy'))
        self.deprel_vec = np.load(os.path.join(self.data_path, 'deprel_vec.npy'))
        self.data_train_word = np.load(os.path.join(self.data_path, 'train_word.npy'))
        self.data_train_char = np.load(os.path.join(self.data_path, 'train_char.npy'))
        self.data_train_sen_len = np.load(os.path.join(self.data_path, 'train_sen_length.npy'))
        self.data_train_char_mask = np.load(os.path.join(self.data_path, 'train_char_mask.npy'))
        self.data_train_pos1 = np.load(os.path.join(self.data_path, 'train_pos1.npy'))
        self.data_train_pos2 = np.load(os.path.join(self.data_path, 'train_pos2.npy'))
        self.data_train_tag = np.load(os.path.join(self.data_path, 'train_tag.npy'))
        # MDP* arrays: shortest-dependency-path features (word/relation/tag/direction)
        self.data_train_MDPword = np.load(os.path.join(self.data_path, 'train_MDPword.npy'))
        self.data_train_MDPrel = np.load(os.path.join(self.data_path, 'train_MDPrel.npy'))
        self.data_train_MDPpos = np.load(os.path.join(self.data_path, 'train_MDPtag.npy'))
        self.data_train_MDPdir = np.load(os.path.join(self.data_path, 'train_MDPdir.npy'))
        self.data_train_MDP_len = np.load(os.path.join(self.data_path, 'train_MDP_length.npy'))
        self.data_train_mask = np.load(os.path.join(self.data_path, 'train_mask.npy'))
        self.data_train_head = np.load(os.path.join(self.data_path, 'train_head.npy'))
        self.data_train_tail = np.load(os.path.join(self.data_path, 'train_tail.npy'))
        self.data_train_ent_pair = json.load(open(os.path.join(self.data_path, 'train_bag_pair.json')))
        self.data_train_root = np.load(os.path.join(self.data_path, 'train_root.npy'))
        if self.use_bag:
            # bag-level (multi-instance) supervision
            self.data_query_label = np.load(os.path.join(self.data_path, 'train_ins_label.npy'))
            self.data_train_label = np.load(os.path.join(self.data_path, 'train_bag_label.npy'))
            self.data_train_scope = np.load(os.path.join(self.data_path, 'train_bag_scope.npy'))
        else:
            # instance-level supervision
            self.data_train_label = np.load(os.path.join(self.data_path, 'train_ins_label.npy'))
            self.data_train_scope = np.load(os.path.join(self.data_path, 'train_ins_scope.npy'))
        print("Finish reading")
        self.train_order = list(range(len(self.data_train_label)))
        self.train_batches = int(len(self.data_train_label) / self.batch_size)
        if len(self.data_train_label) % self.batch_size != 0:
            self.train_batches += 1
    def load_test_data(self):
        """Load all preprocessed test arrays (mirror of load_train_data) and
        compute the batch count and the total number of positive facts."""
        print("Reading testing data...")
        self.data_word_vec = np.load(os.path.join(self.data_path, 'vec.npy'))
        self.data_char_vec = np.load(os.path.join(self.data_path, 'char_vec.npy'))
        self.deprel_vec = np.load(os.path.join(self.data_path, 'deprel_vec.npy'))
        self.data_test_word = np.load(os.path.join(self.data_path, 'test_word.npy'))
        self.data_test_char = np.load(os.path.join(self.data_path, 'test_char.npy'))
        self.data_test_sen_len = np.load(os.path.join(self.data_path, 'test_sen_length.npy'))
        self.data_test_char_mask = np.load(os.path.join(self.data_path, 'test_char_mask.npy'))
        self.data_test_pos1 = np.load(os.path.join(self.data_path, 'test_pos1.npy'))
        self.data_test_pos2 = np.load(os.path.join(self.data_path, 'test_pos2.npy'))
        self.data_test_tag = np.load(os.path.join(self.data_path, 'test_tag.npy'))
        self.data_test_MDPword = np.load(os.path.join(self.data_path, 'test_MDPword.npy'))
        self.data_test_MDPrel = np.load(os.path.join(self.data_path, 'test_MDPrel.npy'))
        self.data_test_MDPpos = np.load(os.path.join(self.data_path, 'test_MDPtag.npy'))
        self.data_test_MDPdir = np.load(os.path.join(self.data_path, 'test_MDPdir.npy'))
        self.data_test_MDP_len = np.load(os.path.join(self.data_path, 'test_MDP_length.npy'))
        self.data_test_mask = np.load(os.path.join(self.data_path, 'test_mask.npy'))
        self.data_test_head = np.load(os.path.join(self.data_path, 'test_head.npy'))
        self.data_test_tail = np.load(os.path.join(self.data_path, 'test_tail.npy'))
        self.data_test_ent_pair = json.load(open(os.path.join(self.data_path, 'test_bag_pair.json')))
        self.data_test_root = np.load(os.path.join(self.data_path, 'test_root.npy'))
        if self.use_bag:
            self.data_query_label = np.load(os.path.join(self.data_path, 'test_ins_label.npy'))
            self.data_test_label = np.load(os.path.join(self.data_path, 'test_bag_label.npy'))
            self.data_test_scope = np.load(os.path.join(self.data_path, 'test_bag_scope.npy'))
        else:
            self.data_test_label = np.load(os.path.join(self.data_path, 'test_ins_label.npy'))
            self.data_test_scope = np.load(os.path.join(self.data_path, 'test_ins_scope.npy'))
        print("Finish reading")
        self.test_batches = int(len(self.data_test_label) / self.batch_size)
        if len(self.data_test_label) % self.batch_size != 0:
            self.test_batches += 1
        # number of positive (non-NA) facts, used as the recall denominator
        self.total_recall = self.data_test_label[:, 1:].sum()
    def set_train_model(self, model):
        """Instantiate ``model`` (a class, called with ``config=self``),
        optionally load pretrained weights, and build the optimizer."""
        print("Initializing training model...")
        self.model = model
        self.trainModel = self.model(config=self)
        if self.pretrain_model != None:
            self.trainModel.load_state_dict(torch.load(self.pretrain_model))
        if torch.cuda.is_available():
            self.trainModel.cuda()
        if self.optimizer != None:
            pass
        elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
            # NOTE(review): self.lr_decay is never defined in this class, so
            # this branch would raise AttributeError if selected — confirm.
            self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr=self.learning_rate, lr_decay=self.lr_decay,
                                           weight_decay=self.weight_decay)
        elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
            self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr=self.learning_rate,
                                            weight_decay=self.weight_decay)
        elif self.opt_method == "Adam" or self.opt_method == "adam":
            self.optimizer = optim.Adam(self.trainModel.parameters(), lr=self.learning_rate,
                                        weight_decay=self.weight_decay)
        else:
            # default: SGD
            self.optimizer = optim.SGD(self.trainModel.parameters(), lr=self.learning_rate,
                                       weight_decay=self.weight_decay)
        print("Finish initializing")
    def set_test_model(self, model):
        """Instantiate ``model`` for evaluation (weights are loaded later,
        in test())."""
        print("Initializing test model...")
        self.model = model
        self.testModel = self.model(config=self)
        if torch.cuda.is_available():
            self.testModel.cuda()
        self.testModel.eval()
        print("Finish initializing")
    def get_train_batch(self, batch):
        """Gather the ``batch``-th training batch (following train_order)
        into ``self.batch_*`` attributes; ``scope`` maps bags to instance
        ranges within the flattened index."""
        input_scope = np.take(self.data_train_scope,
                              self.train_order[batch * self.batch_size: (batch + 1) * self.batch_size], axis=0)
        index = []
        scope = [0]
        # expand each bag's [start, end] scope into instance indices and
        # record cumulative offsets for the selector
        for num in input_scope:
            index = index + list(range(num[0], num[1] + 1))
            scope.append(scope[len(scope) - 1] + num[1] - num[0] + 1)
        self.batch_word = self.data_train_word[index, :]
        self.batch_char = self.data_train_char[index, :, :]
        self.batch_char_mask = self.data_train_char_mask[index, :]
        self.batch_sen_length = self.data_train_sen_len[index]
        self.batch_tag = self.data_train_tag[index, :]
        self.batch_pos1 = self.data_train_pos1[index, :]
        self.batch_pos2 = self.data_train_pos2[index, :]
        self.batch_MDPword = self.data_train_MDPword[index, :, :]
        self.batch_MDPrel = self.data_train_MDPrel[index, :, :]
        self.batch_MDPpos = self.data_train_MDPpos[index, :, :]
        self.batch_MDPdir = self.data_train_MDPdir[index, :, :]
        self.batch_MDP_length = self.data_train_MDP_len[index, :]
        self.batch_mask = self.data_train_mask[index, :]
        self.batch_head = self.data_train_head[index]
        self.batch_tail = self.data_train_tail[index]
        self.batch_root = self.data_train_root[index]
        self.batch_label = np.take(self.data_train_label,
                                   self.train_order[batch * self.batch_size: (batch + 1) * self.batch_size], axis=0)
        self.batch_attention_query = self.data_query_label[index]
        self.batch_scope = scope
    def get_test_batch(self, batch):
        """Gather the ``batch``-th test batch (sequential order) into
        ``self.batch_*`` attributes; mirror of get_train_batch."""
        input_scope = self.data_test_scope[batch * self.batch_size: (batch + 1) * self.batch_size]
        index = []
        scope = [0]
        for num in input_scope:
            index = index + list(range(num[0], num[1] + 1))
            scope.append(scope[len(scope) - 1] + num[1] - num[0] + 1)
        self.batch_word = self.data_test_word[index, :]
        self.batch_pos1 = self.data_test_pos1[index, :]
        self.batch_pos2 = self.data_test_pos2[index, :]
        self.batch_sen_length = self.data_test_sen_len[index]
        self.batch_char = self.data_test_char[index, :, :]
        self.batch_char_mask = self.data_test_char_mask[index, :]
        self.batch_tag = self.data_test_tag[index, :]
        self.batch_MDPword = self.data_test_MDPword[index, :, :]
        self.batch_MDPrel = self.data_test_MDPrel[index, :, :]
        self.batch_MDPpos = self.data_test_MDPpos[index, :, :]
        self.batch_MDPdir = self.data_test_MDPdir[index, :, :]
        self.batch_MDP_length = self.data_test_MDP_len[index, :]
        self.batch_mask = self.data_test_mask[index, :]
        self.batch_head = self.data_test_head[index]
        self.batch_tail = self.data_test_tail[index]
        self.batch_root = self.data_test_root[index]
        self.batch_attention_query = self.data_query_label[index]
        self.batch_scope = scope
    def train_one_step(self):
        """Push the current batch into the model's sub-modules, run one
        forward/backward/step, update the accuracy trackers, and return the
        batch loss."""
        # the model reads its inputs from attributes rather than arguments
        self.trainModel.embedding.word = to_var(self.batch_word)
        self.trainModel.embedding.char = to_var(self.batch_char)
        self.trainModel.embedding.char_mask = to_var(self.batch_char_mask)
        self.trainModel.embedding.pos1 = to_var(self.batch_pos1)
        self.trainModel.embedding.pos2 = to_var(self.batch_pos2)
        self.trainModel.embedding.tag = to_var(self.batch_tag)
        self.trainModel.embedding.MDPword = to_var(self.batch_MDPword)
        self.trainModel.embedding.MDPrel = to_var(self.batch_MDPrel)
        self.trainModel.embedding.MDPpos = to_var(self.batch_MDPpos)
        self.trainModel.embedding.MDPdir = to_var(self.batch_MDPdir)
        self.trainModel.embedding.head = to_var(self.batch_head)
        self.trainModel.embedding.tail = to_var(self.batch_tail)
        self.trainModel.embedding.root = to_var(self.batch_root)
        self.trainModel.encoder.mask = to_var(self.batch_mask)
        self.trainModel.encoder.sen_len = to_var(self.batch_sen_length)
        self.trainModel.selector.scope = self.batch_scope
        self.trainModel.selector.attention_query = to_var(self.batch_attention_query)
        self.trainModel.selector.label = to_var(self.batch_label)
        self.trainModel.classifier.label = to_var(self.batch_label)
        self.optimizer.zero_grad()
        loss, _output = self.trainModel()
        loss.backward()
        self.optimizer.step()
        # label 0 is the NA ("no relation") class
        for i, prediction in enumerate(_output):
            if self.batch_label[i] == 0:
                self.acc_NA.add(prediction.item() == self.batch_label[i])
            else:
                self.acc_not_NA.add(prediction.item() == self.batch_label[i])
            self.acc_total.add(prediction.item() == self.batch_label[i])
        # NOTE(review): loss.data[0] is pre-0.4 PyTorch; newer versions
        # require loss.item() — confirm the pinned torch version.
        return loss.data[0]
    def test_one_step(self):
        """Push the current test batch into the model and return its
        per-bag class scores via testModel.test()."""
        self.testModel.embedding.word = to_var(self.batch_word)
        self.testModel.embedding.char = to_var(self.batch_char)
        self.testModel.embedding.char_mask = to_var(self.batch_char_mask)
        self.testModel.embedding.pos1 = to_var(self.batch_pos1)
        self.testModel.embedding.pos2 = to_var(self.batch_pos2)
        self.testModel.embedding.tag = to_var(self.batch_tag)
        self.testModel.embedding.MDPword = to_var(self.batch_MDPword)
        self.testModel.embedding.MDPrel = to_var(self.batch_MDPrel)
        self.testModel.embedding.MDPpos = to_var(self.batch_MDPpos)
        self.testModel.embedding.MDPdir = to_var(self.batch_MDPdir)
        self.testModel.embedding.head = to_var(self.batch_head)
        self.testModel.embedding.tail = to_var(self.batch_tail)
        self.testModel.embedding.root = to_var(self.batch_root)
        self.testModel.encoder.mask = to_var(self.batch_mask)
        self.testModel.encoder.sen_len = to_var(self.batch_sen_length)
        self.testModel.selector.scope = self.batch_scope
        self.testModel.selector.attention_query = to_var(self.batch_attention_query)
        return self.testModel.test()
    def train(self):
        """Main training loop: iterate epochs/batches, checkpoint every
        ``save_epoch`` epochs, evaluate every ``test_epoch`` epochs (after
        epoch 20), and persist the best PR curve and triples."""
        if not os.path.exists(self.checkpoint_dir):
            os.mkdir(self.checkpoint_dir)
        best_auc = 0.0
        best_p = None
        best_r = None
        test_triple = None
        best_epoch = 0
        for epoch in range(self.max_epoch):
            print('Epoch ' + str(epoch) + ' starts...')
            self.acc_NA.clear()
            self.acc_not_NA.clear()
            self.acc_total.clear()
            np.random.shuffle(self.train_order)
            for batch in range(self.train_batches):
                self.get_train_batch(batch)
                loss = self.train_one_step()
                time_str = datetime.datetime.now().isoformat()
                # \r keeps the progress line in place on the terminal
                sys.stdout.write(
                    "epoch %d step %d time %s | loss: %f, NA accuracy: %f, not NA accuracy: %f, total accuracy: %f\r" % (
                        epoch, batch, time_str, loss, self.acc_NA.get(), self.acc_not_NA.get(), self.acc_total.get()))
                sys.stdout.flush()
            if (epoch + 1) % self.save_epoch == 0:
                print('Epoch ' + str(epoch) + ' has finished')
                print('Saving model...')
                path = os.path.join(self.checkpoint_dir, self.dataset + '-' + self.model.__name__ + '-' + str(epoch))
                torch.save(self.trainModel.state_dict(), path)
                print('Have saved model to ' + path)
            # evaluation is skipped for the first 21 epochs
            if (epoch + 1) % self.test_epoch == 0 and epoch > 20:
                self.testModel = self.trainModel
                auc, pr_x, pr_y, triple = self.test_one_epoch()
                if auc > best_auc:
                    best_auc = auc
                    best_p = pr_x
                    best_r = pr_y
                    best_epoch = epoch
                    test_triple = triple
        print("Finish training")
        print("Best epoch = %d | auc = %f" % (best_epoch, best_auc))
        # NOTE(review): log file handle is never closed/flushed explicitly
        log = open(self.dataset + '-log.txt', 'a')
        log.write(self.model.__name__ + ': Best epoch = ' + str(best_epoch) + ' | auc = ' + str(best_auc) + '\n')
        print("Storing best result...")
        if not os.path.isdir(self.test_result_dir):
            os.mkdir(self.test_result_dir)
        np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_x.npy'), best_p)
        np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_y.npy'), best_r)
        json.dump(test_triple, open(os.path.join(self.test_result_dir, self.model.__name__ + 'test_triple.json'), 'w'))
        print("Finish storing")
    def test_one_epoch(self):
        """Score every test bag, build the precision/recall curve over
        non-NA predictions sorted by confidence, and return
        (auc, pr_x, pr_y, test_triple)."""
        test_score = []
        for batch in tqdm(range(self.test_batches)):
            self.get_test_batch(batch)
            batch_score = self.test_one_step()
            test_score = test_score + batch_score
        test_result = []
        test_triple = []
        for i in range(len(test_score)):
            # argmax over classes; index 0 is NA
            score = test_score[i][0]
            rel = 0
            for j in range(1, len(test_score[i])):
                if test_score[i][j] >= score:
                    score = test_score[i][j]
                    rel = j
            if rel > 0:
                test_result.append([self.data_test_label[i][rel], test_score[i][rel]])
                # 'grond_truth' key spelling is preserved: it is a runtime
                # dict key that downstream consumers may rely on
                test_triple.append({'entity_pair': self.data_test_ent_pair[i], 'flag': str(self.data_test_label[i][rel]),
                                    'grond_truth':str(list(self.data_test_label[i]).index(1)),
                                    'relation': str(rel), 'score': str(test_score[i][rel]), 'confidence': str(test_score[i])})
        # NOTE(review): prints the full triple list — likely debug output
        print(test_triple)
        # sort predictions by confidence, descending
        test_result = sorted(test_result, key=lambda x: x[1])
        test_result = test_result[::-1]
        pr_x = []
        pr_y = []
        correct = 0
        for i, item in enumerate(test_result):
            correct += item[0]
            pr_y.append(float(correct) / (i + 1))          # precision
            pr_x.append(float(correct) / self.total_recall)  # recall
        print('Total facts: ', self.total_recall)
        auc = sklearn.metrics.auc(x=pr_x, y=pr_y)
        print("auc: ", auc)
        return auc, pr_x, pr_y, test_triple
    def test(self):
        """Evaluate every checkpoint in ``epoch_range`` and persist the best
        epoch's PR curve and triples."""
        best_epoch = None
        best_auc = 0.0
        best_p = None
        best_r = None
        best_triple = None
        for epoch in self.epoch_range:
            path = os.path.join(self.checkpoint_dir, self.dataset+'-'+self.model.__name__ + '-' + str(epoch))
            if not os.path.exists(path):
                continue  # checkpoint for this epoch was never saved
            print("Start testing epoch %d" % (epoch))
            self.testModel.load_state_dict(torch.load(path))
            auc, p, r, test_triple = self.test_one_epoch()
            if auc > best_auc:
                best_auc = auc
                best_epoch = epoch
                best_p = p
                best_r = r
                best_triple = test_triple
            print("Finish testing epoch %d" % (epoch))
        print("Best epoch = %d | auc = %f" % (best_epoch, best_auc))
        print("Storing best result...")
        if not os.path.isdir(self.test_result_dir):
            os.mkdir(self.test_result_dir)
        np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_x.npy'), best_p)
        np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_y.npy'), best_r)
        json.dump(best_triple, open(os.path.join(self.test_result_dir, self.model.__name__ + 'test_triple.json'), 'w'))
        print("Finish storing")
|
{"hexsha": "fb9ac03cbd14088b5ebaf6df32520cea12efcc29", "size": 23085, "ext": "py", "lang": "Python", "max_stars_repo_path": "config/Config.py", "max_stars_repo_name": "ningpang/com-CNN", "max_stars_repo_head_hexsha": "a494589a9445c9b3ad63175ec6b2084d4d3e3e81", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-14T10:19:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-14T10:19:20.000Z", "max_issues_repo_path": "config/Config.py", "max_issues_repo_name": "ningpang/com-CNN", "max_issues_repo_head_hexsha": "a494589a9445c9b3ad63175ec6b2084d4d3e3e81", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config/Config.py", "max_forks_repo_name": "ningpang/com-CNN", "max_forks_repo_head_hexsha": "a494589a9445c9b3ad63175ec6b2084d4d3e3e81", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-03T08:00:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-03T08:00:22.000Z", "avg_line_length": 47.112244898, "max_line_length": 123, "alphanum_fraction": 0.6207060862, "include": true, "reason": "import numpy", "num_tokens": 5236}
|
"""
This module provides the Destination class that holds all the basic API methods
for interfacing with a destination.
Destinations are databases, filesystems, file formats (HDF5, CSV), etc.
"""
import struct
import numpy as np
import multiprocessing
import Queue
import time
import logging
from origin.server import template_validation
from origin.server import measurement_validation
from origin import data_types, current_time, TIMESTAMP, registration_validation
###############################################################################
#
# Metadata format:
#
# knownStreams : {
# `stream name` : {
# stream : `stream_name`,
# id : `stream_id`,
# version : `version`,
# key_order : `key_order`,
# format_str : `format_str`,
# definition : { # most recent definition object
# `field1` : { "type":data_type, "key_index": `index` },
# `field2` : { "type":data_type, "key_index": `index` },
# ...
# }
# # optional dict of older versions, version number is the hash
# versions : {
#       1 : `definition_obj`, # see above for definition format
#       2 : `definition_obj`,
# ...
# }
# },
# ...
# }
#
# # current version definitions
# knownStreamVersions : {
# stream : `definition_obj,
# stream : `definition_obj,
# ...
# }
#
###############################################################################
class QueuedObject(object):
    """Generic base for objects shuttled between processes via a queue.

    `object_type` distinguishes payload kinds (e.g. 'data' vs 'msg');
    `id` tags the object (parameter name kept for compatibility even
    though it shadows the builtin).
    """
    def __init__(self, object_type, id):
        self.id = id
        self.type = object_type
class QueuedMeasurement(QueuedObject):
    """Measurement payload routed through the inter-process queue.

    Carries the stream name, the stream definition version, and the
    measurement data itself; the base class tags it with type 'data'.
    """
    def __init__(self, stream, version, measurements, id):
        super(QueuedMeasurement, self).__init__('data', id)
        self.measurements = measurements
        self.version = version
        self.stream = stream
class QueuedMessage(QueuedObject):
    """A message object that can be passed between processes for signaling.

    Bug fix: the original __init__ passed the *builtin* ``id`` function to
    the base initializer (no ``id`` name was in scope locally), so every
    message stored a function object as its id.  An optional ``id``
    parameter (default None) is now accepted and forwarded instead —
    backward compatible with all existing call sites.
    """
    def __init__(self, command, message='', id=None):
        super(QueuedMessage, self).__init__('msg', id)
        self.command = command
        self.message = message
class Destination(object):
"""!@brief A class representing a data storage location, such as a database,
filesystem, or file format.
This class defines a common API that a real destination class must inherit
from. Destination specific methods that are required will raise a
NotImplementedError if not overridden.
"""
    def __init__(self, logger, config):
        """!@brief Initialize the destination.

        Connects to the backend, loads the stored stream metadata, and
        selects the insertion strategy: batched/asynchronous when the
        server config enables it, otherwise synchronous per-measurement.
        @param logger pass in a logger object
        @param config configuration object (ConfigParser-style: supports
               getboolean/get with section and option names)
        """
        self.logger = logger
        self.config = config
        self.known_streams = {}          # stream name -> stream metadata obj
        self.known_stream_versions = {}  # stream name -> current definition
        self.connect()                   # backend-specific, may raise
        self.read_stream_def_table()     # populates the two dicts above
        if self.config.getboolean("Server", "batch_allowed"):
            self.logger.info("Batched inserts are enabled.")
            # start_writer/queue_measurement are presumably defined further
            # down this class (not visible here) — TODO confirm
            self.start_writer()
            # Asynchronous insertion of batched measurement
            self.inserter = self.queue_measurement
        else:
            self.logger.info("Batched inserts are disabled.")
            # Synchronous insertion of a single measurement
            self.inserter = self.insert_measurement
    def connect(self):
        """!@brief Prepare the backend (open database/file connections).
        This method must be overwritten in the specific implementation.
        @raises NotImplementedError always, in this abstract base class
        """
        raise NotImplementedError
    def close(self):
        """!@brief Close the backend connection cleanly.
        This method must be overwritten in the specific implementation.
        @raises NotImplementedError always, in this abstract base class
        """
        raise NotImplementedError
    def read_stream_def_table(self):
        """!@brief Read stored metadata and populate the knownStreams, and
        knownStreamVersions dictionaries (see the module-level format notes).
        This method must be overwritten in the specific implementation.
        @raises NotImplementedError always, in this abstract base class
        """
        raise NotImplementedError
    def create_new_stream(self, stream, version, template, key_order):
        """!@brief Create a new stream or create a new version of a stream based on
        a stream template.
        This method also enters format_str into the knownStreams dict.
        @param stream a string containing the stream name
        @param version the version number of the new stream (1 = brand new)
        @param template the template dictionary for the new stream
        @param key_order an ordered list of key strings that define the order that
        data is expected in, or None when no binary ordering applies
        @return stream_id the id number for the new stream
        """
        # generate format_str for stream if possible, strings are not supported
        # in binary, do early to trigger exception before write to disk
        err, format_str = self.format_string(template, key_order)
        if err > 0:
            # non-binary-capable stream: keep an empty format string
            format_str = ''
        # get_stream_id is presumably defined elsewhere in this class
        # (not visible in this chunk) — TODO confirm
        stream_id = self.get_stream_id(stream)
        # build the field definition map; key_index records the position of
        # each field in key_order (-1 when no ordering was supplied)
        definition = {}
        if key_order is not None:
            for i, key in enumerate(key_order):
                k = key.strip()
                definition[k] = {"type": template[k], "key_index": i}
        else:
            for key in template:
                k = key.strip()
                definition[k] = {"type": template[k], "key_index": -1}
        if version > 1:
            # new version of an existing stream: extend its metadata object
            stream_obj = self.known_streams[stream]
        else:
            stream_obj = {
                "stream": stream.strip(),
                "id": stream_id,
                "versions": []
            }
        # the top-level fields always reflect the most recent version
        stream_obj["version"] = version
        stream_obj["key_order"] = key_order
        stream_obj["format_str"] = format_str
        stream_obj["definition"] = definition
        # the full history is kept in the versions list
        stream_obj["versions"].append({
            "version": version,
            "key_order": key_order,
            "format_str": format_str,
            "definition": definition,
        })
        # update the stream inventory
        self.known_streams[stream] = stream_obj
        self.known_stream_versions[stream] = stream_obj['definition']
        # id might get updated, so check just in case
        stream_id = self.create_new_stream_destination(stream_obj)
        return stream_id
    def create_new_stream_destination(self, stream_obj):
        """!@brief Store a new stream or creates a new version of a stream in the
        destination.
        This method must be overwritten in the specific implementation.
        @param stream_obj the stream template dictionary
        @return stream_id the id number for the new stream
        @raises NotImplementedError always, in this abstract base class
        """
        raise NotImplementedError
def format_string(self, template, key_order):
"""!@brief Generate a format string for unpacking native data packets.
The format string conresponds to the codes in the struct package.
@param template the stream template dictionary
@param key_order an ordered list of key strings that defines the order
that native format data is expected
@return a tuple of (error, format_str) where error=0 for success.
"""
if key_order is None:
return (1, "No key_order specified")
format_str = '!' # use network byte order
try:
format_str += data_types[self.config.get("Server", "timestamp_type")]["format_char"]
except KeyError:
format_str += data_types["uint"]["format_char"]
for key in key_order:
self.logger.debug('key: %s', key)
dtype = template[key]
try:
format_str += data_types[dtype]["format_char"]
if not data_types[dtype]["binary_allowed"]:
msg = "Unsupported type '{}' in binary data.".format(dtype)
return (1, msg)
except KeyError:
return (1, "Type \"{}\" not recognized.".format(dtype))
return (0, format_str)
# returns version and streamID
def register_stream(self, stream, template, key_order=None):
"""!@brief Register a new stream or new version of a stream with the server.
@param stream a string holding the stream name
@param template the stream definition dictionary
@return a tuple of (error, stream_ver) where error=0 for success, and
stream_ver is a byte string that serves as a unique identifier.
"""
update = False
dest_version = None
stream = stream.strip()
self.logger.info("Attempt to register stream {}".format(stream))
if stream not in self.known_streams.keys():
update = True
dest_version = 1
else:
stream_id = self.known_streams[stream]["id"]
dest_version = self.known_streams[stream]["version"]
if template_validation(template, self.known_stream_versions[stream]):
msg = "Known stream, {} matching current defintion."
msg += " No database modification needed."
self.logger.info(msg.format(stream))
update = False
# its a new version
else:
update = True
dest_version += 1
if update:
valid, msg = registration_validation(stream, template, key_order)
if not valid:
return (1, msg)
stream_id = self.create_new_stream(stream, dest_version, template, key_order)
if stream_id < 0:
return (1, 'server error')
# update the current streams after all that
self.read_stream_def_table()
return (0, struct.pack("!II", stream_id, dest_version))
def insert_measurement(self, stream, measurements):
"""!@brief Synchronously insert data into destination.
This method must be overwritten in the specific implementation.
@param stream a string holding the stream name
@param measurements a dictionary containing the row data
"""
raise NotImplementedError
def insert_measurements(self, stream, version, measurements):
"""!@brief Synchronously insert data into destination.
This method must be overwritten in the specific implementation.
@param stream a string holding the stream name
@param version the version number of the stream
@param measurements a list of dictionaries containing the row data
"""
raise NotImplementedError
def queue_measurement(self, stream, measurement):
"""!@brief Add a measurement dict to be inserted asynchronously."""
ver = self.known_streams[stream]["version"]
self.insert_queue.put(QueuedMeasurement(stream, ver, measurement, self.message_counter))
self.message_counter += 1
def measurement(self, stream, measurements):
"""!@brief Perfom measurement validation, timestamp data if missing field,
then save to destination.
@return a tuple of (error, result_text, measurements)
error: 0 for successful operation
result_text: message to return to client
measurements: processed data, empty dict if error
"""
if stream not in self.known_streams.keys():
msg = (
"Trying to add a measurement to data on an unknown stream: {}"
)
self.logger.warning(msg.format(stream))
return (1, "Unknown stream", {})
if not measurement_validation(measurements, self.known_stream_versions[stream]):
msg = "Measurement didn't validate against the existing format"
self.logger.warning(msg)
return (1, "Invalid measurements against schema", {})
try:
if measurements[TIMESTAMP] == 0:
raise KeyError
except KeyError:
measurements[TIMESTAMP] = current_time(self.config)
# pointer to either queue_measurement or insert_measurement, resolved in __init__
self.inserter(stream, measurements)
result = 0
result_text = ""
return (result, result_text, measurements)
def measurement_ordered(self, stream, time_stamp, measurements):
"""!@brief Process a list of implicitly ordered measurements, then save to
destination.
The measurement list order is defined by the key order from when the
stream was registered.
@param stream a string holding the stream name
@param time_stamp the time_stamp from the data message, 0 if not
specified
@param measurements an ordered list of the measurement data
@return a tuple of (error, result_text, measurements)
error: 0 for successful operation
result_text: message to return to client
measurements: processed data, empty dict if error
"""
meas = {}
for key in self.known_stream_versions[stream]:
idx = self.known_stream_versions[stream][key]["key_index"]
meas[key] = measurements[idx]
meas[TIMESTAMP] = time_stamp
return self.measurement(stream, meas)
def measurement_binary(self, stream, measurements):
"""!@brief Process a binary list of implicitly ordered measurements, then save
to destination.
@param stream a string holding the stream name
@param measurements an ordered byte array of the measurement data
@return a tuple of (error, result_text, measurements)
error: 0 for successful operation
result_text: message to return to client
measurements: processed data, empty dict if error
"""
fmtstr = self.known_streams[stream]["format_str"]
try:
dtuple = struct.unpack_from(fmtstr, measurements)
except:
msg = 'Error unpacking stream data.'
msg2 = msg + ' stream: `{}` format_str: `{}`'
msg2 += ' measurements bytes: `{}`'
self.logger.error(msg2)
return (1, msg, {})
meas = list(dtuple[1:])
time_stamp = dtuple[0]
return self.measurement_ordered(stream, time_stamp, meas)
def find_stream(self, stream_id):
"""!@brief Look up a stream name based on the stream id number
@param stream_id the id number for the stream
@return the stream name corresponding to the stream_id
"""
for stream in self.known_streams:
if self.known_streams[stream]["id"] == stream_id:
return stream
raise ValueError
def get_raw_stream_data(self, stream, start=None, stop=None, fields=[]):
"""!@brief Read stream data from storage between the timestamps given by
time = [start,stop].
This method must be overwritten in the specific implementation.
@param stream a string holding the stream name
@param start 32b unix timestamp that defines the start of the data
window
@param stop 32b unix timestamp that defines the end of the data window
@param fields A list that contains the fields for which data is
desired, the value of the dictionary key is arbitrary.
@return a tuple with (error, data, msg)
error: 0 for a successful operation
data: data is dictionary with fields as the keys and data lists as
the values
msg: holds an error msg or '' if no error
"""
raise NotImplementedError
def get_stat_stream_data(self, stream, start=None, stop=None, fields=[]):
"""!@brief Get statistics on the stream data during the time window defined by
time = [start, stop].
@param stream a string holding the stream name
@param start 32b unix timestamp that defines the start of the data
window
@param stop 32b unix timestamp that defines the end of the data window
@param fields A list that contains the fields for which data is
desired, the value of the dictionary key is arbitrary.
@return a tuple with (error, data, msg)
error: 0 for a successful operation
data: data is dictionary with fields as the keys and statistical
data sub-dictionaries as the values
msg: holds an error msg or '' if no error
"""
try:
result, stream_data, result_text = self.get_raw_stream_data(
stream,
start=start,
stop=stop,
fields=fields
)
data = {}
for field in stream_data:
if field == TIMESTAMP:
data[field] = {
'start': stream_data[field][0],
'stop': stream_data[field][1]
}
elif self.known_stream_versions[stream][field]['type'] == 'string':
# TODO: figure out how to handle strings
data[field] = stream_data[field]
else:
# some stats need to be converted back to the native python
# type for JSON serialization
dtype = data_types[self.known_stream_versions[stream][field]['type']]["type"]
avg = np.nanmean(stream_data[field])
std = np.nanstd(stream_data[field])
max = dtype(np.nanmax(stream_data[field]))
min = dtype(np.nanmin(stream_data[field]))
data[field] = {
'average': avg,
'standard_deviation': std,
'max': max,
'min': min
}
except Exception:
self.logger.exception("Exception in server code:")
msg = "Could not process request."
result, data, result_text = (1, {}, msg)
return(result, data, result_text)
def validate_time_range(self, start, stop):
"""!@brief Make sure the time range is valid, if not rearrange start and stop
times.
@param start 32b unix timestamp that defines the start of the data
window
@param stop 32b unix timestamp that defines the end of the data window
@return tuple of validated (start, stop) times in 32b format
"""
try:
stop = long(stop) * 2**32
except TypeError:
self.logger.debug("Using default stop time")
stop = current_time(self.config)
try:
start = long(start) * 2**32
except TypeError:
self.logger.debug("Using default start time")
start = stop - 5 * 60L * 2**32 # 5 minute range default
if start > stop:
msg = "Requested read range out of order. Swapping range."
self.logger.warning(msg)
msg = "Read request time range (start, stop): ({},{})"
self.logger.debug(msg.format(start, stop))
return (stop, start)
msg = "Read request time range (start, stop): ({},{})"
self.logger.debug(msg.format(start, stop))
return (start, stop)
def print_stream_info(self):
"""!@brief Print user readable display of current streams in destination"""
for stream in self.known_stream_versions:
self.logger.info("")
self.logger.info("=" * 20 + " {} ".format(stream) + "=" * 20)
self.logger.info(" stream_id: {}".format(self.known_streams[stream]['id']))
for field_name in self.known_stream_versions[stream]:
self.logger.info(" field: {} ({})".format(
field_name,
self.known_stream_versions[stream][field_name]['type']
))
self.logger.info("")
def get_stream_id(self, stream):
"""!@brief Generate a new stream id.
The stream id is found by dynamically checking the id of all known
streams and incrementing.
This method could be overwritten if the destination has a built in
way to track the total number of streams.
@param stream a string holding the name of the stream
@return stream_id a unique stream_id
"""
if stream in self.known_streams:
return self.known_streams[stream]['id']
stream_id = 0
for s in self.known_streams:
sid = self.known_streams[s]['id']
if sid > stream_id:
stream_id = sid
return stream_id + 1
    def start_writer(self):
        """!@brief Start the background insertion worker.

        Creates the shared insert queue and spawns ``write_worker`` as a
        daemon process; ``queue_measurement`` feeds it through the queue.
        """
        self.logger.info("Starting destination worker...")
        # per-process counter used to tag queued measurements in order
        self.message_counter = 0
        self.insert_queue = multiprocessing.Queue()
        self.writer = multiprocessing.Process(target=self.write_worker, args=(self.insert_queue,))
        # daemon must be set before start(); worker dies with the parent process
        self.writer.daemon = True
        self.writer.start()
        self.logger.info("Destination worker started")
def write_worker(self, queue):
stream_data = {}
logger = logging.getLogger('write_worker')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('var/write_worker.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s = %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info("Hello from the destination worker")
while True:
# sort new entries
data_cnt = 0
start = time.time()
while True:
try:
d = queue.get(False)
except Queue.Empty:
# logger.debug('Empty queue found')
pass
else:
logger.debug("new data found in queue: {}".format(d.measurements))
if d.type == 'data':
data_cnt += 1
if d.stream not in stream_data:
logger.debug('unrecognized stream: {}'.format(d.stream))
stream_data[d.stream] = {}
if d.version not in stream_data[d.stream]:
logger.debug('unrecognized stream version: {}({})'.format(d.stream, d.version))
stream_data[d.stream][d.version] = []
stream_data[d.stream][d.version].append(d.measurements)
logmsg = 'stream: {} data length: {}'
logger.debug(logmsg.format(d.stream, len(stream_data[d.stream][d.version])))
logger.debug('total data messages processed: {}'.format(data_cnt))
elif d.type == 'msg':
logmsg = 'Write worker recieved command in queue. cmd: {}, msg: {}'
logger.info(logmsg.format(d.command, d.message))
# check timeout condition
if time.time() - start > 1:
logger.debug('timeout condition met')
break
if data_cnt:
logger.debug("I am about to try to insert {} rows".format(data_cnt))
# bulk inserts of sorted data
for s in stream_data:
for v in stream_data[s]:
if len(stream_data[s][v]) > 0:
try:
start = time.time()
self.insert_measurements(s, v, stream_data[s][v], logger=logger)
logmsg = 'Successfully inserted {} rows into stream: {}, elasped time: {} s'
logger.info(logmsg.format(len(stream_data[s][v]), s, time.time()-start))
except:
logger.exception('Unhandled server error encounter in write_worker.')
# TODO: save data in queue that errored to disk
stream_data[s][v] = []
|
{"hexsha": "7f8aa8a1077fa39b627938072daf114b9f35ae15", "size": 24243, "ext": "py", "lang": "Python", "max_stars_repo_path": "ServerStuff/lib/origin/server/origin_destination.py", "max_stars_repo_name": "QuantumQuadrate/powstab2", "max_stars_repo_head_hexsha": "4311302417a859f8c17197648ee958b126d3a454", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ServerStuff/lib/origin/server/origin_destination.py", "max_issues_repo_name": "QuantumQuadrate/powstab2", "max_issues_repo_head_hexsha": "4311302417a859f8c17197648ee958b126d3a454", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-10-24T21:52:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-12T18:58:56.000Z", "max_forks_repo_path": "ServerStuff/lib/origin/server/origin_destination.py", "max_forks_repo_name": "QuantumQuadrate/powstab2", "max_forks_repo_head_hexsha": "4311302417a859f8c17197648ee958b126d3a454", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-12T23:08:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T23:08:12.000Z", "avg_line_length": 40.5401337793, "max_line_length": 108, "alphanum_fraction": 0.5831374005, "include": true, "reason": "import numpy", "num_tokens": 4796}
|
[STATEMENT]
theorem suffix_append:
"suffix xs (ys @ zs) \<longleftrightarrow> suffix xs zs \<or> (\<exists>xs'. xs = xs' @ zs \<and> suffix xs' ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. suffix xs (ys @ zs) = (suffix xs zs \<or> (\<exists>xs'. xs = xs' @ zs \<and> suffix xs' ys))
[PROOF STEP]
by (auto simp: suffix_def append_eq_append_conv2)
|
{"llama_tokens": 147, "file": null, "length": 1}
|
//
// detail/atomic_fenced_block.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_DETAIL_ATOMIC_FENCED_BLOCK_HPP
#define BOOST_ASIO_DETAIL_ATOMIC_FENCED_BLOCK_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#if defined(BOOST_ASIO_HAS_STD_ATOMIC)
#include <atomic>
#include <boost/asio/detail/noncopyable.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace detail {
// RAII helper: the constructor/destructor bracket a region with C++11
// atomic thread fences (std::atomic_thread_fence).
class atomic_fenced_block
  : private noncopyable
{
public:
  enum half_t { half };
  enum full_t { full };
  // Constructor for a half fenced block.
  // No fence on entry; only the release fence in the destructor applies.
  explicit atomic_fenced_block(half_t)
  {
  }
  // Constructor for a full fenced block.
  // Acquire fence on entry pairs with the release fence on exit.
  explicit atomic_fenced_block(full_t)
  {
    std::atomic_thread_fence(std::memory_order_acquire);
  }
  // Destructor.
  // Release fence publishes writes made inside the block.
  ~atomic_fenced_block()
  {
    std::atomic_thread_fence(std::memory_order_release);
  }
};
} // namespace detail
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // defined(BOOST_ASIO_HAS_STD_ATOMIC)
#endif // BOOST_ASIO_DETAIL_ATOMIC_FENCED_BLOCK_HPP
|
{"hexsha": "62a1f440d71da19cbc2b05e11605a86a278dfe00", "size": 1432, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "thirdparty/boost/boost/asio/detail/atomic_fenced_block.hpp", "max_stars_repo_name": "bureau14/qdb-benchmark", "max_stars_repo_head_hexsha": "1839d7ac04417de56b7a7fb2b7deff50756b3048", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2017-01-19T09:35:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-26T07:31:38.000Z", "max_issues_repo_path": "thirdparty/boost/boost/asio/detail/atomic_fenced_block.hpp", "max_issues_repo_name": "bureau14/qdb-benchmark", "max_issues_repo_head_hexsha": "1839d7ac04417de56b7a7fb2b7deff50756b3048", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-11-09T15:38:28.000Z", "max_issues_repo_issues_event_max_datetime": "2015-11-12T11:14:58.000Z", "max_forks_repo_path": "thirdparty/boost/boost/asio/detail/atomic_fenced_block.hpp", "max_forks_repo_name": "bureau14/qdb-benchmark", "max_forks_repo_head_hexsha": "1839d7ac04417de56b7a7fb2b7deff50756b3048", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2015-11-02T09:37:09.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-05T06:38:49.000Z", "avg_line_length": 22.375, "max_line_length": 79, "alphanum_fraction": 0.7297486034, "num_tokens": 362}
|
import argparse
import os
import torch.backends.cudnn as cudnn
import models
import torchvision.transforms as transforms
import flow_transforms
from scipy.ndimage import imread
from scipy.misc import imsave
from loss import *
import time
import random
from glob import glob
import pdb
import matplotlib.pyplot as plt
# import sys
# sys.path.append('../cython')
# from connectivity import enforce_connectivity
# pin inference to the first visible GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# every lowercase, non-dunder attribute of the models package is treated as a
# selectable architecture name (not referenced again in this script)
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__"))
# command-line interface: single-image superpixel inference
parser = argparse.ArgumentParser(description='PyTorch SPixelNet inference on a folder of imgs',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--image', type=str, default='./example_image', help='path to image')
parser.add_argument('--data_suffix', default='jpg', help='suffix of the testing image')
parser.add_argument('--pretrained', metavar='PTH', help='path to pre-trained model',
                    default= './pretrain_ckpt/SpixelNet_bsd_ckpt.tar')
parser.add_argument('--output', metavar='DIR', default= './demo' , help='path to output folder')
parser.add_argument('--downsize', default=16, type=float,help='superpixel grid cell, must be same as training setting')
parser.add_argument('-nw', '--num_threads', default=1, type=int, help='num_threads')
parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size')
args = parser.parse_args()
# fixed seed for reproducible runs
random.seed(100)
@torch.no_grad()
def test(args, model, img_file, save_path):
    """Run superpixel inference on one image and save the visualization.

    Reads ``img_file``, resizes it to a multiple of 16, runs ``model`` on the
    GPU, builds the superpixel map and writes a ``*_sPixel.png`` overlay under
    ``save_path/spixel_viz``.  Returns the model forward-pass time in seconds.
    NOTE(review): relies on ``scipy.ndimage.imread``/``scipy.misc.imsave``,
    which were removed in modern SciPy releases -- confirm the pinned version.
    """
    # Data loading code
    input_transform = transforms.Compose([
        flow_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0,0,0], std=[255,255,255]),
        transforms.Normalize(mean=[0.411,0.432,0.45], std=[1,1,1])
    ])
    load_path = img_file
    # image id = basename minus a 4-character extension (assumes e.g. ".jpg")
    imgId = os.path.basename(img_file)[:-4]
    # may get 4 channel (alpha channel) for some format
    print(imgId)
    color = True
    img_ = imread(load_path)
    if len(img_.shape) == 2:
        # grayscale input: replicate to 3 channels and keep a nonzero mask
        # (mask is only used below when color == False)
        img_ = np.tile(np.expand_dims(img_, 2), (1,1,3))
        mask = np.where(img_>0, np.ones_like(img_), np.zeros_like(img_))
        color = False
    # drop any alpha channel
    img_ = img_[:,:,:3]
    H, W, _ = img_.shape
    # round dimensions up to the next multiple of 16 for the network
    H_, W_ = int(np.ceil(H/16.)*16), int(np.ceil(W/16.)*16)
    # get spixel id
    n_spixl_h = int(np.floor(H_ / args.downsize))
    n_spixl_w = int(np.floor(W_ / args.downsize))
    spix_values = np.int32(np.arange(0, n_spixl_w * n_spixl_h).reshape((n_spixl_h, n_spixl_w)))
    # shift9pos is a project helper; presumably produces the 9-neighbor
    # index stack used by update_spixl_map -- see its definition
    spix_idx_tensor_ = shift9pos(spix_values)
    spix_idx_tensor = np.repeat(
        np.repeat(spix_idx_tensor_, args.downsize, axis=1), args.downsize, axis=2)
    spixeIds = torch.from_numpy(np.tile(spix_idx_tensor, (1, 1, 1, 1))).type(torch.float).cuda()
    n_spixel = int(n_spixl_h * n_spixl_w)
    img = cv2.resize(img_, (W_, H_), interpolation=cv2.INTER_CUBIC)
    img1 = input_transform(img)
    ori_img = input_transform(img_)
    # compute output (time only the forward pass)
    tic = time.time()
    output,_ = model(img1.cuda().unsqueeze(0))
    toc = time.time() - tic
    # assign the spixel map
    curr_spixl_map = update_spixl_map(spixeIds, output)
    ori_sz_spixel_map = F.interpolate(curr_spixl_map.type(torch.float), size=( H_,W_), mode='nearest').type(torch.int)
    # undo the mean subtraction applied by input_transform before display
    mean_values = torch.tensor([0.411, 0.432, 0.45], dtype=img1.cuda().unsqueeze(0).dtype).view(3, 1, 1)
    spixel_viz, spixel_label_map = get_spixel_image((ori_img + mean_values).clamp(0, 1), ori_sz_spixel_map.squeeze(), n_spixels= n_spixel, b_enforce_connect=True)
    # save spixel viz
    if not os.path.isdir(os.path.join(save_path, 'spixel_viz')):
        os.makedirs(os.path.join(save_path, 'spixel_viz'))
    spixl_save_name = os.path.join(save_path, 'spixel_viz', imgId + '_sPixel.png')
    if color:
        imsave(spixl_save_name, spixel_viz.transpose(1, 2, 0))
    else:
        # grayscale input: zero out the background using the mask built above
        imsave(spixl_save_name, spixel_viz.transpose(1, 2, 0)*mask)
    return toc
def main():
    """Load the pretrained SpixelNet checkpoint and generate a superpixel
    visualization for the single image given on the command line."""
    global args, save_path
    print("=>will generate superpixels for image: '{}'".format(args.image))
    save_path = args.output
    print('=> will save everything to {}'.format(save_path))
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    if not os.path.exists(args.image):
        print(f'Error, no image found at {args.image}')
        # bug fix: previously fell through and crashed inside test();
        # bail out early when the input image is missing
        return
    # create model
    network_data = torch.load(args.pretrained)
    print("=> using pre-trained model '{}'".format(args.pretrained))
    model = models.__dict__[network_data['arch']]( data = network_data).cuda()
    model.eval()
    args.arch = network_data['arch']
    cudnn.benchmark = True
    # renamed from 'time' so the time module is no longer shadowed
    elapsed = test(args, model, args.image, save_path)
    print("time cost: %.3f" % elapsed)
# standard script entry point
if __name__ == '__main__':
    main()
|
{"hexsha": "f220ce2c7238cd73da34cb05db1270436134bd98", "size": 4803, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_demo.py", "max_stars_repo_name": "YanFangCS/AINET", "max_stars_repo_head_hexsha": "a860d9dc59a53242746057e1b97c2e5088d9a6d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2022-01-07T10:27:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T13:15:23.000Z", "max_issues_repo_path": "run_demo.py", "max_issues_repo_name": "YanFangCS/AINET", "max_issues_repo_head_hexsha": "a860d9dc59a53242746057e1b97c2e5088d9a6d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_demo.py", "max_forks_repo_name": "YanFangCS/AINET", "max_forks_repo_head_hexsha": "a860d9dc59a53242746057e1b97c2e5088d9a6d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-18T09:26:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T09:26:35.000Z", "avg_line_length": 35.8432835821, "max_line_length": 163, "alphanum_fraction": 0.6785342494, "include": true, "reason": "from scipy", "num_tokens": 1306}
|
!
Module forward_support
Use Phys_constants
Use Atomic_data
Use Line_data_structure
Use Eq_state
Contains
! This routine checks an optical depth scale to make sure there are no
! two consecutive points with the same value. If such an occurrence is found
! then the tau scale from that point to the bottom is shifted slightly by 1%
! of the degenerate value. The corrected scale is returned in the same vector
! It is assumed that the tau scale is sorted from top to bottom, so tau(1) is the
! top of the atmosphere
!
! If you change this, check also misc/z_to_tau.f90
Subroutine Check_tau(tau)
Implicit None
Real, Dimension(:) :: tau
Integer :: npoints, ipoint
npoints=Size(tau)
Do ipoint=npoints-1, 1, -1
If (abs(tau(ipoint)-tau(ipoint+1)) .lt. .01*tau(ipoint) ) then
tau(ipoint+1:npoints)=tau(ipoint+1:npoints)+.01*tau(ipoint)
End if
End do
End Subroutine Check_tau
! This subroutine constructs the absorption matrix at all the depth-points
! from the array of elements Phi_x and Psi_x. The matrix is normalized to
! the continuum opacity at 5000 Angstroms (which enters through the array
! Cont).
!
  ! Build the 4x4 absorption matrix at every depth point and wavelength.
  ! WARNING: the Phi_* and Psi_* arguments are normalized IN PLACE (divided
  ! by the continuum opacity Cont), so the caller's arrays are modified.
  ! The matrix layout: Phi_I on the diagonal, Phi_Q/U/V coupling I with
  ! Q/U/V symmetrically, and Psi_Q/U/V filling the antisymmetric
  ! (dispersion) off-diagonal elements.
  Subroutine Abs_matrix(npoints, nwlengths, Phi_I, Phi_Q, Phi_U, Phi_V, Psi_Q, &
       Psi_U, Psi_V, Cont, Absorp) ! Construct absorption matrix
    Implicit None
    Integer :: npoints, nwlengths, iwave
    Real, Dimension (nwlengths, npoints, 4, 4) :: Absorp
    Real, Dimension (nwlengths, npoints) :: Phi_I, Phi_Q, Phi_U, Phi_V
    Real, Dimension (nwlengths, npoints) :: Psi_Q, Psi_U, Psi_V
    Real, Dimension (npoints) :: Cont
!
! Normalize every profile to the continuum opacity (mutates the arguments)
    Do iwave=1, nwlengths
       Phi_I(iwave, :)=Phi_I(iwave, :)/Cont(:)
       Phi_Q(iwave, :)=Phi_Q(iwave, :)/Cont(:)
       Phi_U(iwave, :)=Phi_U(iwave, :)/Cont(:)
       Phi_V(iwave, :)=Phi_V(iwave, :)/Cont(:)
       Psi_Q(iwave, :)=Psi_Q(iwave, :)/Cont(:)
       Psi_U(iwave, :)=Psi_U(iwave, :)/Cont(:)
       Psi_V(iwave, :)=Psi_V(iwave, :)/Cont(:)
    End do
!
! Diagonal: total absorption profile
    Absorp(1:nwlengths,1:npoints,1,1)=Phi_I(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,2,2)=Phi_I(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,3,3)=Phi_I(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,4,4)=Phi_I(1:nwlengths,1:npoints)
!
! Symmetric I-Q, I-U, I-V coupling
    Absorp(1:nwlengths,1:npoints,1,2)=Phi_Q(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,2,1)=Phi_Q(1:nwlengths,1:npoints)
!
    Absorp(1:nwlengths,1:npoints,1,3)=Phi_U(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,3,1)=Phi_U(1:nwlengths,1:npoints)
!
    Absorp(1:nwlengths,1:npoints,1,4)=Phi_V(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,4,1)=Phi_V(1:nwlengths,1:npoints)
!
! Antisymmetric dispersion terms (all 16 elements are now assigned)
    Absorp(1:nwlengths,1:npoints,2,3)=Psi_V(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,3,2)=-Psi_V(1:nwlengths,1:npoints)
!
    Absorp(1:nwlengths,1:npoints,2,4)=-Psi_U(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,4,2)=Psi_U(1:nwlengths,1:npoints)
!
    Absorp(1:nwlengths,1:npoints,3,4)=Psi_Q(1:nwlengths,1:npoints)
    Absorp(1:nwlengths,1:npoints,4,3)=-Psi_Q(1:nwlengths,1:npoints)
!
    Return
  End Subroutine Abs_matrix
! This subroutine computes the damping parameter for the absorption profile.
! Temp, El_P and Pg and in cgs units
! If GA and GQ are .gt. 0, they are used to compute the Stark and
! radiative broadenings in the same manner as MULTI.
! If GA and GQ are .le. 0 then an approximation is used (see Gray)
!
  ! Compute the damping parameter Damp (radiative + Van der Waals + Stark
  ! rates, normalized to 4*pi times the Doppler width in Hz) for the line
  ! absorption profile.  Temp, El_p and Pg are in cgs units; Dldop is the
  ! Doppler width in Angstrom.  GA/GQ > 0 select MULTI-style radiative and
  ! Stark broadening; otherwise approximations are used (see Gray).
  Subroutine damping(Line, Temp, El_p, Pg, Dldop, Damp, GA, GQ)
    Use Debug_module
    Implicit None
    Type (Line_data) :: Line
    ! (metal and Pp are declared but not used in this routine)
    Real :: Temp, El_p, Dldop, Damp, C6, ioniz, chi_l, a, b, metal
    Real :: gamma_r, gamma_vdw, gamma_s, Pg, dldopHz, GA, GQ
    Real :: Sigma, Alpha, X, GX, GAMMAF, GVW, K, A0, M0, H1FRC, HE1FRC, VBAR
    Real, Dimension(1) :: tmp1, tmp2, tmp3, tmp4, tmp5, T1, ElP1, Pg1
    Real, Dimension (10) :: Pp
    Logical, Save :: Warning
    Data Warning/.FALSE./
    PARAMETER (K=1.380658E-23,M0=1.660540E-27)
    PARAMETER (A0=5.29177249E-11) ! Bohr radius
!
    dldopHz=cc/Line%Wlength/Line%Wlength*Dldop/1.E-8 ! Dldop in Hz
!
! Radiative damping:
    If (GA .le. 0) then
! Reference: Gray 1976, "The observation and analysis of stellar
! photospheres", pag 227, just after Eq. (11-19). This approximation
! is poor, but the radiative damping is usually negligible anyway.
!
       gamma_r=0.22233/(Line%Wlength*Line%Wlength*1.E-16)
    Else
       gamma_r=GA ! Same as MULTI
    End if
! NOTE(review): the test below checks gamma_r (just computed, always > 0
! here) rather than Line%Gamma_Rad, so for collisions==3 the explicit
! coefficient always overrides -- confirm this is the intent
    If (Line%collisions .eq. 3 .and. gamma_r .ge. 0) then ! Explicit Radiative coefficient
       gamma_r = Line%Gamma_Rad
       gamma_r = gamma_r * 1D8
    End if
!
! Van der Waals damping:
    If (Line%collisions .eq. 1 .or. (Line%Collisions .eq. 3 .and. Line%Gamma_vdW_16 .lt. 0) ) then
! Reference: Gray (see above, pag 239, Eqs. (11-35) and (11-36))
! Formula used:
! log (gamma_vdw) = 19.6 + 2./5.*log (C6(H)) + log Pg - 7./10.*log T
       If (Line%Ion_stage .eq. 1) then
          ioniz=At_ioniz1(Line%Atomic_number)
       else if (Line%Ion_stage .eq. 2) then
          ioniz=At_ioniz2(Line%Atomic_number)
       else
          If (.not. Warning) &
               Print *,'Ionization stage gt 2. Van der Waals broadening not considered'
          ioniz=-1
          Warning=.TRUE.
       end if
       gamma_vdw=0.
       If (ioniz .gt. -1) then
          chi_l=1.24E4/Line%Wlength
          a=(ioniz-Line%Energy_low-chi_l)**2.
          b=(ioniz-Line%Energy_low)**2.
          C6=3.E-31*(1./a-1./b)
          If (C6 .lt. 0) then
             gamma_vdw=0.
          Else
             gamma_vdw=19.6 + 2./5.*log10(C6) + log10(Pg) - 7./10.*log10(Temp)
             gamma_vdw=10.**(gamma_vdw)
          End if
          gamma_vdw=gamma_vdw*Line%VDW_enh ! Empirical Van der Waals enhancement
       End if
    Else if (Line%collisions .eq. 2) then ! Barklem formula
! Following
! http://www.astro.uu.se/~barklem/howto.html
       Sigma=Line%Bark_sigma*A0*A0
       Alpha=Line%Bark_alpha
! Compute the Gamma function of X, this function is valid over the
! range 1<X<2 ie. 0<ALPHA<2 which is always satisfied
       X=2.-ALPHA*.5
       GX=X-1.0
       GAMMAF=1+(-.5748646+(.9512363+(-.6998588+(.4245549-.1010678*GX &
            )*GX)*GX)*GX)*GX
! Compute the halfwidth per unit perturber number density for this temp
       GVW=(4./PI)**(ALPHA*0.5)*GAMMAF*1.E4*SIGMA
       VBAR=SQRT(8.*K*Temp/PI/M0*(1./1.008+1./Line%Atomic_weight))
       GVW=GVW*((VBAR/1.E4)**(1.-ALPHA))
! Get H and He partial pressures
       T1=Temp
       ElP1=El_p
       Pg1=Pg
       Call compute_others_from_T_Pe_Pg(1,T1,Elp1, Pg1, tmp1,tmp2,tmp3,tmp4,tmp5)
! Fullwidth given H1FRC perturbers per cm^3 with approx He I broadening
! The factor of 2 comes from converting to the full width.
! The factor of 1.E6 converts from SI to cgs
       H1FRC=tmp1(1) ! nH=neutral H
       HE1FRC=0.1*H1FRC ! Approx neutral He by 0.10*neutral H
       GVW=GVW*(H1FRC+ 0.42*HE1FRC)*1.E6*2.
       gamma_vdw=GVW
       gamma_vdw=gamma_vdw*Line%VDW_enh ! Empirical Van der Waals enhancement
    Else If (Line%Collisions .ne. 3) then
       Print *,'Unknown collisional broadening in damping (forward_supp.f90)'
       Stop
    Endif
! NOTE(review): for collisions==3 with Gamma_vdW_16 < 0, the Gray value
! above already included VDW_enh and it is multiplied in AGAIN below; and
! for Gamma_vdW_16 == 0 gamma_vdw is never assigned before use -- verify
    If (Line%collisions .eq. 3) then ! Explicit vdW coefficient
! Get H partial pressures
       T1=Temp
       ElP1=El_p
       Pg1=Pg
       Call compute_others_from_T_Pe_Pg(1,T1,Elp1, Pg1, tmp1,tmp2,tmp3,tmp4,tmp5)
       If (Line%Gamma_vdW_16 .gt. 0) then
          gamma_vdw=Line%Gamma_vdW_16*(tmp1(1)/1D16)/(1D4**0.38)*(Temp**0.38)
          gamma_vdw = gamma_vdw * 1D8
       End if
       gamma_vdw=gamma_vdw*Line%VDW_enh ! Empirical Van der Waals enhancement!
    End if
! Stark damping
    If (GQ .le. 0) then ! debug
! Formula used: gamma_4=38.8*(v**(1./3.))*(C4**(2./3.))*N , from
! Unsold 1955, "Physik der Sternatmospharen", 2nd ed.,
! Springer-Verlag, pp. 326 and 331. According to Gray (ref above),
! pag 237, this is similar to
! log (gamma_4) = 19.4 + 2./3*log C4 + log Pe - 5./6.*log T
! The value of log C4 is approximated by -14. (see Gray, Table 11-3)
       gamma_s = 19.4 + 2./3.*(-14.) + log10(El_p) - 5./6.*log10(Temp)
       gamma_s = 10.**(gamma_s)
    Else
!       gamma_s = GQ*El_p/bk/Temp ! Same as MULTI
       gamma_s = GQ*((El_p/bk/Temp)**0.6666666)*4.*Pi*0.426*0.6*(3**2-2**2) ! Same as MULTI for i=2 to i=3 transition
    End if
! NOTE(review): as with gamma_r above, this tests the just-computed
! gamma_s (always > 0) instead of Line%Gamma_Strk_12 -- so the explicit
! Stark coefficient always overrides for collisions==3; confirm intent
    If (Line%collisions .eq. 3 .and. gamma_s .ge. 0) then ! Explicit Stark coefficient
       gamma_s = Line%Gamma_Strk_12*(El_P/BK/Temp/1D12)/(1D4**0.17)*(Temp**0.17)
       gamma_s = gamma_s * 1D8
    End if
!    if (el_p .gt. 38 .and. el_p .lt. 80) then ! debug
!       print *,'gamma=',gamma_r,gamma_vdw,gamma_s
!       pause
!    endif
! Damping parameter: total rate over 4*pi times the Doppler width in Hz
    Damp=(gamma_r+gamma_vdw+gamma_s)/4./Pi/dldopHz
    Return
  End Subroutine damping
!
subroutine matinx ( a )
! 'Exact' in-place inversion of a 4 x 4 matrix via the adjugate
! (cofactor) method.
!
! The matrix is first rescaled by the reciprocal of its largest
! absolute element for numerical conditioning; the scale factor
! cancels exactly in the final division by the determinant, so the
! result is the inverse of the original matrix.
!
! If the matrix is identically zero (or NaN, which fails every
! comparison), it is replaced by the identity and a message is
! logged instead of aborting.
!
! Argument:
!   a -- 4x4 real matrix; overwritten with its inverse on exit.
  Use Debug_Module ! Optional
  implicit real ( a-h, o-z )
  dimension a ( 4 , 4 ) , b ( 4 , 4 )
  integer i , j
! Largest absolute element, used as the conditioning scale factor.
  absmax = 0.
  do i = 1 , 4
     do j = 1 , 4
! Bug fix: store the absolute value. The old code stored the signed
! element, so a matrix whose last-compared large element was negative
! could leave absmax at a wrong (even zero) value and spuriously
! trigger the singularity branch.
        if ( abs ( a ( i , j ) ) .gt. absmax ) absmax = abs ( a ( i , j ) )
     end do
  end do
  if ( absmax .eq. 0. ) then
     print *,'singularity problem. Zero or NaN matrix D in Hermite'
     print *,'a=',a
! Recover by substituting the identity matrix.
     do i=1, 4
        do j=1, 4
           a(i,j)=0.
        end do
        a(i,i)=1.
     end do
! Bug fix: reset the scale factor so the division below does not
! produce Inf after the identity substitution (identity inverts to
! identity with absmax = 1).
     absmax = 1.
! Only if optional module debug is used
     Call Debug_Log('Singularity problem',1)
  end if
  fabsmx = 1.d00 / absmax
! Rescale the matrix; the factor cancels in fdeta below.
  do i = 1 , 4
     do j = 1 , 4
        a ( i , j ) = a ( i , j ) * fabsmx
     end do
  end do
! Cofactors (transposed adjugate) of the rescaled matrix.
  b(1,1) = a(2,2) * a(3,3) * a(4,4) + a(2,3) * a(3,4) * a(4,2) &
       + a(2,4) * a(3,2) * a(4,3) - a(2,2) * a(3,4) * a(4,3) &
       - a(2,3) * a(3,2) * a(4,4) - a(2,4) * a(3,3) * a(4,2)
  b(2,1) = a(2,3) * a(3,1) * a(4,4) + a(2,4) * a(3,3) * a(4,1) &
       + a(2,1) * a(3,4) * a(4,3) - a(2,3) * a(3,4) * a(4,1) &
       - a(2,4) * a(3,1) * a(4,3) - a(2,1) * a(3,3) * a(4,4)
  b(3,1) = a(2,4) * a(3,1) * a(4,2) + a(2,1) * a(3,2) * a(4,4) &
       + a(2,2) * a(3,4) * a(4,1) - a(2,4) * a(3,2) * a(4,1) &
       - a(2,1) * a(3,4) * a(4,2) - a(2,2) * a(3,1) * a(4,4)
  b(4,1) = a(2,1) * a(3,3) * a(4,2) + a(2,2) * a(3,1) * a(4,3) &
       + a(2,3) * a(3,2) * a(4,1) - a(2,1) * a(3,2) * a(4,3) &
       - a(2,2) * a(3,3) * a(4,1) - a(2,3) * a(3,1) * a(4,2)
  b(1,2) = a(3,2) * a(4,4) * a(1,3) + a(3,3) * a(4,2) * a(1,4) &
       + a(3,4) * a(4,3) * a(1,2) - a(3,2) * a(4,3) * a(1,4) &
       - a(3,3) * a(4,4) * a(1,2) - a(3,4) * a(4,2) * a(1,3)
  b(2,2) = a(3,3) * a(4,4) * a(1,1) + a(3,4) * a(4,1) * a(1,3) &
       + a(3,1) * a(4,3) * a(1,4) - a(3,3) * a(4,1) * a(1,4) &
       - a(3,4) * a(4,3) * a(1,1) - a(3,1) * a(4,4) * a(1,3)
  b(3,2) = a(3,4) * a(4,2) * a(1,1) + a(3,1) * a(4,4) * a(1,2) &
       + a(3,2) * a(4,1) * a(1,4) - a(3,4) * a(4,1) * a(1,2) &
       - a(3,1) * a(4,2) * a(1,4) - a(3,2) * a(4,4) * a(1,1)
  b(4,2) = a(3,1) * a(4,2) * a(1,3) + a(3,2) * a(4,3) * a(1,1) &
       + a(3,3) * a(4,1) * a(1,2) - a(3,1) * a(4,3) * a(1,2) &
       - a(3,2) * a(4,1) * a(1,3) - a(3,3) * a(4,2) * a(1,1)
  b(1,3) = a(4,2) * a(1,3) * a(2,4) + a(4,3) * a(1,4) * a(2,2) &
       + a(4,4) * a(1,2) * a(2,3) - a(4,2) * a(1,4) * a(2,3) &
       - a(4,3) * a(1,2) * a(2,4) - a(4,4) * a(1,3) * a(2,2)
  b(2,3) = a(4,3) * a(1,1) * a(2,4) + a(4,4) * a(1,3) * a(2,1) &
       + a(4,1) * a(1,4) * a(2,3) - a(4,3) * a(1,4) * a(2,1) &
       - a(4,4) * a(1,1) * a(2,3) - a(4,1) * a(1,3) * a(2,4)
  b(3,3) = a(4,4) * a(1,1) * a(2,2) + a(4,1) * a(1,2) * a(2,4) &
       + a(4,2) * a(1,4) * a(2,1) - a(4,4) * a(1,2) * a(2,1) &
       - a(4,1) * a(1,4) * a(2,2) - a(4,2) * a(1,1) * a(2,4)
  b(4,3) = a(4,1) * a(1,3) * a(2,2) + a(4,2) * a(1,1) * a(2,3) &
       + a(4,3) * a(1,2) * a(2,1) - a(4,1) * a(1,2) * a(2,3) &
       - a(4,2) * a(1,3) * a(2,1) - a(4,3) * a(1,1) * a(2,2)
  b(1,4) = a(1,2) * a(2,4) * a(3,3) + a(1,3) * a(2,2) * a(3,4) &
       + a(1,4) * a(2,3) * a(3,2) - a(1,2) * a(2,3) * a(3,4) &
       - a(1,3) * a(2,4) * a(3,2) - a(1,4) * a(2,2) * a(3,3)
  b(2,4) = a(1,3) * a(2,4) * a(3,1) + a(1,4) * a(2,1) * a(3,3) &
       + a(1,1) * a(2,3) * a(3,4) - a(1,3) * a(2,1) * a(3,4) &
       - a(1,4) * a(2,3) * a(3,1) - a(1,1) * a(2,4) * a(3,3)
  b(3,4) = a(1,4) * a(2,2) * a(3,1) + a(1,1) * a(2,4) * a(3,2) &
       + a(1,2) * a(2,1) * a(3,4) - a(1,4) * a(2,1) * a(3,2) &
       - a(1,1) * a(2,2) * a(3,4) - a(1,2) * a(2,4) * a(3,1)
  b(4,4) = a(1,1) * a(2,2) * a(3,3) + a(1,2) * a(2,3) * a(3,1) &
       + a(1,3) * a(2,1) * a(3,2) - a(1,1) * a(2,3) * a(3,2) &
       - a(1,2) * a(2,1) * a(3,3) - a(1,3) * a(2,2) * a(3,1)
! Determinant of the rescaled matrix (Laplace expansion, first row).
  det = a ( 1 , 1 ) * b ( 1 , 1 ) + a ( 1 , 2 ) * b ( 2 , 1 ) &
       + a ( 1 , 3 ) * b ( 3 , 1 ) + a ( 1 , 4 ) * b ( 4 , 1 )
! Combined factor: undoes the conditioning scale and divides by det.
  fdeta = fabsmx / det
  do i = 1 , 4
     do j = 1 , 4
        a ( i , j ) = b ( i , j ) * fdeta
     end do
  end do
  return
end subroutine matinx
!______________________________________________________________________________
subroutine matinx4 ( b )
!______________________________________________________________________________
! Analytic in-place inversion of a 4x4 matrix assumed to have the
! symmetric/antisymmetric structure of a polarized radiative-transfer
! absorption matrix: only seven entries are read
! (q=b(1,2), u=b(1,3), v=b(1,4), r=b(2,3), s=b(4,2), t=b(3,4)), the
! diagonal is assumed to equal 1, and the remaining off-diagonals are
! assumed to follow the standard sign relations of that structure.
! NOTE(review): inputs that do not have exactly this structure are
! inverted incorrectly -- confirm all callers pass such matrices.
  implicit real ( a-h, o-z )
  dimension b ( 4 , 4 )
! Independent matrix elements.
  q=b(1,2)
  u=b(1,3)
  v=b(1,4)
  r=b(2,3)
  s=b(4,2)
  t=b(3,4)
! Squares and recurring products used by the closed-form inverse.
  q2=q*q
  u2=u*u
  v2=v*v
  r2=r*r
  s2=s*s
  t2=t*t
  a=q*t+r*v+s*u
  a2=a*a
  taq=t*a+q
  qat=q*a-t
  sau=s*a+u
  uas=u*a-s
  rav=r*a+v
  var=v*a-r
  ur=u*r-s*v
  uv=u*v+r*s
  qr=q*r-t*v
  qs=q*s-t*u
  qu=q*u+t*s
  qv=q*v+t*r
! Closed-form determinant for this matrix structure.
  det=1.+t2+r2+s2-q2-u2-v2-a2
! Overwrite b with its inverse, element by element.
  b(1,1)=(1.+t2+r2+s2)/det
  b(2,1)=(ur-taq)/det
  b(3,1)=(-qr-sau)/det
  b(4,1)=(qs-rav)/det
  b(1,2)=(-ur-taq)/det
  b(2,2)=(1.+t2-u2-v2)/det
  b(3,2)=(qu-var)/det
  b(4,2)=(qv+uas)/det
  b(1,3)=(qr-sau)/det
  b(2,3)=(qu+var)/det
  b(3,3)=(1.+s2-q2-v2)/det
  b(4,3)=(uv-qat)/det
  b(1,4)=(-qs-rav)/det
  b(2,4)=(qv-uas)/det
  b(3,4)=(uv+qat)/det
  b(4,4)=(1.+r2-q2-u2)/det
  return
end subroutine matinx4
!
! This routine performs a matrix product A.B=C. The dimension of A are nrA
! by ncA (number of rows and columns, respectively), while ncB is the number
! of columns in B.
!
Subroutine Multiply_matrix(nrA, ncA, ncB, A, B, C)
! Compute the matrix product C = A.B.
!
! Uses the Fortran 90 intrinsic matmul instead of an explicit triple
! loop; the result is identical and the intrinsic is typically faster.
!
! Arguments:
!   nrA, ncA : number of rows and columns of A
!   ncB      : number of columns of B (B is ncA x ncB)
!   A, B     : input matrices (unchanged)
!   C        : output, nrA x ncB, overwritten with A.B
  Implicit None
  Integer :: nrA, ncA, ncB
  Real, Dimension (nrA, ncA) :: A
  Real, Dimension (ncA, ncB) :: B
  Real, Dimension (nrA, ncB) :: C
!
  C = matmul(A, B)
  Return
End Subroutine Multiply_matrix
! This subroutine calculates the derivatives of a function y assuming a
! parabolic behavior between i-1, i and i+1 (being i the point where the
! derivative is to be evaluated). The derivatives at the boundaries are
! computed assuming a linear behavior. The vector x does not need to be
! equispaced.
!
Subroutine Parab_deriv(npoints, x, y, dy)
! Derivative dy = y'(x) assuming a parabola through the points
! (i-1, i, i+1) at each interior point i, and a linear (one-sided)
! difference at the two boundaries. x need not be equispaced.
! NOTE(review): assumes npoints >= 2 -- confirm callers guarantee this.
!
! Arguments:
!   npoints : number of points in x, y, dy
!   x, y    : abscissae and function values (unchanged)
!   dy      : output, derivative of y with respect to x at each point
  Implicit None
  Integer :: npoints, i
  Real, Dimension (npoints) :: x, y, dy
  Real :: a, b, den  ! unused local 'c' removed: the parabola constant is never needed
!
! Linear (one-sided) differences at the boundaries
!
  dy(1)=(y(2)-y(1))/(x(2)-x(1))
  dy(npoints)=(y(npoints)-y(npoints-1))/(x(npoints)-x(npoints-1))
!
! Interior points: fit y = a x^2 + b x + c through (i-1, i, i+1) and
! evaluate y'(x_i) = 2 a x_i + b (c drops out of the derivative).
!
  Do i=2,npoints-1
     den=(x(i)-x(i-1))*(x(i)-x(i+1))*(x(i-1)-x(i+1))
     a = (x(i+1)*(y(i-1)-y(i)) + x(i-1)*(y(i)-y(i+1)) + &
          x(i)*(y(i+1)-y(i-1)))/den
     b = (x(i+1)*x(i+1)*(y(i)-y(i-1)) + x(i)*x(i)*(y(i-1)-y(i+1)) + &
          x(i-1)*x(i-1)*(y(i+1)-y(i)))/den
     dy(i)=2.*a*x(i)+b
  End do
  Return
End Subroutine Parab_deriv
End Module forward_support
|
{"hexsha": "6946e0d928508f8351919e05e2d547b1c142fed9", "size": 16482, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "inversor/database/NICOLE/forward/forward_supp.f90", "max_stars_repo_name": "aasensio/DeepLearning", "max_stars_repo_head_hexsha": "71838115ce93e0ca96c8314cff3f07de1d64c235", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inversor/database/NICOLE/forward/forward_supp.f90", "max_issues_repo_name": "aasensio/DeepLearning", "max_issues_repo_head_hexsha": "71838115ce93e0ca96c8314cff3f07de1d64c235", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inversor/database/NICOLE/forward/forward_supp.f90", "max_forks_repo_name": "aasensio/DeepLearning", "max_forks_repo_head_hexsha": "71838115ce93e0ca96c8314cff3f07de1d64c235", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4811715481, "max_line_length": 115, "alphanum_fraction": 0.5376774663, "num_tokens": 7195}
|
import numpy as np
def generate_spirals(
    n_samples,
    n_class=2,
    noise=0.3,
    random_state=None,
):
    """
    Generate a 2-dimensional spiral classification dataset.

    Each class occupies one arm of a set of interleaved spirals, with
    Gaussian angular noise controlling the overlap between arms.
    (The previous docstring incorrectly described a Gaussian XOR
    distribution.)

    Parameters
    ----------
    n_samples : int
        Total number of points divided among the classes with
        equal probability.
    n_class : int, optional (default=2)
        Number of classes (spiral arms). Only 2, 3, 5 and 7 are
        currently supported.
    noise : float, optional (default=0.3)
        Parameter controlling the spread of each class.
    random_state : int, RandomState instance, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Raises
    ------
    ValueError
        If ``n_class`` is not one of the supported values.
    """
    # Number of turns of each spiral arm grows with the class count.
    turns_by_class = {2: 2, 3: 2.5, 5: 3.5, 7: 4.5}
    if n_class not in turns_by_class:
        raise ValueError("sorry, can't currently support %s classes " % n_class)
    turns = turns_by_class[n_class]

    if random_state is not None:
        np.random.seed(random_state)

    X = []
    y = []
    # Randomly split the total sample count among the classes.
    # (RNG call order below matches the original implementation so that
    # a fixed random_state reproduces the same dataset.)
    mvt = np.random.multinomial(n_samples, 1 / n_class * np.ones(n_class))
    if n_class == 2:
        # Two arms: generate one arm and mirror it through the origin.
        points_per_class = int(n_samples / n_class)
        r = np.sort(np.random.uniform(0, 1, size=points_per_class))
        t = np.linspace(
            0, np.pi * 4 * turns / n_class, points_per_class
        ) + np.random.normal(0, noise, points_per_class)
        dx = r * np.cos(t)
        dy = r * np.sin(t)
        X.append(np.vstack([dx, dy]).T)
        X.append(np.vstack([-dx, -dy]).T)
        y += [0] * points_per_class
        y += [1] * points_per_class
    else:
        # One arm per class, each sweeping its own angular sector.
        for j in range(1, n_class + 1):
            n_j = int(mvt[j - 1])
            r = np.linspace(0.01, 1, n_j)
            t = np.linspace(
                (j - 1) * np.pi * 4 * turns / n_class,
                j * np.pi * 4 * turns / n_class,
                n_j,
            ) + np.random.normal(0, noise, n_j)
            dx = r * np.cos(t)
            dy = r * np.sin(t)
            X.append(np.vstack([dx, dy]).T)
            y += [j - 1] * n_j
    return np.vstack(X), np.array(y).astype(int)
|
{"hexsha": "cf5c4a1851c68e76817cbf96a2a36e0974da10f8", "size": 2599, "ext": "py", "lang": "Python", "max_stars_repo_path": "proglearn/sims/spiral_sim.py", "max_stars_repo_name": "kfenggg/ProgLearn", "max_stars_repo_head_hexsha": "43842543315ef49d60f4ebcabcdd9b8fda1b3277", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-05-17T21:56:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-18T17:39:26.000Z", "max_issues_repo_path": "proglearn/sims/spiral_sim.py", "max_issues_repo_name": "kfenggg/ProgLearn", "max_issues_repo_head_hexsha": "43842543315ef49d60f4ebcabcdd9b8fda1b3277", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 209, "max_issues_repo_issues_event_min_datetime": "2020-06-05T19:08:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-03T16:49:39.000Z", "max_forks_repo_path": "proglearn/sims/spiral_sim.py", "max_forks_repo_name": "kfenggg/ProgLearn", "max_forks_repo_head_hexsha": "43842543315ef49d60f4ebcabcdd9b8fda1b3277", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2020-06-10T23:12:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-28T05:09:44.000Z", "avg_line_length": 28.5604395604, "max_line_length": 82, "alphanum_fraction": 0.5309734513, "include": true, "reason": "import numpy", "num_tokens": 679}
|
import numpy as np
from collections import namedtuple
import dataclasses
import pandas as pd
from typing import List, Tuple, Dict, Union, Any, Optional, Callable
def as_flattened(vals: Dict[str, Any], base: Optional[Tuple[str, ...]] = None) -> Dict[Tuple[str, ...], Any]:
    """Flatten a nested dict into a flat dict keyed by tuple paths.

    Each leaf value is stored under the tuple of keys leading to it,
    e.g. ``{"a": {"b": 1}}`` becomes ``{("a", "b"): 1}``.
    """
    prefix = tuple() if base is None else base
    flat: Dict[Tuple[str, ...], Any] = {}
    for key, value in vals.items():
        path = prefix + (key,)
        if isinstance(value, dict):
            flat.update(as_flattened(value, base=path))
        else:
            flat[path] = value
    return flat
def as_nested(vals: Dict[Tuple[str, ...], Any]) -> Dict[str, Any]:
    """Invert :func:`as_flattened`: rebuild a nested dict from tuple paths.

    Asserts that every path is non-empty and that no leaf is written twice.
    """
    nested: Dict[str, Any] = {}
    for path, value in vals.items():
        assert len(path) >= 1
        node = nested
        # Walk/create intermediate dicts for all but the last key.
        for part in path[:-1]:
            node = node.setdefault(part, {})
        leaf = path[-1]
        assert leaf not in node
        node[leaf] = value
    return nested
def count_items(dtype: np.dtype) -> int:
    """Return the total number of scalar items in a (possibly structured) dtype.

    A plain dtype contributes the product of its sub-array shape (1 for a
    scalar); a structured dtype contributes the sum over its fields,
    recursively.
    """
    fields = dtype.fields
    if fields is not None:
        return sum(count_items(sub_dtype) for sub_dtype, _ in fields.values())
    total = 1
    for dim_len in dtype.shape:
        total *= dim_len
    return total
def _as_dict(data: np.ndarray) -> Dict[str, Any]:
if data.dtype.fields is not None:
return {name: _as_dict(data[name]) for name in data.dtype.fields}
else:
return data
def _from_dict(data: np.ndarray, vals: Dict[str, Any]) -> None:
if data.dtype.fields is not None:
for name, (subtype, _) in data.dtype.fields.items():
if subtype.fields is not None:
_from_dict(data[name], vals[name])
else:
data[name] = vals[name]
else:
data[...] = vals
# Type aliases used throughout this module:
Shape = Tuple[int, ...]  # an array shape
Path = Tuple[str, ...]  # a nested-field path into a structured dtype
class DTypeSubset:
    """Describe a chosen subset of the leaf fields of a nested structured dtype.

    Built from a nested ``dims`` specification, an instance exposes:

    - ``dtype``: the full structured dtype,
    - ``subset_dtype``: a packed dtype containing only the leaves listed
      in ``subset_paths``,
    - ``subset_view_dtype``: like ``subset_dtype`` but with field offsets
      into the *full* dtype (same itemsize), so a full buffer can be
      viewed as the subset without copying,
    - ``flat_slices`` / ``flat_shapes``: the slice and shape of every leaf
      within a flattened 1-D view of the full dtype,
    - ``coords`` / ``dims``: pandas indexes and per-leaf dim metadata.
    """
    dtype: np.dtype
    subset_dtype: np.dtype
    subset_view_dtype: np.dtype
    coords: Dict[str, pd.Index]
    dims: Dict[str, Any]
    paths: List[Path]
    subset_paths: List[Path]
    # Map each path to a slice into the combined array
    flat_slices: Dict[Path, slice]
    flat_shapes: Dict[Path, Shape]
    item_count: int
    _remainder: Optional['DTypeSubset']
    def __init__(
        self,
        dims: Dict[str, Any],
        subset_paths: List[Path],
        fixed_dtype: Optional[np.dtype] = None,
        coords: Optional[Dict[str, pd.Index]] = None,
        dim_basename: str = ''
    ) -> None:
        """Build dtypes and layout metadata from the ``dims`` spec.

        Parameters
        ----------
        dims:
            Nested mapping. A dict value recurses into a sub-struct; a leaf
            value is ``(dtype, shape_spec)``, or just the shape spec when
            ``fixed_dtype`` is given. Shape entries may be ints (fresh
            auto-named dimensions) or strings (names of existing coords).
        subset_paths:
            Tuple-paths of the leaves to include in the subset dtype.
        fixed_dtype:
            If given, used as the dtype of every leaf.
        coords:
            Existing named dimensions (name -> index-like); copied into
            fresh ``pd.Index`` objects.
        dim_basename:
            Prefix used when naming auto-generated dimensions.
        """
        if coords is None:
            coords = {}
        else:
            coords = {name: pd.Index(coord) for name, coord in coords.items()}
        # Accumulators for the full dtype, the packed subset dtype, and the
        # offset-based subset view dtype.
        dtype: List[Tuple[str, Any, Tuple[int, ...]]] = []
        subset_dtype: List[Tuple[str, Any, Tuple[int, ...]]] = []
        subset_view_dtype = []
        paths: List[Tuple[str, ...]] = []
        flat_slices: Dict[Tuple[str, ...], slice] = {}
        flat_shapes: Dict[Tuple[str, ...], Tuple[int, ...]] = {}
        dims_out: Dict[str, Any] = {}
        subset_names = []
        subset_offsets = []
        # offset: byte offset of the next field within the full struct;
        # item_count: scalar items emitted so far (flattened layout).
        offset = 0
        item_count = 0
        for name, val in dims.items():
            if isinstance(val, dict):
                # Nested sub-struct: recurse with the matching sub-paths.
                flat_sub_paths = [p[1:] for p in subset_paths if len(p) > 0 and p[0] == name]
                # NOTE(review): this formats the literal string "dim_basename"
                # rather than the dim_basename argument -- looks like it should
                # be f"{dim_basename}_{name}"; confirm intended.
                basename = f"dim_basename_{name}"
                sub_subset = DTypeSubset(val, flat_sub_paths, fixed_dtype=fixed_dtype,
                                         coords=coords, dim_basename=basename)
                coords.update(sub_subset.coords)
                dtype.append((name, sub_subset.dtype, ()))
                # Only include the sub-struct in the subset if it contributed
                # at least one subset field.
                if sub_subset.subset_dtype.itemsize > 0:
                    subset_dtype.append((name, sub_subset.subset_dtype, ()))
                    subset_view_dtype.append(sub_subset.subset_view_dtype)
                    subset_names.append(name)
                    subset_offsets.append(offset)
                paths.extend((name,) + path for path in sub_subset.paths)
                dims_out[name] = sub_subset.dims
                # Shift the child's flat slices by the items already emitted.
                for path in sub_subset.paths:
                    full_path = (name,) + path
                    assert full_path not in flat_slices and full_path not in flat_shapes
                    sub_slice = sub_subset.flat_slices[path]
                    flat_slices[full_path] = slice(
                        sub_slice.start + item_count,
                        sub_slice.stop + item_count,
                    )
                    flat_shapes[full_path] = sub_subset.flat_shapes[path]
                item_count += sub_subset.item_count
            else:
                # Leaf field: resolve its dtype and dimension spec.
                if fixed_dtype is None:
                    val_dtype, val = val
                else:
                    val_dtype = fixed_dtype
                if isinstance(val, (int, str)):
                    val = (val,)
                shape = []
                item_dims = []
                for i, dim in enumerate(val):
                    if isinstance(dim, str):
                        # Reference to an existing named dimension.
                        if dim not in coords:
                            raise KeyError('Unknown dimension name: %s' % dim)
                        length = len(coords[dim])
                        dim_name = dim
                    else:
                        # Fresh dimension: auto-generate a unique name.
                        length = dim
                        index = pd.RangeIndex(length, name='%s_%s_dim%s__' % (dim_basename, name, i))
                        dim_name = index.name
                        if dim_name in coords:
                            raise ValueError(f"Can not create two different dimensions with the same name: {dim_name}.")
                        coords[dim_name] = index
                    item_dims.append(dim_name)
                    shape.append(length)
                dims_out[name] = (val_dtype, item_dims)
                dtype.append((name, val_dtype, tuple(shape)))
                if (name,) in subset_paths:
                    subset_dtype.append((name, val_dtype, tuple(shape)))
                    subset_view_dtype.append((val_dtype, tuple(shape)))
                    subset_offsets.append(offset)
                    subset_names.append(name)
                paths.append((name,))
                # Record the leaf's position in the flattened layout.
                length = 1
                for dim_len in shape:
                    length *= dim_len
                flat_slices[(name,)] = slice(item_count, item_count + length)
                flat_shapes[(name,)] = tuple(shape)
                item_count += length
            # Advance the byte offset by the size of the field just added.
            offset += np.dtype([dtype[-1]]).itemsize
        self.dtype = np.dtype(dtype)
        self.subset_dtype = np.dtype(subset_dtype)
        # The view dtype shares the full dtype's itemsize so a full buffer
        # can be reinterpreted as the subset in place.
        self.subset_view_dtype = np.dtype({
            'names': subset_names,
            'formats': subset_view_dtype,
            'offsets': subset_offsets,
            'itemsize': self.dtype.itemsize,
        })
        self.item_count = item_count
        self.flat_shapes = flat_shapes
        self.flat_slices = flat_slices
        self.coords = coords
        self.paths = paths
        self.dims = dims_out
        # Make sure the order of subset_paths is correct
        self.subset_paths = [path for path in paths if path in subset_paths]
        self._remainder = None
    @property
    def n_subset(self) -> int:
        """Number of scalar items in the subset dtype."""
        return count_items(self.subset_dtype)
    @property
    def n_items(self) -> int:
        """Number of scalar items in the full dtype."""
        return count_items(self.dtype)
    def set_from_subset(self, value_buffer: np.ndarray, subset_buffer: np.ndarray) -> None:
        """Write the subset fields of ``value_buffer`` from ``subset_buffer``.

        NOTE(review): ``ndarray.fill`` expects a scalar, and the view uses
        the packed ``subset_dtype`` rather than ``subset_view_dtype`` --
        this looks suspect; verify against callers before relying on it.
        """
        value_buffer.view(self.subset_dtype).fill(subset_buffer)
    def as_dataclass(
        self,
        dataclass_name: str,
        flat_subset: np.ndarray,
        flat_remainder: np.ndarray,
        item_map: Optional[Callable[[np.ndarray], np.ndarray]] = None,
    ) -> Any:
        """Unpack two flat 1-D arrays into a nested dataclass instance.

        ``flat_subset`` supplies the subset leaves and ``flat_remainder``
        all other leaves, both consumed in ``paths`` order; each leaf is
        reshaped to its dtype shape and passed through ``item_map``
        (identity by default). Both arrays must be consumed exactly.
        """
        if item_map is None:
            item_map = lambda x: x
        def _as_dataclass(
            dataclass_name: str,
            dtype: np.dtype,
            subset_paths: List[Path],
            flat_subset: np.ndarray,
            flat_remainder: np.ndarray,
            item_map: Callable[[np.ndarray], np.ndarray],
        ) -> Any:
            # Returns (instance, remaining flat_subset, remaining flat_remainder).
            fields = []
            for name, (subdtype, _) in dtype.fields.items():
                if subdtype.fields is None:
                    count = count_items(subdtype)
                    if (name,) in subset_paths:
                        assert len(flat_subset) >= count
                        item = item_map(flat_subset[:count].reshape(subdtype.shape))
                        flat_subset = flat_subset[count:]
                    else:
                        assert len(flat_remainder) >= count
                        item = item_map(flat_remainder[:count].reshape(subdtype.shape))
                        flat_remainder = flat_remainder[count:]
                else:
                    sub_paths = [p[1:] for p in subset_paths if len(p) > 0 and p[0] == name]
                    item, flat_subset, flat_remainder = _as_dataclass(
                        name, subdtype, sub_paths, flat_subset, flat_remainder, item_map)
                fields.append((name, item))
            Type = dataclasses.make_dataclass(dataclass_name, [name for name, _ in fields])
            return Type(*[item for _, item in fields]), flat_subset, flat_remainder
        params, flat_subset, flat_remainder = _as_dataclass(
            dataclass_name, self.dtype, self.subset_paths, flat_subset, flat_remainder, item_map)
        assert len(flat_subset) == 0
        assert len(flat_remainder) == 0
        return params
    def from_dict(self, vals: Dict[str, Any], out: Optional[np.ndarray] = None) -> np.ndarray:
        """Fill (or allocate) a full-dtype record from a nested dict."""
        if out is None:
            out = np.zeros((1,), dtype=self.dtype)[0]
        _from_dict(out, vals)
        return out
    def subset_from_dict(self, vals: Dict[str, Any], out: Optional[np.ndarray] = None) -> np.ndarray:
        """Fill (or allocate) a subset-dtype record from a nested dict."""
        if out is None:
            out = np.zeros((1,), dtype=self.subset_dtype)[0]
        _from_dict(out, vals)
        return out
    def as_dict(self, vals: np.ndarray) -> Dict[str, Any]:
        """Convert a full-dtype record into a nested dict of field arrays."""
        if vals.dtype != self.dtype:
            raise ValueError('Invalid dtype.')
        return _as_dict(vals)
    def subset_as_dict(self, vals: np.ndarray) -> Dict[str, Any]:
        """Convert a subset-dtype record into a nested dict of field arrays."""
        if vals.dtype != self.subset_dtype:
            raise ValueError('Invalid dtype.')
        return _as_dict(vals)
    @property
    def remainder(self) -> 'DTypeSubset':
        """A DTypeSubset selecting every path NOT in this subset (cached)."""
        if self._remainder is None:
            remainder = list(set(self.paths) - set(self.subset_paths))
            self._remainder = DTypeSubset(self.dims, remainder, coords=self.coords)
        return self._remainder
|
{"hexsha": "3b233743c68b60746f11e5337a81109a85f98352", "size": 10530, "ext": "py", "lang": "Python", "max_stars_repo_path": "sunode/dtypesubset.py", "max_stars_repo_name": "aseyboldt/pysundials-cffi", "max_stars_repo_head_hexsha": "ccdcd0fd0252285a5440c397619c57378a32d33a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2019-12-13T17:42:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T01:39:40.000Z", "max_issues_repo_path": "sunode/dtypesubset.py", "max_issues_repo_name": "aseyboldt/pysundials-cffi", "max_issues_repo_head_hexsha": "ccdcd0fd0252285a5440c397619c57378a32d33a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2019-12-21T21:36:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T13:48:18.000Z", "max_forks_repo_path": "sunode/dtypesubset.py", "max_forks_repo_name": "aseyboldt/pysundials-cffi", "max_forks_repo_head_hexsha": "ccdcd0fd0252285a5440c397619c57378a32d33a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-12-21T19:52:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-30T03:12:43.000Z", "avg_line_length": 36.4359861592, "max_line_length": 120, "alphanum_fraction": 0.5424501425, "include": true, "reason": "import numpy", "num_tokens": 2298}
|
[STATEMENT]
lemma [enres_breakdown]: "EWHILET c (\<lambda>s. enres_lift (f s)) s = enres_lift (WHILET c f s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. EWHILET c (\<lambda>s. enres_lift (f s)) s = enres_lift (WHILE\<^sub>T c f s)
[PROOF STEP]
unfolding EWHILET_def WHILET_def enres_breakdown
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. enres_lift (WHILE\<^sub>T\<^bsup>\<lambda>_. True\<^esup> c f s) = enres_lift (WHILE\<^sub>T\<^bsup>\<lambda>_. True\<^esup> c f s)
[PROOF STEP]
..
|
{"llama_tokens": 236, "file": "VerifyThis2019_lib_Exc_Nres_Monad", "length": 2}
|
"""
PrecipFormation
Storage for tendencies due to precipitation formation
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct PrecipFormation{FT}
θ_liq_ice_tendency::FT
qt_tendency::FT
qr_tendency::FT
qs_tendency::FT
end
"""
EntrDetr
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct EntrDetr{FT}
"Dynamical entrainment"
ε_dyn::FT
"Dynamical detrainment"
δ_dyn::FT
"Turbulent entrainment"
ε_turb::FT
"Horizontal eddy-diffusivity"
K_ε::FT
end
"""
GeneralizedEntr
A general set of variables entrainment might depend on.
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct GeneralizedEntr{FT}
"updraft condensate (liquid water + ice)"
q_cond_up::FT
"environment condensate (liquid water + ice)"
q_cond_en::FT
"updraft vertical velocity"
w_up::FT
"environment vertical velocity"
w_en::FT
"updraft buoyancy"
b_up::FT
"environment buoyancy"
b_en::FT
"environment tke"
tke::FT
"updraft momentum divergence"
dMdz::FT
"updraft momentum"
M::FT
"updraft area fraction"
a_up::FT
"environment area fraction"
a_en::FT
"pressure plume spacing"
R_up::FT
"updraft relative humidity"
RH_up::FT
"environment relative humidity"
RH_en::FT
"maximum updraft area"
max_area::FT
"vertical coordinate"
zc_i::FT
"Model time step"
Δt::FT
end
# Numeric element type FT of a GeneralizedEntr instance.
Base.eltype(::GeneralizedEntr{FT}) where {FT} = FT
# Dispatch tags selecting the entrainment closure implementation:
struct MDEntr end # existing model
struct NNEntr end # presumably a neural-network-based closure -- confirm at the closure definition
"""
GradBuoy
Environmental buoyancy gradients.
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct GradBuoy{FT}
"environmental vertical buoyancy gradient"
∂b∂z::FT
"vertical buoyancy gradient in the unsaturated part of the environment"
∂b∂z_unsat::FT
"vertical buoyancy gradient in the saturated part of the environment"
∂b∂z_sat::FT
end
abstract type AbstractEnvBuoyGradClosure end
struct BuoyGradMean <: AbstractEnvBuoyGradClosure end
struct BuoyGradQuadratures <: AbstractEnvBuoyGradClosure end
"""
EnvBuoyGrad
Variables used in the environmental buoyancy gradient computation.
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct EnvBuoyGrad{FT, EBC <: AbstractEnvBuoyGradClosure}
"temperature in the saturated part"
t_sat::FT
"vapor specific humidity in the saturated part"
qv_sat::FT
"total specific humidity in the saturated part"
qt_sat::FT
"potential temperature in the saturated part"
θ_sat::FT
"liquid ice potential temperature in the saturated part"
θ_liq_ice_sat::FT
"virtual potential temperature gradient in the non saturated part"
∂θv∂z_unsat::FT
"total specific humidity gradient in the saturated part"
∂qt∂z_sat::FT
"liquid ice potential temperature gradient in the saturated part"
∂θl∂z_sat::FT
"reference pressure"
p0::FT
"cloud fraction"
en_cld_frac::FT
"specific volume"
alpha0::FT
end
function EnvBuoyGrad(::EBG; t_sat::FT, bg_kwargs...) where {FT <: Real, EBG <: AbstractEnvBuoyGradClosure}
return EnvBuoyGrad{FT, EBG}(; t_sat, bg_kwargs...)
end
"""
MixLen
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct MixLen{FT}
"minimum length number"
min_len_ind::Int
"mixing length"
mixing_length::FT
"length ratio"
ml_ratio::FT
end
"""
MinDisspLen
Minimum dissipation model
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef struct MinDisspLen{FT}
"height"
z::FT
"obukhov length"
obukhov_length::FT
"surface TKE values"
tke_surf::FT
"u star - surface velocity scale"
ustar::FT
"turbulent Prandtl number"
Pr::FT
"reference pressure"
p0::FT
"vertical buoyancy gradient struct"
∇b::GradBuoy{FT}
"env shear"
Shear²::FT
"environment turbulent kinetic energy"
tke::FT
"Updraft tke source"
b_exch::FT
end
# Precipitation diagnostics and the precipitation model selection.
Base.@kwdef mutable struct PrecipVariables
    precipitation_model::String = "default_precipitation_model"
    mean_rwp::Float64 = 0  # mean rain water path
    mean_swp::Float64 = 0  # mean snow water path
    cutoff_precipitation_rate::Float64 = 0
end

"""
    PrecipVariables(namelist, grid::Grid)

Read the precipitation model choice ("None", "cutoff" or "clima_1m")
from the namelist. `grid` is currently unused.
"""
function PrecipVariables(namelist, grid::Grid)
    precipitation_model = parse_namelist(
        namelist,
        "microphysics",
        "precipitation_model";
        default = "None",
        valid_options = ["None", "cutoff", "clima_1m"],
    )
    # NOTE(review): parse_namelist is already given valid_options, so this
    # re-check is presumably redundant defensive validation -- confirm.
    if !(precipitation_model in ["None", "cutoff", "clima_1m"])
        error("precipitation model not recognized")
    end
    return PrecipVariables(; precipitation_model)
end
# Updraft diagnostics: per-updraft cloud geometry (arrays of length
# n_updrafts) plus scalar water-path diagnostics.
mutable struct UpdraftVariables{A1}
    n_updrafts::Int
    cloud_base::A1
    cloud_top::A1
    cloud_cover::A1
    updraft_top::A1
    lwp::Float64  # liquid water path diagnostic
    iwp::Float64  # ice water path diagnostic
    function UpdraftVariables(nu, namelist, grid::Grid)
        n_updrafts = nu
        # cloud and precipitation diagnostics for output
        cloud_base = zeros(nu)
        cloud_top = zeros(nu)
        cloud_cover = zeros(nu)
        updraft_top = zeros(nu)
        lwp = 0.0
        iwp = 0.0
        # A1 is the concrete array type of the diagnostics (Vector{Float64}).
        A1 = typeof(cloud_base)
        return new{A1}(n_updrafts, cloud_base, cloud_top, cloud_cover, updraft_top, lwp, iwp)
    end
end
# Grid-mean cloud/water-path diagnostics plus the SGS environment
# thermodynamics scheme choice.
Base.@kwdef mutable struct GridMeanVariables{PS}
    param_set::PS
    lwp::Float64
    iwp::Float64
    cloud_base::Float64
    cloud_top::Float64
    cloud_cover::Float64
    EnvThermo_scheme::String
end

"""
    GridMeanVariables(namelist, grid::Grid, param_set)

Initialize all diagnostics to zero and read the SGS scheme
("thermodynamics"/"sgs", default "mean") from the namelist.
`grid` is currently unused.
"""
function GridMeanVariables(namelist, grid::Grid, param_set::PS) where {PS}
    lwp = 0.0
    iwp = 0.0
    cloud_base = 0.0
    cloud_top = 0.0
    cloud_cover = 0.0
    EnvThermo_scheme = parse_namelist(namelist, "thermodynamics", "sgs"; default = "mean")
    return GridMeanVariables(; param_set, lwp, iwp, cloud_base, cloud_top, cloud_cover, EnvThermo_scheme)
end
# Environment (non-updraft) cloud diagnostics plus the SGS scheme choice.
Base.@kwdef mutable struct EnvironmentVariables
    cloud_base::Float64 = 0
    cloud_top::Float64 = 0
    cloud_cover::Float64 = 0
    lwp::Float64 = 0
    iwp::Float64 = 0
    EnvThermo_scheme::String = "default_EnvThermo_scheme"
end

"""
    EnvironmentVariables(namelist, grid::Grid)

Read the SGS scheme from the namelist; diagnostics keep their zero
defaults. `grid` is currently unused.
"""
function EnvironmentVariables(namelist, grid::Grid)
    # TODO: EnvThermo_scheme is repeated in GridMeanVariables
    EnvThermo_scheme = parse_namelist(namelist, "thermodynamics", "sgs"; default = "mean")
    return EnvironmentVariables(; EnvThermo_scheme)
end
# Quadrature settings for the environment thermodynamics integration,
# read from the namelist ("thermodynamics" section).
struct EnvironmentThermodynamics
    quadrature_order::Int
    quadrature_type::String
    function EnvironmentThermodynamics(namelist, grid::Grid)
        quadrature_order = parse_namelist(namelist, "thermodynamics", "quadrature_order"; default = 3)
        quadrature_type = parse_namelist(namelist, "thermodynamics", "quadrature_type"; default = "gaussian")
        return new(quadrature_order, quadrature_type)
    end
end
# Stochastic entrainment/detrainment closures:
struct NoneClosureType end
struct LogNormalClosureType end
struct SDEClosureType end
# Stochastic differential equation memory
# NOTE(review): the type parameter T does not appear in any field; it
# presumably tags which closure the state belongs to -- confirm at use sites.
Base.@kwdef mutable struct sde_struct{T}
    u0::Float64  # SDE state / initial condition
    dt::Float64  # SDE time step
end
# SurfaceMoninObukhovDry:
# Needed for dry cases (qt=0). They have to be initialized with nonzero qtg for the
# reference profiles. This surface subroutine sets the latent heat flux to zero
# to prevent errors due to nonzero qtg in vars such as the obukhov_length.
# SurfaceSullivanPatton
# Not fully implemented yet. Maybe not needed - Ignacio
# Dispatch tags selecting the surface scheme (SurfaceBase{T} type parameter):
struct SurfaceNone end
struct SurfaceFixedFlux end
struct SurfaceFixedCoeffs end
struct SurfaceMoninObukhov end
struct SurfaceMoninObukhovDry end
struct SurfaceSullivanPatton end
# Surface state, exchange coefficients and fluxes; T is a dispatch tag
# (one of the Surface* marker types above).
Base.@kwdef mutable struct SurfaceBase{T}
    zrough::Float64 = 0
    interactive_zrough::Bool = false
    Tsurface::Float64 = 0
    qsurface::Float64 = 0
    shf::Float64 = 0  # sensible heat flux
    lhf::Float64 = 0  # latent heat flux
    cm::Float64 = 0
    ch::Float64 = 0
    cq::Float64 = 0
    bflux::Float64 = 0
    windspeed::Float64 = 0
    ustar::Float64 = 0
    rho_qtflux::Float64 = 0
    rho_hflux::Float64 = 0
    rho_uflux::Float64 = 0
    rho_vflux::Float64 = 0
    obukhov_length::Float64 = 0
    Ri_bulk_crit::Float64 = 0
    ustar_fixed::Bool = false
    ref_params::NamedTuple = NamedTuple()
end

"""
    SurfaceBase(::Type{T}; namelist, ref_params)

Build a `SurfaceBase{T}` with the critical bulk Richardson number taken
from the namelist; all other fields keep their zero defaults.
"""
function SurfaceBase(::Type{T}; namelist::Dict, ref_params) where {T}
    Ri_bulk_crit = namelist["turbulence"]["Ri_bulk_crit"]
    return SurfaceBase{T}(; Ri_bulk_crit, ref_params)
end
# Dispatch tags selecting the forcing scheme (ForcingBase{T} type parameter):
struct ForcingBaseType end
struct ForcingNone end
struct ForcingStandard end
struct ForcingDYCOMS_RF01 end
struct ForcingLES end
# Dispatch tags selecting the radiation scheme (RadiationBase{T} type parameter):
struct RadiationBaseType end
struct RadiationNone end
struct RadiationStandard end
struct RadiationDYCOMS_RF01 end
struct RadiationLES end
# Metadata describing the LES dataset used to drive the SCM.
Base.@kwdef mutable struct LESData
    "Start time index of LES"
    imin::Int = 0
    "End time index of LES"
    imax::Int = 0
    "Path to LES stats file used to drive SCM"
    # Bug fix: the default was `nothing`, which cannot convert to String
    # and made construction throw whenever this field was omitted.
    # "None" matches the convention used by CasesBase.les_filename.
    les_filename::String = "None"
    "Drive SCM with LES data from t = [end - t_interval_from_end_s, end]"
    t_interval_from_end_s::Float64 = 6 * 3600.0
    "Length of time to average over for SCM initialization"
    initial_condition_averaging_window_s::Float64 = 3600.0
end
"""
ForcingBase
LES-driven forcing
$(DocStringExtensions.FIELDS)
"""
Base.@kwdef mutable struct ForcingBase{T}
"Boolean specifying whether Coriolis forcing is applied"
apply_coriolis::Bool = false
"Boolean specifying whether subsidence forcing is applied"
apply_subsidence::Bool = false
"Coriolis parameter"
coriolis_param::Float64 = 0
"Momentum relaxation timescale"
nudge_tau::Float64 = 0.0
end
force_type(::ForcingBase{T}) where {T} = T
# Radiation scheme parameters; T is a dispatch tag (Radiation* types).
# NOTE(review): the fields look like DYCOMS-RF01-style radiation
# parameters (divergence, kappa, F0, F1) -- confirm against the scheme.
Base.@kwdef mutable struct RadiationBase{T}
    divergence::Float64 = 0
    alpha_z::Float64 = 0
    kappa::Float64 = 0
    F0::Float64 = 0
    F1::Float64 = 0
end
# Return the dispatch tag T of a RadiationBase.
rad_type(::RadiationBase{T}) where {T} = T
# Per-case configuration bundle: surface, forcing and radiation schemes
# plus case-specific constants. T is the case dispatch tag.
Base.@kwdef mutable struct CasesBase{T}
    case::T
    casename::String = "default_casename"
    inversion_option::String = "default_inversion_option"
    les_filename::String = "None"
    Sur::SurfaceBase
    Fo::ForcingBase
    Rad::RadiationBase
    rad_time::StepRangeLen = range(10, 360; length = 36) .* 60  # radiation forcing times [s]
    rad::AbstractMatrix{Float64} = zeros(1, 1)
    lhf0::Float64 = 0
    shf0::Float64 = 0
    LESDat::Union{LESData, Nothing} = nothing
end
# Build a CasesBase from a case tag instance; casename defaults to the
# tag's type name.
CasesBase(case::T; kwargs...) where {T} = CasesBase{T}(; case = case, casename = string(nameof(T)), kwargs...)
"""
    EDMF_PrognosticTKE{N_up, A1, EBGC, EC, SDES, UPVAR}

State for the prognostic-TKE EDMF scheme: closure parameters read from the
namelist, sub-model state (precipitation, updrafts, environment) and per-updraft
surface boundary-condition buffers.  Type parameters: `N_up` number of updrafts,
`A1` surface-BC array type, `EBGC` buoyancy-gradient closure type, `EC`
entrainment closure type, `SDES` stochastic model type, `UPVAR` updraft-variables
type.
"""
mutable struct EDMF_PrognosticTKE{N_up, A1, EBGC, EC, SDES, UPVAR}
    Ri_bulk_crit::Float64            # from namelist["turbulence"]["Ri_bulk_crit"]
    zi::Float64                      # inversion height; initialized to 0.0
    n_updrafts::Int                  # number of updrafts (mirrors the N_up type parameter)
    asp_label::String                # "pressure_closure_asp_label" namelist option
    extrapolate_buoyancy::Bool       # "extrapolate_buoyancy" namelist option
    surface_area::Float64
    max_area::Float64
    aspect_ratio::Float64            # pressure-closure parameter
    tke_ed_coeff::Float64            # TKE eddy-diffusivity coefficient
    tke_diss_coeff::Float64          # TKE dissipation coefficient
    static_stab_coeff::Float64
    minimum_area::Float64            # hard-coded to 1e-5 in the constructor below
    Precip::PrecipVariables
    UpdVar::UPVAR                    # updraft diagnostic/prognostic variables
    EnvVar::EnvironmentVariables
    EnvThermo::EnvironmentThermodynamics
    area_surface_bc::A1              # near-surface BC of updraft area fraction (one entry per updraft)
    w_surface_bc::A1                 # near-surface BC of updraft vertical velocity
    h_surface_bc::A1                 # near-surface BC of updraft enthalpy variable
    qt_surface_bc::A1                # near-surface BC of updraft total water
    pressure_plume_spacing::A1
    prandtl_number::Float64          # from namelist["turbulence"]["prandtl_number_0"]
    wstar::Float64                   # convective velocity scale; initialized to 0
    entr_surface_bc::Float64
    detr_surface_bc::Float64
    dt_max::Float64
    sde_model::SDES                  # stochastic (SDE) closure model
    bg_closure::EBGC                 # environmental buoyancy-gradient closure
    entr_closure::EC                 # entrainment closure
    function EDMF_PrognosticTKE(namelist, grid::Grid, param_set::PS) where {PS}
        # NOTE(review): `param_set` is not referenced anywhere in this
        # constructor -- confirm it is intentionally unused.
        # get values from namelist
        prandtl_number = namelist["turbulence"]["prandtl_number_0"]
        Ri_bulk_crit = namelist["turbulence"]["Ri_bulk_crit"]
        zi = 0.0
        # Set the number of updrafts (1)
        n_updrafts = parse_namelist(namelist, "turbulence", "EDMF_PrognosticTKE", "updraft_number"; default = 1)
        # NOTE(review): parsed (and validated) but never stored or used below
        # -- confirm whether this option is still meant to have an effect.
        pressure_func_drag_str = parse_namelist(
            namelist,
            "turbulence",
            "EDMF_PrognosticTKE",
            "pressure_closure_drag";
            default = "normalmode",
            valid_options = ["normalmode", "normalmode_signdf"],
        )
        asp_label = parse_namelist(
            namelist,
            "turbulence",
            "EDMF_PrognosticTKE",
            "pressure_closure_asp_label";
            default = "const",
        )
        extrapolate_buoyancy =
            parse_namelist(namelist, "turbulence", "EDMF_PrognosticTKE", "extrapolate_buoyancy"; default = true)
        # Get values from namelist
        # set defaults at some point?
        surface_area = namelist["turbulence"]["EDMF_PrognosticTKE"]["surface_area"]
        max_area = namelist["turbulence"]["EDMF_PrognosticTKE"]["max_area"]
        # entrainment parameters
        # pressure parameters
        aspect_ratio = namelist["turbulence"]["EDMF_PrognosticTKE"]["aspect_ratio"]
        # mixing length parameters
        tke_ed_coeff = namelist["turbulence"]["EDMF_PrognosticTKE"]["tke_ed_coeff"]
        tke_diss_coeff = namelist["turbulence"]["EDMF_PrognosticTKE"]["tke_diss_coeff"]
        static_stab_coeff = namelist["turbulence"]["EDMF_PrognosticTKE"]["static_stab_coeff"]
        # Need to code up as namelist option?
        minimum_area = 1e-5
        # Create the class for precipitation
        Precip = PrecipVariables(namelist, grid)
        # Create the updraft variable class (major diagnostic and prognostic variables)
        UpdVar = UpdraftVariables(n_updrafts, namelist, grid)
        # Create the environment variable class (major diagnostic and prognostic variables)
        EnvVar = EnvironmentVariables(namelist, grid)
        # Create the class for environment thermodynamics
        EnvThermo = EnvironmentThermodynamics(namelist, grid)
        # Near-surface BC of updraft area fraction
        area_surface_bc = zeros(n_updrafts)
        w_surface_bc = zeros(n_updrafts)
        h_surface_bc = zeros(n_updrafts)
        qt_surface_bc = zeros(n_updrafts)
        pressure_plume_spacing = zeros(n_updrafts)
        # Initialize SDE parameters
        dt = parse_namelist(namelist, "time_stepping", "dt_min"; default = 1.0)
        closure = parse_namelist(
            namelist,
            "turbulence",
            "EDMF_PrognosticTKE",
            "stochastic",
            "closure";
            default = "none",
            valid_options = ["none", "lognormal", "sde"],
        )
        # Map the namelist string onto the closure tag type used by sde_struct.
        closure_type = if closure == "none"
            NoneClosureType
        elseif closure == "lognormal"
            LogNormalClosureType
        elseif closure == "sde"
            SDEClosureType
        else
            error("Something went wrong. Invalid stochastic closure type '$closure'")
        end
        sde_model = sde_struct{closure_type}(u0 = 1, dt = dt)
        # Environmental buoyancy-gradient closure selection.
        bg_type = parse_namelist(
            namelist,
            "turbulence",
            "EDMF_PrognosticTKE",
            "env_buoy_grad";
            default = "mean",
            valid_options = ["mean", "quadratures"],
        )
        bg_closure = if bg_type == "mean"
            BuoyGradMean()
        elseif bg_type == "quadratures"
            BuoyGradQuadratures()
        else
            error("Something went wrong. Invalid environmental buoyancy gradient closure type '$bg_type'")
        end
        # entr closure
        entr_type = parse_namelist(
            namelist,
            "turbulence",
            "EDMF_PrognosticTKE",
            "entrainment";
            default = "moisture_deficit",
            valid_options = ["moisture_deficit", "NN"],
        )
        entr_closure = if entr_type == "moisture_deficit"
            MDEntr()
        elseif entr_type == "NN"
            NNEntr()
        else
            error("Something went wrong. Invalid entrainment type '$entr_type'")
        end
        EC = typeof(entr_closure)
        # Diagnostics updated during the run; start from zero.
        wstar = 0
        entr_surface_bc = 0
        detr_surface_bc = 0
        dt_max = 0
        # Concrete type parameters for the struct.
        A1 = typeof(area_surface_bc)
        EBGC = typeof(bg_closure)
        SDES = typeof(sde_model)
        UPVAR = typeof(UpdVar)
        # Field order here must match the field declarations above exactly.
        return new{n_updrafts, A1, EBGC, EC, SDES, UPVAR}(
            Ri_bulk_crit,
            zi,
            n_updrafts,
            asp_label,
            extrapolate_buoyancy,
            surface_area,
            max_area,
            aspect_ratio,
            tke_ed_coeff,
            tke_diss_coeff,
            static_stab_coeff,
            minimum_area,
            Precip,
            UpdVar,
            EnvVar,
            EnvThermo,
            area_surface_bc,
            w_surface_bc,
            h_surface_bc,
            qt_surface_bc,
            pressure_plume_spacing,
            prandtl_number,
            wstar,
            entr_surface_bc,
            detr_surface_bc,
            dt_max,
            sde_model,
            bg_closure,
            entr_closure,
        )
    end
end
parameter_set(obj) = obj.param_set
n_updrafts(edmf::EDMF_PrognosticTKE{N_up}) where {N_up} = N_up
"""
    State{P, A, T}

Container bundling the prognostic state, auxiliary state and tendencies
passed around the solver.
"""
struct State{P, A, T}
    prog::P          # prognostic variables
    aux::A           # auxiliary variables
    tendencies::T    # tendencies (presumably of `prog` -- confirm against the solver)
end
|
{"hexsha": "bdac5d86e69ba66d76511d2601806ad2bdf405d3", "size": 16589, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types.jl", "max_stars_repo_name": "haakon-e/TurbulenceConvection.jl", "max_stars_repo_head_hexsha": "84969de4867b06e464c3a36263e98be677792a2d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-16T17:59:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T17:59:15.000Z", "max_issues_repo_path": "src/types.jl", "max_issues_repo_name": "haakon-e/TurbulenceConvection.jl", "max_issues_repo_head_hexsha": "84969de4867b06e464c3a36263e98be677792a2d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types.jl", "max_forks_repo_name": "haakon-e/TurbulenceConvection.jl", "max_forks_repo_head_hexsha": "84969de4867b06e464c3a36263e98be677792a2d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9276094276, "max_line_length": 112, "alphanum_fraction": 0.6632105612, "num_tokens": 4629}
|
% =========================================
% COMMAND: _SH
% =========================================
\newpage
\section{\_SH}
\label{cmd:_SH}
\paragraph{Syntax:}
\subparagraph{}
\texttt{\_SH shell script line or END}
\paragraph{Purpose:}
\subparagraph{}
Embeds a shell script in a temporary file and executes
it when \texttt{END} is found.
% TODO: Add an example. Also clarify why a temporary file is used, and where it is created.
|
{"hexsha": "df58bc6c981bb88936aadd3bcd56a6dd4c105641", "size": 391, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/users-guide/local-commands/cmd_sh.tex", "max_stars_repo_name": "ia97lies/httest", "max_stars_repo_head_hexsha": "5bbe912ffde2e74b382405f580ef5963bf792288", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-05-16T07:47:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-01T13:22:10.000Z", "max_issues_repo_path": "doc/users-guide/local-commands/cmd_sh.tex", "max_issues_repo_name": "ia97lies/httest", "max_issues_repo_head_hexsha": "5bbe912ffde2e74b382405f580ef5963bf792288", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/users-guide/local-commands/cmd_sh.tex", "max_forks_repo_name": "ia97lies/httest", "max_forks_repo_head_hexsha": "5bbe912ffde2e74b382405f580ef5963bf792288", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7222222222, "max_line_length": 53, "alphanum_fraction": 0.5652173913, "num_tokens": 101}
|
import math
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.animation import FuncAnimation
import numpy as np
import pandas
def read_data(paths):
    '''Load tracking data from each file in *paths*.

    Each file is parsed as a headerless CSV whose first column ('time')
    becomes the index.  Returns a list of dataframes, one per file.
    '''
    column_names = ['time', 'status', 'x', 'y', 'numpts', '_1', '_2']
    return [
        pandas.read_csv(path, header=None, names=column_names, index_col=0)
        for path in paths
    ]
def get_timeslice(dataframes, start_time, end_time):
    '''Collect x/y coordinates from every dataframe over a time window.

    Each dataframe's x and y series are sliced with
    ``[start_time:end_time]`` (pandas semantics: positional for integer
    slices, label-based otherwise) and concatenated across dataframes.
    Returns two flat lists ``(xpoints, ypoints)``.
    '''
    xpoints, ypoints = [], []
    for frame in dataframes:
        xpoints += list(frame.x[start_time:end_time])
        ypoints += list(frame.y[start_time:end_time])
    return xpoints, ypoints
def plot_heatmap(ax, xpoints, ypoints, nbins, title=None, maxcount=None):
    '''Draw a 2D-histogram heatmap of (xpoints, ypoints) on axes *ax*.

    Bins span [0, 1] in both directions with *nbins* bins.  If *maxcount*
    is given, the color scale is fixed to [0, maxcount] so separate plots
    are directly comparable.  Returns the AxesImage created by imshow.
    '''
    edges = np.concatenate((np.arange(0, 1.0, 1.0 / nbins), [1.0]))
    # histogram2d takes (y, x) so image rows correspond to y, but imshow's
    # extent is ordered (x, y) -- keep the two orderings straight here.
    counts, yedges, xedges = np.histogram2d(ypoints, xpoints, bins=edges)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    # Always show the full [0, 1] tank extent or the full data extent,
    # whichever is wider.
    ax.set_xlim(min(0, xedges[0]), max(1, xedges[-1]))
    ax.set_ylim(min(0, yedges[0]), max(1, yedges[-1]))
    if title:
        ax.set_title(title)
    norm = Normalize(0, maxcount) if maxcount is not None else None
    return ax.imshow(counts, extent=extent, cmap=plt.get_cmap('hot'),
                     origin='lower', interpolation='nearest', norm=norm)
def make_heatmap(x, y, title):
    '''Create a new 4x4-inch figure containing a single heatmap of the data.

    Returns the axes the heatmap was drawn on.
    '''
    fig = plt.figure(figsize=(4, 4))
    axes = fig.gca()
    plot_heatmap(axes, x, y, nbins=50, title=title)
    return axes
def make_animation(datasets):
    ''' Create and return a side-by-side heatmap animation for the given sets of data.
    Data sets specified as a dictionary with key=title, value=list of dataframes.
    All datasets are assumed to cover the same time span.
    '''
    numplots = len(datasets)
    # NOTE(review): with a single dataset plt.subplots(1, 1) returns a bare
    # Axes (not an array), so ax[i] below would fail -- confirm callers
    # always pass two or more datasets.
    fig, ax = plt.subplots(1, numplots)
    # get an arbitrary dataset (assuming they're all the same length)
    dataframes = datasets[next(iter(datasets))]
    # Largest index value is treated as the total duration of the recording.
    maxtime = dataframes[0].index.max()
    # Each frame advances 60s; a 150% window means each frame shows 90s of
    # data, overlapping the next frame by 30s for smoother transitions.
    sec_per_frame = 60
    frame_overlap_percent = 150
    frames = int(math.ceil(maxtime / sec_per_frame))
    nbins = 50
    # Fixed color-scale maximum so all frames/plots share one scale.
    maxcount = 100
    def update(frame):
        # Per-frame callback for FuncAnimation; returns the drawn artists.
        start_time = (frame*sec_per_frame) % maxtime
        end_time = start_time + sec_per_frame*(frame_overlap_percent/100)
        artists = []
        for i, label in enumerate(datasets):
            # NOTE(review): clearing axes while blit=True can misbehave on
            # some matplotlib backends -- confirm the animation renders fully.
            ax[i].clear()
            x, y = get_timeslice(datasets[label], start_time, end_time)
            artist = plot_heatmap(ax[i], x, y, nbins, title=label, maxcount=maxcount)
            artists.append(artist)
        return artists
    ani = FuncAnimation(fig, update, frames=frames, interval=200, blit=True)
    return ani
|
{"hexsha": "69c26398cf5ef3c18e12fe167f315aec9187fc05", "size": 3076, "ext": "py", "lang": "Python", "max_stars_repo_path": "heatmaps.py", "max_stars_repo_name": "bsheese/AtlesDescriptives", "max_stars_repo_head_hexsha": "febfde7d5bbe8abf686570db862f42738a4ad1fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-09-22T00:05:53.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-26T03:22:46.000Z", "max_issues_repo_path": "heatmaps.py", "max_issues_repo_name": "bsheese/AtlesDescriptives", "max_issues_repo_head_hexsha": "febfde7d5bbe8abf686570db862f42738a4ad1fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-01-01T23:36:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T03:31:54.000Z", "max_forks_repo_path": "src/analysis/heatmaps.py", "max_forks_repo_name": "liffiton/ATLeS", "max_forks_repo_head_hexsha": "14996261583a714e4664202ec05d9ecae8789542", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8021978022, "max_line_length": 122, "alphanum_fraction": 0.6537711313, "include": true, "reason": "import numpy", "num_tokens": 812}
|
import copy
import numpy as np
from scipy.interpolate import CubicSpline
import spherical_functions
class ModesTimeSeries(spherical_functions.Modes):
    """Object to store SWSH modes as functions of time

    This class subclasses the spinsfast.Modes class, but also tracks corresponding time values,
    allowing this class to have extra methods for interpolation, as well as differentiation and
    integration in time.

    NOTE: The time array is not copied; this class merely keeps a reference to the original time
    array.  If you change that array *in place* outside of this class, it changes inside of this
    class as well.  You can, of course, change the variable you used to label that array to point to
    some other quantity without affecting the time array stored in this class.
    """

    def __new__(cls, input_array, *args, **kwargs):
        """Create a ModesTimeSeries from mode data plus a 1-D time array.

        The time array may be given as the single allowed extra positional
        argument, as the `time` keyword, or inside `input_array._metadata`.

        Raises
        ------
        ValueError
            If more than one extra positional argument is passed, no time
            data can be found, the time array is not 1-D, or the mode
            array's second-to-last axis does not match the time array.
        """
        # BUG FIX: this previously tested `len(args) > 2`, which silently
        # discarded a second extra positional argument instead of raising.
        if len(args) > 1:
            raise ValueError("Only one positional argument may be passed")
        if len(args) == 1:
            kwargs["time"] = args[0]
        metadata = copy.copy(getattr(input_array, "_metadata", {}))
        metadata.update(**kwargs)
        input_array = np.asanyarray(input_array).view(complex)
        time = metadata.get("time", None)
        if time is None:
            raise ValueError("Time data must be specified as part of input array or as constructor parameter")
        time = np.asarray(time).view(float)
        if time.ndim != 1:
            raise ValueError(f"Input time array must have exactly 1 dimension; it has {time.ndim}.")
        # Promote scalars / single mode-vectors so there is always a time axis.
        if input_array.ndim == 0:
            input_array = input_array[np.newaxis, np.newaxis]
        elif input_array.ndim == 1:
            input_array = input_array[np.newaxis, :]
        elif input_array.shape[-2] != time.shape[0] and input_array.shape[-2] != 1:
            raise ValueError(
                f"Second-to-last axis of input array must have size 1 or same size as time array.\n            Their shapes are {input_array.shape} and {time.shape}, respectively."
            )
        obj = spherical_functions.Modes(input_array, **kwargs).view(cls)
        obj._metadata["time"] = time
        return obj

    def __array_finalize__(self, obj):
        if obj is None:
            return
        super().__array_finalize__(obj)
        # Views created by numpy may lack time metadata; default it to None.
        if "time" not in self._metadata:
            self._metadata["time"] = None

    @property
    def time(self):
        """1-D array of time values (shared with the constructor's input, not copied)."""
        return self._metadata["time"]

    @time.setter
    def time(self, new_time):
        # Assign in place so every view sharing this metadata sees the update.
        # (The original also returned self.time; setter return values are
        # always discarded by Python, so that line has been dropped.)
        self._metadata["time"][:] = new_time

    u = time
    t = time

    def interpolate(self, new_time, derivative_order=0, out=None):
        """Interpolate the modes onto `new_time` using cubic splines.

        Parameters
        ----------
        new_time : 1-D array_like
            Times at which to evaluate.
        derivative_order : int, optional
            Positive values (up to 3) differentiate the spline first;
            negative values antidifferentiate; 0 (default) interpolates.
        out : ndarray of complex, optional
            Preallocated output with shape
            ``self.shape[:-2] + (new_time.size, self.shape[-1])``.
        """
        new_time = np.asarray(new_time)
        if new_time.ndim != 1:
            raise ValueError(f"New time array must have exactly 1 dimension; it has {new_time.ndim}.")
        new_shape = self.shape[:-2] + (new_time.size, self.shape[-1])
        if out is not None:
            out = np.asarray(out)
            if out.shape != new_shape:
                raise ValueError(
                    f"Output array should have shape {new_shape} for consistency with new time array and modes array"
                )
            # BUG FIX: `np.complex` was deprecated in NumPy 1.20 and removed
            # in 1.24; np.dtype(complex) expresses the same check.
            if out.dtype != np.dtype(complex):
                raise ValueError(f"Output array should have dtype `complex`; it has dtype {out.dtype}")
        # BUG FIX: the original `result = out or np.empty(...)` raises
        # ValueError for any multi-element `out` (ambiguous array truth
        # value); test against None explicitly instead.
        result = out if out is not None else np.empty(new_shape, dtype=complex)
        if derivative_order > 3:
            raise ValueError(
                f"{type(self)} interpolation uses CubicSpline, and cannot take a derivative of order {derivative_order}"
            )
        spline = CubicSpline(self.u, self.view(np.ndarray), axis=-2)
        if derivative_order < 0:
            spline = spline.antiderivative(-derivative_order)
        elif 0 < derivative_order <= 3:
            spline = spline.derivative(derivative_order)
        result[:] = spline(new_time)
        metadata = self._metadata.copy()
        metadata["time"] = new_time
        return type(self)(result, **metadata)

    def antiderivative(self, antiderivative_order=1):
        """Integrate modes with respect to time"""
        return self.interpolate(self.time, derivative_order=-antiderivative_order)

    def derivative(self, derivative_order=1):
        """Differentiate modes with respect to time"""
        return self.interpolate(self.time, derivative_order=derivative_order)

    @property
    def dot(self):
        """Differentiate modes once with respect to time"""
        return self.derivative()

    @property
    def ddot(self):
        """Differentiate modes twice with respect to time"""
        return self.derivative(2)

    @property
    def int(self):
        """Integrate modes once with respect to time"""
        return self.antiderivative()

    @property
    def iint(self):
        """Integrate modes twice with respect to time"""
        return self.antiderivative(2)

    @property
    def eth_GHP(self):
        """Raise spin-weight with GHP convention"""
        return self.eth / np.sqrt(2)

    @property
    def ethbar_GHP(self):
        """Lower spin-weight with GHP convention"""
        return self.ethbar / np.sqrt(2)
|
{"hexsha": "7f6677f8ae379dd8c92f7e1d10375257dac21e5a", "size": 5138, "ext": "py", "lang": "Python", "max_stars_repo_path": "scri/modes_time_series.py", "max_stars_repo_name": "akhairna/scri", "max_stars_repo_head_hexsha": "3b7f307d19ef303914cef2fa088ee750ef8533c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scri/modes_time_series.py", "max_issues_repo_name": "akhairna/scri", "max_issues_repo_head_hexsha": "3b7f307d19ef303914cef2fa088ee750ef8533c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scri/modes_time_series.py", "max_forks_repo_name": "akhairna/scri", "max_forks_repo_head_hexsha": "3b7f307d19ef303914cef2fa088ee750ef8533c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9242424242, "max_line_length": 180, "alphanum_fraction": 0.6344881277, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1137}
|
% ---------------------------------------------------------
% Project: PhD KAPPA
% File: introduction.tex
% Author: Andrea Discacciati
%
% Purpose: Introduction
% ---------------------------------------------------------
\chapter{Introduction}
\begin{quote}
\textit{``Epidemiological studies provide the only definitive information on the degree of cancer risk to man. Since malignant diseases are clearly of multifactorial origin, their investigation in man has become increasingly complex, and epidemiological and statistical studies on cancer require a correspondingly complex and rigorous methodology.''}
\par\raggedleft---Lorenzo Tomatis\footnote{Lorenzo Tomatis (*1929--\textdagger2007) was the Director from 1982 until 1993 of the International Agency for Research on Cancer in Lyon, France. This quotation is taken from the foreword he wrote for the book ``Statistical methods in cancer research. Volume 2 — The analysis of cohort studies'' \citep{breslow_statistical_1987}.}
\end{quote}
\bigskip
Prostate cancer was the second most common cancer in men worldwide and the most common one in developed countries in 2012 \citep{ferlay_cancer_2015}, yet its etiology remains poorly understood. To date, the only established risk factors are those that are non-modifiable: age, family history of the disease, and race/ethnicity \citep{gronberg_prostate_2003}. %Therefore, the identification of modifiable risk factors which might prevent prostate cancer development and progression is of paramount importance.
The identification of potential modifiable risk factors is complicated by the considerable biologic heterogeneity of the disease --- ranging from indolent to potentially lethal tumors --- suggesting different etiologies and distinct entities \citep{discacciati_lifestyle_2014, jahn_high_2015}. %As a consequence, epidemiologic studies should focus on the analysis of prostate cancer separately by its aggressiveness .
%which is reflected by diverging risk factors patterns between aggressive and indolent prostate cancer \citep{jahn_high_2015}. %Men who develop prostate cancer suffer significant impairments in quality of life, both attributable to the disease itself and to side effects of treatment.
Obesity is a major global public health concern, with 205 million men worldwide estimated to be obese. This obesity epidemic is particularly severe in developed countries, where, for example, as much as 20\% of men living in Western Europe and 30\% in the U.S. were estimated to be obese \citep{finucane_national_2011}.
Since body fatness is related to hormonal and metabolic changes and given that prostate cancer is a hormone-related cancer, the hypothesis of an association between obesity and prostate cancer risk --- possibly depending on the aggressiveness of the disease --- has been repeatedly formulated \citep{hsing_obesity_2007}.
Elucidating the possible association between obesity and prostate cancer is not only important to unravel the etiology of the disease, but it is also of public health significance, as these two medical conditions affect large proportions of the male population. In addition, the fact that obesity is a largely preventable condition might provide strategies for reducing prostate cancer incidence and mortality.
\bigskip
\bigskip
%Rather, it is also of paramount public health significance, as these two medical conditions affect large proportions of the male population.
As the words by Lorenzo Tomatis remind us, epidemiologic investigation cannot be separated from epidemiologic methods. Likewise, the two aims of this thesis are intertwined.
First, this thesis focuses on elucidating the association of body fatness measured during early and middle-late adulthood with localized and advanced prostate cancer incidence and mortality. This is done by analyzing primary data from a large population-based cohort study \citepalias{discacciati_body_2011} and by summarizing the existing epidemiologic evidence in the form of aggregated data \citepalias{discacciati_body_2012}.
Second, this thesis deals with some methodological aspects related to the analysis of primary and aggregated data. In particular, \citetalias{bellavia_using_2015} extends the use of quantile regression for censored data to those situations where attained age is the time scale of interest, \citetalias{discacciati_interpretation_2015} clarifies the appropriate use and interpretation of risk and rate advancement periods, while \citetalias{discacciati_goodness_2015} presents relevant methods for assessing the goodness of fit of dose--response meta-analysis models for binary outcomes.
%This thesis focuses on elucidating the possible associations of body fatness measured during early and middle-late adulthood with localized and advanced prostate cancer incidence, as well as with prostate cancer mortality. This is done by analyzing primary data coming from a large population-based cohort study of around 45,000 Swedish men and by summarizing the existing epidemiologic evidence in form of aggregated data.
%Lastly, as the words written by Lorenzo Tomatis remind us, epidemiologic investigation cannot be separated from epidemiologic methods. Therefore, this thesis also addresses some methodological topics that are strictly related to the analysis of primary and aggregated data.
|
{"hexsha": "300a124bad36c43cd83708acca1ca011c42dd0ac", "size": 5346, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "introduction.tex", "max_stars_repo_name": "anddis/phd-thesis", "max_stars_repo_head_hexsha": "860faf0686c16f7c97865d99d801050a10d2df7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "introduction.tex", "max_issues_repo_name": "anddis/phd-thesis", "max_issues_repo_head_hexsha": "860faf0686c16f7c97865d99d801050a10d2df7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "introduction.tex", "max_forks_repo_name": "anddis/phd-thesis", "max_forks_repo_head_hexsha": "860faf0686c16f7c97865d99d801050a10d2df7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-02T08:59:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T10:01:27.000Z", "avg_line_length": 111.375, "max_line_length": 586, "alphanum_fraction": 0.8071455294, "num_tokens": 1077}
|
import os
import shutil
import tempfile
import unittest

import numpy as np

from pyV3D import WV_Wrapper, ConnectivitiesError
from pyV3D import get_bounding_box, get_focus, adjust_points
from pyV3D.cube import CubeGeometry, CubeSender
from pyV3D.stl import STLSender
class WV_Test_Wrapper(WV_Wrapper):
    """Bare WV_Wrapper with no output handling, used for connectivity checks.

    NOTE(review): this name differs from ``WV_test_Wrapper`` below only in
    capitalization -- confirm both classes are intended.
    """
    pass
class WV_test_Wrapper(WV_Wrapper):
    """WV_Wrapper that records the binary protocol stream into a file.

    The tests use this to capture sender output and diff it against
    reference ``.bin`` files on disk.
    """
    def __init__(self, fname):
        super(WV_test_Wrapper, self).__init__()
        # Captured protocol bytes are appended here; callers must close it.
        self.binfile = open(fname, 'wb')

    def send(self, first=False):
        """Serialize the current GPrims to self.binfile via send_GPrim.

        When `first` is True an extra packet is emitted before the full
        suite (the `1` vs `-1` second argument -- per the comments below,
        `1` requests the init packet and `-1` the suite of GPrims).
        """
        self.prepare_for_sends()
        if first:
            # send init packet
            self.send_GPrim(self, 1, self.send_binary_data)
            # send initial suite of GPrims
            self.send_GPrim(self, -1, self.send_binary_data)
        else:
            # send initial suite of GPrims
            self.send_GPrim(self, -1, self.send_binary_data)
        self.finish_sends()

    def send_binary_data(self, wsi, buf, ibuf):
        """This is called multiple times during the sending of a
        set of graphics primitives.  Writes each buffer to the capture
        file; returns 0 to signal success to the caller.
        """
        self.binfile.write(buf)
        return 0
class PyV3DTestCase(unittest.TestCase):
    """Tests for pyV3D senders (cube/STL), geometry helpers and connectivity.

    Review fixes applied:
    * ``.bin`` files are now opened in binary mode ('rb'); text mode fails
      or corrupts the comparison on Python 3.
    * Several assertions used ``assertEqual(a.all(), b.all())``, which only
      compares two booleans and passes for almost any pair of arrays.  They
      now compare element-wise; the previously-unverified expected values
      are flagged below and should be re-derived if any now fail.
    * ``test_checkConnectivities`` previously passed even when no exception
      was raised; it now requires ``ConnectivitiesError``.
    * ``tearDown`` now actually removes the per-test scratch directory.
    """

    def setUp(self):
        # Fresh scratch directory per test; self.path locates reference files.
        self.tdir = tempfile.mkdtemp()
        self.path = os.path.dirname(os.path.abspath(__file__))

    def tearDown(self):
        # Best-effort cleanup of the scratch directory.
        shutil.rmtree(self.tdir, ignore_errors=True)

    def _compare(self, s1, s2, name1, name2):
        """Fail at the first position where byte sequences s1 and s2 differ."""
        if len(s1) != len(s2):
            self.fail("%s has different length than %s" % (name1, name2))

        for i in range(len(s1)):
            if s1[i] != s2[i]:
                self.fail(
                    "byte %d (at least) "
                    "differs between files %s and %s. "
                    "(%s != %s)" % (i, name1, name2, s1[i], s2[i]))

    def _compare_files(self, refname, newname):
        """Compare a freshly-generated binary file against its reference copy."""
        with open(refname, 'rb') as f:
            refcontent = f.read()
        with open(newname, 'rb') as f:
            newcontent = f.read()
        self._compare(refcontent, newcontent, refname, newname)

    def test_bounding_box(self):
        a = np.array([[-1, -1, -1], [1, 1, 1]], dtype=np.float32)
        b = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.float32)

        bbox = get_bounding_box(a.flatten())
        self.assertTrue((a[::-1] == bbox).all())

        bbox = get_bounding_box(b.flatten())
        self.assertTrue((b[::-1] == bbox).all())

        # The bounding box of a bounding box is itself.
        bbox2 = get_bounding_box(bbox.flatten())
        self.assertTrue((bbox2 == bbox).all())

    def test_focus(self):
        # test for cube
        bounding_box = np.array([10, 10, 10, 0, 0, 0], dtype=np.float32)
        expected_focus = np.array([5, 5, 5, 5], dtype=np.float32)
        np.testing.assert_allclose(get_focus(bounding_box), expected_focus)

        # test for box
        bounding_box = np.array([1024, 512, 256, 0, 0, 0], dtype=np.float32)
        expected_focus = np.array([512, 256, 128, 512], dtype=np.float32)
        np.testing.assert_allclose(get_focus(bounding_box), expected_focus)

        # Max coordinate should be corrected when coordinate values are shifted.
        x_max_bounding_box = np.array([512, 256, 128, 0, 0, 0], dtype=np.float32)
        y_max_bounding_box = np.array([256, 512, 128, 0, 0, 0], dtype=np.float32)
        z_max_bounding_box = np.array([256, 128, 512, 0, 0, 0], dtype=np.float32)
        np.testing.assert_allclose(get_focus(x_max_bounding_box),
                                   np.array([256, 128, 64, 256], dtype=np.float32))
        np.testing.assert_allclose(get_focus(y_max_bounding_box),
                                   np.array([128, 256, 64, 256], dtype=np.float32))
        np.testing.assert_allclose(get_focus(z_max_bounding_box),
                                   np.array([128, 64, 256, 256], dtype=np.float32))

        # Test for not so clean bounding box.
        # NOTE(review): this expectation was never enforced by the old
        # (vacuous) assertion -- re-derive from get_focus if it fails.
        bounding_box = np.array([1024, 500, -340, -472, 1, -2000], dtype=np.float32)
        expected_focus = np.array([788, 250.5, -1170, 1170], dtype=np.float32)
        np.testing.assert_allclose(get_focus(bounding_box), expected_focus)

    def test_adjust_points(self):
        # Center should always be translated to the origin.
        focus = np.array([10] * 4, dtype=np.float32)
        center = np.array([10] * 3, dtype=np.float32)
        origin = np.zeros((1, 3), dtype=np.float32)
        np.testing.assert_allclose(adjust_points(focus, center), origin)

        # Bounding-box coordinates should be translated so they are
        # centered about the origin.
        # NOTE(review): these expectations were never enforced by the old
        # (vacuous) assertions -- re-derive from adjust_points if they fail.
        pmax = np.array([100, 50, 25], dtype=np.float32)
        pmin = np.array([0, 0, 0], dtype=np.float32)
        np.testing.assert_allclose(adjust_points(focus, pmin),
                                   np.array([-50, -25, -12.5], dtype=np.float32))
        np.testing.assert_allclose(adjust_points(focus, pmax),
                                   np.array([50, 25, 12.5], dtype=np.float32))

    def test_cube(self):
        cname = os.path.join(self.path, 'cube.bin')
        newname = os.path.join(self.tdir, 'cube.bin')

        sender = CubeSender(WV_test_Wrapper(newname))
        sender.send(CubeGeometry(), first=True)
        sender.wv.binfile.close()
        self._compare_files(cname, newname)

    def test_ascii_stl(self):
        cname = os.path.join(self.path, 'star.bin')
        newname = os.path.join(self.tdir, 'star.bin')

        sender = STLSender(WV_test_Wrapper(newname))
        sender.send(os.path.join(self.path, 'star.stl'), first=True)
        sender.wv.binfile.close()
        self._compare_files(cname, newname)

    def test_binary_stl(self):
        cname = os.path.join(self.path, 'knot.bin')
        newname = os.path.join(self.tdir, 'knot.bin')

        sender = STLSender(WV_test_Wrapper(newname))
        sender.send(os.path.join(self.path, 'knot.stl'), first=True)
        sender.wv.binfile.close()
        self._compare_files(cname, newname)

    def test_checkConnectivities(self):
        '''
        Test for geometry with a single face with 4 points and two triangles

        p0 *--* p3
           | /|
           |/ |
        p1 *--* p2
        '''
        # Points are zero indexed.
        points = np.zeros((4, 3), dtype=np.float32).flatten()

        # Connectivities should be zero indexed.
        good_triangles = np.array([[0, 1, 2], [1, 2, 3]], dtype=int).flatten()

        # Connectivities must not reference points outside [0, len(points)/3).
        bad_triangles = np.array([[1, 2, 3], [2, 3, 4]], dtype=int).flatten()

        wrapper = WV_Test_Wrapper()
        # Presumably registers the wrapper with the sender machinery; the
        # return value was unused in the original test as well -- confirm.
        CubeSender(wrapper)

        wrapper.set_face_data(points=points, tris=good_triangles, name="good")
        # The bad connectivities must actually raise; the original
        # try/except-pass also passed when no exception occurred.
        with self.assertRaises(ConnectivitiesError):
            wrapper.set_face_data(points, bad_triangles, name="bad")
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "4b24c8b529775cd1c4ac051740e4b78042bb7504", "size": 7678, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyV3D/test/test_pyV3D.py", "max_stars_repo_name": "OpenMDAO/pyV3D", "max_stars_repo_head_hexsha": "2baf32c489e2c91531b89e51a879ba8074ae2803", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-05-13T23:43:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-20T10:15:17.000Z", "max_issues_repo_path": "src/pyV3D/test/test_pyV3D.py", "max_issues_repo_name": "OpenMDAO/pyV3D", "max_issues_repo_head_hexsha": "2baf32c489e2c91531b89e51a879ba8074ae2803", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-10-07T08:28:20.000Z", "max_issues_repo_issues_event_max_datetime": "2016-10-07T10:25:34.000Z", "max_forks_repo_path": "src/pyV3D/test/test_pyV3D.py", "max_forks_repo_name": "OpenMDAO/pyV3D", "max_forks_repo_head_hexsha": "2baf32c489e2c91531b89e51a879ba8074ae2803", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-07-16T03:57:36.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-01T23:57:45.000Z", "avg_line_length": 31.9916666667, "max_line_length": 78, "alphanum_fraction": 0.5915603022, "include": true, "reason": "import numpy", "num_tokens": 1987}
|
# Entry point for the MolarWeight.jl test suite (run via `Pkg.test()`).
using MolarWeight
using Test
@testset "MolarWeight.jl" begin
    # Write your tests here.  (Placeholder left by the package template.)
end
|
{"hexsha": "c6277d5aad0ca1b5b7214e80ace7a9d3727692c8", "size": 95, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "SeleneSofi/MolarWeight.jl", "max_stars_repo_head_hexsha": "21d1b3b3a77601d442f14e84613d48f42ef1496b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "SeleneSofi/MolarWeight.jl", "max_issues_repo_head_hexsha": "21d1b3b3a77601d442f14e84613d48f42ef1496b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "SeleneSofi/MolarWeight.jl", "max_forks_repo_head_hexsha": "21d1b3b3a77601d442f14e84613d48f42ef1496b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.5714285714, "max_line_length": 31, "alphanum_fraction": 0.7473684211, "num_tokens": 28}
|
__author__ = "Saswati Ray"
__email__ = "sray@cs.cmu.edu"
import pandas as pd
import numpy as np
import math, sys
from sklearn import metrics
from sklearn import preprocessing
import problem_pb2
import util
import logging
import d3m.index
from timeit import default_timer as timer
import operator
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor, GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.ensemble.bagging import BaggingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression, Lasso, Ridge
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.svm import LinearSVC, LinearSVR, SVC, SVR
from sklearn.model_selection import GridSearchCV
# Grid-search spaces for the sklearn-backed d3m primitives, keyed by the
# primitive's python_path. Each value is a two-element list
# [estimator_instance, param_grid] consumed by
# PrimitiveDescription.optimize_primitive_gridsearch() via GridSearchCV.
gridsearch_estimators_parameters = {'d3m.primitives.regression.random_forest.SKlearn': [RandomForestRegressor(),
                                    {'n_estimators': [100],
                                     'max_depth': [8, 10, 15, None],
                                     'min_samples_split': [2, 5, 10]}],
                                    'd3m.primitives.classification.random_forest.SKlearn': [RandomForestClassifier(),
                                    {'n_estimators': [100],
                                     'max_depth': [8, 10, 15, None],
                                     'min_samples_split': [2, 5, 10],
                                     'class_weight': ['balanced', None]}],
                                    'd3m.primitives.classification.extra_trees.SKlearn': [ExtraTreesClassifier(),
                                    {'n_estimators': [100],
                                     'max_depth': [8, 10, 15, None],
                                     'min_samples_split': [2, 5, 10],
                                     'class_weight': ['balanced', None]}],
                                    'd3m.primitives.regression.extra_trees.SKlearn': [ExtraTreesRegressor(),
                                    {'n_estimators': [100],
                                     'max_depth': [8, 10, 15, None],
                                     'min_samples_split': [2, 5, 10]}],
                                    'd3m.primitives.classification.gradient_boosting.SKlearn': [GradientBoostingClassifier(),
                                    {'n_estimators': [100],
                                     'max_depth': [3, 5, 8, 10, 15],
                                     'max_features': ['sqrt', None],
                                     'min_samples_leaf': [1, 2, 5],
                                     'min_samples_split': [2, 5, 10]}],
                                    'd3m.primitives.regression.gradient_boosting.SKlearn': [GradientBoostingRegressor(),
                                    {'n_estimators': [100],
                                     'max_depth': [3, 5, 8, 10, 15],
                                     'max_features': ['sqrt', None],
                                     'min_samples_leaf': [1, 2, 5],
                                     'min_samples_split': [2, 5, 10]}],
                                    'd3m.primitives.classification.linear_svc.SKlearn': [LinearSVC(),
                                    {'C': [0.01, 0.1, 1, 10, 100],
                                     'class_weight': ['balanced', None]}],
                                    'd3m.primitives.regression.linear_svr.SKlearn': [LinearSVR(),
                                    {'C': [0.01, 0.1, 1, 10, 100]}],
                                    'd3m.primitives.classification.svc.SKlearn': [SVC(),
                                    {'C': [0.01, 0.1, 1, 10, 100],
                                     'class_weight': ['balanced', None]}],
                                    'd3m.primitives.regression.svr.SKlearn': [SVR(),
                                    {'C': [0.01, 0.1, 1, 10, 100]}],
                                    'd3m.primitives.classification.logistic_regression.SKlearn': [LogisticRegression(),
                                    {'C': [0.1, 1, 10, 100],
                                     'class_weight': ['balanced', None]}],
                                    'd3m.primitives.regression.ridge.SKlearn': [Ridge(),
                                    {'alpha': [0.001, 0.01, 0.1, 1, 5]}],
                                    'd3m.primitives.regression.lasso.SKlearn': [Lasso(),
                                    {'alpha': [0.001, 0.01, 0.1, 1, 5]}]
                                   }
def rmse(y_true, y_pred):
    """Root-mean-squared error between true values and predictions."""
    mse = metrics.mean_squared_error(y_true, y_pred)
    return mse ** 0.5
def compute_min_class(y, pos_label):
    """Return the relative frequency of the rarest class in the first column of y.

    `pos_label` is accepted for call-site compatibility but not used.
    value_counts() sorts descending, so the last entry is the minority class.
    """
    return y.iloc[:, 0].value_counts(normalize=True).iloc[-1]
class PrimitiveDescription(object):
    """
    Class representing single primitive.
    Used for optimizing primitive hyper-parameters, doing cross-validation.
    """
    def __init__(self, primitive, primitive_class):
        """Store the d3m primitive class and its metadata wrapper.

        primitive: the d3m primitive (callable with hyperparams).
        primitive_class: metadata object; only its `id` is read here.
        """
        self.id = primitive_class.id
        self.primitive = primitive
        self.primitive_class = primitive_class
    def get_num_splits(self, length, cols):
        """Pick the number of CV folds based on dataset size.

        Fewer folds for larger datasets (to bound runtime); capped at 5
        folds when there are more than 500 columns.
        """
        splits = 2
        if length < 500:
            splits = 50
            if length < splits:
                # Cannot have more folds than rows
                splits = length
        elif length < 1000:
            splits = 25
        elif length < 2500:
            splits = 20
        elif length < 5000:
            splits = 10
        elif length < 10000:
            splits = 5
        elif length < 20000:
            splits = 3
        else:
            splits = 2
        if cols > 500:
            splits = min(5, splits)
        return splits
    def score_ESRNN_primitive(self, prim_instance, X, y):
        """Fit the ESRNN forecasting primitive and return its best training loss.

        NOTE(review): reads the private `_esrnn.losses` attribute of the
        primitive — assumes losses is a non-empty list; verify against the
        ESRNN primitive implementation.
        """
        prim_instance.set_training_data(inputs=X, outputs=y)
        prim_instance.fit()
        metric = min(prim_instance._esrnn.losses)
        return metric
    def score_Kanine_primitive(self, X, y, metric_type, posLabel):
        """Sweep n_neighbors for the Kanine primitive on the training set.

        Fits and scores on the same data (no hold-out) and returns the
        (best_score, best_n_neighbors) pair, taking the largest score.
        """
        targets = y
        neighbors = [3, 5, 10, 20]
        primitive_hyperparams = self.primitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        scores = []
        for n in neighbors:
            custom_hyperparams = dict()
            custom_hyperparams['n_neighbors'] = n
            model = self.primitive(hyperparams=primitive_hyperparams(primitive_hyperparams.defaults(), **custom_hyperparams))
            model.set_training_data(inputs=X, outputs=y)
            model.fit()
            output = model.produce(inputs=X).value
            metric = self.evaluate_metric(output, targets, metric_type, posLabel)
            scores.append(metric)
        print(scores)
        # argsort is ascending, so the last index holds the highest score
        indices = np.argsort(scores)
        index = indices[len(neighbors)-1]
        return (scores[index], neighbors[index])
    def score_primitive(self, X, y, metric_type, posLabel, custom_hyperparams, taskname):
        """
        Learns optimal hyperparameters for the primitive
        Returns metric score and optimal parameters.

        Dispatches on the primitive's python_path: special-cases Kanine,
        graph-/vision-style primitives (returned with fixed scores, no CV),
        ESRNN, and otherwise runs k-fold CV with size-based constraints on
        sklearn hyperparameters.
        """
        python_path = self.primitive.metadata.query()['python_path']
        optimal_params = dict()
        if custom_hyperparams is not None:
            for name, value in custom_hyperparams.items():
                optimal_params[name] = value
        if 'Kanine' in python_path:
            (score, neighbors) = self.score_Kanine_primitive(X, y, metric_type, posLabel)
            optimal_params['n_neighbors'] = neighbors
            return (score, optimal_params)
        # Primitives that cannot be cross-validated here get fixed scores:
        # 0.0 when lower-is-better, otherwise ~1.0 (slightly lower for JHU/retina).
        if y is None or 'graph' in python_path or 'Vertex' in python_path or 'DistilLink' in python_path or 'community' in python_path or 'JHU' in python_path or 'yolo' in python_path or 'FCN' in python_path or 'retina' in python_path:
            if util.invert_metric(metric_type) is True:
                return (0.0, optimal_params)
            else:
                if 'JHU' in python_path:
                    return (0.99, optimal_params)
                if 'retina' in python_path:
                    return (0.99, optimal_params)
                return (1.0, optimal_params)
        # Lasso CV can become very expensive for large number of columns!!!
        # Use lasso's CV score
        if 'lasso_cv' in python_path and len(X.columns) > 500:
            return (1.0, optimal_params)
        # Put constraints on primitive hyperparameters to avoid excessive time-complexity!
        if 'SKlearn' in python_path:
            hyperparam_spec = self.primitive.metadata.query()['primitive_code']['hyperparams']
            if len(X) >= 50000 and ('random_forest' in python_path or 'gradient_boosting' in python_path or 'bagging' in python_path): # 'n_estimators' in hyperparam_spec:
                optimal_params['n_estimators'] = 50
            if len(X) >= 100000 and 'n_estimators' in hyperparam_spec:
                optimal_params['n_estimators'] = 10
            if len(X) >= 100000 and ('linear_svc' in python_path or 'linear_svr' in python_path):
                optimal_params['max_iter'] = 100
            if len(X.columns) > 500 and ('bagging' in python_path):
                optimal_params['max_features'] = 15
            if len(X.columns) > 50 and 'gradient_boosting' in python_path:
                optimal_params['max_features'] = 7
        primitive_hyperparams = self.primitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        prim_instance = self.primitive(hyperparams=primitive_hyperparams(primitive_hyperparams.defaults(), **optimal_params))
        score = 0.0
        if 'esrnn' in python_path:
            start = timer()
            score = self.score_ESRNN_primitive(prim_instance, X, y)
            end = timer()
            logging.critical("Time taken for esrnn = %s secs", end-start)
            logging.critical("MAE Score for %s = %s", python_path, score)
            return (score, optimal_params)
        splits = self.get_num_splits(len(X), len(X.columns))
        # Hard coding MAE for forecasting problems since thats what esrnn supports!
        if taskname == 'FORE_REGRESSION' or 'FORECASTING' in taskname:
            metric_type = "MEAN_ABSOLUTE_ERROR"
        if metric_type == 'F1':
            # min_class currently unused; the class_weight tuning below is disabled
            min_class = compute_min_class(y, posLabel)
            #if min_class <= 0.1 and \
            #   ('random_forest' in python_path or \
            #    'svc' in python_path or \
            #    'extra_trees' in python_path or \
            #    'logistic' in python_path or \
            #    'passive' in python_path):
            #    (param, mean) = self.optimize_F1_metric(X, y, optimal_params, metric_type, posLabel)
            #    optimal_params['class_weight'] = param
            #    return (mean, optimal_params)
        # Run k-fold CV and compute mean metric score
        (score, metric_scores) = self.k_fold_CV(prim_instance, X, y, metric_type, posLabel, splits)
        # Report a 95% confidence interval (1.96 * standard error) for the CV mean
        mean = np.mean(metric_scores)
        lb = max((int)(0.025*len(metric_scores) + 0.5)-1,0)
        ub = min((int)(0.975*len(metric_scores) + 0.5)-1, len(metric_scores)-1)
        stderror = np.std(metric_scores)/math.sqrt(len(metric_scores))
        z = 1.96*stderror
        logging.critical("CV scores for %s = %s(%s - %s) k = %s", python_path, mean, mean-z, mean+z, len(metric_scores))
        return (score, optimal_params)
    def evaluate_metric(self, predictions, Ytest, metric, posLabel):
        """
        Function to compute metric score for predicted-vs-true output.

        `metric` is a d3m metric name string; unknown metrics fall back to
        accuracy, and ranking/detection metrics are stubbed to 0.0.
        """
        count = len(Ytest)
        if metric == "ACCURACY":
            return metrics.accuracy_score(Ytest, predictions)
        elif metric == "PRECISION":
            return metrics.precision_score(Ytest, predictions)
        elif metric == "RECALL":
            return metrics.recall_score(Ytest, predictions)
        elif metric == "F1":
            return metrics.f1_score(Ytest, predictions, pos_label=posLabel)
        elif metric == "F1_MICRO":
            return metrics.f1_score(Ytest, predictions, average='micro')
        elif metric == "F1_MACRO":
            return metrics.f1_score(Ytest, predictions, average='macro')
        elif metric == "ROC_AUC":
            return metrics.roc_auc_score(Ytest, predictions)
        elif metric == "ROC_AUC_MICRO":
            return metrics.roc_auc_score(Ytest, predictions, average='micro')
        elif metric == "ROC_AUC_MACRO":
            return metrics.roc_auc_score(Ytest, predictions, average='macro')
        elif metric == "MEAN_SQUARED_ERROR":
            return metrics.mean_squared_error(Ytest, predictions)
        elif metric == "ROOT_MEAN_SQUARED_ERROR":
            return math.sqrt(metrics.mean_squared_error(Ytest, predictions))
        elif metric == "MEAN_ABSOLUTE_ERROR":
            return metrics.mean_absolute_error(Ytest, predictions)
        elif metric == "R_SQUARED":
            return metrics.r2_score(Ytest, predictions)
        elif metric == "NORMALIZED_MUTUAL_INFORMATION":
            return metrics.normalized_mutual_info_score(Ytest, predictions)
        elif metric == "JACCARD_SIMILARITY_SCORE":
            return metrics.jaccard_similarity_score(Ytest, predictions)
        elif metric == "PRECISION_AT_TOP_K":
            return 0.0
        elif metric == "OBJECT_DETECTION_AVERAGE_PRECISION":
            return 0.0
        elif metric == "MEAN_RECIPROCAL_RANK":
            return 0.0
        elif metric == "HITS_AT_K":
            return 0.0
        else:
            return metrics.accuracy_score(Ytest, predictions)
    def optimize_primitive_gridsearch(self, train, output, python_path, metric, posLabel):
        """Run GridSearchCV over the module-level parameter grid for this
        primitive's python_path; returns best_params_ or None when no grid
        is registered for the path."""
        # Do grid-search to learn optimal parameters for the model
        if python_path in gridsearch_estimators_parameters:
            start = timer()
            (model, search_grid) = gridsearch_estimators_parameters[python_path]
            splits = self.get_num_splits(len(train), len(train.columns))
            from sklearn.metrics import make_scorer
            if metric == "ACCURACY":
                scorer = make_scorer(metrics.accuracy_score)
            elif metric == "F1":
                scorer = make_scorer(metrics.f1_score, pos_label=posLabel)
            elif metric == "F1_MACRO":
                scorer = make_scorer(metrics.f1_score, average='macro')
            elif metric == "MEAN_SQUARED_ERROR":
                scorer = make_scorer(metrics.mean_squared_error, greater_is_better=False)
            elif metric == "ROOT_MEAN_SQUARED_ERROR":
                scorer = make_scorer(rmse, greater_is_better=False)
            else:
                # None makes GridSearchCV fall back to the estimator's default scorer
                scorer = None
            rf_random = GridSearchCV(estimator = model, param_grid = search_grid, scoring = scorer, cv = splits, verbose=0, n_jobs = -1)
            # Fit the random search model
            rf_random.fit(train, output)
            print(rf_random.best_params_)
            end = timer()
            print("Time taken for ", python_path, " = ", end-start, " secs")
            return rf_random.best_params_
        else:
            print("No grid search done for ", python_path)
            return None
    def optimize_F1_metric(self, train, y, optimal_params, metric_type, posLabel):
        """Try class_weight in {'balanced', None} via k-fold CV and return the
        best (class_weight, mean_score) tuple (highest mean CV score)."""
        corex_hp = {'class_weight': ['balanced', None]}
        RPI_path = self.primitive.metadata.query()['python_path']
        primitive_hyperparams = self.primitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        splits = self.get_num_splits(len(train), len(train.columns))
        scores = {}
        start = timer()
        for i in corex_hp['class_weight']:
            # NOTE(review): mutates the caller's optimal_params dict in place
            optimal_params['class_weight'] = i
            model = self.primitive(hyperparams=primitive_hyperparams(primitive_hyperparams.defaults(), **optimal_params))
            (score, metric_scores) = self.k_fold_CV(model, train, y, metric_type, posLabel, splits)
            mean = np.mean(metric_scores)
            scores[(i,mean)] = mean
        print(RPI_path)
        print(scores)
        sorted_x = sorted(scores.items(), key=operator.itemgetter(1))
        sorted_x.reverse()
        (key, value) = sorted_x[0]
        end = timer()
        logging.critical("%s time taken = %s secs", RPI_path, end-start)
        return key
    def optimize_RPI_bins(self, train, y, python_path, metric_type, posLabel):
        """Jointly tune the RPI feature-selection primitive (nbins, method)
        and a downstream sklearn model's n_estimators via k-fold CV.

        Pipeline per candidate: RPI transform -> most_frequent imputer ->
        model given by `python_path`. Returns the best
        (nbins, method, n_estimators, mean_score) key; higher mean wins
        unless the metric is lower-is-better.
        """
        corex_hp = {'nbins': [2, 3, 4, 10],# 12, 15, 20],
                    'method': ['counting', 'fullBayesian'], #'pseudoBayesian'],
                    'n_estimators': [20, 25, 30, 32]}
        RPI_path = self.primitive.metadata.query()['python_path']
        # For simultaneous_markov_blanket.AutoRPI
        if 'markov' in RPI_path:
            corex_hp['nbins'] = [10]
            corex_hp['method'] = ['counting']#, 'pseudoBayesian', 'fullBayesian', 'BayesFactor']
        if(len(train) > 25000): # Use defaults. No tuning
            corex_hp = {'nbins': [10],
                        'method': ['counting'],
                        'n_estimators': [10]}
        if 'linear_discriminant_analysis' in python_path:
            # LDA takes no n_estimators; a single dummy value keeps the loop shape
            corex_hp['n_estimators'] = [0]
        prim = d3m.index.get_primitive(python_path)
        model_hyperparams = prim.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        primitive_hyperparams = self.primitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        sklearn_prim = d3m.index.get_primitive('d3m.primitives.data_cleaning.imputer.SKlearn')
        sklearn_hyperparams = sklearn_prim.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        custom_hyperparams = dict()
        custom_hyperparams['strategy'] = 'most_frequent'
        sklearn_primitive = sklearn_prim(hyperparams=sklearn_hyperparams(sklearn_hyperparams.defaults(), **custom_hyperparams))
        splits = self.get_num_splits(len(train), len(train.columns))
        scores = {}
        start = timer()
        for i in corex_hp['nbins']:
            custom_hyperparams = dict()
            custom_hyperparams['nbins'] = i
            for j in corex_hp['method']:
                custom_hyperparams['method'] = j
                model = self.primitive(hyperparams=primitive_hyperparams(primitive_hyperparams.defaults(), **custom_hyperparams))
                model.set_training_data(inputs=train, outputs=y)
                model.fit()
                output = model.produce(inputs=train).value
                # Impute before handing the RPI-selected features to the model
                sklearn_primitive.set_training_data(inputs=output)
                sklearn_primitive.fit()
                output = sklearn_primitive.produce(inputs=output).value
                for k in corex_hp['n_estimators']:
                    model_hp = dict()
                    model_hp['n_estimators'] = k
                    if 'gradient_boosting' in python_path:
                        model_hp['learning_rate'] = 10/k
                    if 'linear_discriminant_analysis' in python_path:
                        rf_model = prim(hyperparams=model_hyperparams(model_hyperparams.defaults()))
                    else:
                        rf_model = prim(hyperparams=model_hyperparams(model_hyperparams.defaults(), **model_hp))
                    (score, metric_scores) = self.k_fold_CV(rf_model, output, y, metric_type, posLabel, splits)
                    mean = np.mean(metric_scores)
                    median = np.median(metric_scores)
                    stderror = np.std(metric_scores)/math.sqrt(len(metric_scores))
                    z = 1.96*stderror
                    #print("Mean = ", mean, " Median = ", median, " LB = ", mean-z, " diff = ", mean-median, " min = ", min(metric_scores), " ratio = ", mean/(mean-median))
                    if util.invert_metric(metric_type) is True:
                        scores[(i,j,k,mean)] = mean#-z
                    else:
                        scores[(i,j,k,mean)] = mean#/(mean-median)
        sorted_x = sorted(scores.items(), key=operator.itemgetter(1))
        if util.invert_metric(metric_type) is False:
            sorted_x.reverse()
        (key, value) = sorted_x[0]
        end = timer()
        logging.critical("%s time taken for %s = %s secs", RPI_path, python_path, end-start)
        return key
    def k_fold_CV(self, prim_instance, X, y, metric_type, posLabel, splits):
        """
        Run k-fold CV.
        k = splits
        prim_instance has already been initialized with hyperparameters.

        Returns (mean_score, per_fold_scores). VAR forecasting primitives are
        fit/scored once on the full data instead of being cross-validated.
        """
        python_path = self.primitive.metadata.query()['python_path']
        metric_sum = 0
        score = 0.0
        if 'VAR' in python_path:
            prim_instance.set_training_data(inputs=X, outputs=y)
            prim_instance.fit()
            predictions = prim_instance.produce(inputs=X).value
            predictions = predictions.iloc[:,len(predictions.columns)-1]
            metric = self.evaluate_metric(predictions, y, metric_type, posLabel)
            # Duplicate the single score so callers can still take mean/std
            return (metric, [metric, metric])
        # Run k-fold CV and compute mean metric score
        metric_scores = []
        if 'classification' in python_path: # Classification
            frequencies = y.iloc[:,0].value_counts()
            min_freq = frequencies.iat[len(frequencies)-1]
            if min_freq < splits:
                # Rarest class too small for stratification; fall back to plain KFold
                from sklearn.model_selection import KFold as KFold
                kf = KFold(n_splits=splits, shuffle=True, random_state=9001)
                split_indices = kf.split(X)
            else:
                from sklearn.model_selection import StratifiedKFold as KFold
                kf = KFold(n_splits=splits, shuffle=True, random_state=9001)
                split_indices = kf.split(X, y)
        else: # Regression
            from sklearn.model_selection import KFold as KFold
            kf = KFold(n_splits=splits, shuffle=True, random_state=9001)
            split_indices = kf.split(X)
        start = timer()
        # Do the actual k-fold CV here
        for train_index, test_index in split_indices:
            X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
            y_train, y_test = y.iloc[train_index,:], y.iloc[test_index,:]
            X_test.reset_index(drop=True,inplace=True)
            prim_instance.set_training_data(inputs=X_train, outputs=y_train)
            prim_instance.fit()
            predictions = prim_instance.produce(inputs=X_test).value
            # Some primitives emit multiple columns; the prediction is the last one
            if ('xgboost' in python_path or 'arima' in python_path or 'DeepAR' in python_path) and len(predictions.columns) > 1:
                predictions = predictions.iloc[:,len(predictions.columns)-1]
            if 'iterative_labeling' in python_path:
                # Only score rows that actually carry a label
                labeledIx = np.where(y_test.iloc[:, 0].values != '')[0]
                metric = self.evaluate_metric(predictions.iloc[labeledIx], y_test.iloc[labeledIx], metric_type, posLabel)
            else:
                metric = self.evaluate_metric(predictions, y_test, metric_type, posLabel)
            metric_scores.append(metric)
            metric_sum += metric
        score = metric_sum/splits
        end = timer()
        if 'RPI' not in python_path:
            logging.critical("Time taken for %s = %s secs", python_path, end-start)
        return (score, metric_scores)
|
{"hexsha": "995b240bf4dec3df5eb46bf5d641e386a61b6cd9", "size": 24135, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/primitivedescription.py", "max_stars_repo_name": "tonyjo/cmu-ta2", "max_stars_repo_head_hexsha": "26a45fab27b4ca433029b05272130efcaeb2beaa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/primitivedescription.py", "max_issues_repo_name": "tonyjo/cmu-ta2", "max_issues_repo_head_hexsha": "26a45fab27b4ca433029b05272130efcaeb2beaa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/primitivedescription.py", "max_forks_repo_name": "tonyjo/cmu-ta2", "max_forks_repo_head_hexsha": "26a45fab27b4ca433029b05272130efcaeb2beaa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.7037815126, "max_line_length": 235, "alphanum_fraction": 0.5580277605, "include": true, "reason": "import numpy", "num_tokens": 5136}
|
# Soft-thresholding (shrinkage) operator: shrink `x` towards zero by `ϵ`.
@inline function soft_th(x, ϵ)
    z = zero(x)
    max(x - ϵ, z) + min(x + ϵ, z)
end
# Soft-threshold `x` towards the level `l` instead of towards zero.
@inline function soft_th(x, ϵ, l)
    max(x - ϵ, l) + min(x + ϵ, l) - l
end
# Complex case: shrink the magnitude, keep the phase.
@inline function soft_th(x::Complex, ϵ)
    r = abs(x)
    ϕ = angle(x)
    shrunk = max(r - ϵ, zero(r)) + min(r + ϵ, zero(r))
    shrunk * cis(ϕ)
end
"""
    soft_hankel!(A, ϵ)

Soft-threshold every entry of `A` towards the mean of its antidiagonal:
for each antidiagonal the mean `m` is computed and all entries on it are
shrunk towards `m` by `ϵ` via `soft_th(x, ϵ, m)`. Operates in place and
returns `A`.
"""
function soft_hankel!(A, ϵ)
    K,L = size(A)
    N = K+L-1
    for k = 1:N
        # Index ranges tracing antidiagonal k (pairs satisfy r + c == k + 1);
        # `zip` truncates to the shorter range, so out-of-diagonal indices
        # produced at the ends are never visited.
        ri = min(K,k):-1:max(k-L,1)
        ci = max(1,k-K+1):L
        m = mean(A[r,c] for (r,c) in zip(ri,ci))
        for (r,c) in zip(ri,ci)
            A[r,c] = soft_th(A[r,c], ϵ, m)
        end
    end
    A
end
"""
unhankel(A)
The inverse of [`hankel`](@ref). Create a 1-D signal by antidiagonal averaging
"""
function unhankel(A)
K,L = size(A)
N = L+(K-1)
y = similar(A, N)
for k = 1:N
ri = min(K,k):-1:max(k-L,1)
ci = max(1,k-K+1):L
m = mean(A[r,c] for (r,c) in zip(ri,ci))
y[k] = m
end
y
end
"""
unhankel(A, lag, N, D=1)
The inverse of [`hankel`](@ref). Create a signal of the original dimension by antidiagonal averaging
# Arguments:
- `A`: A Hankel matrix
- `lag`: if lag was used to create `A`, you must provide it to `unhankel`
- `N`: length of the original signal
- `D`: dimension of the original signal
"""
function unhankel(A,lag,N,D=1)
lag == 1 && D == 1 && (return unhankel(A))
K = size(A,1)
L = size(A,2)÷D
y = zeros(eltype(A), N, D)
counts = zeros(Int, N, D)
indmat = CartesianIndex.(1:N, (1:D)')
inds = hankel(indmat, L, lag)
for (Aind,yind) in enumerate(inds)
y[yind] += A[Aind]
counts[yind] += 1
end
y ./= max.(counts, 1)
D == 1 && (return vec(y))
y
end
"""
X = hankel(x,L,lag=1)
Form a hankel "trajectory matrix" `X` of size KxL, K = N-L+1
x can be a vector or a matrix.
"""
function hankel(x,L,lag=1)
N = size(x,1)
D = size(x,2)
@assert L <= N/2 "L has to be less than N/2 = $(N/2)"
@assert lag <= L "lag must be <= L"
K = (N-L)÷lag+1
X = similar(x, K, L*D)
for d in 1:D
inds = 1:L
colinds = d:D:L*D
for k = 1:K
X[k,colinds] = x[inds,d]
inds = inds .+ lag
end
end
X
end
# Return true when `A` is a Hankel matrix, i.e. constant along every antidiagonal.
function ishankel(A)
    K, L = size(A)
    for k = 1:(K + L - 1)
        # Index ranges of antidiagonal k; zip truncates to valid pairs
        rows = min(K, k):-1:max(k-L, 1)
        cols = max(1, k-K+1):L
        reference = A[rows[1], cols[1]]
        for (r, c) in zip(rows, cols)
            A[r, c] == reference || return false
        end
    end
    true
end
"""
yf = lowrankfilter(y, n=min(length(y) ÷ 20, 2000); tol=1e-3, kwargs...)
Filter time series `y` by forming a lag-embedding T (a Toeplitz matrix) and using [`rpca`](@ref) to recover a low-rank matrix from which the a filtered signal `yf` can be extracted. The size of the embedding `n` determines the complexity, higher `n` generally gives better filtering at the cost of roughly cubic complexity.
# Arguments:
- `y`: A signal to be filtered, assumed corrupted with sparse noise
- `n`: Embedding size
- `kwargs`: See [`rpca`](@ref) for keyword arguments.
"""
function lowrankfilter(y, n=min(size(y,1)÷20,2000); sv=0, lag=1, tol=1e-3, svd = svd!, kwargs...) where {F <: Function}
H = hankel(y, n, lag)
if sv <= 0
A,E = rpca(H; tol=tol, svd = svd, kwargs...)
else
s = svd(H)
A = s.U[:,1:sv] * Diagonal(s.S[1:sv]) * s.Vt[1:sv,:]
end
unhankel(A, lag, size(y,1), size(y,2))
end
"""
A,E,s,sv = rpca(D::Matrix; λ=1.0 / √(maximum(size(D))), iters=1000, tol=1.0e-7, ρ=1.5, verbose=false, nonnegA=false, nonnegE=false, nukeA=true)
minimize_{A,E} ||A||_* + λ||E||₁ s.t. D = A+E
`s` is the last calculated svd of `A` and `sv` is the estimated rank.
Ref: "The Augmented Lagrange Multiplier Method for Exact Recovery of Corrupted Low-Rank Matrices", Zhouchen Lin, Minming Chen, Leqin Wu, Yi Ma, https://people.eecs.berkeley.edu/~yima/psfile/Lin09-MP.pdf
Significant inspiration taken from an early implementation by Ryuichi Yamamoto in RobustPCA.jl
# Arguments:
- `D`: Design matrix
- `λ`: Sparsity regularization
- `maxrank`: Upper limit on the rank of the estimated matrix. Setting a smaller value makes convergence faster.
- `iters`: Maximum number of iterations
- `tol`: Tolerance
- `ρ`: Algorithm tuning param
- `verbose`: Print status
- `nonnegA`: Hard thresholding on A
- `nonnegE`: Hard thresholding on E
- `nukeA`: Activate the nuclear penalty on `A`, if `false`, then `A` is not assumed to be low rank.
- `hankel`: Indicating whether or not `D` (and thus `A` and `E`) are Hankel matrices (constant anti diagonals). If this fact is known, the expected performance of this alogorithm goes up. If the matrix `D` is Toeplitz (constant diagonals) you may reverse the second dimension, i.e., `Dᵣ = D[:,end:-1:1]`. `hankel=true` should likely be paired with `nukeA=false`.
To speed up convergence you may either increase the tolerance or increase `ρ`. Increasing `tol` is often the best solution.
"""
function rpca(D::AbstractMatrix{T};
λ = real(T)(1.0/sqrt(maximum(size(D)))),
maxrank = typemax(Int),
iters::Int = 1000,
tol = sqrt(eps(real(T))),
ρ = real(T)(1.5),
verbose::Bool = false,
nonnegA::Bool = false,
nonnegE::Bool = false,
hankel::Bool = false,
# proxE = NormL1(λ),
nukeA = true,
svd::F1 = svd!,
opnorm::F2 = opnorm,
kwargs...) where {F1 <: Function, F2 <: Function, T}
RT = real(T)
M, N = size(D)
d = min(M,N)
A, E = zeros(T, M, N), zeros(T, M, N)
Z = similar(D)
Y = copy(D)
norm² = opnorm(Y)::RT # can be tuned # norm(Y)/minimum(size(D))^(2/5)
norm∞ = norm(Y, Inf) / λ
dual_norm = max(norm², norm∞)
d_norm = norm²
Y ./= dual_norm
μ = RT(1.25 / norm²)
μ̄ = RT(μ * 1.0e+7)
sv = svp = 10
local s
for k = 1:iters
# prox!(E, proxE, D .- A .+ (1/μ) .* Y, 1/μ)
E .= soft_th.(D .- A .+ (1/μ) .* Y, λ/μ)
if nonnegE
E .= max.(E, 0)
end
Z .= D .- E .+ (1/μ) .* Y
if svd ∈ (LinearAlgebra.svd, LinearAlgebra.svd!) || k == 1
s = LinearAlgebra.svd!(Z) # Z assignment just for storage
else
s = svd(Z, sv)
end
svp = sum(x-> x >= (1/μ), s.S)::Int
if svp < sv
sv = svp#min(svp + 1, N) # the paper says to use these formulas but sv=svp works way better
else
sv = svp#min(svp + round(Int, T(0.05) * d), d)
end
sv = clamp(sv, 1, maxrank)
@views if nukeA
# A .= s.U[:,1:svp] * Diagonal(s.S[1:svp] .- 1/μ) * s.Vt[1:svp,:]
mul!(Z[:,1:svp], s.U[:,1:svp], Diagonal(s.S[1:svp] .- 1/μ))
mul!(A, Z[:,1:svp], s.Vt[1:svp,:])
else
# A .= s.U[:,1:svp] * Diagonal(s.S[1:svp]) * s.Vt[1:svp,:]
mul!(Z[:,1:svp], s.U[:,1:svp], Diagonal(s.S[1:svp]))
mul!(A, Z[:,1:svp], s.Vt[1:svp,:])
end
if hankel
soft_hankel!(A, λ/μ)
end
if nonnegA
A .= max.(A, 0)
end
@. Z = D - A - E # Z are the reconstruction errors
@. Y = Y + μ * Z # Y can not be moved below as it depends on μ which is changed below
μ = RT(min(μ*ρ, μ̄))
cost = opnorm(Z) / d_norm
verbose && println("$(k) cost: $(round(cost, sigdigits=4))")
if cost < tol
verbose && println("converged")
break
end
k == iters && @warn "Maximum number of iterations reached, cost: $cost, tol: $tol"
end
if hankel
soft_hankel!(E, λ/μ)
end
A, E, s, sv
end
"""
Q = rpca_ga(X::AbstractMatrix{T}, r=minimum(size(X)), U=similar(X); μ = μ!(s,w,U), verbose=false, kwargs...) where T
"Grassmann Averages for Scalable Robust PCA", Hauberg et al.
http://www2.compute.dtu.dk/~sohau/papers/cvpr2014a/Hauberg_CVPR_2014.pdf
# Arguments:
- `X`: Data matrix
- `r`: Rank (number of components to estimate)
- `U`: Optional pre-allocated buffer
- `verbose`: print status
- `kwargs`: such as `tol=1e-7`, `iters=1000`
- `μ = μ!(s,w,U)` is a function that calculates the spherical average of a all columns in the matrix `U`, weighted by `w` and stores the result in `s`. The default is the standard weighted average. To get a robust estimate, consider using a robust average, such as `entrywise_trimmed_mean` or `entrywise_median` etc.
"""
function rpca_ga(X::AbstractMatrix{T}, r=minimum(size(X)), U = similar(X); verbose = false, kwargs...) where T
d,N = size(X)
X = copy(X)
Xs1 = similar(X,1,N)
Xs2 = similar(X)
Q = zeros(T, d, r)
w = zeros(T,N)
Xnorms = zeros(T,N)
for i = 1:r
@inbounds @views for n = 1:N
Xnorms[n] = sqrt(sum(abs2,X[:,n]))
U[:,n] .= X[:,n] ./ Xnorms[n]
end
q = rpca_ga_1(Xnorms, U, w; verbose = verbose, kwargs...)
Q[:,i] .= q
@static if VERSION >= v"1.3"
mul!(Xs1, q', X)
mul!(X, q,Xs1, -1, 1)
else
X .-= q*(q'X)
end
end
Q
end
"""
Find the first principal component. This is an internal function used by `rpca_ga`.
"""
function rpca_ga_1(Xnorms,U::AbstractMatrix{T},w; tol=1e-7, iters=1000, verbose=false, μ=μ!) where {T}
d,N = size(U)
q = randn(d)
q ./= norm(q)
qold = copy(q)
for i = 1:iters
@inbounds @views for n = eachindex(w,Xnorms)
w[n] = sign(U[:,n]'q)*Xnorms[n]
end
μᵢ = μ(q,w,U)
q .= μᵢ./norm(μᵢ)
dq = sqrt(sum(((x,y),)->abs2(x-y), zip(q,qold)))
verbose && @info "Change at iteration $i: $dq"
if dq < tol
verbose && @info "Converged after $i iterations"
break
end
qold .= q
i == iters && @warn "Reached maximum number of iterations"
end
q
end
# Weighted average of the columns of `U` with weights `w`, written into `s`.
function μ!(s,w,U)
    s .= 0
    total = zero(eltype(w))
    @views @inbounds @simd for j = 1:size(U,2)
        total += w[j]
        s .+= w[j] .* U[:,j]
    end
    s ./= total
end
"""
entrywise_trimmed_mean(s, w, U, P=0.1)
Remove `P` percent of the data on each side before computing the weighted mean.
"""
function entrywise_trimmed_mean(s,w,U, P=0.1)
N = size(U,2)
range = (1+floor(Int, P*N)):floor(Int, (1-P)*N)
s .= 0
@views for j = 1:size(U,1)
I = sortperm(U[j,:])[range]
s[j] += w[I]'U[j,I] / sum(w[I])
# s[j] += sum(U[j,I])/length(I)
end
s
end
#
#
# function entrywise_trimmed_mean(s,w,U, P=0.05)
# d,N = size(U)
# range = (1+floor(Int, P*d)):floor(Int, (1-P)*d)
# s .= 0
# ws = zeros(size(s))
# @views for j = 1:size(U,2)
# I = sortperm(U[:,j])[range]
# s[I] .+= w[j] .* U[I,j]
# ws[I] .+= w[j]
# end
# s./ws
# end
# Signed entrywise median of the weighted columns of `U`, written into `s`.
function entrywise_median(s,w,U)
    fill!(s, 0)
    for row = 1:size(U,1)
        order = sortperm(w .* U[row,:])
        mid = order[end÷2]
        s[row] = sign(w[mid]) * U[row, mid]
    end
    s
end
|
{"hexsha": "36af1530250255eaeea5841585d2294daedbc174", "size": 11360, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/robustPCA.jl", "max_stars_repo_name": "baggepinnen/TotalLeastSquares.jl", "max_stars_repo_head_hexsha": "297b9a8277c01358f54889944f3dcc711c0fe535", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2019-04-11T19:03:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T00:56:52.000Z", "max_issues_repo_path": "src/robustPCA.jl", "max_issues_repo_name": "baggepinnen/TotalLeastSquares.jl", "max_issues_repo_head_hexsha": "297b9a8277c01358f54889944f3dcc711c0fe535", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-04-17T09:24:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T13:25:27.000Z", "max_forks_repo_path": "src/robustPCA.jl", "max_forks_repo_name": "baggepinnen/TotalLeastSquares.jl", "max_forks_repo_head_hexsha": "297b9a8277c01358f54889944f3dcc711c0fe535", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:58:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-05T15:55:16.000Z", "avg_line_length": 31.7318435754, "max_line_length": 362, "alphanum_fraction": 0.5215669014, "num_tokens": 3841}
|
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from munch import Munch
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import numpy as np
import torch
import os
# setup plot details
# Matplotlib linestyle specs keyed by descriptive name; values are either
# a named style string or an (offset, on-off-dash-sequence) tuple.
# NOTE: the previous dict literal listed 'dotted' and 'dashed' twice, so the
# earlier string entries were silently shadowed by the later dash tuples;
# the shadowed duplicates are removed here (lookups are unchanged).
linestyle_tuple = {
    'solid': 'solid',                          # Same as (0, ()) or '-'
    'dashdot': 'dashdot',                      # Same as '-.'
    'dotted': (0, (1, 1)),
    'densely dotted': (0, (1, 1)),
    'dashed': (0, (5, 5)),
    'densely dashed': (0, (5, 1)),
    'dashdotted': (0, (3, 5, 1, 5)),
    'densely dashdotted': (0, (3, 1, 1, 1)),
    'dashdotdotted': (0, (3, 5, 1, 5, 1, 5)),
}
colors = ['navy', 'darkorange', 'turquoise', 'red', 'cornflowerblue', 'teal', 'orchid']
# marks = cycle(['+', 'x', 'v', 's', 'D', 'o'])
linestyles = ['dotted', 'dashdot', 'dashed', 'densely dotted', 'densely dashed', 'dashdotted', 'densely dashdotted']
marks = ['+', 'x', '1', '2', '3', '4', '*']
def compute_f1(dict_labels, classes):
    """Compute per-class PR curves, average precision and best F1 per attribute.

    Args:
        dict_labels: mapping with keys 'real' and 'fake'; each maps an
            attribute name to a list of torch tensors with one column per
            class (ground-truth labels and predicted scores respectively).
        classes: sequence of class names, one per tensor column.

    Returns:
        Munch with keys 'P', 'R', 'ap', 'f1', each mapping
        attribute -> {class_name: value}; 'P'/'R' hold the precision/recall
        curve arrays, 'ap'/'f1' the scalar scores.
    """
    real = dict_labels['real']
    fake = dict_labels['fake']
    attrs = real.keys()
    result = Munch(P=Munch(), R=Munch(), ap=Munch(), f1=Munch())
    for key in attrs:
        precision = dict()
        recall = dict()
        average_precision = dict()
        f1 = dict()
        _real = torch.cat(real[key], dim=0).cpu().numpy()
        _fake = torch.cat(fake[key], dim=0).cpu().numpy()
        for i in range(_real.shape[1]):
            _precision, _recall, _ = precision_recall_curve(_real[:, i], _fake[:, i])
            _average_precision = average_precision_score(_real[:, i], _fake[:, i])
            # Best F1 along the PR curve. Guard the 0/0 case
            # (precision == recall == 0), which previously produced NaN and
            # poisoned the max(); such points now count as F1 = 0.
            with np.errstate(divide='ignore', invalid='ignore'):
                _f1_curve = 2 * _precision * _recall / (_precision + _recall)
            _f1 = np.nan_to_num(_f1_curve).max()
            precision[classes[i]] = _precision
            recall[classes[i]] = _recall
            average_precision[classes[i]] = _average_precision
            f1[classes[i]] = _f1
        result['P'][key] = precision
        result['R'][key] = recall
        result['ap'][key] = average_precision
        result['f1'][key] = f1
    return result
def plot_PR(data_munch, folder_to_save, classes, attr, mask=False):
    """Plot per-class precision-recall curves for one attribute and save a PNG.

    Args:
        data_munch: Munch/dict with keys 'P', 'R', 'ap', 'f1' for this
            attribute, each mapping class name -> array / scalar
            (see compute_f1).
        folder_to_save: output directory for the figure.
        classes: class names to plot, zipped with colors/linestyles/marks.
        attr: attribute name; a 'NOT_' prefix marks a removal manipulation.
        mask: if True, append '_mask' to the output filename.
    """
    plt.figure(figsize=(7, 8))
    f_scores = np.linspace(0.2, 0.8, num=4)
    lines = []
    labels = []
    # Gray iso-F1 reference curves: p = f*r / (2r - f), plotted where p >= 0.
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        y = f_score * x / (2 * x - f_score)
        l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
        plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
    lines.append(l)
    labels.append('iso-f1 curves')
    for i, color, linestyle, marker in zip(classes, colors, linestyles, marks):
        kwargs = {'linestyle': linestyle_tuple[linestyle], 'marker': marker}
        kwargs['markeredgewidth'] = .3
        # Spread roughly 10 markers along each curve regardless of its length.
        kwargs['markevery'] = data_munch['P'][i].shape[0]//10 + 1
        l, = plt.plot(data_munch['R'][i], data_munch['P'][i], color=color, **kwargs, lw=1)
        lines.append(l)
        labels.append('PR curve for class {} (AP = {:0.2f} - F1 max = {:0.2f})'
                      ''.format(i, data_munch['ap'][i], data_munch['f1'][i]))
    fig = plt.gcf()
    fig.subplots_adjust(bottom=0.25)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    manipulation = 'removing' if 'NOT_' in attr else 'adding'
    attr_title = attr.replace('NOT_', '')
    plt.title(f'Precision-Recall curve for {manipulation} {attr_title} manipulation.')
    plt.legend(lines, labels, loc=(0.135, -.375), prop=dict(size=9), framealpha=0.8)
    _mask = '_mask' if mask else ''
    plt.savefig(os.path.join(folder_to_save, f'PRcurve_{attr}{_mask}.png'), dpi=500)
    # BUG FIX: release the figure; without this, repeated calls accumulate
    # open figures and leak memory.
    plt.close()
|
{"hexsha": "9a50f9958055ef31b0dfd4bdfd37881681fdd3ed", "size": 4239, "ext": "py", "lang": "Python", "max_stars_repo_path": "metrics/f1_score.py", "max_stars_repo_name": "affromero/SMILE", "max_stars_repo_head_hexsha": "931510d69b2e33f2fe633563833c50a7408f89ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-10-07T15:44:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T10:08:22.000Z", "max_issues_repo_path": "metrics/f1_score.py", "max_issues_repo_name": "affromero/SMILE", "max_issues_repo_head_hexsha": "931510d69b2e33f2fe633563833c50a7408f89ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-11T03:19:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T03:53:59.000Z", "max_forks_repo_path": "metrics/f1_score.py", "max_forks_repo_name": "affromero/SMILE", "max_forks_repo_head_hexsha": "931510d69b2e33f2fe633563833c50a7408f89ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-03T11:15:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T11:15:52.000Z", "avg_line_length": 40.7596153846, "max_line_length": 116, "alphanum_fraction": 0.5713611701, "include": true, "reason": "import numpy", "num_tokens": 1330}
|
import pandas as pd
import numpy as np
import os
from google.cloud import language_v1
from google.cloud import translate_v2 as translate
from apiclient import discovery
from google.oauth2 import service_account
import preprocessor as tp
from time import sleep
from requests.exceptions import ReadTimeout, ConnectionError
from country_list import countries_for_language
countries = dict(countries_for_language('en'))
from fuzzywuzzy import fuzz
from tqdm import tqdm
tqdm.pandas()
# get Google API credentials
# Google Cloud NL API document settings (plain text input, UTF-8 offsets).
TYPE_ = language_v1.Document.Type.PLAIN_TEXT
ENCODING_ = language_v1.EncodingType.UTF8
# Authenticate both the Translate and Natural Language clients with the
# service-account key file (must exist in the working directory).
credentials = service_account.Credentials.from_service_account_file('ifrc-ns-covid-service-account.json')
translate_client = translate.Client(credentials=credentials)
nlp_client = language_v1.LanguageServiceClient(credentials=credentials)
# demonym -> country lookup built from demonyms.csv (two columns, no header).
df_demonyms = pd.read_csv('demonyms.csv', names=['demonym', 'country'])
demonym_dict = pd.Series(df_demonyms.country.values,index=df_demonyms.demonym).to_dict()
# lower-cased country name -> FDRS national-society code, from the 2019 sheet.
df_fdrs = pd.read_excel('donor_receiver_2019.xlsx', sheet_name='country_codes_2019', index_col=0)
df_fdrs.country = df_fdrs.country.str.lower()
fdrs_dict = pd.Series(df_fdrs.KPI_DON_Code.values,index=df_fdrs.country).to_dict()
def translate_to_english(row):
    """Translate a row's PNS_action_summary to English via Google Translate.

    Retries up to two more times after a 60 s pause on transient network
    errors; the last error is re-raised if all attempts fail.

    BUG FIX: the original used ``except ReadTimeout or ConnectionError``,
    which evaluates the ``or`` expression to ``ReadTimeout`` -- so
    ConnectionError was never actually caught. An exception tuple is the
    correct form.
    """
    text = row['PNS_action_summary']
    last_error = None
    for attempt in range(3):
        try:
            result = translate_client.translate(text, target_language="en")
            return result["translatedText"]
        except (ReadTimeout, ConnectionError) as err:
            last_error = err
            if attempt < 2:
                sleep(60)  # back off before retrying, as in the original
    raise last_error
def preprocess_text(row):
    """Lower-case the English summary and strip Red Cross society names.

    Removes the society names in several languages so that downstream
    location extraction is not confused by them.
    """
    # Order matters: substitutions are applied exactly as in sequence below.
    substitutions = (
        (' rc', ''),
        ('red cross', ''),
        ('cruz roja', ''),
        ('croix rouge', ''),
        ('croix-rouge', ''),
        (' cr ', ' '),
    )
    cleaned = row['PNS_action_summary_en'].lower()
    for needle, replacement in substitutions:
        cleaned = cleaned.replace(needle, replacement)
    return cleaned
def analyze_entities(row):
    """Return all LOCATION entity mention strings found in the cleaned summary.

    Sends the cleaned English text to the Google Cloud Natural Language API
    (plain text, English, UTF-8 offsets) and collects the surface string of
    every mention of every LOCATION entity.
    """
    # Available types: PLAIN_TEXT, HTML; language list:
    # https://cloud.google.com/natural-language/docs/languages
    document = {
        "content": row['PNS_action_summary_en_clean'],
        "type_": language_v1.Document.Type.PLAIN_TEXT,
        "language": "en",
    }
    # Available encodings: NONE, UTF8, UTF16, UTF32.
    response = nlp_client.analyze_entities(
        request={'document': document, 'encoding_type': language_v1.EncodingType.UTF8})
    return [
        mention.text.content
        for entity in response.entities
        if language_v1.Entity.Type(entity.type_).name == "LOCATION"
        for mention in entity.mentions
    ]
def match_countries(row):
    """Fuzzy-match extracted location strings to canonical country names.

    Each unique location entity is compared (fuzzy ratio > 90) against both
    demonyms (e.g. 'Kenyan') and country names; demonym hits resolve to their
    country. Returns the de-duplicated list of matched countries.
    """
    matched = []
    for entity in set(row['PNS_location_entities']):
        entity_lower = entity.lower()
        # Demonym matches map to the corresponding country name.
        for demonym, country in demonym_dict.items():
            if fuzz.ratio(entity_lower, demonym.lower()) > 90:
                matched.append(country)
        # Direct (approximate) country-name matches.
        for country in demonym_dict.values():
            if fuzz.ratio(entity_lower, country.lower()) > 90:
                matched.append(country)
    return list(set(matched))
def extract_fdrs(row):
    """Map matched country names to FDRS national-society codes.

    Countries missing from the FDRS lookup are reported to stdout and
    skipped. (Local renamed from `countries` to avoid shadowing the
    module-level `countries` dict.)
    """
    codes = []
    for country in row['PNS_countries']:
        key = country.lower()
        if key in fdrs_dict.keys():
            codes.append(fdrs_dict[key])
        else:
            print(f"{country} NOT FOUND")
    return codes
# --- Pipeline: load field reports, enrich them with partner-NS countries,
# --- and build a symmetric NS-to-NS adjacency matrix.
# BUG FIX: renamed `dir` -> `data_dir` (the original shadowed the builtin).
data_dir = 'data/field_reports_baseline'
data_file = 'field_reports.csv'
df_field_reports = pd.read_csv(f"{data_dir}/{data_file}")
print(df_field_reports.head())
# Raw copy keeps only rows with explicit reporter/target FDRS codes (used for
# the baseline adjacency); the parsed frame needs a non-empty action summary.
df_field_reports_raw = df_field_reports.copy()
df_field_reports_raw = df_field_reports_raw.dropna(subset=['fdrs_report', 'fdrs_target'])
df_field_reports = df_field_reports.dropna(subset=['PNS_action_summary'])
# Enrichment chain: translate -> clean -> NER locations -> countries -> FDRS.
print('translating to english')
df_field_reports['PNS_action_summary_en'] = df_field_reports.progress_apply(translate_to_english, axis=1)
df_field_reports['PNS_action_summary_en_clean'] = df_field_reports.progress_apply(preprocess_text, axis=1)
df_field_reports['PNS_location_entities'] = df_field_reports.progress_apply(analyze_entities, axis=1)
df_field_reports['PNS_countries'] = df_field_reports.progress_apply(match_countries, axis=1)
df_field_reports['PNS_FDRS'] = df_field_reports.progress_apply(extract_fdrs, axis=1)
print(df_field_reports.head())
df_field_reports.to_csv(f"{data_dir}/field_reports_parsed.csv", index=False)
# Symmetric 0/1 adjacency between national societies: an edge is set whenever
# one NS reports about, or is mentioned in a report of, another.
df_adjacency = pd.DataFrame(0, index=fdrs_dict.values(), columns=fdrs_dict.values())
for ix, row in df_field_reports_raw.iterrows():
    if row['fdrs_report'] != row['fdrs_target']:
        df_adjacency.at[row['fdrs_report'], row['fdrs_target']] = 1
        df_adjacency.at[row['fdrs_target'], row['fdrs_report']] = 1
for ix, row in df_field_reports.iterrows():
    # Iterating an empty PNS_FDRS list is a no-op, so no explicit guard needed.
    for fdrs in row['PNS_FDRS']:
        if row['fdrs_report'] != fdrs:
            df_adjacency.at[row['fdrs_report'], fdrs] = 1
            df_adjacency.at[fdrs, row['fdrs_report']] = 1
df_adjacency = df_adjacency.fillna(0)
df_adjacency.to_csv(f"{data_dir}/adjacency_matrix_field_reports.csv")
|
{"hexsha": "0f115df22e371315a78ce0988f22d5fe91926388", "size": 5562, "ext": "py", "lang": "Python", "max_stars_repo_path": "analyze_field_reports.py", "max_stars_repo_name": "rodekruis/ifrc-ns-solidarity", "max_stars_repo_head_hexsha": "ef80170440e667c376e57c7d2687bdaa4cef347e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-06T14:45:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T15:03:59.000Z", "max_issues_repo_path": "analyze_field_reports.py", "max_issues_repo_name": "rodekruis/ifrc-ns-solidarity", "max_issues_repo_head_hexsha": "ef80170440e667c376e57c7d2687bdaa4cef347e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analyze_field_reports.py", "max_forks_repo_name": "rodekruis/ifrc-ns-solidarity", "max_forks_repo_head_hexsha": "ef80170440e667c376e57c7d2687bdaa4cef347e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7285714286, "max_line_length": 108, "alphanum_fraction": 0.7303128371, "include": true, "reason": "import numpy", "num_tokens": 1321}
|
import numpy as np
from regression.api.cost import computeCost
def gradient_descent(X, y, theta, alpha, iterations):
    """Run batch gradient descent for linear regression.

    Args:
        X: design matrix (one row per sample).
        y: target values.
        theta: initial parameter vector.
        alpha: learning rate.
        iterations: number of update steps.

    Returns:
        [history, theta] where history is an (iterations, 2) array of
        (step index, cost after that step) and theta the final parameters.
    """
    m = len(X)
    history = np.zeros((iterations, 2))
    for step in range(iterations):
        # Residuals of the current linear model.
        residual = np.matmul(X, theta) - y
        # Per-parameter gradient sum, scaled by the learning rate over m.
        grad_sum = np.sum(np.multiply(residual, X), axis=0)
        delta = np.multiply(grad_sum, alpha*1/m)
        theta = (theta.T - delta).T
        history[step, 0] = step
        history[step, 1] = computeCost(X, y, theta)
    return [history, theta]
|
{"hexsha": "196b1f08650e9ca88c6dd5bb32eb3565641de8f1", "size": 469, "ext": "py", "lang": "Python", "max_stars_repo_path": "regression/api/gradient.py", "max_stars_repo_name": "vickykatoch/py-ml-scratchpad", "max_stars_repo_head_hexsha": "e4de515b657971f7889d73e09a1308f91856c09f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "regression/api/gradient.py", "max_issues_repo_name": "vickykatoch/py-ml-scratchpad", "max_issues_repo_head_hexsha": "e4de515b657971f7889d73e09a1308f91856c09f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "regression/api/gradient.py", "max_forks_repo_name": "vickykatoch/py-ml-scratchpad", "max_forks_repo_head_hexsha": "e4de515b657971f7889d73e09a1308f91856c09f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3125, "max_line_length": 54, "alphanum_fraction": 0.5586353945, "include": true, "reason": "import numpy", "num_tokens": 137}
|
import os
import numpy as np
import pandas as pd
import xarray as xr
import pickle as pkl
from datetime import datetime
from scipy import ndimage as ndi
import SimpleITK as sitk
import skimage as skim
from skimage import feature, morphology
import glob
class RegHearts:
    '''Register one subject's cardiac CT onto another with SimpleITK/Elastix.

    Loads a fixed and a moving subject's CT + mask NIfTI volumes for one
    time slice, runs an affine + b-spline Elastix registration between the
    CTs, and applies the resulting transform to the moving subject's mask.

    NOTE(review): the original docstring said "generates liver masks for MRE
    input images" -- that appears to be copy-pasted from a sibling class.
    '''
    def __init__(self, fixed_subj, moving_subj, tslice=0, verbose=False):
        # fixed_subj / moving_subj: directories containing
        # CT_tslice_{t}.nii and mask_tslice_{t}.nii for the chosen time slice.
        self.verbose = verbose
        self.fixed_subj = fixed_subj
        self.moving_subj = moving_subj
        self.tslice = tslice
        self.load_niftis()
    def load_niftis(self):
        """Load fixed/moving CT and mask volumes for self.tslice as sitk images."""
        fixed_ct_name = os.path.join(self.fixed_subj, f'CT_tslice_{self.tslice}.nii')
        fixed_mask_name = os.path.join(self.fixed_subj, f'mask_tslice_{self.tslice}.nii')
        moving_ct_name = os.path.join(self.moving_subj, f'CT_tslice_{self.tslice}.nii')
        moving_mask_name = os.path.join(self.moving_subj, f'mask_tslice_{self.tslice}.nii')
        self.fixed_ct = self.get_sitk_image(fixed_ct_name)
        self.fixed_mask = self.get_sitk_image(fixed_mask_name)
        self.moving_ct = self.get_sitk_image(moving_ct_name)
        self.moving_mask = self.get_sitk_image(moving_mask_name)
    def get_sitk_image(self, nifti_name):
        """Read a NIfTI file and return it cast to a float32 sitk image."""
        reader = sitk.ImageFileReader()
        reader.SetImageIO("NiftiImageIO")
        reader.SetFileName(nifti_name)
        img = reader.Execute()
        size = img.GetSize()
        dims = img.GetSpacing()
        orig = img.GetOrigin()
        if self.verbose:
            print(f"Image info for {nifti_name}:")
            print("Image size:", size[0], size[1], size[2])
            print("Image dims:", dims[0], dims[1], dims[2])
            print("Image orig:", orig[0], orig[1], orig[2])
        # Elastix registration expects floating-point pixel data.
        caster = sitk.CastImageFilter()
        caster.SetOutputPixelType(sitk.sitkFloat32)
        return caster.Execute(img)
    def gen_param_map(self):
        """Build the two-stage (affine then b-spline) Elastix parameter vector.

        The commented-out alternatives are tuning experiments left by the
        author; the active values below are the ones in use.
        """
        self.p_map_vector = sitk.VectorOfParameterMap()
        paff = sitk.GetDefaultParameterMap("affine")
        pbsp = sitk.GetDefaultParameterMap("bspline")
        # Initialize the affine stage by aligning geometrical centers.
        paff['AutomaticTransformInitialization'] = ['true']
        paff['AutomaticTransformInitializationMethod'] = ['GeometricalCenter']
        paff['NumberOfSamplesForExactGradient'] = ['100000']
        pbsp['NumberOfSamplesForExactGradient'] = ['100000']
        # paff['MaximumNumberOfSamplingAttempts'] = ['2']
        # pbsp['MaximumNumberOfSamplingAttempts'] = ['2']
        paff['NumberOfSpatialSamples'] = ['5000']
        pbsp['NumberOfSpatialSamples'] = ['5000']
        # One histogram-bin count per resolution level.
        paff['NumberOfHistogramBins'] = ['32', '32', '64', '128']
        pbsp['NumberOfHistogramBins'] = ['32', '32', '64', '128']
        paff['MaximumNumberOfIterations'] = ['1024']
        pbsp['MaximumNumberOfIterations'] = ['1024']
        # paff['NumberOfResolutions'] = ['4']
        # pbsp['NumberOfResolutions'] = ['4']
        # Coarse-to-fine grid spacing schedule across resolution levels.
        paff['GridSpacingSchedule'] = ['6', '4', '2', '1.000000']
        pbsp['GridSpacingSchedule'] = ['6', '4', '2', '1.000000']
        # pbsp['FinalGridSpacingInPhysicalUnits'] = ['40', '40', '40']
        pbsp['FinalGridSpacingInPhysicalUnits'] = ['32', '32', '32']
        # pbsp['Metric0Weight'] = ['0.01']
        # pbsp['Metric1Weight'] = ['0.1']
        # paff['FixedImagePyramid'] = ['FixedShrinkingImagePyramid']
        # pbsp['FixedImagePyramid'] = ['FixedShrinkingImagePyramid']
        # attempting to use multiple fixed images at once
        # paff['Registration'] = ['MultiMetricMultiResolutionRegistration']
        # paff['FixedImagePyramid'] = ['FixedSmoothingImagePyramid', 'FixedSmoothingImagePyramid']
        # paff['ImageSampler'] = ['RandomCoordinate', 'RandomCoordinate']
        # paff['Metric'] = ['AdvancedMattesMutualInformation', 'AdvancedMattesMutualInformation']
        # pbsp['Metric'] = ['AdvancedMattesMutualInformation', 'TransformBendingEnergyPenalty',
        #                   'AdvancedMattesMutualInformation', 'TransformBendingEnergyPenalty']
        # pbsp['FixedImagePyramid'] = ['FixedSmoothingImagePyramid', 'FixedSmoothingImagePyramid']
        # pbsp['ImageSampler'] = ['RandomCoordinate', 'RandomCoordinate']
        #                         'RandomCoordinate', 'RandomCoordinate']
        self.p_map_vector.append(paff)
        self.p_map_vector.append(pbsp)
        if self.verbose:
            sitk.PrintParameterMap(self.p_map_vector)
    def register_imgs(self):
        """Run Elastix: register moving CT onto fixed CT with the param maps.

        Requires gen_param_map() to have been called first.
        """
        self.elastixImageFilter = sitk.ElastixImageFilter()
        self.elastixImageFilter.SetFixedImage(self.fixed_ct)
        self.elastixImageFilter.SetMovingImage(self.moving_ct)
        self.elastixImageFilter.SetParameterMap(self.p_map_vector)
        self.elastixImageFilter.Execute()
        self.moving_ct_result = self.elastixImageFilter.GetResultImage()
        # Align the result's metadata (origin/spacing/direction) to the fixed CT.
        self.moving_ct_result.CopyInformation(self.fixed_ct)
    def gen_mask(self, smooth=False):
        """Apply the CT registration transform to the moving mask.

        Requires register_imgs() to have been called first. With smooth=True
        the warped mask is re-binarized (any positive value -> 1) to remove
        interpolation artifacts, then cast back to float32.
        """
        transformixImageFilter = sitk.TransformixImageFilter()
        transformixImageFilter.SetTransformParameterMap(
            self.elastixImageFilter.GetTransformParameterMap())
        transformixImageFilter.SetMovingImage(self.moving_mask)
        transformixImageFilter.Execute()
        self.moving_mask_result = transformixImageFilter.GetResultImage()
        if smooth:
            tmp_img = sitk.GetArrayFromImage(self.moving_mask_result)
            tmp_img = np.where((tmp_img > 0), 1, 0)
            self.moving_mask_result = sitk.GetImageFromArray(tmp_img)
            self.moving_mask_result.CopyInformation(self.fixed_ct)
            self.moving_mask_result = sitk.Cast(self.moving_mask_result, sitk.sitkFloat32)
    def recenter_img_z(self, sitk_img, offset=False):
        """Re-center the image origin along z, in place (mutates sitk_img)."""
        spacing = sitk_img.GetSpacing()[2]
        layers = sitk_img.GetSize()[2]
        orig = sitk_img.GetOrigin()
        if not offset:
            # Center the volume on z = 0.
            sitk_img.SetOrigin([orig[0], orig[1], spacing*(-layers/2)])
        else:
            # Shifted variant: places two thirds of the volume above z = 0.
            sitk_img.SetOrigin([orig[0], orig[1], spacing*(-layers/1.5)])
def add_liver_mask(ds, moving_name='19', extra_name='extra1'):
    '''Generate a mask from the liver registration method, and place it into the given "extra" slot.
    Assumes you are using an xarray dataset from the MREDataset class.

    NOTE(review): `tqdm` and `MRELiverMask` are not defined or imported in
    this module (only RegHearts is defined here), so this function raises
    NameError as written -- it appears to be copied from a liver-registration
    module; confirm the intended imports before use.
    '''
    for sub in tqdm(ds.subject):
        # Register the moving subject onto each dataset subject and warp its mask.
        mask_maker = MRELiverMask(str(sub.values), moving_name, verbose=False, center=True,
                                  fixed_seq='T1Pre', moving_seq='T1_inphase')
        mask_maker.gen_param_map()
        mask_maker.register_imgs()
        mask_maker.gen_mask(smooth=True)
        # Binarize the warped mask (any value >= 1 -> 1) before storing it.
        mask = sitk.GetArrayFromImage(mask_maker.moving_mask_result)
        mask = np.where(mask >= 1, 1, 0)
        ds['image'].loc[dict(sequence=extra_name, subject=sub)] = mask
    # Rename the 'extra' sequence slot to 'liverMsk' in the coordinates.
    new_sequence = [a.replace(extra_name, 'liverMsk') for a in ds.sequence.values]
    ds = ds.assign_coords(sequence=new_sequence)
    return ds
|
{"hexsha": "f105b721afb9f845c4e320b2b60eb5e5d9422cbd", "size": 6838, "ext": "py", "lang": "Python", "max_stars_repo_path": "code4step2/data_registration.py", "max_stars_repo_name": "yukeyi/MCDS-Capstone", "max_stars_repo_head_hexsha": "f7ce48fc5d3f5f96c1f29556585ed2338683c7d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code4step2/data_registration.py", "max_issues_repo_name": "yukeyi/MCDS-Capstone", "max_issues_repo_head_hexsha": "f7ce48fc5d3f5f96c1f29556585ed2338683c7d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code4step2/data_registration.py", "max_forks_repo_name": "yukeyi/MCDS-Capstone", "max_forks_repo_head_hexsha": "f7ce48fc5d3f5f96c1f29556585ed2338683c7d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5866666667, "max_line_length": 100, "alphanum_fraction": 0.6651067564, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1722}
|
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier as CART
from sklearn.utils import shuffle
import numpy as np
import sys
import re
class MachineLearningLib(object):
@staticmethod
def svm(X, y, params):
clf = svm.SVC(n_jobs=params['n_jobs'] if 'n_jobs' in params else 1)
clf.fit(X, y)
return clf
@staticmethod
def infer(clf, X, y_true = None):
y = clf.predict(X)
if y_true is None:
return y
else:
return y, clf.score(X, y_true)
@staticmethod
def logistic(X, y, params):
clf = LogisticRegression(multi_class = 'multinomial', solver = 'newton-cg', max_iter = 10000, n_jobs=params['n_jobs'] if 'n_jobs' in params else 1)
clf.fit(X, y)
return clf
@staticmethod
def cart(X, y, params):
clf = CART(n_jobs=params['n_jobs'] if 'n_jobs' in params else 1)
clf.fit(X, y)
return clf
|
{"hexsha": "41c3acfe2b6a4d94fd0ce79c3884a955ea4d046e", "size": 1002, "ext": "py", "lang": "Python", "max_stars_repo_path": "DynamicNetworkEmbedding-master/src/utils/lib_ml.py", "max_stars_repo_name": "yanzihan1/Use-Dynamic-network-embedding-for-Social-Network-Aligment-", "max_stars_repo_head_hexsha": "9bec908d25fbe4c078b2f16e28530ef55f866e10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-06-02T10:41:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:18:03.000Z", "max_issues_repo_path": "DynamicNetworkEmbedding-master/src/utils/lib_ml.py", "max_issues_repo_name": "yanzihan1/Use-Dynamic-network-embedding-for-Social-Network-Aligment-", "max_issues_repo_head_hexsha": "9bec908d25fbe4c078b2f16e28530ef55f866e10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DynamicNetworkEmbedding-master/src/utils/lib_ml.py", "max_forks_repo_name": "yanzihan1/Use-Dynamic-network-embedding-for-Social-Network-Aligment-", "max_forks_repo_head_hexsha": "9bec908d25fbe4c078b2f16e28530ef55f866e10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-16T07:43:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-16T07:43:24.000Z", "avg_line_length": 27.8333333333, "max_line_length": 155, "alphanum_fraction": 0.6357285429, "include": true, "reason": "import numpy", "num_tokens": 260}
|
[STATEMENT]
lemma Invariants_sets: "sets Invariants = {A \<in> sets M. T-`A \<inter> space M = A}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sets Invariants = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. sets Invariants = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
have "sigma_algebra (space M) {A \<in> sets M. T-`A \<inter> space M = A}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
define I where "I = {A. T-`A \<inter> space M = A}"
[PROOF STATE]
proof (state)
this:
I = {A. T -` A \<inter> space M = A}
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
have i: "\<And>A. A \<in> I \<Longrightarrow> A \<subseteq> space M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>A. A \<in> I \<Longrightarrow> A \<subseteq> space M
[PROOF STEP]
unfolding I_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>A. A \<in> {A. T -` A \<inter> space M = A} \<Longrightarrow> A \<subseteq> space M
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?A3 \<in> I \<Longrightarrow> ?A3 \<subseteq> space M
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
have "algebra (space M) I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. algebra (space M) I
[PROOF STEP]
proof (subst algebra_iff_Un)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
have a: "I \<subseteq> Pow (space M)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I \<subseteq> Pow (space M)
[PROOF STEP]
using i
[PROOF STATE]
proof (prove)
using this:
?A3 \<in> I \<Longrightarrow> ?A3 \<subseteq> space M
goal (1 subgoal):
1. I \<subseteq> Pow (space M)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
I \<subseteq> Pow (space M)
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
have b: "{} \<in> I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {} \<in> I
[PROOF STEP]
unfolding I_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {} \<in> {A. T -` A \<inter> space M = A}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
{} \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
{} \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
fix A
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
assume *: "A \<in> I"
[PROOF STATE]
proof (state)
this:
A \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
A \<in> I
[PROOF STEP]
have "T-`(space M - A) = T-`(space M) - T-`A"
[PROOF STATE]
proof (prove)
using this:
A \<in> I
goal (1 subgoal):
1. T -` (space M - A) = T -` space M - T -` A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
T -` (space M - A) = T -` space M - T -` A
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
T -` (space M - A) = T -` space M - T -` A
[PROOF STEP]
have "T-`(space M - A) \<inter> space M = T-`(space M) \<inter> (space M) - T-`A \<inter> (space M)"
[PROOF STATE]
proof (prove)
using this:
T -` (space M - A) = T -` space M - T -` A
goal (1 subgoal):
1. T -` (space M - A) \<inter> space M = T -` space M \<inter> space M - T -` A \<inter> space M
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
T -` (space M - A) \<inter> space M = T -` space M \<inter> space M - T -` A \<inter> space M
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
T -` (space M - A) \<inter> space M = T -` space M \<inter> space M - T -` A \<inter> space M
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
have "... = space M - A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T -` space M \<inter> space M - T -` A \<inter> space M = space M - A
[PROOF STEP]
using * I_def
[PROOF STATE]
proof (prove)
using this:
A \<in> I
I = {A. T -` A \<inter> space M = A}
goal (1 subgoal):
1. T -` space M \<inter> space M - T -` A \<inter> space M = space M - A
[PROOF STEP]
by (simp add: inf_absorb2 subsetI)
[PROOF STATE]
proof (state)
this:
T -` space M \<inter> space M - T -` A \<inter> space M = space M - A
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
T -` (space M - A) \<inter> space M = space M - A
[PROOF STEP]
have "space M - A \<in> I"
[PROOF STATE]
proof (prove)
using this:
T -` (space M - A) \<inter> space M = space M - A
goal (1 subgoal):
1. space M - A \<in> I
[PROOF STEP]
unfolding I_def
[PROOF STATE]
proof (prove)
using this:
T -` (space M - A) \<inter> space M = space M - A
goal (1 subgoal):
1. space M - A \<in> {A. T -` A \<inter> space M = A}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
space M - A \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
?A5 \<in> I \<Longrightarrow> space M - ?A5 \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?A5 \<in> I \<Longrightarrow> space M - ?A5 \<in> I
[PROOF STEP]
have c: "(\<forall>a\<in>I. space M - a \<in> I)"
[PROOF STATE]
proof (prove)
using this:
?A5 \<in> I \<Longrightarrow> space M - ?A5 \<in> I
goal (1 subgoal):
1. \<forall>a\<in>I. space M - a \<in> I
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>a\<in>I. space M - a \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
have d: "(\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I
[PROOF STEP]
unfolding I_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>a\<in>{A. T -` A \<inter> space M = A}. \<forall>b\<in>{A. T -` A \<inter> space M = A}. a \<union> b \<in> {A. T -` A \<inter> space M = A}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
show "I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
using a b c d
[PROOF STATE]
proof (prove)
using this:
I \<subseteq> Pow (space M)
{} \<in> I
\<forall>a\<in>I. space M - a \<in> I
\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I
goal (1 subgoal):
1. I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
I \<subseteq> Pow (space M) \<and> {} \<in> I \<and> (\<forall>a\<in>I. space M - a \<in> I) \<and> (\<forall>a\<in>I. \<forall>b\<in>I. a \<union> b \<in> I)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
algebra (space M) I
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
algebra (space M) I
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
have "(\<forall>F. range F \<subseteq> I \<longrightarrow> (\<Union>i::nat. F i) \<in> I)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>F. range F \<subseteq> I \<longrightarrow> \<Union> (range F) \<in> I
[PROOF STEP]
unfolding I_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>F. range F \<subseteq> {A. T -` A \<inter> space M = A} \<longrightarrow> \<Union> (range F) \<in> {A. T -` A \<inter> space M = A}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>F. range F \<subseteq> I \<longrightarrow> \<Union> (range F) \<in> I
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
algebra (space M) I
\<forall>F. range F \<subseteq> I \<longrightarrow> \<Union> (range F) \<in> I
[PROOF STEP]
have "sigma_algebra (space M) I"
[PROOF STATE]
proof (prove)
using this:
algebra (space M) I
\<forall>F. range F \<subseteq> I \<longrightarrow> \<Union> (range F) \<in> I
goal (1 subgoal):
1. sigma_algebra (space M) I
[PROOF STEP]
using sigma_algebra_iff
[PROOF STATE]
proof (prove)
using this:
algebra (space M) I
\<forall>F. range F \<subseteq> I \<longrightarrow> \<Union> (range F) \<in> I
sigma_algebra ?\<Omega> ?M = (algebra ?\<Omega> ?M \<and> (\<forall>A. range A \<subseteq> ?M \<longrightarrow> \<Union> (range A) \<in> ?M))
goal (1 subgoal):
1. sigma_algebra (space M) I
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) I
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) I
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
have "sigma_algebra (space M) (sets M)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sigma_algebra (space M) (sets M)
[PROOF STEP]
using measure_space measure_space_def
[PROOF STATE]
proof (prove)
using this:
measure_space (space ?M) (sets ?M) (emeasure ?M)
measure_space ?\<Omega> ?A ?\<mu> = (sigma_algebra ?\<Omega> ?A \<and> positive ?A ?\<mu> \<and> countably_additive ?A ?\<mu>)
goal (1 subgoal):
1. sigma_algebra (space M) (sets M)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) (sets M)
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
sigma_algebra (space M) I
sigma_algebra (space M) (sets M)
[PROOF STEP]
have "sigma_algebra (space M) (I \<inter> (sets M))"
[PROOF STATE]
proof (prove)
using this:
sigma_algebra (space M) I
sigma_algebra (space M) (sets M)
goal (1 subgoal):
1. sigma_algebra (space M) (I \<inter> sets M)
[PROOF STEP]
using sigma_algebra_intersection
[PROOF STATE]
proof (prove)
using this:
sigma_algebra (space M) I
sigma_algebra (space M) (sets M)
\<lbrakk>sigma_algebra ?\<Omega> ?A; sigma_algebra ?\<Omega> ?B\<rbrakk> \<Longrightarrow> sigma_algebra ?\<Omega> (?A \<inter> ?B)
goal (1 subgoal):
1. sigma_algebra (space M) (I \<inter> sets M)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) (I \<inter> sets M)
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) (I \<inter> sets M)
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
have "I \<inter> sets M = {A \<in> sets M. T-`A \<inter> space M = A}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I \<inter> sets M = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
unfolding I_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {A. T -` A \<inter> space M = A} \<inter> sets M = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
I \<inter> sets M = {A \<in> sets M. T -` A \<inter> space M = A}
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
sigma_algebra (space M) (I \<inter> sets M)
I \<inter> sets M = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
sigma_algebra (space M) (I \<inter> sets M)
I \<inter> sets M = {A \<in> sets M. T -` A \<inter> space M = A}
goal (1 subgoal):
1. sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
goal (1 subgoal):
1. sets Invariants = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
goal (1 subgoal):
1. sets Invariants = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
unfolding Invariants_def
[PROOF STATE]
proof (prove)
using this:
sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
goal (1 subgoal):
1. sets (sigma (space M) {A \<in> sets M. T -` A \<inter> space M = A}) = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
using sigma_algebra.sets_measure_of_eq
[PROOF STATE]
proof (prove)
using this:
sigma_algebra (space M) {A \<in> sets M. T -` A \<inter> space M = A}
sigma_algebra ?\<Omega> ?M \<Longrightarrow> sets (measure_of ?\<Omega> ?M ?\<mu>) = ?M
goal (1 subgoal):
1. sets (sigma (space M) {A \<in> sets M. T -` A \<inter> space M = A}) = {A \<in> sets M. T -` A \<inter> space M = A}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
sets Invariants = {A \<in> sets M. T -` A \<inter> space M = A}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6790, "file": "Ergodic_Theory_Invariants", "length": 73}
|
[STATEMENT]
lemma (in encoding) refl_symm_trans_closure_of_indRelT:
fixes TRel :: "('procT \<times> 'procT) set"
assumes refl: "refl TRel"
and symm: "sym TRel"
shows "indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
have "(symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*
[PROOF STEP]
by (rule refl_symm_trans_closure_is_symm_refl_trans_closure[where Rel="indRelT TRel"])
[PROOF STATE]
proof (state)
this:
(symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
from symm
[PROOF STATE]
proof (chain)
picking this:
sym TRel
[PROOF STEP]
have "symcl (indRelT TRel) = indRelT TRel"
[PROOF STATE]
proof (prove)
using this:
sym TRel
goal (1 subgoal):
1. symcl (indRelT TRel) = indRelT TRel
[PROOF STEP]
using indRelT_symm[where TRel="TRel"] symm_closure_of_symm_rel[where Rel="indRelT TRel"]
[PROOF STATE]
proof (prove)
using this:
sym TRel
sym TRel \<Longrightarrow> sym (indRelT TRel)
sym (indRelT TRel) \<Longrightarrow> symcl (indRelT TRel) = indRelT TRel
goal (1 subgoal):
1. symcl (indRelT TRel) = indRelT TRel
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
symcl (indRelT TRel) = indRelT TRel
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*
symcl (indRelT TRel) = indRelT TRel
[PROOF STEP]
show "indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+"
[PROOF STATE]
proof (prove)
using this:
(symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*
symcl (indRelT TRel) = indRelT TRel
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
using refl refl_trans_closure_of_indRelT[where TRel="TRel"]
[PROOF STATE]
proof (prove)
using this:
(symcl ((indRelT TRel)\<^sup>=))\<^sup>+ = (symcl (indRelT TRel))\<^sup>*
symcl (indRelT TRel) = indRelT TRel
refl TRel
refl TRel \<Longrightarrow> indRelTEQ TRel = (indRelT TRel)\<^sup>*
goal (1 subgoal):
1. indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
indRelTEQ TRel = (symcl ((indRelT TRel)\<^sup>=))\<^sup>+
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1330, "file": "Encodability_Process_Calculi_SourceTargetRelation", "length": 13}
|
'''
Author : Siddharth Nayak
email:ee16b073@smail.iitm.ac.in

This module is for the first layer of SVM to classify between gestures.
Each sample file under `root` is loaded as feature rows; the gesture label
is the first underscore-separated token of the file name.
'''
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import scipy as sp
import os, signals  # NOTE(review): 'signals' is not a stdlib module -- presumably project-local; confirm
from sklearn.externals import joblib  # deprecated in modern sklearn (use `import joblib`); kept for the original environment
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import VotingClassifier
import pandas as pd

x_data = []
y_data = []
classes = {}  # maps numeric label -> gesture name (e.g. 0 -> "a")
root = "dynamic data"

print("Loading the dataset from '{directory}'...".format(directory=root))

# Collect one DataFrame per sample file and concatenate once at the end.
# (The original code did `df = df.append(df1)` on a plain list, which rebinds
# df to None after the first file and crashes on the second; it is also
# quadratic if done on a DataFrame.)
frames = []
for path, subdirs, files in os.walk(root):
    for name in files:
        # Get the filename
        filename = os.path.join(path, name)
        # Skip macOS ".DS_Store" artifacts
        if filename[-6:] != '_Store':
            print(filename)
            # Load the sample from file
            df1 = pd.read_csv(filename, delim_whitespace=False)
            frames.append(df1)
            # Extract the category from the file name.
            # For example, the file "a_sample_0.txt" will be considered as "a".
            category = name.split("_")[0]
            # Get a number for the category, as an offset from the category
            # to the 'a' char in ASCII.
            number = ord(category) - ord("a")
            # NOTE(review): one label is appended per *file* while every row of
            # the file goes into x_data -- this assumes each CSV holds exactly
            # one feature row; verify against the data format.
            y_data.append(number)
            # Include the category and the corresponding number into a
            # dictionary for easy access and referencing.
            classes[number] = category
            print('done')
print("DONE")

df = pd.concat(frames)
x_data = df.iloc[:, 0:10]  # first 10 columns are the feature vector

# Grid-search a linear-kernel SVM over the regularisation strength C.
params = {'C': [0.001, 0.01, 0.1, 1], 'kernel': ['linear']}
svc1 = svm.SVC(probability=True)
clf = GridSearchCV(svc1, params, verbose=10, n_jobs=8)  # fixed NameError: was undefined name 'svc'

X_train, X_test, Y_train, Y_test = train_test_split(
    x_data, y_data, test_size=0.35, random_state=0)
print("Starting the training process...")
clf.fit(X_train, Y_train)
score = clf.score(X_test, Y_test)
print("\nSCORE: {score}\n".format(score=score))

# Persist the fitted model and the label mapping for the inference stage.
joblib.dump(clf, 'model1.pkl')
joblib.dump(classes, 'classes1.pkl')
|
{"hexsha": "80cb864285099ea64e7defb3c3934a69accaf9c8", "size": 2005, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_classify_first_layer.py", "max_stars_repo_name": "ruc98/Inter-IIT-Tech-Meet-2018-Project", "max_stars_repo_head_hexsha": "8405acd3d650b4f41b669e9cfc59cd27f0fee1a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_classify_first_layer.py", "max_issues_repo_name": "ruc98/Inter-IIT-Tech-Meet-2018-Project", "max_issues_repo_head_hexsha": "8405acd3d650b4f41b669e9cfc59cd27f0fee1a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_classify_first_layer.py", "max_forks_repo_name": "ruc98/Inter-IIT-Tech-Meet-2018-Project", "max_forks_repo_head_hexsha": "8405acd3d650b4f41b669e9cfc59cd27f0fee1a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3387096774, "max_line_length": 98, "alphanum_fraction": 0.7017456359, "include": true, "reason": "import scipy", "num_tokens": 519}
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_op_count_match,
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
import pytest
import numpy as np
import itertools
# Fix the global RNG so randomly generated weights/biases in the tests below
# are reproducible across runs.
np.random.seed(1984)
class TestElementwiseOptimizationPasses:
    """
    Input graph:
                             Const
                               |
                               V
    input -----> convolution -----> add/sub ----> relu ---> out

    Output graph:
    input -----> convolution -----> relu ----> out
    """

    @pytest.mark.parametrize(
        "conv_dim, \
        flip_add_input_order, \
        add_batch_dim_to_const, \
        use_sub_instead, \
        prebuilt_bias, \
        scalar_elementwise, \
        use_conv_transpose",
        itertools.product(
            [
                2,
                3,
            ],  # 1D conv conversion broken even without the pass: rdar://problem/62960720
            [True, False],  # flip_add_input_order
            [True, False],  # add_batch_dim_to_const
            [True, False],  # use_sub_instead
            [True, False],  # prebuilt_bias
            [True, False],  # scalar_elementwise
            [True, False],  # use_conv_transpose
        ),
    )
    def test_fuse_conv_bias(
        self,
        conv_dim,
        flip_add_input_order,
        add_batch_dim_to_const,
        use_sub_instead,
        prebuilt_bias,
        scalar_elementwise,
        use_conv_transpose,
    ):
        """Verify common::fuse_conv_bias folds a conv(+bias) -> add/sub const
        pattern into a single conv with an adjusted bias, then checks the
        resulting bias value and model validity."""
        # `const if flip_add_input_order` only makes sense for add (sub is not
        # commutative), so skip that combination.
        if flip_add_input_order and use_sub_instead:
            return
        if use_conv_transpose and conv_dim != 2:
            return

        input_shape = None
        W = None
        Cout = 8
        Cin = 3
        D = 10
        const = (
            np.random.rand(Cout) if add_batch_dim_to_const else np.random.rand(1, Cout)
        )
        # Pad trailing singleton dims so the const broadcasts over the spatial axes.
        const = np.expand_dims(const, axis=-1)

        if conv_dim == 1:
            input_shape = (1, Cin, D)
            W = np.random.rand(Cout, Cin, 1)
        elif conv_dim == 2:
            input_shape = (1, Cin, D, D)
            W = np.random.rand(Cout, Cin, 1, 1)
            const = np.expand_dims(const, axis=-1)
        elif conv_dim == 3:
            input_shape = (1, Cin, D, D, D)
            W = np.random.rand(Cout, Cin, 1, 1, 1)
            const = np.expand_dims(const, axis=-1)
            const = np.expand_dims(const, axis=-1)

        if use_conv_transpose:
            # conv_transpose expects the weight with (Cin, Cout, ...) layout.
            W = np.swapaxes(W, 0, 1)
        output_shape = list(input_shape)
        output_shape[1] = Cout

        if scalar_elementwise:
            const = np.random.uniform(0)

        @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
        def prog(x):
            kwargs = {
                "x": x,
                "weight": W,
                "pad_type": "valid",
                "dilations": [1] * conv_dim,
                "strides": [1] * conv_dim,
            }
            if prebuilt_bias:
                kwargs["bias"] = np.random.rand(Cout)

            x = mb.conv_transpose(**kwargs) if use_conv_transpose else mb.conv(**kwargs)

            if use_sub_instead:
                x = mb.sub(x=x, y=const)
            else:
                x = mb.add(
                    x=const if flip_add_input_order else x,
                    y=x if flip_add_input_order else const,
                )

            x = mb.relu(x=x)
            return x

        element_op = "sub" if use_sub_instead else "add"
        conv_op = "conv" if not use_conv_transpose else "conv_transpose"

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::fuse_conv_bias"
        )
        # The elementwise op must have been fused away.
        assert get_op_types_in_program(prev_prog) == [conv_op, element_op, "relu"]
        assert get_op_types_in_program(prog) == [conv_op, "relu"]

        # New conv bias must equal old bias +/- the folded constant.
        old_bias = prev_block.find_ops(op_type=conv_op)[0].inputs.get("bias", None)
        old_bias_val = 0 if old_bias is None else old_bias.val
        assert old_bias_val is not None
        assert block.find_ops(op_type=conv_op)[0].inputs["bias"] is not None
        new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val
        assert new_bias_val is not None
        if use_sub_instead:
            np.testing.assert_almost_equal(
                old_bias_val - np.squeeze(const), new_bias_val
            )
        else:
            np.testing.assert_almost_equal(
                old_bias_val + np.squeeze(const), new_bias_val
            )

        assert_model_is_valid(
            prog,
            {"x": input_shape},
            expected_output_shapes={block.outputs[0].name: tuple(output_shape)},
        )

    """
    Input graph:
                                                 Const
                                                   |
                                                   V
    input -----> convolution -----> transpose -----> add/sub ---> out

    Output graph:
    input -----> convolution -----> transpose -----> out
    """

    @pytest.mark.parametrize(
        "conv_dim, has_bias, is_sub, is_conv_first_input, is_bias_scalar, is_deconv, is_all_1s",
        itertools.product(
            [1, 2, 3],  # conv_dim
            [True, False],  # has_bias
            [True, False],  # is_sub
            [True, False],  # is_conv_first_input
            [True, False],  # is_bias_scalar
            [True, False],  # is_deconv
            [True, False],  # is_all_1s
        ),
    )
    def test_fuse_conv_bias_transpose_pattern(
        self,
        conv_dim,
        has_bias,
        is_sub,
        is_conv_first_input,
        is_bias_scalar,
        is_deconv,
        is_all_1s,
    ):
        """Verify common::fuse_conv_bias also fuses through an intervening
        transpose, adjusting the conv weight sign and bias as needed."""
        # A scalar bias is already the degenerate all-1s-shape case; skip overlap.
        if is_all_1s and is_bias_scalar:
            return

        # construct the conv weight/bias
        input_shape = None
        Cout = 8
        Cin = 3
        D = 10
        conv_weight = None
        conv_bias = np.arange(Cout).astype(np.float32) if has_bias else np.zeros(Cout).astype(np.float32)
        rank = conv_dim + 2

        if conv_dim == 1:
            input_shape = (1, Cin, D)
            conv_weight = np.random.rand(Cout, Cin, 1)
        elif conv_dim == 2:
            input_shape = (1, Cin, D, D)
            conv_weight = np.random.rand(Cout, Cin, 1, 1)
        elif conv_dim == 3:
            input_shape = (1, Cin, D, D, D)
            conv_weight = np.random.rand(Cout, Cin, 1, 1, 1)

        if is_deconv:
            # conv_transpose expects (Cin, Cout, ...) weight layout.
            conv_weight = np.swapaxes(conv_weight, 0, 1)

        output_shape = list(input_shape)
        output_shape[1] = Cout
        output_shape = np.array(output_shape)

        # generate the perm for the tranpose op
        perm = np.arange(rank)
        np.random.shuffle(perm)
        output_shape = output_shape[perm]
        # cout_index: where the channel axis lands after the transpose.
        cout_index = np.where(perm == 1)[0][0]

        # generate the const bias, and reshape it to a random broadcasable shape
        bias = np.arange(Cout).astype(np.float32)
        bias_shape = [1] * rank
        bias_shape[cout_index] = Cout
        if cout_index != 0:
            # Randomly drop leading singleton dims to exercise broadcasting.
            crop_index = np.random.randint(low=0, high=cout_index + 1)
            bias_shape = bias_shape[crop_index:]
        bias = np.reshape(bias, bias_shape)

        # for the scalar case, random generate a number
        if is_bias_scalar:
            bias = np.random.uniform(0)

        # for the all 1s case, random generate a number and reshape it to (1, 1, ..., 1)
        if is_all_1s:
            bias = np.array([np.random.uniform(0)])
            bias_rank = np.random.randint(low=1, high=rank+1)
            bias_shape = [1] * bias_rank
            bias = np.reshape(bias, bias_shape)

        @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
        def prog(x):
            # conv or conv_transpose
            kwargs = {
                "x": x,
                "weight": conv_weight,
                "pad_type": "valid",
                "dilations": [1] * conv_dim,
                "strides": [1] * conv_dim,
            }
            if has_bias:
                kwargs["bias"] = conv_bias
            x = mb.conv_transpose(**kwargs) if is_deconv else mb.conv(**kwargs)

            # transpose
            x = mb.transpose(x=x, perm=perm)

            # elementwise op
            element_args = {"x": x, "y": bias} if is_conv_first_input else {"x": bias, "y": x}
            element_op = mb.sub if is_sub else mb.add
            x = element_op(**element_args)
            return x

        element_op = "sub" if is_sub else "add"
        conv_op = "conv" if not is_deconv else "conv_transpose"

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::fuse_conv_bias"
        )
        assert get_op_types_in_program(prev_prog) == [conv_op, "transpose", element_op]
        assert get_op_types_in_program(prog) == [conv_op, "transpose"]

        # get the value of new weight/bias
        new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val
        assert new_bias_val is not None

        new_weight_val = block.find_ops(op_type=conv_op)[0].inputs["weight"].val
        assert new_weight_val is not None

        # compare the weight
        # const - conv(x) requires negating the conv output, i.e. the weight.
        if is_sub and not is_conv_first_input:
            np.testing.assert_almost_equal(new_weight_val, -conv_weight)
        else:
            np.testing.assert_almost_equal(new_weight_val, conv_weight)

        # compare the bias
        if is_sub:
            if is_conv_first_input:
                bias = -bias
            else:
                conv_bias = -conv_bias
        expected_conv_bias_val = conv_bias + np.squeeze(bias)
        np.testing.assert_almost_equal(expected_conv_bias_val, new_bias_val)

        # run the model
        assert_model_is_valid(
            prog,
            {"x": input_shape},
            expected_output_shapes={block.outputs[0].name: tuple(output_shape)},
        )

    """
    Input graph:
                 Const     Const
                   |         |
                   V         V
    input -----> transpose -----> mul ----> add ---> out

    Output graph:
    input -----> transpose -----> batchnorm ----> out
    """

    @pytest.mark.parametrize(
        "flip_mul_input_order, flip_add_input_order, rank_3_const_input",
        itertools.product([False, True], [False, True], [False, True]),
    )
    def test_mul_add_fusion_to_batchnorm(
        self, flip_mul_input_order, flip_add_input_order, rank_3_const_input
    ):
        """Verify common::fuse_elementwise_to_batchnorm rewrites a per-channel
        mul followed by add into a single batch_norm op."""
        C = 3
        # gamma/beta play the roles of batch-norm scale and shift.
        gamma = np.random.rand(1, C, 1, 1)
        beta = np.random.rand(1, C, 1, 1)
        if rank_3_const_input:
            gamma = np.squeeze(gamma, axis=0)
            beta = np.squeeze(beta, axis=0)

        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 10, 10, C))])
        def prog(x):
            # Move channels to axis 1 (NCHW) so the constants broadcast per channel.
            x = mb.transpose(x=x, perm=[0, 3, 1, 2])
            if flip_mul_input_order:
                x = mb.mul(x=gamma, y=x)
            else:
                x = mb.mul(x=x, y=gamma)
            if flip_add_input_order:
                x = mb.add(x=beta, y=x)
            else:
                x = mb.add(x=x, y=beta)
            return x

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, "common::fuse_elementwise_to_batchnorm"
        )
        assert get_op_types_in_program(prev_prog) == ["transpose", "mul", "add"]
        assert get_op_types_in_program(prog) == ["transpose", "batch_norm"]
        assert_model_is_valid(
            prog,
            {"x": (1, 10, 10, C)},
            expected_output_shapes={block.outputs[0].name: (1, C, 10, 10)},
        )
|
{"hexsha": "e717cb672f85eda6af6f1efe2350be109d5778ac", "size": 12011, "ext": "py", "lang": "Python", "max_stars_repo_path": "coremltools/converters/mil/mil/passes/test_elementwise_fusions.py", "max_stars_repo_name": "LaudateCorpus1/coremltools", "max_stars_repo_head_hexsha": "777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2740, "max_stars_repo_stars_event_min_datetime": "2017-10-03T23:19:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:16:39.000Z", "max_issues_repo_path": "coremltools/converters/mil/mil/passes/test_elementwise_fusions.py", "max_issues_repo_name": "LaudateCorpus1/coremltools", "max_issues_repo_head_hexsha": "777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1057, "max_issues_repo_issues_event_min_datetime": "2017-10-05T22:47:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:51:15.000Z", "max_forks_repo_path": "coremltools/converters/mil/mil/passes/test_elementwise_fusions.py", "max_forks_repo_name": "LaudateCorpus1/coremltools", "max_forks_repo_head_hexsha": "777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 510, "max_forks_repo_forks_event_min_datetime": "2017-10-04T19:22:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:16:52.000Z", "avg_line_length": 33.7387640449, "max_line_length": 105, "alphanum_fraction": 0.5335109483, "include": true, "reason": "import numpy", "num_tokens": 2874}
|
try:
from astropy.io import fits
except:
import pyfits as fits
import numpy as np
import scipy.signal
def gen_bad_pix_mask(image, filsize=5, threshold=5.0, return_smoothed_image=False):
    """Flag pixels that deviate strongly from a median-smoothed image.

    A pixel is "good" when the absolute residual against the median-filtered
    image is below ``threshold`` times the standard deviation of all residuals.

    Parameters
    ----------
    image : ndarray
        Input image.
    filsize : int, optional
        Median-filter kernel size (must be odd), default 5.
    threshold : float, optional
        Sigma threshold above which a pixel is flagged bad, default 5.0.
    return_smoothed_image : bool, optional
        If True, also return the median-filtered image.

    Returns
    -------
    goodpix : ndarray of bool
        True where the pixel is considered good.
    image_sm : ndarray
        The median-filtered image; only returned when
        ``return_smoothed_image`` is True.
    """
    smoothed = scipy.signal.medfilt(image, filsize)
    residual = image - smoothed
    # Normalize residuals by their global scatter and threshold in sigma units.
    goodpix = (np.abs(residual) / np.std(residual)) < threshold
    if return_smoothed_image:
        return goodpix, smoothed
    return goodpix
if __name__ == '__main__':
    # Demo: load a raw CHARIS lab exposure and flag bad pixels in a
    # read-difference frame.
    fn = 'CRSA00006343.fits'
    datadir = '/Users/protostar/Dropbox/data/charis/lab/'
    hdulist = fits.open(datadir + fn)
    # Stack every extension HDU after the primary, trimming a 4-pixel border
    # and the first 64 columns (presumably detector reference pixels -- TODO
    # confirm against the instrument layout).
    reads = np.array([h.data[4:-4, 64 + 4:-4] for h in hdulist[1:]])
    # Differencing two reads isolates the signal accumulated between them.
    diff = reads[5] - reads[0]
    goodpix, image_sm = gen_bad_pix_mask(diff, return_smoothed_image=True)
|
{"hexsha": "f95f44ace533cc0863371accc5f1eda2aaff812c", "size": 761, "ext": "py", "lang": "Python", "max_stars_repo_path": "charis/tools/badpix.py", "max_stars_repo_name": "thaynecurrie/charis-dep", "max_stars_repo_head_hexsha": "238397bb3ec18edba6e59c7203a623709ff4b50d", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "charis/tools/badpix.py", "max_issues_repo_name": "thaynecurrie/charis-dep", "max_issues_repo_head_hexsha": "238397bb3ec18edba6e59c7203a623709ff4b50d", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2018-01-23T14:46:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-24T17:29:52.000Z", "max_forks_repo_path": "charis/tools/badpix.py", "max_forks_repo_name": "thaynecurrie/charis-dep", "max_forks_repo_head_hexsha": "238397bb3ec18edba6e59c7203a623709ff4b50d", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-12-28T10:10:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T20:36:55.000Z", "avg_line_length": 27.1785714286, "max_line_length": 83, "alphanum_fraction": 0.6780551905, "include": true, "reason": "import numpy,import scipy,from astropy", "num_tokens": 226}
|
# Figure-generation script: trajectory of an electron in a plane
# electromagnetic wave, rendered as a two-panel standalone TikZ figure
# (lab frame on the left, drift frame on the right).
using Plots
using LaTeXStrings
pgfplots()

# Normalized units: speed of light, normalized vector potential, wave frequency.
c = 1
a₀ = 1
ω = 1

# Parametric trajectory x(τ), y(τ).
# NOTE(review): for general ω the standard figure-8 solution carries the term
# sin(2ωτ)/(2ω) rather than (ω/2)·sin(2ωτ); the two coincide only because
# ω = 1 here — confirm before reusing this script with ω ≠ 1.
x(τ) = c*(a₀^2)/4 *(τ - 1/2* ω*sin(2ω*τ))
y(τ) = c*a₀/ω *(1-cos(ω*τ))

# Left panel: trajectory in the lab frame.
p1 = plot(x, y, 0, 15,
    xlabel=L"x", ylabel=L"y",
    xticks=nothing, yticks=nothing,
    legend=:none,
    framestyle=:box,
    tex_output_standalone=true
)

# Average drift velocity along x; subtracting it yields the figure-8 orbit.
v_drift = c*a₀^2/4
# Right panel: trajectory in the frame co-moving with the drift.
p2 = plot(τ->x(τ)-v_drift*τ, y, 1, 10,
    xlabel=L"x", ylabel=L"y",
    xticks=nothing, yticks=nothing,
    legend=:none,
    framestyle=:box,
    tex_output_standalone=true
)

# Combine both panels (70 % / 30 % width split) and export to TikZ.
p = plot(p1, p2,
    layout=grid(1,2,widths=[0.7,0.3]),
    framestyle=:box,
    tex_output_standalone=true
)
savefig(p, "figures/electron-in-plane-wave.tex")
|
{"hexsha": "dbd2f178dea2e51df6b64062f7c0ba4cd52b0f43", "size": 628, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/fig8.jl", "max_stars_repo_name": "SebastianM-C/MasterThesis", "max_stars_repo_head_hexsha": "ffbf25e087444644ee73f72a31be969375fa0d10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/fig8.jl", "max_issues_repo_name": "SebastianM-C/MasterThesis", "max_issues_repo_head_hexsha": "ffbf25e087444644ee73f72a31be969375fa0d10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/fig8.jl", "max_forks_repo_name": "SebastianM-C/MasterThesis", "max_forks_repo_head_hexsha": "ffbf25e087444644ee73f72a31be969375fa0d10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.972972973, "max_line_length": 48, "alphanum_fraction": 0.6385350318, "num_tokens": 263}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.