text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
    """Compute the sample mean of the log_likelihood under a covariance model.

    Evaluates the empirical expected log-likelihood, keeping the Gaussian
    normalization term and the 1/2 scaling, so that the value is directly
    comparable with likelihoods computed by other software packages.

    Parameters
    ----------
    emp_cov : 2D ndarray (n_features, n_features)
        Maximum Likelihood Estimator of covariance

    precision : 2D ndarray (n_features, n_features)
        The precision matrix of the covariance model to be tested

    Returns
    -------
    sample mean of the log-likelihood
    """
    n_features = precision.shape[0]
    # Gaussian log-likelihood: (-tr(S K) + log det K - p log(2 pi)) / 2,
    # with S the empirical covariance and K the precision matrix.
    result = fast_logdet(precision) - np.sum(emp_cov * precision)
    result -= n_features * np.log(2 * np.pi)
    return result / 2.
def empirical_covariance(X, assume_centered=False):
    """Computes the Maximum likelihood covariance estimator.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate

    assume_centered : boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        Empirical covariance (Maximum Likelihood Estimator).
    """
    X = np.asarray(X)

    # A 1d input is treated as a single observation with many features.
    if X.ndim == 1:
        X = X.reshape(1, -1)
    if X.shape[0] == 1:
        warnings.warn("Only one sample available. "
                      "You may want to reshape your data array")

    if assume_centered:
        covariance = X.T.dot(X) / X.shape[0]
    else:
        # bias=1 requests the ML (1/n) normalization rather than 1/(n-1).
        covariance = np.cov(X.T, bias=1)

    # np.cov collapses a single feature to a 0d array; promote it back to 2d.
    if covariance.ndim == 0:
        covariance = np.array([[covariance]])
    return covariance
class EmpiricalCovariance(BaseEstimator):
    """Maximum likelihood covariance estimator

    Read more in the :ref:`User Guide <covariance>`.

    Parameters
    ----------
    store_precision : bool
        Specifies if the estimated precision is stored.

    assume_centered : bool
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data are centered before computation.

    Attributes
    ----------
    covariance_ : 2D ndarray, shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : 2D ndarray, shape (n_features, n_features)
        Estimated pseudo-inverse matrix.
        (stored only if store_precision is True)
    """

    def __init__(self, store_precision=True, assume_centered=False):
        self.store_precision = store_precision
        self.assume_centered = assume_centered

    def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates

        Storage is done accordingly to `self.store_precision`.
        Precision stored only if invertible.

        Parameters
        ----------
        covariance : 2D ndarray, shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.
        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            # pinvh computes the pseudo-inverse of a symmetric matrix, so a
            # singular covariance still yields a usable precision estimate.
            self.precision_ = linalg.pinvh(covariance)
        else:
            self.precision_ = None

    def get_precision(self):
        """Getter for the precision matrix.

        Returns
        -------
        precision_ : array-like
            The precision matrix associated to the current covariance object.
        """
        # Return the stored precision when available; otherwise invert the
        # covariance lazily on each call.
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_)
        return precision

    def fit(self, X, y=None):
        """Fits the Maximum Likelihood Estimator covariance model
        according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        y
            not used, present for API consistence purpose.

        Returns
        -------
        self : object
        """
        X = check_array(X)
        # location_ is the mean used for centering; zero when the caller
        # asserts the data is already centered.
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        covariance = empirical_covariance(
            X, assume_centered=self.assume_centered)
        self._set_covariance(covariance)

        return self

    def score(self, X_test, y=None):
        """Computes the log-likelihood of a Gaussian data set with
        `self.covariance_` as an estimator of its covariance matrix.

        Parameters
        ----------
        X_test : array-like, shape = [n_samples, n_features]
            Test data of which we compute the likelihood, where n_samples is
            the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution than
            the data used in fit (including centering).

        y
            not used, present for API consistence purpose.

        Returns
        -------
        res : float
            The likelihood of the data set with `self.covariance_` as an
            estimator of its covariance matrix.
        """
        # compute empirical covariance of the test set, centered with the
        # location learned during fit
        test_cov = empirical_covariance(
            X_test - self.location_, assume_centered=True)
        # compute log likelihood
        res = log_likelihood(test_cov, self.get_precision())

        return res

    def error_norm(self, comp_cov, norm='frobenius', scaling=True,
                   squared=True):
        """Computes the Mean Squared Error between two covariance estimators.
        (In the sense of the Frobenius norm).

        Parameters
        ----------
        comp_cov : array-like, shape = [n_features, n_features]
            The covariance to compare with.

        norm : str
            The type of norm used to compute the error. Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A))
            where A is the error ``(comp_cov - self.covariance_)``.

        scaling : bool
            If True (default), the squared error norm is divided by n_features.
            If False, the squared error norm is not rescaled.

        squared : bool
            Whether to compute the squared error norm or the error norm.
            If True (default), the squared error norm is returned.
            If False, the error norm is returned.

        Returns
        -------
        The Mean Squared Error (in the sense of the Frobenius norm) between
        `self` and `comp_cov` covariance estimators.
        """
        # compute the error
        error = comp_cov - self.covariance_
        # compute the error norm
        if norm == "frobenius":
            squared_norm = np.sum(error ** 2)
        elif norm == "spectral":
            # largest singular value of error^T error == squared spectral
            # norm of error
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented")
        # optionally scale the error norm
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)

        return result

    def mahalanobis(self, X):
        """Computes the squared Mahalanobis distances of given observations.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The observations, the Mahalanobis distances of the which we
            compute. Observations are assumed to be drawn from the same
            distribution than the data used in fit.

        Returns
        -------
        dist : array, shape = [n_samples,]
            Squared Mahalanobis distances of the observations.
        """
        precision = self.get_precision()
        # compute mahalanobis distances against the fitted location;
        # pairwise_distances returns distances, so square them on return
        dist = pairwise_distances(X, self.location_[np.newaxis, :],
                                  metric='mahalanobis', VI=precision)

        return np.reshape(dist, (len(X),)) ** 2
| vortex-ape/scikit-learn | sklearn/covariance/empirical_covariance_.py | Python | bsd-3-clause | 9,139 | [
"Gaussian"
] | f7fe3cf31019a357f259b0e43e68d97d177f8c6a8a5bc2d07c5beaf8a3ef93f3 |
import io
import sys
import pytest
from webob import Request, Response
from firefly.app import Firefly, FireflyFunction, ctx
py2_only = pytest.mark.skipif(sys.version_info.major >= 3, reason="Requires Python 2")
py3_only = pytest.mark.skipif(sys.version_info.major < 3, reason="Requires Python 3+")
def square(a):
    '''Computes square'''
    # NOTE: the docstring above is introspected by Firefly's function list
    # (tests assert "doc": "Computes square"), so it must stay as-is.
    result = a ** 2
    return result
def dummy():
    # Placeholder callable used when a test only needs a function object.
    return None
class TestFirefly:
    """Tests for the Firefly app: function listing, routing, auth and ctx."""

    def test_generate_function_list(self):
        # An app with no routes exposes an empty function list.
        firefly = Firefly()
        assert firefly.generate_function_list() == {}

        firefly.add_route("/square", square, "square")
        returned_dict = {
            "square": {
                "path": "/square",
                "doc": "Computes square",
                "parameters": [
                    {
                        "name": "a",
                        "kind": "POSITIONAL_OR_KEYWORD"
                    }
                ]
            }
        }
        assert firefly.generate_function_list() == returned_dict

    def test_generate_function_list_for_func_name(self):
        # The exposed name ("sq") may differ from the route path ("/sq2").
        firefly = Firefly()
        firefly.add_route("/sq2", square, "sq")
        returned_dict = {
            "sq": {
                "path": "/sq2",
                "doc": "Computes square",
                "parameters": [
                    {
                        "name": "a",
                        "kind": "POSITIONAL_OR_KEYWORD"
                    }
                ]
            }
        }
        assert firefly.generate_function_list() == returned_dict

    def test_function_call(self):
        app = Firefly()
        app.add_route("/", square)
        request = Request.blank("/", POST='{"a": 3}')
        response = app.process_request(request)
        assert response.status == '200 OK'
        assert response.text == '9'

    def test_auth_failure(self):
        # Both a missing token and a wrong token must yield 403.
        app = Firefly(auth_token='abcd')
        app.add_route("/", square)

        request = Request.blank("/", POST='{"a": 3}')
        response = app.process_request(request)
        print(response.text)
        assert response.status == '403 Forbidden'

        headers = {
            "Authorization": "token bad-token"
        }
        request = Request.blank("/", POST='{"a": 3}', headers=headers)
        response = app.process_request(request)
        assert response.status == '403 Forbidden'

    def test_http_error_404(self):
        app = Firefly()
        app.add_route("/", square)
        request = Request.blank("/sq", POST='{"a": 3}')
        response = app.process_request(request)
        assert response.status == '404 Not Found'

    def test_ctx(self):
        # During a request, ctx should hold exactly the 'request' attribute.
        def peek_ctx():
            keys = sorted(ctx.__dict__.keys())
            return list(keys)

        app = Firefly()
        app.add_route("/", peek_ctx)
        request = Request.blank("/", POST='{}')
        response = app.process_request(request)
        assert response.status == '200 OK'
        assert response.json == ['request']

    def test_ctx_cross_request(self):
        # Attributes stored on ctx must not leak between requests.
        def peek_ctx():
            print("peek_ctx", ctx.__dict__)
            ctx.count = getattr(ctx, "count", 0) + 1
            return ctx.count

        app = Firefly()
        app.add_route("/", peek_ctx)

        request = Request.blank("/", POST='{}')
        response = app.process_request(request)
        assert response.status == '200 OK'
        assert response.json == 1

        # Subsequent requests should not have count in the context
        request = Request.blank("/", POST='{}')
        response = app.process_request(request)
        assert response.status == '200 OK'
        assert response.json == 1
class TestFireflyFunction:
    """Tests for FireflyFunction: calling, error handling, file inputs and
    signature generation."""

    def test_call(self):
        func = FireflyFunction(square)
        request = Request.blank("/square", POST='{"a": 3}')
        response = func(request)
        assert response.status == '200 OK'
        assert response.text == '9'

    def test_call_for_bad_request(self):
        # NOTE(review): this local `sum` shadows the builtin and would
        # recurse if ever called; the POST body is malformed JSON, so the
        # 400 presumably comes from request parsing before the call —
        # confirm against FireflyFunction's input handling.
        def sum(a):
            return sum(a)

        func = FireflyFunction(sum)
        request = Request.blank("/sum", POST='{"a": [3 8]}')
        response = func(request)
        assert response.status == '400 Bad Request'

    def test_call_for_internal_function_error(self):
        # Exceptions raised by the wrapped function become a 500 response
        # carrying the exception type and message.
        def dummy(a):
            raise ValueError("This is a test")

        req = Request.blank('/dummy', POST='{"a": 1}')
        func = FireflyFunction(dummy)
        resp = func(req)
        assert resp.status == '500 Internal Server Error'
        assert resp.json == {'error': 'ValueError: This is a test'}

    def test_call_for_file_inputs(self):
        def filesize(data):
            return len(data.read())

        f = io.StringIO(u"test file contents")
        req = Request.blank('/filesize', POST={'data': ('test', f)})
        func = FireflyFunction(filesize)
        resp = func(req)
        assert resp.status == '200 OK'
        assert resp.body == b'18'

    def test_get_multipart_formdata_inputs_with_files(self):
        # g is a second copy of the contents to compare against, since
        # reading f consumes it.
        f = io.StringIO(u"test file contents")
        g = io.StringIO(u"test file contents")
        req = Request.blank('/filesize', POST={'data': ('test', f)})
        func = FireflyFunction(dummy)
        d = func.get_multipart_formdata_inputs(req)
        assert d['data'].read().decode() == g.read()

    def test_get_multipart_formdata_inputs_with_combined_inputs(self):
        # Mixed multipart payload: one file plus plain form fields.
        f = io.StringIO(u"test file contents")
        g = io.StringIO(u"test file contents")
        req = Request.blank('/filesize', POST={'data': ('test', f), 'abc': 'hi', 'xyz': '1'})
        func = FireflyFunction(dummy)
        d = func.get_multipart_formdata_inputs(req)
        assert d['data'].read().decode() == g.read()
        assert d['abc'] == 'hi'
        assert d['xyz'] == '1'

    def test_get_multipart_formdata_inputs_with_no_files(self):
        def dummy():
            pass

        # Note: form values arrive as strings, hence xyz == '1' not 1.
        req = Request.blank('/filesize', POST={'abc': 'hi', 'xyz': 1})
        func = FireflyFunction(dummy)
        d = func.get_multipart_formdata_inputs(req)
        assert d['abc'] == 'hi'
        assert d['xyz'] == '1'

    def test_get_content_type_present(self):
        req = Request.blank('/', headers={'Content-Type': 'multipart/form-data'})
        func = FireflyFunction(dummy)
        content_type = func.get_content_type(req)
        assert content_type == 'multipart/form-data'

    def test_get_content_type_absent(self):
        # Missing Content-Type falls back to the generic octet-stream.
        req = Request.blank('/')
        func = FireflyFunction(dummy)
        content_type = func.get_content_type(req)
        assert content_type == 'application/octet-stream'

    @py2_only
    def test_generate_signature(self):
        def sample_function(x, one="hey", two=None, **kwargs):
            pass

        func = FireflyFunction(sample_function)
        assert len(func.sig) == 4
        assert func.sig[0]['name'] == 'x'
        assert func.sig[0]['kind'] == 'POSITIONAL_OR_KEYWORD'
        assert func.sig[1]['name'] == 'one'
        assert func.sig[1]['kind'] == 'POSITIONAL_OR_KEYWORD'
        assert func.sig[1]['default'] == 'hey'
        assert func.sig[2]['default'] == None
        assert func.sig[3]['name'] == 'kwargs'
        assert func.sig[3]['kind'] == 'VAR_KEYWORD'

    @py3_only
    def test_generate_signature_py3(self):
        # work-around to avoid syntax error in python 2
        # (keyword-only args after `*` are py3-only syntax)
        code = 'def f(x, y=1, *, one="hey", two=None, **kwargs): pass'
        env = {}
        exec(code, env, env)
        f = env['f']

        func = FireflyFunction(f)
        assert len(func.sig) == 5
        assert func.sig[0]['name'] == 'x'
        assert func.sig[0]['kind'] == 'POSITIONAL_OR_KEYWORD'
        assert func.sig[1]['default'] == 1
        assert func.sig[2]['name'] == 'one'
        assert func.sig[2]['kind'] == 'KEYWORD_ONLY'
        assert func.sig[2]['default'] == 'hey'
        assert func.sig[3]['default'] == None
        assert func.sig[4]['name'] == 'kwargs'
        assert func.sig[4]['kind'] == 'VAR_KEYWORD'
| rorodata/firefly | tests/test_app.py | Python | apache-2.0 | 7,999 | [
"Firefly"
] | 450455478fdcbd66ee1e36a2cda2012e2ab168560a53ef1ea809d5fc42f646f2 |
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Implementation of till application. """
import decimal
from datetime import date
import logging
import pango
import gtk
from kiwi.currency import currency
from kiwi.datatypes import converter
from kiwi.ui.objectlist import Column
from storm.expr import And, Or
from stoqlib.api import api
from stoqlib.enums import SearchFilterPosition
from stoqlib.exceptions import (StoqlibError, TillError, SellError,
ModelDataError)
from stoqlib.database.expr import Date
from stoqlib.domain.sale import Sale, SaleView
from stoqlib.domain.till import Till
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.workorder import WorkOrder
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.formatters import format_quantity
from stoqlib.lib.message import yesno, warning
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext as _
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.dialogs.missingitemsdialog import (MissingItemsDialog,
get_missing_items)
from stoqlib.gui.dialogs.saledetails import SaleDetailsDialog
from stoqlib.gui.dialogs.tilldailymovement import TillDailyMovementDialog
from stoqlib.gui.dialogs.tillhistory import TillHistoryDialog
from stoqlib.gui.editors.paymentseditor import SalePaymentsEditor
from stoqlib.gui.editors.tilleditor import CashInEditor, CashOutEditor
from stoqlib.gui.fiscalprinter import FiscalPrinterHelper
from stoqlib.gui.search.paymentsearch import CardPaymentSearch
from stoqlib.gui.search.paymentreceivingsearch import PaymentReceivingSearch
from stoqlib.gui.search.personsearch import ClientSearch
from stoqlib.gui.search.salesearch import (SaleWithToolbarSearch,
SoldItemsByBranchSearch)
from stoqlib.gui.search.searchcolumns import IdentifierColumn, SearchColumn
from stoqlib.gui.search.searchfilters import ComboSearchFilter
from stoqlib.gui.search.tillsearch import TillFiscalOperationsSearch, TillClosedSearch
from stoqlib.gui.slaves.saleslave import return_sale
from stoqlib.gui.utils.keybindings import get_accels
from stoqlib.reporting.sale import SalesReport
from stoq.gui.shell.shellapp import ShellApp
log = logging.getLogger(__name__)
LOGO_WIDTH = 91
LOGO_HEIGHT = 32
class TillApp(ShellApp):
    """Till application: lists today's pending sales and lets the operator
    confirm or return them, and open/close/verify the till and move cash."""

    app_title = _(u'Till')
    gladefile = 'till'
    search_spec = SaleView
    search_labels = _(u'matching:')
    report_table = SalesReport

    #
    # Application
    #

    def create_actions(self):
        """Registers all menu/toolbar actions and their accelerators."""
        group = get_accels('app.till')
        actions = [
            ('SaleMenu', None, _('Sale')),
            ('TillOpen', None, _('Open till...'),
             group.get('open_till')),
            ('TillClose', None, _('Close till...'),
             group.get('close_till')),
            ('TillVerify', None, _('Verify till...'),
             group.get('verify_till')),
            ("TillDailyMovement", None, _("Till daily movement..."),
             group.get('daily_movement')),
            ('TillAddCash', None, _('Cash addition...'), ''),
            ('TillRemoveCash', None, _('Cash removal...'), ''),
            ("PaymentReceive", None, _("Payment receival..."),
             group.get('payment_receive'),
             _("Receive payments")),
            ("SearchClient", None, _("Clients..."),
             group.get('search_clients'),
             _("Search for clients")),
            ("SearchSale", None, _("Sales..."),
             group.get('search_sale'),
             _("Search for sales")),
            ("SearchCardPayment", None, _("Card payments..."),
             None, _("Search for card payments")),
            ("SearchSoldItemsByBranch", None, _("Sold items by branch..."),
             group.get('search_sold_items_by_branch'),
             _("Search for items sold by branch")),
            ("SearchTillHistory", None, _("Till entry history..."),
             group.get('search_till_history'),
             _("Search for till history")),
            ("SearchFiscalTillOperations", None, _("Fiscal till operations..."),
             group.get('search_fiscal_till_operations'),
             _("Search for fiscal till operations")),
            ("SearchClosedTill", None, _("Closed till search..."),
             group.get('search_closed_till'),
             _("Search for all closed tills")),
            ("Confirm", gtk.STOCK_APPLY, _("Confirm..."),
             group.get('confirm_sale'),
             _("Confirm the selected sale, decreasing stock and making it "
               "possible to receive it's payments")),
            # FIXME: This button should change the label to "Cancel" when the
            # selected sale can be cancelled and not returned, since that's
            # what is going to happen when the user click in it
            ("Return", gtk.STOCK_CANCEL, _("Return..."),
             group.get('return_sale'),
             _("Return the selected sale, returning stock and the client's "
               "payments")),
            ("Details", gtk.STOCK_INFO, _("Details..."),
             group.get('sale_details'),
             _("Show details of the selected sale")),
        ]
        self.till_ui = self.add_ui_actions('', actions,
                                           filename="till.xml")
        self.set_help_section(_("Till help"), 'app-till')

        self.Confirm.set_short_label(_('Confirm'))
        self.Return.set_short_label(_('Return'))
        self.Details.set_short_label(_('Details'))
        self.Confirm.props.is_important = True
        self.Return.props.is_important = True
        self.Details.props.is_important = True

    def create_ui(self):
        """Builds widgets, focus chains and the fiscal-printer helper."""
        self.popup = self.uimanager.get_widget('/TillSelection')
        self.current_branch = api.get_current_branch(self.store)
        # Groups
        self.main_vbox.set_focus_chain([self.app_vbox])
        self.app_vbox.set_focus_chain([self.search_holder, self.list_vbox])
        # Setting up the toolbar
        self.list_vbox.set_focus_chain([self.footer_hbox])
        self._setup_printer()
        self._setup_widgets()
        self.status_link.set_use_markup(True)
        self.status_link.set_justify(gtk.JUSTIFY_CENTER)

    def get_title(self):
        return _('[%s] - Till') % (
            api.get_current_branch(self.store).get_description(), )

    def activate(self, refresh=True):
        """Called when the app gains focus; wires shared menu items and
        re-runs the printer/inventory checks."""
        self.window.add_new_items([self.TillAddCash,
                                   self.TillRemoveCash])
        self.window.add_search_items([self.SearchFiscalTillOperations,
                                      self.SearchClient,
                                      self.SearchSale])
        self.window.Print.set_tooltip(_("Print a report of these sales"))
        if refresh:
            self.refresh()

        self._printer.run_initial_checks()
        self.check_open_inventory()
        self.search.focus_search_entry()

    def deactivate(self):
        self.uimanager.remove_ui(self.till_ui)

    def new_activate(self):
        # Ctrl+N style shortcut: only meaningful when cash-add is enabled.
        if not self.TillAddCash.get_sensitive():
            return
        self._run_add_cash_dialog()

    def search_activate(self):
        self._run_search_dialog(TillFiscalOperationsSearch)

    #
    # ShellApp
    #

    def set_open_inventory(self):
        # With an open inventory, stock-changing operations are disabled.
        self.set_sensitive(self._inventory_widgets, False)

    def create_filters(self):
        self.search.set_query(self._query_executer)
        self.set_text_field_columns(['client_name', 'salesperson_name',
                                     'identifier_str'])
        self.status_filter = ComboSearchFilter(_(u"Show orders"),
                                               self._get_status_values())
        self.add_filter(self.status_filter, position=SearchFilterPosition.TOP,
                        columns=['status'])

    def get_columns(self):
        return [IdentifierColumn('identifier', title=_('Sale #'),
                                 sorted=True),
                Column('status_name', title=_(u'Status'), data_type=str,
                       visible=True),
                SearchColumn('open_date', title=_('Date Started'), width=110,
                             data_type=date, justify=gtk.JUSTIFY_RIGHT),
                SearchColumn('client_name', title=_('Client'),
                             data_type=str, expand=True,
                             ellipsize=pango.ELLIPSIZE_END),
                SearchColumn('salesperson_name', title=_('Salesperson'),
                             data_type=str, width=180,
                             ellipsize=pango.ELLIPSIZE_END),
                SearchColumn('total_quantity', title=_('Quantity'),
                             data_type=decimal.Decimal, width=100,
                             format_func=format_quantity),
                SearchColumn('total', title=_('Total'), data_type=currency,
                             width=100)]

    #
    # Private
    #

    def _query_executer(self, store):
        # We should only show Sales that
        # 1) In the current branch (FIXME: Should be on the same station.
        # See bug 4266)
        # 2) Are in the status QUOTE or ORDERED.
        # 3) For the order statuses, the date should be the same as today
        query = And(Sale.branch == self.current_branch,
                    Or(Sale.status == Sale.STATUS_QUOTE,
                       Sale.status == Sale.STATUS_ORDERED,
                       Date(Sale.open_date) == date.today()))
        return store.find(self.search_spec, query)

    def _setup_printer(self):
        """Creates the fiscal printer helper and hooks its signals so the
        UI tracks till and ECF state changes."""
        self._printer = FiscalPrinterHelper(self.store,
                                            parent=self)
        self._printer.connect('till-status-changed',
                              self._on_PrinterHelper__till_status_changed)
        self._printer.connect('ecf-changed',
                              self._on_PrinterHelper__ecf_changed)
        self._printer.setup_midnight_check()

    def _get_status_values(self):
        # Build (label, value) pairs for the status combo, with an
        # "Any" entry (value None) first.
        statuses = [(v, k) for k, v in Sale.statuses.items()]
        statuses.insert(0, (_('Any'), None))
        return statuses

    def _create_sale_payments(self, order_view):
        """Opens the payments editor for a sale in a fresh store."""
        store = api.new_store()
        sale = store.fetch(order_view.sale)
        retval = run_dialog(SalePaymentsEditor, self, store, sale)

        # Change the sale status to ORDERED
        if retval and sale.can_order():
            sale.order()

        if store.confirm(retval):
            self.refresh()
        store.close()

    def _confirm_order(self, order_view):
        """Confirms a sale: checks quote expiry and missing stock, opens a
        fiscal coupon, adds the items and finalizes the sale."""
        if self.check_open_inventory():
            return

        store = api.new_store()
        sale = store.fetch(order_view.sale)
        expire_date = sale.expire_date
        if (sale.status == Sale.STATUS_QUOTE and
            expire_date and expire_date.date() < date.today() and
            not yesno(_("This quote has expired. Confirm it anyway?"),
                      gtk.RESPONSE_YES,
                      _("Confirm quote"), _("Don't confirm"))):
            store.close()
            return

        missing = get_missing_items(sale, store)
        if missing:
            retval = run_dialog(MissingItemsDialog, self, sale, missing)
            if retval:
                self.refresh()
            store.close()
            return

        coupon = self._open_coupon()
        if not coupon:
            store.close()
            return
        subtotal = self._add_sale_items(sale, coupon)
        try:
            if coupon.confirm(sale, store, subtotal=subtotal):
                # Confirming the sale also closes its work orders.
                workorders = WorkOrder.find_by_sale(store, sale)
                for order in workorders:
                    order.close()
                store.commit()
                self.refresh()
            else:
                coupon.cancel()
        except SellError as err:
            warning(str(err))
        except ModelDataError as err:
            warning(str(err))

        store.close()

    def _open_coupon(self):
        # Retry opening the fiscal coupon until it succeeds or the user
        # gives up; returns None when aborted.
        coupon = self._printer.create_coupon()

        if coupon:
            while not coupon.open():
                if not yesno(_("Failed to open the fiscal coupon.\n"
                               "Until it is opened, it's not possible to "
                               "confirm the sale. Do you want to try again?"),
                             gtk.RESPONSE_YES, _("Try again"), _("Cancel coupon")):
                    return None

        return coupon

    def _add_sale_items(self, sale, coupon):
        """Adds each sale item to the coupon; returns the accumulated
        subtotal (price * quantity)."""
        subtotal = 0
        for sale_item in sale.get_items():
            coupon.add_item(sale_item)
            subtotal += sale_item.price * sale_item.quantity
        return subtotal

    def _update_total(self):
        balance = currency(self._get_till_balance())
        text = _(u"Total: %s") % converter.as_string(currency, balance)
        self.total_label.set_text(text)

    def _update_payment_total(self):
        balance = currency(self._get_total_paid_payment())
        text = _(u"Total payments: %s") % converter.as_string(currency, balance)
        self.total_payment_label.set_text(text)

    def _get_total_paid_payment(self):
        """Returns the total of payments of the day"""
        payments = self.store.find(Payment,
                                   Date(Payment.paid_date) == localtoday())
        return payments.sum(Payment.paid_value) or 0

    def _get_till_balance(self):
        """Returns the balance of till operations"""
        try:
            till = Till.get_current(self.store)
        except TillError:
            till = None

        if till is None:
            return currency(0)

        return till.get_balance()

    def _setup_widgets(self):
        # SearchSale is here because it's possible to return a sale inside it
        self._inventory_widgets = [self.Confirm, self.SearchSale,
                                   self.Return]
        self.register_sensitive_group(self._inventory_widgets,
                                      lambda: not self.has_open_inventory())

        self.total_label.set_size('xx-large')
        self.total_label.set_bold(True)

        if not sysparam.get_bool('SHOW_TOTAL_PAYMENTS_ON_TILL'):
            self.total_payment_label.hide()
        else:
            self.total_payment_label.set_size('large')
            self.total_payment_label.set_bold(True)
            self.total_label.set_size('large')

        self.small_status.set_size('xx-large')
        self.small_status.set_bold(True)

    def _update_toolbar_buttons(self):
        sale_view = self.results.get_selected()
        if sale_view:
            can_confirm = sale_view.can_confirm()
            # when confirming sales in till, we also might want to cancel
            # sales
            can_return = (sale_view.can_return() or
                          sale_view.can_cancel())
        else:
            can_confirm = can_return = False

        self.set_sensitive([self.Details], bool(sale_view))
        self.set_sensitive([self.Confirm], can_confirm)
        self.set_sensitive([self.Return], can_return)

    def _check_selected(self):
        # Programming-error guard: callers must only invoke this when a
        # row is selected.
        sale_view = self.results.get_selected()
        if not sale_view:
            raise StoqlibError("You should have a selected item at "
                               "this point")
        return sale_view

    def _run_search_dialog(self, dialog_type, **kwargs):
        store = api.new_store()
        self.run_dialog(dialog_type, store, **kwargs)
        store.close()

    def _run_details_dialog(self):
        sale_view = self._check_selected()
        run_dialog(SaleDetailsDialog, self, self.store, sale_view)

    def _run_add_cash_dialog(self):
        with api.new_store() as store:
            try:
                run_dialog(CashInEditor, self, store)
            except TillError as err:
                # Inform the error to the user instead of crashing
                warning(str(err))
                return

        if store.committed:
            self._update_total()

    def _return_sale(self):
        if self.check_open_inventory():
            return

        sale_view = self._check_selected()

        with api.new_store() as store:
            return_sale(self.get_toplevel(), store.fetch(sale_view.sale), store)

        if store.committed:
            self._update_total()
            self.refresh()

    def _update_ecf(self, has_ecf):
        # If we have an ecf, let the other events decide what to disable.
        if has_ecf:
            return

        # We dont have an ecf. Disable till related operations
        widgets = [self.TillOpen, self.TillClose, self.TillVerify, self.TillAddCash,
                   self.TillRemoveCash, self.SearchTillHistory, self.app_vbox,
                   self.Confirm, self.Return, self.Details]
        self.set_sensitive(widgets, has_ecf)
        text = _(u"Till operations requires a connected fiscal printer")
        self.small_status.set_text(text)

    def _update_till_status(self, closed, blocked):
        # Three different situations:
        #
        # - Till is closed
        # - Till is opened
        # - Till was not closed the previous fiscal day (blocked)

        self.set_sensitive([self.TillOpen], closed)
        self.set_sensitive([self.TillClose], not closed or blocked)
        widgets = [self.TillVerify, self.TillAddCash, self.TillRemoveCash,
                   self.SearchTillHistory, self.search_holder, self.PaymentReceive]
        self.set_sensitive(widgets, not closed and not blocked)

        def large(s):
            # Helper: wrap the (escaped) string in bold xx-large markup.
            return '<span weight="bold" size="xx-large">%s</span>' % (
                api.escape(s), )

        if closed:
            text = large(_(u"Till closed"))
            self.search_holder.hide()
            self.footer_hbox.hide()
            self.large_status.show()
            self.clear()
            self.setup_focus()
            # Adding the label on footer without the link
            self.small_status.set_text(text)

            if not blocked:
                text += '\n\n<span size="large"><a href="open-till">%s</a></span>' % (
                    api.escape(_('Open till')))
            self.status_link.set_markup(text)
        elif blocked:
            self.search_holder.hide()
            self.footer_hbox.hide()
            text = large(_(u"Till blocked"))
            self.status_link.set_markup(text)
            self.small_status.set_text(text)
        else:
            self.search_holder.show()
            self.footer_hbox.show()
            self.large_status.hide()
            till = Till.get_current(self.store)
            text = _(u"Till opened on %s") % till.opening_date.strftime('%x')
            self.small_status.set_text(text)

        self._update_toolbar_buttons()
        self._update_total()
        if sysparam.get_bool('SHOW_TOTAL_PAYMENTS_ON_TILL'):
            self._update_payment_total()

    #
    # Callbacks
    #

    def on_Confirm__activate(self, action):
        selected = self.results.get_selected()
        # If there are unfinished workorders associated with the sale, we
        # cannot print the coupon yet. Instead lets just create the payments.
        workorders = WorkOrder.find_by_sale(self.store, selected.sale)
        if not all(wo.can_close() for wo in workorders):
            self._create_sale_payments(selected)
        else:
            self._confirm_order(selected)
        self._update_total()

    def on_results__double_click(self, results, sale):
        self._run_details_dialog()

    def on_results__selection_changed(self, results, sale):
        self._update_toolbar_buttons()

    def on_results__has_rows(self, results, has_rows):
        self._update_total()

    def on_results__right_click(self, results, result, event):
        self.popup.popup(None, None, None, event.button, event.time)

    def on_Details__activate(self, action):
        self._run_details_dialog()

    def on_Return__activate(self, action):
        self._return_sale()

    def on_status_link__activate_link(self, button, link):
        if link == 'open-till':
            self._printer.open_till()
        # Returning True stops further handling of the link activation.
        return True

    def _on_PrinterHelper__till_status_changed(self, printer, closed, blocked):
        self._update_till_status(closed, blocked)

    def _on_PrinterHelper__ecf_changed(self, printer, ecf):
        self._update_ecf(ecf)

    def on_PaymentReceive__activate(self, action):
        self.run_dialog(PaymentReceivingSearch, self.store)

    # Till

    def on_TillVerify__activate(self, button):
        self._printer.verify_till()

    def on_TillClose__activate(self, button):
        self._printer.close_till()

    def on_TillOpen__activate(self, button):
        self._printer.open_till()

    def on_TillAddCash__activate(self, action):
        self._run_add_cash_dialog()

    def on_TillRemoveCash__activate(self, action):
        with api.new_store() as store:
            run_dialog(CashOutEditor, self, store)

        if store.committed:
            self._update_total()

    def on_TillDailyMovement__activate(self, button):
        self.run_dialog(TillDailyMovementDialog, self.store)

    # Search

    def on_SearchClient__activate(self, action):
        self._run_search_dialog(ClientSearch, hide_footer=True)

    def on_SearchSale__activate(self, action):
        if self.check_open_inventory():
            return

        self._run_search_dialog(SaleWithToolbarSearch)
        self.refresh()

    def on_SearchCardPayment__activate(self, action):
        self.run_dialog(CardPaymentSearch, self.store)

    def on_SearchSoldItemsByBranch__activate(self, button):
        self._run_search_dialog(SoldItemsByBranchSearch)

    def on_SearchTillHistory__activate(self, button):
        self.run_dialog(TillHistoryDialog, self.store)

    def on_SearchFiscalTillOperations__activate(self, button):
        self._run_search_dialog(TillFiscalOperationsSearch)

    def on_SearchClosedTill__activate(self, button):
        self._run_search_dialog(TillClosedSearch)
| tiagocardosos/stoq | stoq/gui/till.py | Python | gpl-2.0 | 22,922 | [
"VisIt"
] | ebeddaea94e61bbeec1264f36d4c73e40150e7fdfc9893f78693999cde282c07 |
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2016 Dominik Kriegner <dominik.kriegner@gmail.com>
from multiprocessing import freeze_support
import matplotlib.pyplot as plt
import numpy
from mpl_toolkits.axes_grid1 import make_axes_locatable
import xrayutilities as xu
def main():
"""dummy main function to enable multiprocessing on windows"""
cryst_size = 40e-9 # meter
# create Fe BCC (space group nr. 229 Im3m) with a = 2.87 angstrom although
# this is already predefined as xu.materials.Fe we will repeat here for
# educational purposes
FeBCC = xu.materials.Crystal(
"Fe", xu.materials.SGLattice(229, 2.87,
atoms=[xu.materials.elements.Fe, ],
pos=['2a', ]))
print("Creating Fe powder ...")
Fe_powder = xu.simpack.Powder(FeBCC, 1, crystallite_size_gauss=cryst_size)
pm = xu.simpack.PowderModel(Fe_powder)
tt = numpy.arange(5, 120, 0.01)
inte = pm.simulate(tt)
print(pm)
# # to create a mixed powder sample one would use
# Co_powder = xu.simpack.Powder(xu.materials.Co, 5) # 5 times more Co
# pmix = xu.simpack.PowderModel(Fe_powder + Co_powder, I0=100)
# inte = pmix.simulate(tt)
# pmix.close() # after end-of-use for cleanup
ax = pm.plot(tt)
ax.set_xlim(5, 120)
pm.close()
if __name__ == '__main__':
freeze_support()
main()
| dkriegner/xrayutilities | examples/xrayutilities_experiment_Powder_example_Iron.py | Python | gpl-2.0 | 2,058 | [
"CRYSTAL"
] | 04a42ab36eef0fa4a340e3fffd8f25936bebf42462f64f79d26805e958dd3717 |
# -*- coding: utf-8 -*-
import numpy as np
from iminuit import Minuit
from probfit import AddPdf, BinnedLH, Extended, gen_toy
from probfit.pdf import HistogramPdf
bound = (0, 10)
np.random.seed(0)
bkg = gen_toy(lambda x: x ** 2, 100000, bound=bound) # a parabola background
sig = np.random.randn(50000) + 5 # a Gaussian signal
data = np.concatenate([sig, bkg])
# fill histograms with large statistics
hsig, be = np.histogram(sig, bins=40, range=bound)
hbkg, be = np.histogram(bkg, bins=be, range=bound)
# randomize data
data = np.random.permutation(data)
fitdata = data[:1000]
psig = HistogramPdf(hsig, be)
pbkg = HistogramPdf(hbkg, be)
epsig = Extended(psig, extname="N1")
epbkg = Extended(pbkg, extname="N2")
pdf = AddPdf(epbkg, epsig)
blh = BinnedLH(pdf, fitdata, bins=40, bound=bound, extended=True)
m = Minuit(blh, N1=330, N2=670, error_N1=20, error_N2=30)
# m.migrad()
blh.draw(m, parts=True)
| iminuit/probfit | doc/pyplots/pdf/histogrampdf.py | Python | mit | 905 | [
"Gaussian"
] | 3e685d021d4c2c5fd39e3885b13482d910ce24af691b0d615e10153002e3ec2d |
import os, time
import numpy as np
from desolvationGrid_util import *
# Most of the heavy lifting is done by these Cython routines
# from desolvationGrid_util:
# enumerate_SAS_points
# set_inside_sphere_to
# increment_inside_sphere
# decrement_inside_sphere
# fraction_r4inv_low_dielectric
# calc_desolvationGrid
def goldenSectionSpiral(n):
golden_angle = np.pi * (3 - np.sqrt(5))
theta = golden_angle * np.arange(n)
z = np.linspace(1 - 1.0 / n, 1.0 / n - 1, n)
radius = np.sqrt(1 - z * z)
unit_sphere_pts = np.transpose(np.vstack(\
[radius * np.cos(theta), radius * np.sin(theta), z]))
return unit_sphere_pts
class desolvationGridCalculation:
def __init__(self, **kwargs):
### Parse parameters
self.FNs = {'prmtop':kwargs['prmtop_FN'], \
'inpcrd':kwargs['inpcrd_FN'], \
'header':kwargs['header_FN'], \
'grid':'desolv.dx' if kwargs['grid_FN'] is None else kwargs['grid_FN']}
if 'counts' in kwargs.keys():
counts = kwargs['counts']
if 'spacing' in kwargs.keys():
spacing = kwargs['spacing']
# Check that input files are available
for FN in [self.FNs['prmtop'],self.FNs['inpcrd']]:
if not os.path.exists(FN):
raise Exception(FN+' missing!')
# Check that output directories are available
for FN in [self.FNs['grid']]:
dirN = os.path.dirname(FN)
if dirN!='' and (not os.path.isdir(dirN)):
os.system('mkdir -p '+os.path.dirname(FN))
### Read header from dx file
header = {}
if (self.FNs['header'] is not None) and os.path.isfile(self.FNs['header']):
print 'Reading header from '+self.FNs['header']
headerF = open(self.FNs['header'],'r')
headerData = headerF.read()
headerF.close()
headerLines = headerData.split('\n')
if counts is None:
counts = np.array([int(x) for x in headerLines.pop(0).split(' ')[-3:]])
for name in ['origin','d0','d1','d2']:
header[name] = [float(x) for x in headerLines.pop(0).split(' ')[-3:]]
if spacing is None:
spacing = np.array([header['d0'][0], header['d1'][1], header['d2'][2]])
del headerF, headerLines
### Loads coordinates
import AlGDock.IO
IO_crd = AlGDock.IO.crd()
self.crd = IO_crd.read(self.FNs['inpcrd'])
### Load Lennard-Jones radii
IO_prmtop = AlGDock.IO.prmtop()
prmtop = IO_prmtop.read(self.FNs['prmtop'], \
varnames=['POINTERS','CHARGE','ATOM_TYPE_INDEX','NONBONDED_PARM_INDEX',\
'LENNARD_JONES_ACOEF','LENNARD_JONES_BCOEF'])
prmtop['CHARGE'] = prmtop['CHARGE']/18.2223 # Convert to units of electric charge
NATOM = prmtop['POINTERS'][0]
NTYPES = prmtop['POINTERS'][1]
# Extract Lennard-Jones well depth and radii for each atom type
LJ_radius = np.zeros(shape=(NTYPES), dtype=float)
LJ_depth = np.zeros(shape=(NTYPES), dtype=float)
for i in range(NTYPES):
LJ_index = prmtop['NONBONDED_PARM_INDEX'][NTYPES*i+i]-1
if prmtop['LENNARD_JONES_ACOEF'][LJ_index]<1.0e-6:
LJ_radius[i] = 0
LJ_depth[i] = 0
else:
factor = 2 * prmtop['LENNARD_JONES_ACOEF'][LJ_index] / prmtop['LENNARD_JONES_BCOEF'][LJ_index]
LJ_radius[i] = pow(factor, 1.0/6.0) * 0.5
LJ_depth[i] = prmtop['LENNARD_JONES_BCOEF'][LJ_index] / 2 / factor
# Lennard Jones and SAS radii per atom
LJ_r = np.array([LJ_radius[prmtop['ATOM_TYPE_INDEX'][atom_index]-1] \
for atom_index in range(NATOM)])
self.LJ_r2 = LJ_r*LJ_r
self.SAS_r = LJ_r + kwargs['probe_radius']
self.unit_sphere_pts = goldenSectionSpiral(kwargs['SAS_points'])
# Outputs files and parameters
print '*** Files and parameters ***'
print 'Input AMBER prmtop :\t' + self.FNs['prmtop']
print 'Input AMBER inpcrd :\t' + self.FNs['inpcrd']
if self.FNs['header'] is not None:
print 'Input grid header file :\t' + self.FNs['header']
print 'Output grid :\t' + self.FNs['grid']
print 'Grid spacing :\t', spacing
print 'Grid counts :\t', counts
print
kwargs['counts'] = counts
kwargs['spacing'] = spacing
self.kwargs = kwargs
def calc_receptor_SAS_points(self):
print 'Finding receptor SAS points'
startTime = time.time()
self.receptor_SAS_points = enumerate_SAS_points(self.crd, self.crd, \
self.unit_sphere_pts, self.SAS_r, self.LJ_r2)
endTime = time.time()
print ' in %3.2f s'%(endTime-startTime)
def calc_receptor_MS(self):
print 'Determining the number of SAS points marking each grid point'
startTime = time.time()
self.receptor_MS_grid = np.ones(shape=tuple(self.kwargs['counts']), \
dtype=np.int)
# Tentatively assign the grid inside the SAS to low dielectric
for atom_index in range(len(self.SAS_r)):
set_inside_sphere_to(self.receptor_MS_grid, self.kwargs['spacing'], \
self.kwargs['counts'],
self.crd[atom_index,0], self.crd[atom_index,1], self.crd[atom_index,2], \
self.SAS_r[atom_index], 0)
# Determine number of SAS points marking each grid point
for SAS_point in self.receptor_SAS_points:
increment_inside_sphere(self.receptor_MS_grid, self.kwargs['spacing'], \
self.kwargs['counts'], SAS_point[0], SAS_point[1], SAS_point[2], \
self.kwargs['probe_radius'])
endTime = time.time()
print ' in %3.2f s'%(endTime-startTime)
def save_receptor_MS(self):
import AlGDock.IO
IO_Grid = AlGDock.IO.Grid()
print 'Writing grid output'
IO_Grid.write('receptor_MS.dx', \
{'origin':np.array([0., 0., 0.]), 'spacing':self.kwargs['spacing'], 'counts':self.kwargs['counts'], \
'vals':self.receptor_MS_grid.flatten()})
def calc_desolvationGrid(self):
print 'Calculating and saving desolvation grid'
startTime = time.time()
SAS_r = self.kwargs['ligand_atom_radius'] + self.kwargs['probe_radius']
SAS_sphere_pts = SAS_r*self.unit_sphere_pts
self.desolvationGrid = calc_desolvationGrid(self.receptor_MS_grid, \
self.kwargs['spacing'], self.kwargs['counts'], \
self.receptor_SAS_points, self.crd, \
SAS_sphere_pts, self.LJ_r2, max(np.sqrt(self.LJ_r2)), \
self.kwargs['ligand_atom_radius'], \
self.kwargs['probe_radius'], self.kwargs['integration_cutoff'])
import AlGDock.IO
IO_Grid = AlGDock.IO.Grid()
print 'Writing grid output'
IO_Grid.write(self.FNs['grid'], \
{'origin':np.array([0., 0., 0.]), \
'spacing':self.kwargs['spacing'], \
'counts':self.kwargs['counts'], \
'vals':self.desolvationGrid.flatten()})
endTime = time.time()
print ' in %3.2f s'%(endTime-startTime)
if __name__ == '__main__':
AstexDiv_Dir = '/Users/dminh/clusters/CCB/AstexDiv_xtal'
import argparse
parser = argparse.ArgumentParser(description='Calculates a desolvation grids')
parser.add_argument('--prmtop_FN', \
default = AstexDiv_Dir + '/1-build/1tow/receptor.prmtop', \
help='Input AMBER PRMTOP file')
parser.add_argument('--inpcrd_FN', \
default = AstexDiv_Dir + '/3-grids/1tow/receptor.trans.inpcrd', \
help='Input coordinates')
parser.add_argument('--header_FN', \
default = AstexDiv_Dir + '/3-grids/1tow/header_coarse.dx', \
help='Input grid header (optional)')
parser.add_argument('--grid_FN', \
default='desolv.dx', \
help='Output for desolvation grid')
parser.add_argument('--probe_radius', default=1.4, \
help='Radius of the solvent probe, in A')
parser.add_argument('--ligand_atom_radius', default=1.4, \
help='Radius of the ligand atom, in A')
parser.add_argument('--SAS_points', default=1000, \
help='Number of points on solvent accessible surface per receptor atom')
parser.add_argument('--integration_cutoff', default=10,
help='Numerical integration cutoff, in A')
parser.add_argument('--spacing', nargs=3, type=float, \
help='Grid spacing (overrides header)')
parser.add_argument('--counts', nargs=3, type=int, \
help='Number of point in each direction (overrides header)')
parser.add_argument('-f')
args = parser.parse_args()
self = desolvationGridCalculation(**vars(args))
self.calc_receptor_SAS_points()
self.calc_receptor_MS()
self.calc_desolvationGrid()
| CCBatIIT/AlGDock | Pipeline/desolvationGrid/desolvationGrid.py | Python | mit | 8,261 | [
"Amber"
] | d2be837d7ed66d0315d8574b5a2d405372515a30b3efaa6b42c573c6c62ac26b |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the base class for plugins.
"""
class Plugin:
"""
This class serves as a base class for all plugins that can be registered
with the plugin manager
"""
def __init__(self, name, description, module_name):
"""
:param name: A friendly name to call this plugin.
Example: "GEDCOM Import"
:type name: string
:param description: A short description of the plugin.
Example: "This plugin will import a GEDCOM file into a database"
:type description: string
:param module_name: The name of the module that contains this plugin.
Example: "gedcom"
:type module_name: string
:return: nothing
"""
self.__name = name
self.__desc = description
self.__mod_name = module_name
def get_name(self):
"""
Get the name of this plugin.
:return: a string representing the name of the plugin
"""
return self.__name
def get_description(self):
"""
Get the description of this plugin.
:return: a string that describes the plugin
"""
return self.__desc
def get_module_name(self):
"""
Get the name of the module that this plugin lives in.
:return: a string representing the name of the module for this plugin
"""
return self.__mod_name
| SNoiraud/gramps | gramps/gen/plug/_plugin.py | Python | gpl-2.0 | 2,269 | [
"Brian"
] | 2873bee23e967a0edea4aca038ada60eaf29dbf421247225c7ad5cbaf6b8b66e |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for ccresponse distributed property calculations.
Defines functions for interacting with the database created by the run_XXX
driver function.
Properties that are able to use this module should be added to
the registered_props dictionary.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import collections
from psi4 import core
from psi4.driver import p4util
def generate_inputs(db,name):
"""
Generates the input files in each sub-directory of the
distributed finite differences property calculation.
name: ( string ) method name passed to calling driver,
db: (database) The database object associated with this property
calculation. On exit this db['inputs_generated'] has been set True
Returns: nothing
Throws: Exception if the number of atomic displacements is not correct.
"""
molecule = core.get_active_molecule()
natom = molecule.natom()
# get list of displacements
displacement_geoms = core.atomic_displacements(molecule)
# Sanity Check
# there should be 3 cords * natoms *2 directions (+/-)
if not (6 * natom) == len(displacement_geoms):
raise Exception('The number of atomic displacements should be 6 times'
' the number of atoms!')
displacement_names = db['job_status'].keys()
for n, entry in enumerate(displacement_names):
if not os.path.exists(entry):
os.makedirs(entry)
# Setup up input file string
inp_template = 'molecule {molname}_{disp}'
inp_template += ' {{\n{molecule_info}\n}}\n{options}\n{jobspec}\n'
molecule.set_geometry(displacement_geoms[n])
molecule.fix_orientation(True)
molecule.fix_com(True)
inputfile = open('{0}/input.dat'.format(entry), 'w')
inputfile.write("# This is a psi4 input file auto-generated for"
"computing properties by finite differences.\n\n")
inputfile.write(
inp_template.format(
molname=molecule.name(),
disp=entry,
molecule_info=molecule.create_psi4_string_from_molecule(),
options=p4util.format_options_for_input(),
jobspec=db['prop_cmd']))
inputfile.close()
db['inputs_generated'] = True
# END generate_inputs
def initialize_database(database, name, prop, properties_array, additional_kwargs=None):
"""
Initialize the database for computation of some property
using distributed finite differences driver
database: (database) the database object passed from the caller
name: (string) name as passed to calling driver
prop: (string) the property being computed, used to add xxx_computed flag
to database
prop_array: (list of strings) properties to go in
properties kwarg of the property() cmd in each sub-dir
additional_kwargs: (list of strings) *optional*
any additional kwargs that should go in the call to the
property() driver method in each subdir
Returns: nothing
Throws: nothing
"""
database['inputs_generated'] = False
database['jobs_complete'] = False
prop_cmd ="property('{0}',".format(name)
prop_cmd += "properties=[ '{}' ".format(properties_array[0])
if len(properties_array) > 1:
for element in properties_array[1:]:
prop_cmd += ",'{}'".format(element)
prop_cmd += "]"
if additional_kwargs is not None:
for arg in additional_kwargs:
prop_cmd += ", {}".format(arg)
prop_cmd += ")"
database['prop_cmd'] = prop_cmd
database['job_status'] = collections.OrderedDict()
# Populate the job_status dict
molecule = core.get_active_molecule()
natom = molecule.natom()
coordinates = ['x', 'y', 'z']
#step_direction = ['p', 'm'] changing due to change in findif atomic_displacements
step_direction = ['m', 'p']
for atom in range(1, natom + 1):
for coord in coordinates:
for step in step_direction:
job_name = '{}_{}_{}'.format(atom, coord, step)
database['job_status'].update({job_name: 'not_started'})
database['{}_computed'.format(prop)] = False
# END initialize_database()
def stat(db):
"""
Checks displacement sub_directories for the status of each
displacement computation
db: (database) the database storing information for this distributed
property calculation
Returns: nothing
Throws: nothing
"""
n_finished = 0
for job, status in db['job_status'].items():
if status == 'finished':
n_finished += 1
elif status in ('not_started', 'running'):
try:
with open("{}/output.dat".format(job)) as outfile:
outfile.seek(-150, 2)
for line in outfile:
if 'Psi4 exiting successfully' in line:
db['job_status'][job] = 'finished'
n_finished += 1
break
else:
db['job_status'][job] = 'running'
except:
pass
# check all jobs done?
if n_finished == len(db['job_status'].keys()):
db['jobs_complete'] = True
# END stat()
| ashutoshvt/psi4 | psi4/driver/procrouting/findif_response_utils/db_helper.py | Python | lgpl-3.0 | 6,311 | [
"Psi4"
] | a3121fa659d41ee3a3e424dbe19a4b1588264cebca67816f35d1241f0c765e1c |
import numpy as np
import MDAnalysis as mda
from MDAnalysis.coordinates.memory import MemoryReader
from MDAnalysisTests.datafiles import DCD, PSF
from MDAnalysisTests.coordinates.base import (BaseReference,
MultiframeReaderTest)
from MDAnalysis.coordinates.memory import Timestep
from numpy.testing import assert_equal, dec
from MDAnalysisTests import parser_not_found
class MemoryReference(BaseReference):
@dec.skipif(parser_not_found('DCD'),
'DCD parser not available. Are you using python 3?')
def __init__(self):
super(MemoryReference, self).__init__()
self.topology = PSF
self.trajectory = DCD
self.universe = mda.Universe(PSF, DCD)
self.n_atoms = self.universe.trajectory.n_atoms
self.n_frames = self.universe.trajectory.n_frames
self.dt = self.universe.trajectory.ts.dt
self.dimensions = self.universe.trajectory.ts.dimensions
self.totaltime = self.universe.trajectory.totaltime
self.volume = self.universe.trajectory.ts.volume
self.first_frame = Timestep(self.n_atoms)
self.first_frame.positions = np.array(self.universe.trajectory[0])
self.first_frame.frame = 0
self.first_frame.time = self.first_frame.frame*self.dt
self.second_frame = Timestep(self.n_atoms)
self.second_frame.positions = np.array(self.universe.trajectory[1])
self.second_frame.frame = 1
self.second_frame.time = self.second_frame.frame*self.dt
self.last_frame = Timestep(self.n_atoms)
self.last_frame.positions = \
np.array(self.universe.trajectory[self.n_frames - 1])
self.last_frame.frame = self.n_frames - 1
self.last_frame.time = self.last_frame.frame*self.dt
self.jump_to_frame = self.first_frame.copy()
self.jump_to_frame.positions = np.array(self.universe.trajectory[3])
self.jump_to_frame.frame = 3
self.jump_to_frame.time = self.jump_to_frame.frame*self.dt
def reader(self, trajectory):
return mda.Universe(self.topology,
trajectory, in_memory=True).trajectory
def iter_ts(self, i):
ts = self.universe.trajectory[i]
return ts
class TestMemoryReader(MultiframeReaderTest):
def __init__(self):
reference = MemoryReference()
super(TestMemoryReader, self).__init__(reference)
def test_filename_transefer_to_memory(self):
# MemoryReader should have a filename attribute set to the trajaectory filename
universe = mda.Universe(PSF, DCD)
universe.transfer_to_memory()
assert_equal(universe.trajectory.filename, DCD)
def test_filename_array(self):
# filename attribute of MemoryReader should be None when generated from an array
universe = mda.Universe(PSF, DCD)
coordinates = universe.trajectory.timeseries(universe.atoms)
universe2 = mda.Universe(PSF, coordinates, format=MemoryReader, order='afc')
assert_equal(universe2.trajectory.filename, None)
def test_default_memory_layout(self):
universe1 = mda.Universe(PSF, DCD, in_memory=True)
universe2 = mda.Universe(PSF, DCD, in_memory=True, order='fac')
assert_equal(universe1.trajectory.get_array().shape,
universe2.trajectory.get_array().shape)
def test_iteration(self):
frames = 0
for i, frame in enumerate(self.reader):
frames += 1
assert_equal(frames, self.ref.n_frames)
def test_extract_array_afc(self):
assert_equal(self.reader.timeseries(format='afc').shape, (3341, 98, 3))
def test_extract_array_fac(self):
assert_equal(self.reader.timeseries(format='fac').shape, (98, 3341, 3))
def test_extract_array_cfa(self):
assert_equal(self.reader.timeseries(format='cfa').shape, (3, 98, 3341))
def test_extract_array_acf(self):
assert_equal(self.reader.timeseries(format='acf').shape, (3341, 3, 98))
def test_extract_array_fca(self):
assert_equal(self.reader.timeseries(format='fca').shape, (98, 3, 3341))
def test_extract_array_caf(self):
assert_equal(self.reader.timeseries(format='caf').shape, (3, 3341, 98))
def test_timeseries_skip1(self):
assert_equal(self.reader.timeseries(self.ref.universe.atoms).shape,
(3341, 98, 3))
def test_timeseries_skip10(self):
# Check that timeseries skip works similar to numpy slicing
array1 = self.reader.timeseries(step=10)
array2 = self.reader.timeseries()[:,::10,:]
assert_equal(array1, array2)
def test_timeseries_view(self):
# timeseries() is expected to provide a view of the underlying array
assert_equal(self.reader.timeseries().base is self.reader.get_array(),
True)
def test_timeseries_subarray_view(self):
# timeseries() is expected to provide a view of the underlying array
# also in the case where we slice the array using the start, stop and
# step options.
assert_equal(
self.reader.timeseries(start=5,
stop=15,
step=2,
format='fac').base is self.reader.get_array(),
True)
def test_timeseries_view_from_universe_atoms(self):
# timeseries() is expected to provide a view of the underlying array
# also in the special case when asel=universe.atoms.
selection = self.ref.universe.atoms
assert_equal(self.reader.timeseries(
asel=selection).base is self.reader.get_array(),
True)
def test_timeseries_view_from_select_all(self):
# timeseries() is expected to provide a view of the underlying array
# also in the special case when using "all" in selections.
selection = self.ref.universe.select_atoms("all")
assert_equal(self.reader.timeseries(
asel=selection).base is self.reader.get_array(),
True)
def test_timeseries_noview(self):
# timeseries() is expected NOT to provide a view of the underlying array
# for any other selection than "all".
selection = self.ref.universe.select_atoms("name CA")
assert_equal(self.reader.timeseries(
asel=selection).base is self.reader.get_array(),
False)
def test_repr(self):
str_rep = str(self.reader)
expected = "<MemoryReader with 98 frames of 3341 atoms>"
assert_equal(str_rep, expected)
def test_get_writer_1(self):
pass
def test_get_writer_2(self):
pass
def test_float32(self):
# Check that we get float32 positions even when initializing with float64
coordinates = np.random.uniform(size=(100, self.ref.universe.atoms.n_atoms, 3)).cumsum(0)
universe = mda.Universe(self.ref.universe.filename, coordinates, format=MemoryReader)
assert_equal(universe.trajectory.get_array().dtype, np.dtype('float32'))
| alejob/mdanalysis | testsuite/MDAnalysisTests/coordinates/test_memory.py | Python | gpl-2.0 | 7,133 | [
"MDAnalysis"
] | 4ed480e92b4012ac89879f4988662989d249688cd4a23e1c964867797607a807 |
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from __future__ import unicode_literals
import os
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.template import (TemplateDoesNotExist, TemplateSyntaxError,
Context, Template, loader)
import django.template.context
from django.test import Client, TestCase
from django.test.client import encode_file, RequestFactory
from django.test.utils import ContextList, override_settings, str_prefix
from django.template.response import SimpleTemplateResponse
from django.utils.translation import ugettext_lazy
from django.http import HttpResponse
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), 'templates'),)
)
class AssertContainsTests(TestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError as e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError as e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError as e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError as e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError as e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertContains(r, 'さかき')
self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertNotContains(r, 'はたけ')
self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
def test_nontext_contains(self):
r = self.client.get('/test_client_regress/no_template_view/')
self.assertContains(r, ugettext_lazy('once'))
def test_nontext_not_contains(self):
r = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(r, ugettext_lazy('never'))
def test_assert_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
response = SimpleTemplateResponse(Template('Hello'), status=200)
self.assertContains(response, 'Hello')
def test_assert_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertContains(response, 'Hello')
def test_assert_not_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
response = SimpleTemplateResponse(Template('Hello'), status=200)
self.assertNotContains(response, 'Bye')
def test_assert_not_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertNotContains(response, 'Bye')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AssertTemplateUsedTests(TestCase):
    """Verify the failure messages raised by assertTemplateUsed/NotUsed.

    Bug fix: each try/except AssertionError block now carries an ``else:
    self.fail(...)`` guard — previously, if the assertion under test did
    NOT raise, the test passed silently without checking anything.
    """
    fixtures = ['testdata.json']

    def test_no_context(self):
        "Template usage assertions work when templates aren't in use"
        response = self.client.get('/test_client_regress/no_template_view/')
        # Check that the no template case doesn't mess with the template assertions
        self.assertTemplateNotUsed(response, 'GET Template')
        try:
            self.assertTemplateUsed(response, 'GET Template')
        except AssertionError as e:
            self.assertIn("No templates used to render the response", str(e))
        else:
            self.fail('assertTemplateUsed should have raised AssertionError')
        try:
            self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: No templates used to render the response", str(e))
        else:
            self.fail('assertTemplateUsed should have raised AssertionError')

    def test_single_context(self):
        "Template assertions work when there is a single context"
        response = self.client.get('/test_client/post_view/', {})
        try:
            self.assertTemplateNotUsed(response, 'Empty GET Template')
        except AssertionError as e:
            self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
        else:
            self.fail('assertTemplateNotUsed should have raised AssertionError')
        try:
            self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
        else:
            self.fail('assertTemplateNotUsed should have raised AssertionError')
        try:
            self.assertTemplateUsed(response, 'Empty POST Template')
        except AssertionError as e:
            self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
        else:
            self.fail('assertTemplateUsed should have raised AssertionError')
        try:
            self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
        else:
            self.fail('assertTemplateUsed should have raised AssertionError')

    def test_multiple_context(self):
        "Template assertions work when there are multiple contexts"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e'),
        }
        response = self.client.post('/test_client/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data OK')
        try:
            self.assertTemplateNotUsed(response, "form_view.html")
        except AssertionError as e:
            self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
        else:
            self.fail('assertTemplateNotUsed should have raised AssertionError')
        try:
            self.assertTemplateNotUsed(response, 'base.html')
        except AssertionError as e:
            self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
        else:
            self.fail('assertTemplateNotUsed should have raised AssertionError')
        try:
            self.assertTemplateUsed(response, "Valid POST Template")
        except AssertionError as e:
            self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
        else:
            self.fail('assertTemplateUsed should have raised AssertionError')
class AssertRedirectsTests(TestCase):
    """Verify assertRedirects failure messages and redirect-chain following.

    Bug fix: each try/except AssertionError block now carries an ``else:
    self.fail(...)`` guard — previously, if assertRedirects did NOT raise,
    the test passed silently without checking the failure message.
    """

    def test_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/permanent_redirect_view/')
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')

    def test_lost_query(self):
        "An assertion is raised if the redirect location doesn't preserve GET parameters"
        response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')

    def test_incorrect_target(self):
        "An assertion is raised if the response redirects to another target"
        response = self.client.get('/test_client/permanent_redirect_view/')
        try:
            # Should redirect to get_view
            self.assertRedirects(response, '/test_client/some_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')

    def test_target_page(self):
        "An assertion is raised if the response redirect target cannot be retrieved as expected"
        response = self.client.get('/test_client/double_redirect_view/')
        try:
            # The redirect target responds with a 301 code, not 200
            self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
        except AssertionError as e:
            self.assertIn("Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')
        try:
            # The redirect target responds with a 301 code, not 200
            self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')

    def test_redirect_chain(self):
        "You can follow a redirect chain of multiple redirects"
        response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/no_template_view/',
                             status_code=301, target_status_code=200)
        self.assertEqual(len(response.redirect_chain), 1)
        self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))

    def test_multiple_redirect_chain(self):
        "You can follow a redirect chain of multiple redirects"
        response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/no_template_view/',
                             status_code=301, target_status_code=200)
        # Every hop of the chain is recorded, in order.
        self.assertEqual(len(response.redirect_chain), 3)
        self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
        self.assertEqual(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
        self.assertEqual(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))

    def test_redirect_chain_to_non_existent(self):
        "You can follow a chain to a non-existent view"
        response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
        self.assertRedirects(response, '/test_client_regress/non_existent_view/',
                             status_code=301, target_status_code=404)

    def test_redirect_chain_to_self(self):
        "Redirections to self are caught and escaped"
        response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
        # The chain of redirects stops once the cycle is detected.
        self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
                             status_code=301, target_status_code=301)
        self.assertEqual(len(response.redirect_chain), 2)

    def test_circular_redirect(self):
        "Circular redirect chains are caught and escaped"
        response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
        # The chain of redirects will get back to the starting point, but stop there.
        self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
                             status_code=301, target_status_code=301)
        self.assertEqual(len(response.redirect_chain), 4)

    def test_redirect_chain_post(self):
        "A redirect chain will be followed from an initial POST post"
        response = self.client.post('/test_client_regress/redirects/',
                                    {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
                             '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_head(self):
        "A redirect chain will be followed from an initial HEAD request"
        response = self.client.head('/test_client_regress/redirects/',
                                    {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response,
                             '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_options(self):
        "A redirect chain will be followed from an initial OPTIONS request"
        response = self.client.options('/test_client_regress/redirects/',
                                       follow=True)
        self.assertRedirects(response,
                             '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_put(self):
        "A redirect chain will be followed from an initial PUT request"
        response = self.client.put('/test_client_regress/redirects/',
                                   follow=True)
        self.assertRedirects(response,
                             '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_delete(self):
        "A redirect chain will be followed from an initial DELETE request"
        response = self.client.delete('/test_client_regress/redirects/',
                                      follow=True)
        self.assertRedirects(response,
                             '/test_client_regress/no_template_view/', 301, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_to_different_host(self):
        "The test client will preserve scheme, host and port changes"
        response = self.client.get('/test_client_regress/redirect_other_host/', follow=True)
        self.assertRedirects(response,
                             'https://otherserver:8443/test_client_regress/no_template_view/',
                             status_code=301, target_status_code=200)
        # We can't use is_secure() or get_host()
        # because response.request is a dictionary, not an HttpRequest
        self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
        self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
        self.assertEqual(response.request.get('SERVER_PORT'), '8443')

    def test_redirect_chain_on_non_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/get_view/', follow=True)
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')

    def test_redirect_on_non_redirect_page(self):
        "An assertion is raised if the original page couldn't be retrieved as expected"
        # This page will redirect with code 301, not 302
        response = self.client.get('/test_client/get_view/')
        try:
            self.assertRedirects(response, '/test_client/get_view/')
        except AssertionError as e:
            self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')
        try:
            self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
        else:
            self.fail('assertRedirects should have raised AssertionError')
class AssertFormErrorTests(TestCase):
    """Verify the failure messages raised by assertFormError.

    Bug fix: each try/except AssertionError block now carries an ``else:
    self.fail(...)`` guard — previously, if assertFormError did NOT raise,
    the test passed silently without checking the failure message.
    """

    def test_unknown_form(self):
        "An assertion is raised if the form name is unknown"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e'),
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
        except AssertionError as e:
            self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')
        try:
            self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')

    def test_unknown_field(self):
        "An assertion is raised if the field name is unknown"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e'),
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', 'some_field', 'Some error.')
        except AssertionError as e:
            self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')
        try:
            self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')

    def test_noerror_field(self):
        "An assertion is raised if the field doesn't have any errors"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e'),
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', 'value', 'Some error.')
        except AssertionError as e:
            self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')
        try:
            self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')

    def test_unknown_error(self):
        "An assertion is raised if the field doesn't contain the provided error"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e'),
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', 'email', 'Some error.')
        except AssertionError as e:
            self.assertIn(str_prefix("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')
        try:
            self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn(str_prefix("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')

    def test_unknown_nonfield_error(self):
        """
        Checks that an assertion is raised if the form's non field errors
        doesn't contain the provided error.
        """
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b', 'c', 'e'),
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        try:
            self.assertFormError(response, 'form', None, 'Some error.')
        except AssertionError as e:
            self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')
        try:
            self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
        except AssertionError as e:
            self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
        else:
            self.fail('assertFormError should have raised AssertionError')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class LoginTests(TestCase):
    fixtures = ['testdata']

    def test_login_different_client(self):
        "Check that using a different test client doesn't violate authentication"
        # Log in through a second, independent client.
        other_client = Client()
        self.assertTrue(
            other_client.login(username='testclient', password='password'),
            'Could not log in')
        # Fetch a redirecting, login-protected page with that client.
        response = other_client.get("/test_client_regress/login_protected_redirect_view/")
        # self.client is still anonymous here; assertRedirects must use the
        # client that issued the original request, not the default one.
        self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    SESSION_ENGINE='regressiontests.test_client_regress.session'
)
class SessionEngineTests(TestCase):
    fixtures = ['testdata']

    def test_login(self):
        "A session engine that modifies the session key can be used to log in"
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        # A login-protected page must now be reachable with the logged-in user.
        response = self.client.get("/test_client/login_protected_view/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
class URLEscapingTests(TestCase):
    """GET and POST views whose URL arguments may require escaping."""

    def _check(self, method, arg, expected):
        # Shared helper: resolve arg_view with the given argument, request
        # it via the given client method, and verify status and payload.
        response = method(reverse('arg_view', args=[arg]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, expected)

    def test_simple_argument_get(self):
        "Get a view that has a simple string argument"
        self._check(self.client.get, 'Slartibartfast', b'Howdy, Slartibartfast')

    def test_argument_with_space_get(self):
        "Get a view that has a string argument that requires escaping"
        self._check(self.client.get, 'Arthur Dent', b'Hi, Arthur')

    def test_simple_argument_post(self):
        "Post for a view that has a simple string argument"
        self._check(self.client.post, 'Slartibartfast', b'Howdy, Slartibartfast')

    def test_argument_with_space_post(self):
        "Post for a view that has a string argument that requires escaping"
        self._check(self.client.post, 'Arthur Dent', b'Hi, Arthur')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ExceptionTests(TestCase):
    fixtures = ['testdata.json']

    def test_exception_cleared(self):
        "#5836 - A stale user exception isn't re-raised by the test client."
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        # A non-staff user must be rejected from the staff-only page.
        try:
            self.client.get("/test_client_regress/staff_only/")
            self.fail("General users should not be able to visit this page")
        except SuspiciousOperation:
            pass
        # The exception raised above should now be cleared; logging in as
        # staff and retrying must succeed rather than re-raise it.
        self.assertTrue(
            self.client.login(username='staff', password='password'),
            'Could not log in')
        try:
            self.client.get("/test_client_regress/staff_only/")
        except SuspiciousOperation:
            self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
    """Errors hit while locating/rendering templates surface to the caller."""

    def setUp(self):
        # Clear any cached template loaders so each test renders afresh.
        if loader.template_source_loaders is not None:
            for template_loader in loader.template_source_loaders:
                if hasattr(template_loader, 'reset'):
                    template_loader.reset()

    @override_settings(TEMPLATE_DIRS=(),)
    def test_no_404_template(self):
        "Missing templates are correctly reported by test client"
        try:
            self.client.get("/no_such_view/")
            self.fail("Should get error about missing template")
        except TemplateDoesNotExist:
            pass

    @override_settings(
        TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), 'bad_templates'),)
    )
    def test_bad_404_template(self):
        "Errors found when rendering 404 error templates are re-raised"
        try:
            self.client.get("/no_such_view/")
            self.fail("Should get error about syntax error in template")
        except TemplateSyntaxError:
            pass
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
class UrlconfSubstitutionTests(TestCase):
    # TestCase swaps in this URLconf for the duration of each test.
    urls = 'regressiontests.test_client_regress.urls'

    def test_urlconf_was_changed(self):
        "TestCase can enforce a custom URLconf on a per-test basis"
        self.assertEqual(reverse('arg_view', args=['somename']),
                         '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
    def test_urlconf_was_reverted(self):
        "URLconf is reverted to original value after modification in a TestCase"
        # The default URLconf must be back in effect after
        # UrlconfSubstitutionTests (which runs first alphabetically).
        self.assertEqual(reverse('arg_view', args=['somename']),
                         '/test_client_regress/arg_view/somename/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ContextTests(TestCase):
    # Tests for accessing template context data on test-client responses:
    # single-context lookup, multi-context (ContextList) lookup, and a
    # RequestContext-copying regression (refs #15368).
    fixtures = ['testdata']

    def test_single_context(self):
        "Context variables can be retrieved from a single context"
        response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
        # A single render yields a plain Context object.
        self.assertEqual(response.context.__class__, Context)
        self.assertTrue('get-foo' in response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'sausage')
        try:
            response.context['does-not-exist']
            self.fail('Should not be able to retrieve non-existent key')
        except KeyError as e:
            # The KeyError carries the missing key name.
            self.assertEqual(e.args[0], 'does-not-exist')

    def test_inherited_context(self):
        "Context variables can be retrieved from a list of contexts"
        response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'})
        # Multiple renders yield a ContextList wrapping every context.
        self.assertEqual(response.context.__class__, ContextList)
        self.assertEqual(len(response.context), 2)
        self.assertTrue('get-foo' in response.context)
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['data'], 'bacon')
        try:
            response.context['does-not-exist']
            self.fail('Should not be able to retrieve non-existent key')
        except KeyError as e:
            self.assertEqual(e.args[0], 'does-not-exist')

    def test_15368(self):
        # Need to insert a context processor that assumes certain things about
        # the request instance. This triggers a bug caused by some ways of
        # copying RequestContext.
        try:
            django.template.context._standard_context_processors = (lambda request: {'path': request.special_path},)
            response = self.client.get("/test_client_regress/request_context_view/")
            self.assertContains(response, 'Path: /test_client_regress/request_context_view/')
        finally:
            # Always restore the module-level processor list so later tests
            # see the default behavior.
            django.template.context._standard_context_processors = None
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SessionTests(TestCase):
    fixtures = ['testdata.json']

    def _session_flag(self):
        # Return the check_session view's YES/NO payload, asserting 200.
        response = self.client.get('/test_client_regress/check_session/')
        self.assertEqual(response.status_code, 200)
        return response.content

    def test_session(self):
        "The session isn't lost if a user logs in"
        # No session variable is set initially.
        self.assertEqual(self._session_flag(), b'NO')
        # This request sets a session variable.
        response = self.client.get('/test_client_regress/set_session/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'set_session')
        # The session has been modified.
        self.assertEqual(self._session_flag(), b'YES')
        # Logging in must not wipe the session.
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        self.assertEqual(self._session_flag(), b'YES')

    def test_logout(self):
        """Logout should work whether the user is logged in or not (#9978)."""
        self.client.logout()
        self.assertTrue(
            self.client.login(username='testclient', password='password'),
            'Could not log in')
        self.client.logout()
        # A second logout while already logged out must also be harmless.
        self.client.logout()
class RequestMethodTests(TestCase):
    """Each HTTP verb issued by the test client reaches the view intact."""

    def _request(self, verb):
        # Issue a request with the given client verb and assert it succeeds.
        response = getattr(self.client, verb)('/test_client_regress/request_methods/')
        self.assertEqual(response.status_code, 200)
        return response

    def test_get(self):
        "Request a view via request method GET"
        self.assertEqual(self._request('get').content, b'request method: GET')

    def test_post(self):
        "Request a view via request method POST"
        self.assertEqual(self._request('post').content, b'request method: POST')

    def test_head(self):
        "Request a view via request method HEAD"
        response = self._request('head')
        # A HEAD request doesn't return any content.
        self.assertNotEqual(response.content, b'request method: HEAD')
        self.assertEqual(response.content, b'')

    def test_options(self):
        "Request a view via request method OPTIONS"
        self.assertEqual(self._request('options').content, b'request method: OPTIONS')

    def test_put(self):
        "Request a view via request method PUT"
        self.assertEqual(self._request('put').content, b'request method: PUT')

    def test_delete(self):
        "Request a view via request method DELETE"
        self.assertEqual(self._request('delete').content, b'request method: DELETE')
class RequestMethodStringDataTests(TestCase):
    """POST/PUT with a raw string body and explicit content type (#11371)."""

    def test_post(self):
        "Request a view with string data via request method POST"
        # Regression test for #11371
        body = '{"test": "json"}'
        response = self.client.post('/test_client_regress/request_methods/', data=body, content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'request method: POST')

    def test_put(self):
        "Request a view with string data via request method PUT"
        # Regression test for #11371
        body = '{"test": "json"}'
        response = self.client.put('/test_client_regress/request_methods/', data=body, content_type='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'request method: PUT')
class QueryStringTests(TestCase):
    # Interaction between URL query strings and the data= argument for
    # GET-like (GET/HEAD) and POST-like requests.

    def test_get_like_requests(self):
        # See: https://code.djangoproject.com/ticket/10571.
        for method_name in ('get', 'head'):
            # A GET-like request can pass a query string as data
            method = getattr(self.client, method_name)
            response = method("/test_client_regress/request_data/", data={'foo':'whiz'})
            self.assertEqual(response.context['get-foo'], 'whiz')
            self.assertEqual(response.context['request-foo'], 'whiz')
            # A GET-like request can pass a query string as part of the URL
            response = method("/test_client_regress/request_data/?foo=whiz")
            self.assertEqual(response.context['get-foo'], 'whiz')
            self.assertEqual(response.context['request-foo'], 'whiz')
            # Data provided in the URL to a GET-like request is overridden by actual form data
            response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
            self.assertEqual(response.context['get-foo'], 'bang')
            self.assertEqual(response.context['request-foo'], 'bang')
            # With disjoint keys, data= completely replaces the URL query.
            response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
            self.assertEqual(response.context['get-foo'], None)
            self.assertEqual(response.context['get-bar'], 'bang')
            self.assertEqual(response.context['request-foo'], None)
            self.assertEqual(response.context['request-bar'], 'bang')

    def test_post_like_requests(self):
        # A POST-like request can pass a query string as data
        response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'})
        self.assertEqual(response.context['get-foo'], None)
        self.assertEqual(response.context['post-foo'], 'whiz')
        # A POST-like request can pass a query string as part of the URL
        response = self.client.post("/test_client_regress/request_data/?foo=whiz")
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['post-foo'], None)
        self.assertEqual(response.context['request-foo'], 'whiz')
        # POST data provided in the URL augments actual form data
        response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['post-foo'], 'bang')
        self.assertEqual(response.context['request-foo'], 'bang')
        # With disjoint keys, GET and POST views of the request stay separate.
        response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
        self.assertEqual(response.context['get-foo'], 'whiz')
        self.assertEqual(response.context['get-bar'], None)
        self.assertEqual(response.context['post-foo'], None)
        self.assertEqual(response.context['post-bar'], 'bang')
        self.assertEqual(response.context['request-foo'], 'whiz')
        self.assertEqual(response.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
    """JSON payloads in various encodings round-trip through the client."""

    def _post_json(self, payload, content_type):
        # POST the raw payload to the echo view and return the response.
        return self.client.post("/test_client_regress/parse_unicode_json/",
                                payload, content_type=content_type)

    def test_simple_unicode_payload(self):
        "A simple ASCII-only unicode JSON document can be POSTed"
        # Regression test for #10571
        json = '{"english": "mountain pass"}'
        response = self._post_json(json, "application/json")
        self.assertEqual(response.content, json.encode())

    def test_unicode_payload_utf8(self):
        "A non-ASCII unicode data encoded as UTF-8 can be POSTed"
        # Regression test for #10571
        json = '{"dog": "собака"}'
        response = self._post_json(json, "application/json; charset=utf-8")
        self.assertEqual(response.content, json.encode('utf-8'))

    def test_unicode_payload_utf16(self):
        "A non-ASCII unicode data encoded as UTF-16 can be POSTed"
        # Regression test for #10571
        json = '{"dog": "собака"}'
        response = self._post_json(json, "application/json; charset=utf-16")
        self.assertEqual(response.content, json.encode('utf-16'))

    def test_unicode_payload_non_utf(self):
        "A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
        # Regression test for #10571
        json = '{"dog": "собака"}'
        response = self._post_json(json, "application/json; charset=koi8-r")
        self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
    """Minimal file-like stand-in used by the upload-encoding tests."""

    def __init__(self, filename):
        # Only the name is consulted (for content-type guessing by callers).
        self.name = filename

    def read(self):
        # Fixed payload; callers only check that it appears in the encoding.
        return b'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
    # Tests for encode_file(), which serializes a file into the parts of a
    # multipart/form-data body (boundary line, Content-Disposition header,
    # Content-Type header, ..., file content).

    def test_file_encoding(self):
        encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
        # First part is the boundary marker, second the disposition header,
        # and the file payload comes last.
        self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0])
        self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
        self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1])

    def test_guesses_content_type_on_file_encoding(self):
        # The Content-Type header (index 2) is guessed from the filename.
        self.assertEqual(b'Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
        self.assertEqual(b'Content-Type: text/plain',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
        # .zip guesses vary by platform mime registry, so accept any of them.
        self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
            b'Content-Type: application/x-compress',
            b'Content-Type: application/x-zip',
            b'Content-Type: application/x-zip-compressed',
            b'Content-Type: application/zip',))
        # Unknown extensions fall back to the generic binary type.
        self.assertEqual(b'Content-Type: application/octet-stream',
                         encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
class RequestHeadersTest(TestCase):
    """Custom request headers reach the view, including across redirects."""

    def test_client_headers(self):
        "A test client can receive custom headers"
        response = self.client.get("/test_client_regress/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")

    def test_client_headers_redirect(self):
        "Test client headers are preserved through redirects"
        response = self.client.get("/test_client_regress/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
        self.assertRedirects(response, '/test_client_regress/check_headers/',
                             status_code=301, target_status_code=200)
        self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
class ReadLimitedStreamTest(TestCase):
    """
    Tests that ensure that HttpRequest.body, HttpRequest.read() and
    HttpRequest.read(BUFFER) have proper LimitedStream behavior.
    Refs #14753, #15785
    """

    def _get_content(self, path):
        # GET the given echo view and return the response body.
        return self.client.get(path).content

    def _put_content(self, path, payload):
        # PUT the payload to the given echo view and return the body.
        return self.client.put(path, data=payload,
                               content_type='text/plain').content

    def test_body_from_empty_request(self):
        """HttpRequest.body on a test client GET request should return
        the empty string."""
        self.assertEqual(self._get_content("/test_client_regress/body/"), b'')

    def test_read_from_empty_request(self):
        """HttpRequest.read() on a test client GET request should return the
        empty string."""
        self.assertEqual(self._get_content("/test_client_regress/read_all/"), b'')

    def test_read_numbytes_from_empty_request(self):
        """HttpRequest.read(LARGE_BUFFER) on a test client GET request should
        return the empty string."""
        self.assertEqual(self._get_content("/test_client_regress/read_buffer/"), b'')

    def test_read_from_nonempty_request(self):
        """HttpRequest.read() on a test client PUT request with some payload
        should return that payload."""
        payload = b'foobar'
        self.assertEqual(
            self._put_content("/test_client_regress/read_all/", payload), payload)

    def test_read_numbytes_from_nonempty_request(self):
        """HttpRequest.read(LARGE_BUFFER) on a test client PUT request with
        some payload should return that payload."""
        payload = b'foobar'
        self.assertEqual(
            self._put_content("/test_client_regress/read_buffer/", payload), payload)
class RequestFactoryStateTest(TestCase):
    """Regression tests for #15929."""
    # These tests are checking that certain middleware don't change certain
    # global state. Alternatively, from the point of view of a test, they are
    # ensuring test isolation behavior. So, unusually, it doesn't make sense to
    # run the tests individually, and if any are failing it is confusing to run
    # them with any other set of tests.

    def common_test_that_should_always_pass(self):
        # A bare RequestFactory request must not pick up a 'user' attribute
        # from middleware state leaked by a previous test client request.
        request = RequestFactory().get('/')
        request.session = {}
        self.assertFalse(hasattr(request, 'user'))

    def test_request(self):
        self.common_test_that_should_always_pass()

    def test_request_after_client(self):
        # apart from the next line the three tests are identical
        self.client.get('/')
        self.common_test_that_should_always_pass()

    def test_request_after_client_2(self):
        # This test is executed after the previous one
        self.common_test_that_should_always_pass()
class RequestFactoryEnvironmentTests(TestCase):
    """
    Regression tests for #8551 and #17067: ensure that environment variables
    are set correctly in RequestFactory.
    """

    def test_should_set_correct_env_variables(self):
        request = RequestFactory().get('/path/')
        # RequestFactory must populate the standard WSGI environ keys with
        # the documented test-server defaults.
        self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1')
        self.assertEqual(request.META.get('SERVER_NAME'), 'testserver')
        self.assertEqual(request.META.get('SERVER_PORT'), '80')
        self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1')
        # SCRIPT_NAME + PATH_INFO together must reconstruct the request path.
        self.assertEqual(request.META.get('SCRIPT_NAME') +
            request.META.get('PATH_INFO'), '/path/')
| RaoUmer/django | tests/regressiontests/test_client_regress/tests.py | Python | bsd-3-clause | 48,630 | [
"VisIt"
] | 0ef8fce4af7177570c82f708abffc6e23c2d7e5b7341af1bd82b1489e8ffff83 |
"""Visualization tools for coarse grids, both C/F splittings and aggregation.
Output is either to file (VTK) or to the screen (matplotlib).
vis_splitting: visualize C/F splittings through vertex elements
vis_aggregate_groups: visualize aggregation through groupins of edges, elements
"""
import warnings
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix, triu
from .vtk_writer import write_basic_mesh, write_vtu
def vis_aggregate_groups(V, E2V, AggOp, mesh_type,
                         fname='output.vtu'):
    """Coarse grid visualization of aggregate groups.

    Create .vtu files for use in Paraview or display with Matplotlib.

    Parameters
    ----------
    V : {array}
        coordinate array (N x D)
    E2V : {array}
        element index array (Nel x Nelnodes)
    AggOp : {csr_matrix}
        sparse matrix for the aggregate-vertex relationship (N x Nagg)
    mesh_type : {string}
        type of elements: vertex, tri, quad, tet, hex (all 3d)
    fname : {string, file object}
        file to be written, e.g. 'output.vtu'

    Returns
    -------
    - Writes data to .vtu file for use in paraview (xml 0.1 format) or
      displays to screen using matplotlib

    Notes
    -----
    - Works for both 2d and 3d elements. Element groupings are colored
      with data equal to 2.0 and stringy edges in the aggregate are colored
      with 3.0

    Examples
    --------
    >>> from pyamg.aggregation import standard_aggregation
    >>> from pyamg.vis.vis_coarse import vis_aggregate_groups
    >>> from pyamg.gallery import load_example
    >>> data = load_example('unit_square')
    >>> A = data['A'].tocsr()
    >>> V = data['vertices']
    >>> E2V = data['elements']
    >>> AggOp = standard_aggregation(A)[0]
    >>> vis_aggregate_groups(V=V, E2V=E2V, AggOp=AggOp,
    ...                      mesh_type='tri', fname='output.vtu')

    >>> from pyamg.aggregation import standard_aggregation
    >>> from pyamg.vis.vis_coarse import vis_aggregate_groups
    >>> from pyamg.gallery import load_example
    >>> data = load_example('unit_cube')
    >>> A = data['A'].tocsr()
    >>> V = data['vertices']
    >>> E2V = data['elements']
    >>> AggOp = standard_aggregation(A)[0]
    >>> vis_aggregate_groups(V=V, E2V=E2V, AggOp=AggOp,
    ...                      mesh_type='tet', fname='output.vtu')
    """
    check_input(V=V, E2V=E2V, AggOp=AggOp, mesh_type=mesh_type)
    # VTK cell-type codes for the supported element types.
    map_type_to_key = {'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12}
    if mesh_type not in map_type_to_key:
        raise ValueError(f'Unknown mesh_type={mesh_type}')
    key = map_type_to_key[mesh_type]
    AggOp = csr_matrix(AggOp)
    # remove elements that reference vertices beyond AggOp's rows
    # (typically Dirichlet BC nodes stripped from the operator)
    if E2V.max() >= AggOp.shape[0]:
        E2V = E2V[E2V.max(axis=1) < AggOp.shape[0]]
    # 1 #
    # Map each vertex to its aggregate ID, accounting for vertices that
    # belong to no aggregate (zero rows). Those are marked as solitary
    # aggregates with the label maxID+1.
    if len(AggOp.indices) != AggOp.shape[0]:
        # Row sums are 1 for aggregated vertices and 0 for unaggregated ones
        # (AggOp has at most one unit entry per row).
        new_aggs = np.array(AggOp.sum(axis=1), dtype=int).ravel()
        # FIX: mask on the row sums themselves (the original code compared
        # the *indices* of the empty rows against 0/1). Compute the mask
        # once, before assignment, since the first write can itself store
        # 0/1 values into new_aggs.
        aggregated = new_aggs == 1
        new_aggs[aggregated] = AggOp.indices     # keep existing aggregate IDs
        new_aggs[~aggregated] = AggOp.shape[1]   # fill in singletons maxID+1
        ElementAggs = new_aggs[E2V]
    else:
        ElementAggs = AggOp.indices[E2V]
    # 2 #
    # find all aggregates encompassing full elements
    # mask[i] == True if all vertices in element i belong to the same aggregate
    mask = np.where(abs(np.diff(ElementAggs)).max(axis=1) == 0)[0]
    # mask = (ElementAggs[:,:] == ElementAggs[:,0]).all(axis=1)
    E2V_a = E2V[mask, :]  # elements where element is full
    Nel_a = E2V_a.shape[0]
    # 3 #
    # find edges of elements in the same aggregate (brute force)
    # construct element-to-vertex incidence, then V2V = (E2V)^T (E2V)
    col = E2V.ravel()
    row = np.kron(np.arange(0, E2V.shape[0]),
                  np.ones((E2V.shape[1],), dtype=int))
    data = np.ones((len(col),))
    if len(row) != len(col):
        raise ValueError('Problem constructing vertex-to-vertex map')
    V2V = coo_matrix((data, (row, col)), shape=(E2V.shape[0], E2V.max()+1))
    V2V = V2V.T * V2V
    V2V = triu(V2V, 1).tocoo()
    # get all the edges
    edges = np.vstack((V2V.row, V2V.col)).T
    # keep only edges whose endpoints lie in the same aggregate
    E2V_b = edges[AggOp.indices[V2V.row] == AggOp.indices[V2V.col]]
    Nel_b = E2V_b.shape[0]
    # 3.5 #
    # single node aggregates (columns of AggOp with exactly one entry)
    sums = np.array(AggOp.sum(axis=0)).ravel()
    E2V_c = np.where(sums == 1)[0]
    Nel_c = len(E2V_c)
    # 4 #
    # now write out the elements and edges
    colors_a = 3*np.ones((Nel_a,))  # color triangles with threes
    colors_b = 2*np.ones((Nel_b,))  # color edges with twos
    colors_c = 1*np.ones((Nel_c,))  # color the vertices with ones
    cells = {1: E2V_c, 3: E2V_b, key: E2V_a}
    cdata = {1: colors_c, 3: colors_b, key: colors_a}
    write_vtu(V=V, cells=cells, fname=fname, cdata=cdata)
def vis_splitting(V, splitting, output='vtk', fname='output.vtu'):
    """Coarse grid visualization for C/F splittings.

    Parameters
    ----------
    V : {array}
        coordinate array (N x D)
    splitting : {array}
        coarse(1)/fine(0) flags
    fname : {string, file object}
        file to be written, e.g. 'output.vtu'
    output : {string}
        'vtk' or 'matplotlib'

    Returns
    -------
    - Displays in screen or writes data to .vtu file for use in paraview
      (xml 0.1 format)

    Notes
    -----
    D :
        dimension of coordinate space
    N :
        # of vertices in the mesh represented in V
    Ndof :
        # of dof (= ldof * N)

    - simply color different points with different colors. This works
      best with classical AMG.
    - writes a file (or opens a window) for each dof
    - for Ndof>1, they are assumed orderd [...dof1..., ...dof2..., etc]

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.vis.vis_coarse import vis_splitting
    >>> V = np.array([[0.0,0.0],
    ...               [1.0,0.0],
    ...               [0.0,1.0],
    ...               [1.0,1.0]])
    >>> splitting = np.array([0,1,0,1,1,0,1,0])    # two variables
    >>> vis_splitting(V,splitting,output='vtk',fname='output.vtu')

    >>> from pyamg.classical.split import RS
    >>> from pyamg.vis.vis_coarse import vis_splitting
    >>> from pyamg.gallery import load_example
    >>> data = load_example('unit_square')
    >>> A = data['A'].tocsr()
    >>> V = data['vertices']
    >>> E2V = data['elements']
    >>> splitting = RS(A)
    >>> vis_splitting(V=V,splitting=splitting,output='vtk', fname='output.vtu')
    """
    check_input(V, splitting)
    N = V.shape[0]
    Ndof = int(len(splitting) / N)
    E2V = np.arange(0, N, dtype=int)
    # Split the filename into stem and extension so per-dof files can be
    # named '<stem>_<d>.<ext>'. NOTE: ''.join drops interior dots for
    # multi-dot names ('a.b.vtu' -> 'ab_1.vtu'), matching prior behavior.
    a = fname.split('.')
    if len(a) < 2:
        fname1 = a[0]
        # FIX: extension must not carry a leading dot here; the format
        # string below adds its own dot (previously produced 'name_1..vtu').
        fname2 = 'vtu'
    else:
        fname1 = ''.join(a[:-1])
        fname2 = a[-1]
    new_fname = fname
    for d in range(0, Ndof):
        # for each variable, write a file or open a figure
        if Ndof > 1:
            new_fname = f'{fname1}_{d+1}.{fname2}'
        cdata = splitting[(d*N):((d+1)*N)]
        if output not in ('vtk', 'matplotlib'):
            raise ValueError('problem with outputtype')
        if output == 'vtk':
            write_basic_mesh(V=V, E2V=E2V, mesh_type='vertex',
                             cdata=cdata, fname=new_fname)
        elif output == 'matplotlib':
            try:
                import matplotlib.pyplot as plt  # pylint: disable=import-outside-toplevel
                cdataF = np.where(cdata == 0)[0]
                cdataC = np.where(cdata == 1)[0]
                xC = V[cdataC, 0]
                yC = V[cdataC, 1]
                xF = V[cdataF, 0]
                yF = V[cdataF, 1]
                plt.figure()
                plt.plot(xC, yC, 'r.', xF, yF, 'b.', clip_on=True)
                plt.title('C/F splitting (red=coarse, blue=fine)')
                plt.xlabel('x')
                plt.ylabel('y')
                plt.axis('off')
                plt.show()
            except ImportError:
                print('\nNote: matplotlib is needed for plotting.')
def check_input(V=None, E2V=None, AggOp=None, A=None, splitting=None, mesh_type=None):
    """Validate the arguments shared by the visualization helpers.

    Every parameter is optional; only the supplied ones are checked.
    Raises ValueError on an invalid argument and warns when element
    indices do not start at zero. Returns None on success.
    """
    # Vertex coordinates must be floating point.
    if V is not None:
        if not np.issubdtype(V.dtype, np.floating):
            raise ValueError('V should be of type float')
    # Element-to-vertex map must be integer and ideally zero-based.
    if E2V is not None:
        if not np.issubdtype(E2V.dtype, np.integer):
            raise ValueError('E2V should be of type integer')
        smallest = E2V.min()
        if smallest != 0:
            warnings.warn(f'Element indices begin at {smallest}')
    # Aggregation operator is tall: one row per vertex, one column per aggregate.
    if AggOp is not None:
        if AggOp.shape[1] > AggOp.shape[0]:
            raise ValueError('AggOp should be of size N x Nagg')
    # A is only meaningful together with AggOp, and must be square & compatible.
    if A is not None:
        if AggOp is None:
            raise ValueError('problem with check_input')
        square = A.shape[0] == A.shape[1]
        compatible = A.shape[0] == AggOp.shape[0]
        if not (square and compatible):
            raise ValueError('expected square matrix A and compatible with AggOp')
    # A splitting requires coordinates and must cover each vertex a whole
    # number of times (one block of N entries per degree of freedom).
    if splitting is not None:
        if V is None:
            raise ValueError('problem with check_input')
        flat = splitting.ravel()
        if len(flat) % V.shape[0] != 0:
            raise ValueError('splitting must be a multiple of N')
    # Restrict mesh_type to the supported element kinds.
    if mesh_type is not None:
        valid_mesh_types = ('vertex', 'tri', 'quad', 'tet', 'hex')
        if mesh_type not in valid_mesh_types:
            raise ValueError(f'mesh_type should be {" or ".join(valid_mesh_types)}')
| pyamg/pyamg | pyamg/vis/vis_coarse.py | Python | mit | 9,747 | [
"ParaView",
"VTK"
] | 6263a2880e4069c6b6cf0753ea74fdb1097e4c2e5961fe2d38bac578c39a052e |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .modelobject import ModelObject
from .tags import Tags
class TestCase(ModelObject):
    """Base model for single test case."""
    # Note: 'tags' and 'keywords' are descriptor-backed (@setter) and are
    # therefore not listed in __slots__.
    __slots__ = ['parent', 'name', 'doc', 'timeout']
    # Class of the contained keywords; subclasses may override.
    keyword_class = Keyword

    def __init__(self, name='', doc='', tags=None, timeout=None):
        #: :class:`~.model.testsuite.TestSuite` that contains this test.
        self.parent = None
        #: Test case name.
        self.name = name
        #: Test case documentation.
        self.doc = doc
        #: Test case tags as a list like :class:`~.model.tags.Tags` object.
        self.tags = tags
        #: Test case timeout.
        self.timeout = timeout
        #: Keyword results, a list of :class:`~.model.keyword.Keyword`
        #: instances and contains also possible setup and teardown keywords.
        self.keywords = None

    @setter
    def tags(self, tags):
        # Normalize any iterable (or None) into a Tags object on assignment.
        return Tags(tags)

    @setter
    def keywords(self, keywords):
        # Wrap assigned keywords into a Keywords list parented to this test.
        return Keywords(self.keyword_class, self, keywords)

    @property
    def id(self):
        # IDs are hierarchical, e.g. 's1-t3' = third test of suite 's1';
        # a parentless test is simply 't1'.
        if not self.parent:
            return 't1'
        return '%s-t%d' % (self.parent.id, self.parent.tests.index(self)+1)

    @property
    def longname(self):
        # Fully qualified name: dotted path of parent suites plus test name.
        if not self.parent:
            return self.name
        return '%s.%s' % (self.parent.longname, self.name)

    def visit(self, visitor):
        # Visitor-pattern entry point (see robot.model.visitor).
        visitor.visit_test(self)
class TestCases(ItemList):
    """List of :class:`TestCase` objects owned by a suite.

    Items added to the list automatically get their ``parent`` attribute
    set and are shown to any visitors already attached to that parent.
    """
    __slots__ = []

    def __init__(self, test_class=TestCase, parent=None, tests=None):
        ItemList.__init__(self, test_class, {'parent': parent}, tests)

    def _check_type_and_set_attrs(self, *tests):
        ItemList._check_type_and_set_attrs(self, *tests)
        # Replay parent-level visitors on newly added tests so late additions
        # receive the same treatment as tests present from the start.
        for test in tests:
            for visitor in test.parent._visitors:
                test.visit(visitor)
| caio2k/RIDE | src/robotide/lib/robot/model/testcase.py | Python | apache-2.0 | 2,504 | [
"VisIt"
] | 6079aa00d038b32a1020e086f5b802f0860d1c6424a8784340bf83aec7181b82 |
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 08/16/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_allclose
from skbeam.core.fitting import snip_method
def test_snip_method():
    """
    test of background function from xrf fit

    Builds a synthetic spectrum of three narrow Gaussian peaks on top of an
    exponential background, then checks that snip_method recovers the
    background (away from the domain boundaries).
    """
    xmin = 0
    # three gaussian peaks at x = -10, 0, +10, all very narrow (std = 0.01)
    xval = np.arange(-20, 20, 0.1)
    std = 0.01
    yval1 = np.exp(-xval**2 / 2 / std**2)
    yval2 = np.exp(-(xval - 10)**2 / 2 / std**2)
    yval3 = np.exp(-(xval + 10)**2 / 2 / std**2)
    # background as exponential
    a0 = 1.0
    a1 = 0.1
    a2 = 0.5
    bg_true = a0 * np.exp(-xval * a1 + a2)
    yval = yval1 + yval2 + yval3 + bg_true
    # estimate the background; (0, 1, 0) are the energy-calibration
    # coefficients (offset, linear, quadratic) used by snip_method
    bg = snip_method(yval,
                     0.0, 1.0, 0.0,
                     xmin=xmin, xmax=3000,
                     spectral_binning=None, width=0.1)
    # ignore the boundary part where the SNIP window is truncated
    cutval = 15
    bg_true_part = bg_true[cutval:-cutval]
    bg_cal_part = bg[cutval:-cutval]
    assert_allclose(bg_true_part, bg_cal_part, rtol=1e-3, atol=1e-1)
| tacaswell/scikit-beam | skbeam/core/fitting/tests/test_background.py | Python | bsd-3-clause | 3,709 | [
"Gaussian"
] | f197689b998fbdf8199ffb32fb0841480ce5139535b3930e08bc0e4412149432 |
# -*- coding: utf-8 -*-
"""
Created on Thu May 5 17:12:20 2016
@author: nguyen
"""
import numpy as np
from netCDF4 import Dataset
# Constants
# Nh: high-resolution grid points per dimension; Nt: number of 3D snapshots.
Nh = 96
Nt = 37
sspacing = 4   # spatial subsampling factor (HR -> LR)
tspacing = 6   # temporal subsampling factor (plane selection)
HTLS_sknots = np.arange(0,Nh,sspacing)   # low-res spatial knots
HTHS_sknots = np.arange(0,Nh,1)          # high-res spatial knots
LTHS_tknots = np.arange(0,Nh,tspacing)   # planes used for training
Nl = len(HTLS_sknots)
Ns = len(LTHS_tknots)
Dh = Nh*Nh   # dimension of one high-res plane (flattened)
Dl = Nl*Nl   # dimension of one low-res plane (flattened)
N = Nt*Ns    # total number of training samples
#Load all training data
Xh_tr = np.zeros((N, Dh))
Xl_tr = np.zeros((N, Dl))
ncfile1 = Dataset('/data/ISOTROPIC/data/data_downsampled4.nc','r')
for t in range(Nt):
    count = 0
    for i in LTHS_tknots:
        # one x-velocity plane at time t and x-index i
        xh = np.array(ncfile1.variables['velocity_x'][t,0:Nh,0:Nh,i])
        # NOTE(review): 0:-1 excludes the last grid line, yielding Nl knots;
        # presumably intentional — matches HTLS_sknots. TODO confirm.
        xl = xh[0:-1:sspacing,0:-1:sspacing] # xh[np.meshgrid(HTLS_sknots,HTLS_sknots)]
        Xh_tr[t*Ns + count,:] = np.reshape(xh,(1, Dh))
        Xl_tr[t*Ns + count,:] = np.reshape(xl,(1, Dl))
        count = count + 1
ncfile1.close()
# normalized: centered, variance 1 (per feature/column)
mea_l = np.zeros(Dl)
sig_l = np.zeros(Dl)
for k in range(Dl):
    mea_l[k] = np.mean(Xl_tr[:,k])
    sig_l[k] = np.std(Xl_tr[:,k])
    Xl_tr[:,k] = (Xl_tr[:,k]-mea_l[k])/sig_l[k]
mea_h = np.zeros(Dh)
sig_h = np.zeros(Dh)
for k in range(Dh):
    mea_h[k] = np.mean(Xh_tr[:,k])
    sig_h[k] = np.std(Xh_tr[:,k])
    Xh_tr[:,k] = (Xh_tr[:,k]-mea_h[k])/sig_h[k]
############## Kernel Ridge Regression ########################################
from sklearn.kernel_ridge import KernelRidge
import scipy.io as sio
# Hyper-parameters come from a previous cross-validation run saved in .mat form.
mf = sio.loadmat('/data/ISOTROPIC/regression/KRR_rbf_cv_alpha_gamma_sspacing4_tspacing6.mat',
                 squeeze_me=True, struct_as_record=False)
KRR_alpha_opt = mf['KRR_alpha_opt']
print('Optimal alpha:', KRR_alpha_opt)
KRR_gamma_opt = mf['KRR_gamma_opt']
print('Optimal gamma:', KRR_gamma_opt)
# Fit RBF kernel ridge regression mapping low-res planes to high-res planes.
kr = KernelRidge(kernel='rbf',alpha=KRR_alpha_opt,gamma=KRR_gamma_opt)
kr.fit(Xl_tr, Xh_tr)
############## Prediction and save to file ####################################
import os
# Remove any stale output file before creating a fresh one.
try:
    os.remove('/data/ISOTROPIC/data/KRR_rbf_sspacing4_tspacing6.nc')
except OSError:
    pass
ncfile2 = Dataset('/data/ISOTROPIC/data/KRR_rbf_sspacing4_tspacing6.nc', 'w')
ncfile1 = Dataset('/data/ISOTROPIC/data/data_downsampled4.nc','r')
# create the dimensions
ncfile2.createDimension('Nt',Nt)
ncfile2.createDimension('Nz',Nh)
ncfile2.createDimension('Ny',Nh)
ncfile2.createDimension('Nx',Nh)
# create the var and its attribute
var = ncfile2.createVariable('Urec', 'd',('Nt','Nz','Ny','Nx'))
for t in range(Nt):
    print('3D snapshot:',t)
    for i in range(Nh):
        # reconstruct every x-plane (not only the training knots)
        xl = np.array(ncfile1.variables['velocity_x'][t,0:Nh:sspacing,0:Nh:sspacing,i]) # load only LR
        xl = np.divide(np.reshape(xl,(1, Nl*Nl)) - mea_l, sig_l) #pre-normalize
        xrec = np.multiply(kr.predict(xl), sig_h) + mea_h # re-normalize the prediction
        var[t,:,:,i] = np.reshape(xrec, (Nh,Nh)) # put to netcdf file
# Close file
ncfile1.close()
ncfile2.close()
"NetCDF"
] | bfdc737388aa6b4b0d41728b7dbafeb3bd05d138d2e8337dc6840e8fa8d28bf6 |
import os
import sys
import argparse
import argparse
import logging
import extensions
import database
import commands
import html2latex
import utils
# Check for the necessary packages, this does a load so they should all get loaded.
if utils.check_configuration(['yaml', 'mkdocs', 'markdown', 'markdown_include', 'mdx_math']):
sys.exit(1)
import yaml
import mkdocs
from mkdocs.commands import serve, build
from MarkdownTable import MarkdownTable
from MooseObjectParameterTable import MooseObjectParameterTable
from MooseApplicationSyntax import MooseApplicationSyntax
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
MOOSE_DIR = os.getenv('MOOSE_DIR', os.path.join(os.getcwd(), 'moose'))
if not os.path.exists(MOOSE_DIR):
MOOSE_DIR = os.path.join(os.getenv('HOME'), 'projects', 'moose')
class MkMooseDocsFormatter(logging.Formatter):
    """
    A formatter that is aware of the class hierarchy of the MooseDocs library.

    Call the init_logging function to initialize the use of this custom formatter.
    """
    # Map level name -> color name understood by utils.colorText.
    COLOR = {'DEBUG':'CYAN', 'INFO':'RESET', 'WARNING':'YELLOW', 'ERROR':'RED', 'CRITICAL':'MAGENTA'}
    # Class-level (shared) tally of emitted records per level; read by
    # moosedocs() to report totals. Note: shared across all instances.
    COUNTS = {'DEBUG':0, 'INFO':0, 'WARNING':0, 'ERROR':0, 'CRITICAL':0}

    def format(self, record):
        """Format a record, indenting by the emitting class's depth in the
        MooseDocs hierarchy and coloring by level."""
        msg = logging.Formatter.format(self, record)

        # Derive an indentation level from the logger name suffix.
        if record.name.endswith('Item'):
            level = 3
        elif record.name.endswith('Database'):
            level = 2
        elif record.name.endswith('MooseApplicationSyntax') or record.name.endswith('MooseCommonFunctions'):
            level = 1
        else:
            level = 0

        # Prefix non-INFO records with their level name.
        if record.levelname in ['DEBUG', 'WARNING', 'ERROR', 'CRITICAL']:
            msg = '{}{}: {}'.format(' '*4*level, record.levelname, msg)
        else:
            msg = '{}{}'.format(' '*4*level, msg)

        if record.levelname in self.COLOR:
            msg = utils.colorText(msg, self.COLOR[record.levelname])

        # Increment counts
        self.COUNTS[record.levelname] += 1

        return msg
def init_logging(verbose=False):
    """
    Initialize the MooseDocs logging formatter.

    Attaches a stream handler using MkMooseDocsFormatter to both the
    'MooseDocs' and 'mkdocs' loggers and returns the formatter so callers
    can inspect its message counts.
    """
    level = logging.DEBUG if verbose else logging.INFO

    # The markdown package dumps way too much information in debug mode
    # (so always set it to INFO).
    logging.getLogger('MARKDOWN').setLevel(logging.INFO)

    # Shared handler with the custom hierarchy-aware formatter.
    formatter = MkMooseDocsFormatter()
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    for logger_name in ('MooseDocs', 'mkdocs'):
        logger = logging.getLogger(logger_name)
        logger.addHandler(handler)
        logger.setLevel(level)

    return formatter
class Loader(yaml.Loader):
    """
    A custom loader that handles nested includes. The nested includes should use absolute paths from the
    origin yaml file.
    """

    def include(self, node):
        """
        Allow for the embedding of yaml files.
        http://stackoverflow.com/questions/528281/how-can-i-include-an-yaml-file-inside-another

        NOTE(review): silently returns None when the included file does not
        exist — presumably deliberate tolerance of optional includes; verify.
        """
        filename = self.construct_scalar(node)
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                return yaml.load(f, Loader)
def yaml_load(filename, loader=Loader):
    """
    Load a YAML file capable of including other YAML files.

    Args:
        filename[str]: The name of the file to load.
        loader[yaml.Loader]: The loader to utilize.
    """
    # Attach the include constructor to the supplied loader.
    # FIX: the 'loader' argument was previously ignored; the body always
    # used the module-level Loader class regardless of what was passed in.
    loader.add_constructor('!include', loader.include)

    with open(filename, 'r') as fid:
        yml = yaml.load(fid.read(), loader)
    return yml
def load_pages(filename, keys=None, **kwargs):
    """
    A YAML loader for reading the pages file.

    Args:
        filename[str]: The name of the file to load. This is normally a '.yml' file that contains the complete
                       website layout. It may also be a markdown file, in this case only that single file will be
                       served with the "home" page.
        keys[list]: A list of top-level keys to include (default: keep all).
        kwargs: key, value pairs passed to the yaml_load function.
    """
    if filename.endswith('.md'):
        # Single markdown file: serve it alongside a stub "Home" page.
        pages = [{'Home': 'index.md'}, {os.path.basename(filename): filename}]
    else:
        pages = yaml_load(filename, **kwargs)

    # Restrict the top-level keys to those provided in the 'keys' argument.
    # FIX: 'page.keys()[0]' relies on Python 2 dict.keys() returning an
    # indexable list; next(iter(page)) works on both Python 2 and 3.
    # FIX: default changed from a mutable '[]' to None (same behavior).
    if keys:
        pages = [page for page in pages if next(iter(page)) in keys]

    return pages
def purge(extensions):
    """
    Removes generated files from repository.

    Args:
        extensions[list]: List of file extensions to purge (e.g. 'png'); each is
                          matched as the suffix '.moose.<ext>', so the files actually
                          removed are e.g. '.moose.png'. The caller's list is not modified.
    """
    # FIX: build a new list instead of rewriting 'extensions' in place; the
    # old code mutated the caller's list as a side effect.
    suffixes = ['.moose.{}'.format(ext) for ext in extensions]

    log = logging.getLogger('MooseDocs')
    for root, dirs, files in os.walk(os.getcwd(), topdown=False):
        for name in files:
            if any(name.endswith(suffix) for suffix in suffixes):
                full_file = os.path.join(root, name)
                log.debug('Removing: {}'.format(full_file))
                os.remove(full_file)
def command_line_options():
    """
    Return the parsed command line options for the moosedocs script.
    """
    # Command-line options
    parser = argparse.ArgumentParser(description="Tool for building and developing MOOSE and MOOSE-based application documentation.")
    parser.add_argument('--verbose', '-v', action='store_true', help="Execute with verbose (debug) output.")
    parser.add_argument('--config-file', type=str, default=os.path.join('moosedocs.yml'), help="The configuration file to use for building the documentation using MOOSE. (Default: %(default)s)")

    # One sub-command per documentation task; each commands.*_options helper
    # registers its own sub-parser and returns it for further tweaking below.
    subparser = parser.add_subparsers(title='Commands', description="Documentation creation command to execute.", dest='command')

    # Add the sub-commands
    check_parser = subparser.add_parser('check', help="Perform error checking on documentation.")
    generate_parser = commands.generate_options(parser, subparser)
    serve_parser = commands.serve_options(parser, subparser)
    build_parser = commands.build_options(parser, subparser)
    latex_parser = commands.latex_options(parser, subparser)
    presentation_parser = commands.presentation_options(parser, subparser)

    # Both build and serve need config file
    for p in [serve_parser, build_parser]:
        p.add_argument('--theme', help="Build documentation using specified theme. The available themes are: cosmo, cyborg, readthedocs, yeti, journal, bootstrap, readable, united, simplex, flatly, spacelab, amelia, cerulean, slate, mkdocs")
        p.add_argument('--pages', default='pages.yml', help="YAML file containing the pages that are supplied to the mkdocs 'pages' configuration item. It also supports passing the name of a single markdown file, in this case only this file will be served with the 'Home' page.")
        p.add_argument('--page-keys', default=[], nargs='+', help='A list of top-level keys from the "pages" file to include. This is a tool to help speed up the serving for development of documentation.')

    # Parse the arguments
    options = parser.parse_args()

    # Set livereload default
    if options.command == 'serve' and not options.livereload:
        options.livereload = 'dirtyreload'

    return options
def moosedocs():
    """Main entry point: dispatch the selected sub-command and report the
    number of logged warnings/errors. Returns True when errors occurred.

    NOTE: this file is Python 2 (statement-form print below).
    """
    # Options
    options = vars(command_line_options())

    # Initialize logging
    formatter = init_logging(options.pop('verbose'))
    log = logging.getLogger('MooseDocs')

    # Remove moose.svg files (these get generated via dot)
    log.info('Removing *.moose.svg files from {}'.format(os.getcwd()))
    purge(['svg'])

    # Execute command
    cmd = options.pop('command')
    if cmd == 'check':
        # 'check' is 'generate' without writing stub files.
        commands.generate(stubs=False, pages_stubs=False, **options)
    elif cmd == 'generate':
        commands.generate(**options)
    elif cmd == 'serve':
        commands.serve(**options)
    elif cmd == 'build':
        commands.build(**options)
    elif cmd == 'latex':
        commands.latex(**options)

    # Display logging results
    print 'WARNINGS: {} ERRORS: {}'.format(formatter.COUNTS['WARNING'], formatter.COUNTS['ERROR'])
    return formatter.COUNTS['ERROR'] > 0
| vityurkiv/Ox | python/MooseDocs/__init__.py | Python | lgpl-2.1 | 7,975 | [
"MOOSE"
] | d981ca36466c0877c7ebacedb2edf048452037269eb6ff9614a57a051c13dc5b |
# -*. coding: utf-8 -*-
# Copyright (c) 2008-2012, Noel O'Boyle; 2012, Adrià Cereto-Massagué
# All rights reserved.
#
# This file is part of Cinfony.
# The contents are covered by the terms of the GPL v2 license
# which is included in the file LICENSE_GPLv2.txt.
"""
pybel - A Cinfony module for accessing Open Babel
Global variables:
ob - the underlying SWIG bindings for Open Babel
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
import sys
import os.path
import tempfile
import json
import uuid
import xml.etree.ElementTree as ET
if sys.platform[:4] == "java":
import org.openbabel as ob
import java.lang.System
java.lang.System.loadLibrary("openbabel_java")
_obfuncs = ob.openbabel_java
_obconsts = ob.openbabel_javaConstants
import javax
elif sys.platform[:3] == "cli":
import System
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Windows.Forms import Application, DockStyle, Form, PictureBox
from System.Windows.Forms import PictureBoxSizeMode
from System.Drawing import Image, Size
_obdotnet = os.environ["OBDOTNET"]
if _obdotnet[0] == '"': # Remove trailing quotes
_obdotnet = _obdotnet[1:-1]
clr.AddReferenceToFileAndPath(os.path.join(_obdotnet, "OBDotNet.dll"))
import OpenBabel as ob
_obfuncs = ob.openbabel_csharp
_obconsts = ob.openbabel_csharp
else:
import openbabel as ob
_obfuncs = _obconsts = ob
try:
import Tkinter as tk
from PIL import Image as PIL
from PIL import ImageTk as piltk
except ImportError: # pragma: no cover
tk = None
def _formatstodict(list):
if sys.platform[:4] == "java":
list = [list.get(i) for i in range(list.size())]
broken = [x.replace("[Read-only]", "").replace("[Write-only]", "").split(
" -- ") for x in list]
broken = [(x, y.strip()) for x, y in broken]
return dict(broken)
def _getplugins(findplugin, names):
return dict([(x, findplugin(x)) for x in names if findplugin(x)])
def _getpluginnames(ptype):
    """Return the names of all Open Babel plugins of the given type
    (e.g. "descriptors", "fingerprints", "forcefields", "charges", "ops")."""
    if sys.platform[:4] == "cli":
        # IronPython binding uses a differently-named vector class.
        # ("cli"[:4] == "cli", so this slice still matches; elsewhere in this
        # module the same test is written with [:3].)
        plugins = ob.VectorString()
    else:
        plugins = ob.vectorString()
    ob.OBPlugin.ListAsVector(ptype, None, plugins)
    if sys.platform[:4] == "java":
        # Jython returns a java.util.List; convert to a Python list.
        plugins = [plugins.get(i) for i in range(plugins.size())]
    # Each listing line is "name  description ..."; keep the name only.
    return [x.split()[0] for x in plugins if x.strip()]
# Module-level discovery of the Open Babel capabilities available at import
# time: supported formats, descriptors, fingerprints, forcefields, charge
# models and operations.
_obconv = ob.OBConversion()
_builder = ob.OBBuilder()
informats = _formatstodict(_obconv.GetSupportedInputFormat())
"""A dictionary of supported input formats"""
outformats = _formatstodict(_obconv.GetSupportedOutputFormat())
"""A dictionary of supported output formats"""
descs = _getpluginnames("descriptors")
"""A list of supported descriptors"""
_descdict = _getplugins(ob.OBDescriptor.FindType, descs)
fps = [_x.lower() for _x in _getpluginnames("fingerprints")]
"""A list of supported fingerprint types"""
_fingerprinters = _getplugins(ob.OBFingerprint.FindFingerprint, fps)
forcefields = [_x.lower() for _x in _getpluginnames("forcefields")]
"""A list of supported forcefields"""
_forcefields = _getplugins(ob.OBForceField.FindType, forcefields)
charges = [_x.lower() for _x in _getpluginnames("charges")]
"""A list of supported charge models"""
_charges = _getplugins(ob.OBChargeModel.FindType, charges)
operations = _getpluginnames("ops")
"""A list of supported operations"""
_operations = _getplugins(ob.OBOp.FindType, operations)
ipython_3d = False
"""Toggles 2D vs 3D molecule representations in IPython notebook"""
def readfile(format, filename, opt=None):
    """Iterate over the molecules in a file.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       filename

    Optional parameters:
       opt    - a dictionary of format-specific options
                For format options with no parameters, specify the
                value as None.

    You can access the first molecule in a file using the next() method
    of the iterator (or the next() keyword in Python 3):
        mol = readfile("smi", "myfile.smi").next() # Python 2
        mol = next(readfile("smi", "myfile.smi")) # Python 3

    You can make a list of the molecules in a file using:
        mols = list(readfile("smi", "myfile.smi"))

    You can iterate over the molecules in a file as shown in the
    following code snippet:
    >>> atomtotal = 0
    >>> for mol in readfile("sdf", "head.sdf"):
    ...     atomtotal += len(mol.atoms)
    ...
    >>> print atomtotal
    43
    """
    if opt is None:
        opt = {}
    obconversion = ob.OBConversion()
    formatok = obconversion.SetInFormat(format)
    # Register format-specific input options; None-valued options are flags.
    for k, v in opt.items():
        if v is None:
            obconversion.AddOption(k, obconversion.INOPTIONS)
        else:
            obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
    if not formatok:
        raise ValueError("%s is not a recognised Open Babel format" % format)
    if not os.path.isfile(filename):
        raise IOError("No such file: '%s'" % filename)

    def filereader():
        # Lazily yield one Molecule per entry; a fresh OBMol is allocated for
        # each read so previously yielded molecules are not overwritten.
        obmol = ob.OBMol()
        notatend = obconversion.ReadFile(obmol, filename)
        while notatend:
            yield Molecule(obmol)
            obmol = ob.OBMol()
            notatend = obconversion.Read(obmol)
    return filereader()
def readstring(format, string, opt=None):
    """Read in a molecule from a string.

    Required parameters:
       format - see the informats variable for a list of available
                input formats
       string

    Optional parameters:
       opt - a dictionary of format-specific options
             For format options with no parameters, specify the
             value as None.

    Example:
    >>> input = "C1=CC=CS1"
    >>> mymol = readstring("smi", input)
    >>> len(mymol.atoms)
    5
    """
    if opt is None:
        opt = {}
    conv = ob.OBConversion()
    if not conv.SetInFormat(format):
        raise ValueError("%s is not a recognised Open Babel format" % format)
    # Register any format-specific input options before parsing.
    for optname, optvalue in opt.items():
        if optvalue is None:
            conv.AddOption(optname, conv.INOPTIONS)
        else:
            conv.AddOption(optname, conv.INOPTIONS, str(optvalue))
    obmol = ob.OBMol()
    if not conv.ReadString(obmol, string):
        raise IOError("Failed to convert '%s' to format '%s'" % (
            string, format))
    return Molecule(obmol)
class Outputfile(object):
    """Represent a file to which *output* is to be sent.

    Although it's possible to write a single molecule to a file by
    calling the write() method of a molecule, if multiple molecules
    are to be written to the same file you should use the Outputfile
    class.

    Required parameters:
       format - see the outformats variable for a list of available
                output formats
       filename

    Optional parameters:
       overwrite -- if the output file already exists, should it
                    be overwritten? (default is False)
       opt -- a dictionary of format-specific options
              For format options with no parameters, specify the
              value as None.

    Methods:
       write(molecule)
       close()
    """

    def __init__(self, format, filename, overwrite=False, opt=None):
        if opt is None:
            opt = {}
        self.format = format
        self.filename = filename
        if not overwrite and os.path.isfile(self.filename):
            raise IOError(
                "%s already exists. Use 'overwrite=True' to overwrite it." %
                self.filename)
        self.obConversion = ob.OBConversion()
        formatok = self.obConversion.SetOutFormat(self.format)
        if not formatok:
            raise ValueError("%s is not a recognised Open Babel format" %
                             format)
        if filename:
            # Compare the extension with a bytes or str suffix, matching the
            # type of the filename, so splitext comparison works on Python 3.
            if isinstance(filename, bytes):
                gzextension = b'.gz'
            else:
                gzextension = '.gz'
            if os.path.splitext(filename)[1] == gzextension:
                # BUGFIX: this used `self.obconversion` (lowercase 'c'),
                # which does not exist on this class and raised
                # AttributeError for any .gz output filename.
                self.obConversion.AddOption('z', self.obConversion.GENOPTIONS)
        for k, v in opt.items():
            if v is None:
                self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS)
            else:
                self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS, str(v))
        self.total = 0  # The total number of molecules written to the file

    def write(self, molecule):
        """Write a molecule to the output file.

        Required parameters:
           molecule
        """
        if not self.filename:
            raise IOError("Outputfile instance is closed.")
        # WriteFile must be used for the first molecule (it opens the file);
        # subsequent molecules are appended with Write.
        if self.total == 0:
            self.obConversion.WriteFile(molecule.OBMol, self.filename)
        else:
            self.obConversion.Write(molecule.OBMol)
        self.total += 1

    def close(self):
        """Close the Outputfile to further writing."""
        self.obConversion.CloseOutFile()
        self.filename = None
class Molecule(object):
    """Represent a Pybel Molecule.

    Required parameter:
       OBMol -- an Open Babel OBMol or any type of cinfony Molecule

    Attributes:
       atoms, charge, conformers, data, dim, energy, exactmass, formula,
       molwt, spin, sssr, title, unitcell.
    (refer to the Open Babel library documentation for more info).

    Methods:
       addh(), calcfp(), calcdesc(), draw(), localopt(), make2D(), make3D()
       calccharges(), removeh(), write()

    The underlying Open Babel molecule can be accessed using the attribute:
       OBMol
    """
    _cinfony = True

    def __init__(self, OBMol):
        # Accept any cinfony Molecule: round-trip through its _exchange
        # representation (canonical SMILES when it has no coordinates,
        # MOL format otherwise).
        if hasattr(OBMol, "_cinfony"):
            a, b = OBMol._exchange
            if a == 0:
                mol = readstring("smi", b)
            else:
                mol = readstring("mol", b)
            OBMol = mol.OBMol
        self.OBMol = OBMol

    @property
    def atoms(self):
        # OBMol atom indices are 1-based.
        return [Atom(self.OBMol.GetAtom(i + 1))
                for i in range(self.OBMol.NumAtoms())]

    @property
    def residues(self):
        return [Residue(res) for res in ob.OBResidueIter(self.OBMol)]

    @property
    def charge(self):
        return self.OBMol.GetTotalCharge()

    @property
    def conformers(self):
        return self.OBMol.GetConformers()

    @property
    def data(self):
        return MoleculeData(self.OBMol)

    @property
    def dim(self):
        return self.OBMol.GetDimension()

    @property
    def energy(self):
        return self.OBMol.GetEnergy()

    @property
    def exactmass(self):
        return self.OBMol.GetExactMass()

    @property
    def formula(self):
        return self.OBMol.GetFormula()

    @property
    def molwt(self):
        return self.OBMol.GetMolWt()

    @property
    def spin(self):
        return self.OBMol.GetTotalSpinMultiplicity()

    @property
    def sssr(self):
        return self.OBMol.GetSSSR()

    def _gettitle(self):
        return self.OBMol.GetTitle()

    def _settitle(self, val):
        self.OBMol.SetTitle(val)

    title = property(_gettitle, _settitle)

    @property
    def unitcell(self):
        unitcell_index = _obconsts.UnitCell
        if sys.platform[:3] == "cli":
            # .NET requires an explicit unsigned int for the data index.
            unitcell_index = System.UInt32(unitcell_index)
        unitcell = self.OBMol.GetData(unitcell_index)
        if unitcell:
            if sys.platform[:3] != "cli":
                return _obfuncs.toUnitCell(unitcell)
            else:
                return unitcell.Downcast[ob.OBUnitCell]()
        else:
            raise AttributeError("Molecule has no attribute 'unitcell'")

    @property
    def clone(self):
        return Molecule(ob.OBMol(self.OBMol))

    @property
    def _exchange(self):
        # cinfony exchange format: (1, MOL block) with coordinates,
        # (0, canonical SMILES) without.
        if self.OBMol.HasNonZeroCoords():
            return (1, self.write("mol"))
        else:
            return (0, self.write("can").split()[0])

    def __iter__(self):
        """Iterate over the Atoms of the Molecule.

        This allows constructions such as the following:
           for atom in mymol:
               print atom
        """
        return iter(self.atoms)

    def _repr_svg_(self):
        """For IPython notebook, renders 2D pybel.Molecule SVGs."""
        # Returning None defers to _repr_javascript_
        if ipython_3d:
            return None
        # Open babel returns a nested svg, which IPython unpacks and treats as
        # two SVGs, messing with the display location. This parses out the
        # inner svg before handing over to IPython.
        namespace = "http://www.w3.org/2000/svg"
        ET.register_namespace("", namespace)
        obsvg = self.clone.write("svg")
        tree = ET.fromstring(obsvg)
        svg = tree.find("{{{ns}}}g/{{{ns}}}svg".format(ns=namespace))
        return ET.tostring(svg).decode("utf-8")

    def _repr_html_(self):
        """For IPython notebook, renders 3D pybel.Molecule webGL objects."""
        # Returning None defers to _repr_svg_
        if not ipython_3d:
            return None
        try:
            import imolecule
        except ImportError:
            raise ImportError("Cannot import 3D rendering. Please install "
                              "with `pip install imolecule`.")
        return imolecule.draw(self.clone, format="pybel", display_html=False)

    def calcdesc(self, descnames=None):
        """Calculate descriptor values.

        Optional parameter:
           descnames -- a list of names of descriptors

        If descnames is not specified, all available descriptors are
        calculated. See the descs variable for a list of available
        descriptors.
        """
        # BUGFIX: the default used to be a mutable list ([]), which is shared
        # between calls; None avoids that pitfall and behaves identically.
        if not descnames:
            descnames = descs
        ans = {}
        for descname in descnames:
            try:
                desc = _descdict[descname]
            except KeyError:
                raise ValueError(("%s is not a recognised Open Babel "
                                  "descriptor type") % descname)
            ans[descname] = desc.Predict(self.OBMol)
        return ans

    def calcfp(self, fptype="FP2"):
        """Calculate a molecular fingerprint.

        Optional parameters:
           fptype -- the fingerprint type (default is "FP2"). See the
                     fps variable for a list of of available fingerprint
                     types.
        """
        if sys.platform[:3] == "cli":
            fp = ob.VectorUInt()
        else:
            fp = ob.vectorUnsignedInt()
        fptype = fptype.lower()
        try:
            fingerprinter = _fingerprinters[fptype]
        except KeyError:
            raise ValueError(
                "%s is not a recognised Open Babel Fingerprint type" % fptype)
        fingerprinter.GetFingerprint(self.OBMol, fp)
        return Fingerprint(fp)

    def calccharges(self, model="mmff94"):
        """Estimates atomic partial charges in the molecule.

        Optional parameters:
           model -- default is "mmff94". See the charges variable for a list
                    of available charge models (in shell, `obabel -L charges`)

        This method populates the `partialcharge` attribute of each atom
        in the molecule in place.
        """
        model = model.lower()
        try:
            charge_model = _charges[model]
        except KeyError:
            raise ValueError(
                "%s is not a recognised Open Babel Charge Model type" % model)
        success = charge_model.ComputeCharges(self.OBMol)
        if not success:
            # Surface the most recent Open Babel error message, if any.
            errors = ob.obErrorLog.GetMessagesOfLevel(ob.obError)
            error = errors[-1] if errors else "Molecule failed to charge."
            raise Exception(error)
        return [atom.partialcharge for atom in self.atoms]

    def write(self, format="smi", filename=None, overwrite=False, opt=None):
        """Write the molecule to a file or return a string.

        Optional parameters:
           format -- see the outformats variable for a list of available
                     output formats (default is "smi")
           filename -- default is None
           overwrite -- if the output file already exists, should it
                        be overwritten? (default is False)
           opt -- a dictionary of format specific options
                  For format options with no parameters, specify the
                  value as None.

        If a filename is specified, the result is written to a file.
        Otherwise, a string is returned containing the result.

        To write multiple molecules to the same file you should use
        the Outputfile class.
        """
        if opt is None:
            opt = {}
        obconversion = ob.OBConversion()
        formatok = obconversion.SetOutFormat(format)
        if not formatok:
            raise ValueError("%s is not a recognised Open Babel format" %
                             format)
        if filename:
            if isinstance(filename, bytes):
                gzextension = b'.gz'
            else:
                gzextension = '.gz'
            if os.path.splitext(filename)[1] == gzextension:
                # BUGFIX: this used `self.obConversion.GENOPTIONS`, but
                # Molecule has no `obConversion` attribute (only Outputfile
                # does), so writing to a .gz filename raised AttributeError.
                obconversion.AddOption('z', obconversion.GENOPTIONS)
        for k, v in opt.items():
            if v is None:
                obconversion.AddOption(k, obconversion.OUTOPTIONS)
            else:
                obconversion.AddOption(k, obconversion.OUTOPTIONS, str(v))
        if filename:
            if not overwrite and os.path.isfile(filename):
                raise IOError(("%s already exists. Use 'overwrite=True' to "
                               "overwrite it.") % filename)
            obconversion.WriteFile(self.OBMol, filename)
            obconversion.CloseOutFile()
        else:
            return obconversion.WriteString(self.OBMol)

    def localopt(self, forcefield="mmff94", steps=500):
        """Locally optimize the coordinates.

        Optional parameters:
           forcefield -- default is "mmff94". See the forcefields variable
                         for a list of available forcefields.
           steps -- default is 500

        If the molecule does not have any coordinates, make3D() is
        called before the optimization. Note that the molecule needs
        to have explicit hydrogens. If not, call addh().
        """
        forcefield = forcefield.lower()
        if self.dim != 3:
            self.make3D(forcefield)
        ff = _forcefields[forcefield]
        success = ff.Setup(self.OBMol)
        if not success:
            # Forcefield could not be set up for this molecule; leave the
            # coordinates untouched (matches historical behaviour).
            return
        ff.SteepestDescent(steps)
        ff.GetCoordinates(self.OBMol)

    def make2D(self):
        """Generate 2D coordinates."""
        _operations['gen2D'].Do(self.OBMol)

    def make3D(self, forcefield="mmff94", steps=50):
        """Generate 3D coordinates.

        Optional parameters:
           forcefield -- default is "mmff94". See the forcefields variable
                         for a list of available forcefields.
           steps -- default is 50

        Once coordinates are generated, hydrogens are added and a quick
        local optimization is carried out with 50 steps and the
        MMFF94 forcefield. Call localopt() if you want
        to improve the coordinates further.
        """
        forcefield = forcefield.lower()
        _builder.Build(self.OBMol)
        self.addh()
        self.localopt(forcefield, steps)

    def addh(self):
        """Add hydrogens."""
        self.OBMol.AddHydrogens()

    def removeh(self):
        """Remove hydrogens."""
        self.OBMol.DeleteHydrogens()

    def convertdbonds(self):
        """Convert Dative Bonds."""
        self.OBMol.ConvertDativeBonds()

    def __str__(self):
        return self.write()

    def draw(self, show=True, filename=None, update=False, usecoords=False):
        """Create a 2D depiction of the molecule.

        Optional parameters:
          show -- display on screen (default is True)
          filename -- write to file (default is None)
          update -- update the coordinates of the atoms to those
                    determined by the structure diagram generator
                    (default is False)
          usecoords -- don't calculate 2D coordinates, just use
                       the current coordinates (default is False)

        Tkinter and Python Imaging Library are required for image display.
        """
        obconversion = ob.OBConversion()
        formatok = obconversion.SetOutFormat("_png2")
        if not formatok:
            raise ImportError("PNG depiction support not found. You should "
                              "compile Open Babel with support for Cairo. See "
                              "installation instructions for more "
                              "information.")
        # Need to copy to avoid removing hydrogens from self
        workingmol = Molecule(ob.OBMol(self.OBMol))
        workingmol.removeh()
        if not usecoords:
            _operations['gen2D'].Do(workingmol.OBMol)
        if update:
            if workingmol.OBMol.NumAtoms() != self.OBMol.NumAtoms():
                raise RuntimeError("It is not possible to update the original "
                                   "molecule with the calculated coordinates, "
                                   "as the original molecule contains "
                                   "explicit hydrogens for which no "
                                   "coordinates have been calculated.")
            else:
                for i in range(workingmol.OBMol.NumAtoms()):
                    self.OBMol.GetAtom(i + 1).SetVector(
                        workingmol.OBMol.GetAtom(i + 1).GetVector())
        if filename:
            filedes = None
        else:
            if sys.platform[:3] == "cli" and show:
                raise RuntimeError("It is only possible to show the molecule "
                                   "if you provide a filename. The reason for "
                                   "this is that I kept having problems "
                                   "when using temporary files.")
            # No filename given: render into a temporary file instead.
            filedes, filename = tempfile.mkstemp()
        workingmol.write("_png2", filename=filename, overwrite=True)
        if show:
            if sys.platform[:4] == "java":
                image = javax.imageio.ImageIO.read(java.io.File(filename))
                frame = javax.swing.JFrame(visible=1)
                frame.getContentPane().add(
                    javax.swing.JLabel(javax.swing.ImageIcon(image)))
                frame.setSize(300, 300)
                frame.setDefaultCloseOperation(
                    javax.swing.WindowConstants.DISPOSE_ON_CLOSE)
                frame.show()
            elif sys.platform[:3] == "cli":
                form = _MyForm()
                form.setup(filename, self.title)
                Application.Run(form)
            else:
                if not tk:
                    raise ImportError("Tkinter or Python Imaging Library not "
                                      "found, but is required for image "
                                      "display. See installation instructions "
                                      "for more information.")
                root = tk.Tk()
                root.title((hasattr(self, "title") and self.title)
                           or self.__str__().rstrip())
                frame = tk.Frame(root, colormap="new",
                                 visual='truecolor').pack()
                image = PIL.open(filename)
                imagedata = piltk.PhotoImage(image)
                tk.Label(frame, image=imagedata).pack()
                tk.Button(root, text="Close", command=root.destroy).pack(
                    fill=tk.X)
                root.mainloop()
        # Clean up the temporary file, if one was created above.
        if filedes:
            os.close(filedes)
            os.remove(filename)
class Atom(object):
    """Represent a Pybel atom.

    Required parameter:
       OBAtom -- an Open Babel OBAtom

    Attributes:
       atomicmass, atomicnum, cidx, coords, coordidx, exactmass,
       formalcharge, heavyvalence, heterovalence, hyb, idx,
       implicitvalence, isotope, partialcharge, residue, spin, type,
       valence, vector.
    (refer to the Open Babel library documentation for more info).

    The original Open Babel atom can be accessed using the attribute:
       OBAtom
    """

    def __init__(self, OBAtom):
        self.OBAtom = OBAtom

    # Every property below is a thin, read-only delegation to the
    # corresponding getter on the wrapped OBAtom.
    @property
    def coords(self):
        # (x, y, z) Cartesian coordinates as a plain tuple
        return (self.OBAtom.GetX(), self.OBAtom.GetY(), self.OBAtom.GetZ())

    @property
    def atomicmass(self):
        return self.OBAtom.GetAtomicMass()

    @property
    def atomicnum(self):
        return self.OBAtom.GetAtomicNum()

    @property
    def cidx(self):
        return self.OBAtom.GetCIdx()

    @property
    def coordidx(self):
        return self.OBAtom.GetCoordinateIdx()

    @property
    def exactmass(self):
        return self.OBAtom.GetExactMass()

    @property
    def formalcharge(self):
        return self.OBAtom.GetFormalCharge()

    @property
    def heavyvalence(self):
        return self.OBAtom.GetHvyValence()

    @property
    def heterovalence(self):
        return self.OBAtom.GetHeteroValence()

    @property
    def hyb(self):
        return self.OBAtom.GetHyb()

    @property
    def idx(self):
        # 1-based atom index within the parent molecule
        return self.OBAtom.GetIdx()

    @property
    def implicitvalence(self):
        return self.OBAtom.GetImplicitValence()

    @property
    def isotope(self):
        return self.OBAtom.GetIsotope()

    @property
    def partialcharge(self):
        return self.OBAtom.GetPartialCharge()

    @property
    def residue(self):
        # Wrap the parent OBResidue in a pybel Residue
        return Residue(self.OBAtom.GetResidue())

    @property
    def spin(self):
        return self.OBAtom.GetSpinMultiplicity()

    @property
    def type(self):
        return self.OBAtom.GetType()

    @property
    def valence(self):
        return self.OBAtom.GetValence()

    @property
    def vector(self):
        return self.OBAtom.GetVector()

    def __str__(self):
        c = self.coords
        return "Atom: %d (%.2f %.2f %.2f)" % (self.atomicnum, c[0], c[1], c[2])
class Residue(object):
    """Represent a Pybel residue.

    Required parameter:
       OBResidue -- an Open Babel OBResidue

    Attributes:
       atoms, idx, name.
    (refer to the Open Babel library documentation for more info).

    The original Open Babel residue can be accessed using the attribute:
       OBResidue
    """

    def __init__(self, OBResidue):
        self.OBResidue = OBResidue

    @property
    def atoms(self):
        # Wrap each underlying OBAtom in a pybel Atom
        return [Atom(atom) for atom in ob.OBResidueAtomIter(self.OBResidue)]

    @property
    def idx(self):
        return self.OBResidue.GetIdx()

    @property
    def name(self):
        return self.OBResidue.GetName()

    def __iter__(self):
        """Iterate over the Atoms of the Residue.

        This allows constructions such as the following:
           for atom in residue:
               print atom
        """
        return iter(self.atoms)
def _findbits(fp, bitsperint):
"""Find which bits are set in a list/vector.
This function is used by the Fingerprint class.
>>> _findbits([13, 71], 8)
[1, 3, 4, 9, 10, 11, 15]
"""
ans = []
start = 1
if sys.platform[:4] == "java":
fp = [fp.get(i) for i in range(fp.size())]
for x in fp:
i = start
while x > 0:
if x % 2:
ans.append(i)
x >>= 1
i += 1
start += bitsperint
return ans
class Fingerprint(object):
    """A Molecular Fingerprint.

    Required parameters:
       fingerprint -- a vector calculated by OBFingerprint.FindFingerprint()

    Attributes:
       fp -- the underlying fingerprint object
       bits -- a list of bits set in the Fingerprint

    Methods:
       The "|" operator can be used to calculate the Tanimoto coeff. For
       example, given two Fingerprints 'a', and 'b', the Tanimoto coefficient
       is given by:
          tanimoto = a | b
    """

    def __init__(self, fingerprint):
        self.fp = fingerprint

    def __or__(self, other):
        # Tanimoto coefficient of the two bit vectors.
        return ob.OBFingerprint.Tanimoto(self.fp, other.fp)

    @property
    def bits(self):
        # 1-based positions of all set bits in the fingerprint.
        return _findbits(self.fp, ob.OBFingerprint.Getbitsperint())

    def __str__(self):
        values = self.fp
        if sys.platform[:4] == "java":
            values = [self.fp.get(i) for i in range(self.fp.size())]
        return ", ".join(str(item) for item in values)
class Smarts(object):
    """A Smarts Pattern Matcher

    Required parameters:
       smartspattern

    Methods:
       findall(molecule)

    Example:
    >>> mol = readstring("smi","CCN(CC)CC") # triethylamine
    >>> smarts = Smarts("[#6][#6]") # Matches an ethyl group
    >>> print smarts.findall(mol)
    [(1, 2), (4, 5), (6, 7)]

    The numbers returned are the indices (starting from 1) of the atoms
    that match the SMARTS pattern. In this case, there are three matches
    for each of the three ethyl groups in the molecule.
    """

    def __init__(self, smartspattern):
        """Initialise with a SMARTS pattern."""
        self.obsmarts = ob.OBSmartsPattern()
        if not self.obsmarts.Init(smartspattern):
            raise IOError("Invalid SMARTS pattern")

    def findall(self, molecule):
        """Find all matches of the SMARTS pattern to a particular molecule.

        Required parameters:
           molecule
        """
        self.obsmarts.Match(molecule.OBMol)
        # Unique matches only (GetUMapList), as tuples of 1-based atom indices.
        matches = self.obsmarts.GetUMapList()
        if sys.platform[:4] == "java":
            matches = [matches.get(i) for i in range(matches.size())]
        return list(matches)
class MoleculeData(object):
    """Store molecule data in a dictionary-type object

    Required parameters:
       obmol -- an Open Babel OBMol

    Methods and accessor methods are like those of a dictionary except
    that the data is retrieved on-the-fly from the underlying OBMol.

    Example:
    >>> mol = readfile("sdf", 'head.sdf').next() # Python 2
    >>> # mol = next(readfile("sdf", 'head.sdf')) # Python 3
    >>> data = mol.data
    >>> print data
    {'Comment': 'CORINA 2.61 0041 25.10.2001', 'NSC': '1'}
    >>> print len(data), data.keys(), data.has_key("NSC")
    2 ['Comment', 'NSC'] True
    >>> print data['Comment']
    CORINA 2.61 0041 25.10.2001
    >>> data['Comment'] = 'This is a new comment'
    >>> for k,v in data.items():
    ...    print k, "-->", v
    Comment --> This is a new comment
    NSC --> 1
    >>> del data['NSC']
    >>> print len(data), data.keys(), data.has_key("NSC")
    1 ['Comment'] False
    """

    def __init__(self, obmol):
        self._mol = obmol

    def _data(self):
        # Collect the OBPairData/OBCommentData entries currently attached to
        # the molecule; other generic-data types (e.g. unit cells) are skipped.
        data = self._mol.GetData()
        if sys.platform[:4] == "java":
            data = [data.get(i) for i in range(data.size())]
        answer = [x for x in data if
                  x.GetDataType() == _obconsts.PairData or
                  x.GetDataType() == _obconsts.CommentData]
        if sys.platform[:3] != "cli":
            answer = [_obfuncs.toPairData(x) for x in answer]
        return answer

    def _testforkey(self, key):
        # Raise KeyError for missing keys, matching dict behaviour.
        if key not in self:
            raise KeyError("'%s'" % key)

    def keys(self):
        return [x.GetAttribute() for x in self._data()]

    def values(self):
        return [x.GetValue() for x in self._data()]

    def items(self):
        # IMPROVED: build key/value pairs from a single _data() snapshot.
        # The previous zip(keys(), values()) fetched the underlying data
        # twice, which could pair up mismatched entries if the molecule's
        # data changed between the two calls.
        return iter([(x.GetAttribute(), x.GetValue()) for x in self._data()])

    def __iter__(self):
        return iter(self.keys())

    def iteritems(self):  # Can remove for Python 3
        return self.items()

    def __len__(self):
        return len(self._data())

    def __contains__(self, key):
        return self._mol.HasData(key)

    def __delitem__(self, key):
        self._testforkey(key)
        self._mol.DeleteData(self._mol.GetData(key))

    def clear(self):
        # keys() materializes a list first, so deleting while iterating is safe.
        for key in self:
            del self[key]

    def has_key(self, key):  # Python 2 dict compatibility
        return key in self

    def update(self, dictionary):
        for k, v in dictionary.items():
            self[k] = v

    def __getitem__(self, key):
        self._testforkey(key)
        answer = self._mol.GetData(key)
        if sys.platform[:3] != "cli":
            answer = _obfuncs.toPairData(answer)
        return answer.GetValue()

    def __setitem__(self, key, value):
        if key in self:
            # Update the existing OBPairData entry in place.
            if sys.platform[:3] != "cli":
                pairdata = _obfuncs.toPairData(self._mol.GetData(key))
            else:
                pairdata = self._mol.GetData(key).Downcast[ob.OBPairData]()
            pairdata.SetValue(str(value))
        else:
            # Create and attach a new OBPairData entry.
            pairdata = ob.OBPairData()
            pairdata.SetAttribute(key)
            pairdata.SetValue(str(value))
            self._mol.CloneData(pairdata)

    def __repr__(self):
        return dict(self.items()).__repr__()
# Windows Forms image viewer used by Molecule.draw() when running under
# IronPython (.NET); only defined on the "cli" platform where
# System.Windows.Forms types (Form, PictureBox, ...) are available.
if sys.platform[:3] == "cli":
    class _MyForm(Form):
        def __init__(self):
            Form.__init__(self)

        def setup(self, filename, title):
            """Load the image at `filename` into the form and display it."""
            # adjust the form's client area size to the picture
            self.ClientSize = Size(300, 300)
            self.Text = title
            self.filename = filename
            self.image = Image.FromFile(self.filename)
            pictureBox = PictureBox()
            # this will fit the image to the form
            pictureBox.SizeMode = PictureBoxSizeMode.StretchImage
            pictureBox.Image = self.image
            # fit the picture box to the frame
            pictureBox.Dock = DockStyle.Fill
            self.Controls.Add(pictureBox)
            self.Show()
# Run the doctest examples embedded in this module's docstrings when the
# file is executed directly.
if __name__ == "__main__": # pragma: no cover
    import doctest
    doctest.testmod(verbose=True)
| serval2412/openbabel | scripts/python/pybel.py | Python | gpl-2.0 | 33,954 | [
"Open Babel",
"Pybel"
] | d06220841bd112e766dcfc2f19ec4d54c2e1ac2861e1d92163139e21a84652d5 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implement AST vistor."""
from invenio_query_parser.ast import (
AndOp,
DoubleQuotedValue,
EmptyQuery,
Keyword,
KeywordOp,
NotOp,
OrOp,
RangeOp,
RegexValue,
SingleQuotedValue,
Value,
ValueQuery,
)
from invenio_query_parser.visitor import make_visitor
class FacetsVisitor(object):
    """Implement visitor to extract all facets filters.

    Visiting a parsed query yields a mapping of facet name to a dict with
    two sets, 'inc' (values to include) and 'exc' (values to exclude).
    """

    visitor = make_visitor()

    @staticmethod
    def jsonable(parsedFacets):
        """Convert a visited query result to a structure which can be jsonified.

        :param parsedFacets: a visited query result.
        """
        # json cannot serialize sets, so each one is turned into a list.
        return {
            name: {
                'inc': list(filters['inc']),
                'exc': list(filters['exc']),
            }
            for name, filters in parsedFacets.items()
        }

    # pylint: disable=W0613,E0102,F999,D102
    def _merge_facets(self, left, right):
        """Merge faceting for an AND or OR operator.

        :param left: left child node faceting
        :param right: right child node faceting
        """
        for name, filters in right.items():
            if name not in left:
                left[name] = filters
                continue
            included = left[name]['inc'] | filters['inc']
            excluded = left[name]['exc'] | filters['exc']
            # A value appearing on both sides is only partially
            # included/excluded, so it is dropped from both sets.
            left[name] = {
                'inc': included - excluded,
                'exc': excluded - included,
            }
        return left

    def _invert_facets(self, facets):
        """Invert facet filters included <-> excluded.

        :param facets: facet filters
        """
        for name, filters in facets.items():
            facets[name] = {'inc': filters['exc'], 'exc': filters['inc']}
        return facets

    @visitor(AndOp)
    def visit(self, node, left, right):
        return self._merge_facets(left, right)

    @visitor(OrOp)
    def visit(self, node, left, right):
        return self._merge_facets(left, right)

    @visitor(NotOp)
    def visit(self, node, op):
        return self._invert_facets(op)

    @visitor(KeywordOp)
    def visit(self, node, left, right):
        # A "keyword:value" pair marks `value` as included for facet `keyword`.
        return {node.left.value: {'inc': {node.right.value}, 'exc': set()}}

    @visitor(ValueQuery)
    def visit(self, node, op):
        return {}

    @visitor(Keyword)
    def visit(self, node):
        return {}

    @visitor(Value)
    def visit(self, node):
        return {}

    @visitor(SingleQuotedValue)
    def visit(self, node):
        return {}

    @visitor(DoubleQuotedValue)
    def visit(self, node):
        return {}

    @visitor(RegexValue)
    def visit(self, node):
        return {}

    @visitor(RangeOp)
    def visit(self, node, left, right):
        return {}

    @visitor(EmptyQuery)
    def visit(self, node):
        return {}

# pylint: enable=W0613,E0102,F999,D102
| eamonnmag/invenio-search | invenio_search/walkers/facets.py | Python | gpl-2.0 | 3,938 | [
"VisIt"
] | 62b13ccbaae2d899fc9af28c22c9e254f299925c6ffe11f4d0355420edb8175e |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
PDBQT_input, # pdbqt_inputpdbqt.pdbqt
)
class TestPDBQT(ParserBase):
    # Exercises the PDBQT topology parser against the reference input file;
    # the actual test methods are inherited from ParserBase.
    parser = mda.topology.PDBQTParser.PDBQTParser
    ref_filename = PDBQT_input
    # Attributes the parser is expected to read directly from the file.
    expected_attrs = [
        'ids', 'names', 'charges', 'types', 'altLocs', 'resids', 'resnames',
        'segids', 'record_types', 'icodes', 'occupancies', 'tempfactors'
    ]
    # Attributes that are guessed rather than parsed from the file.
    guessed_attrs = ['masses']
    expected_n_atoms = 1805
    expected_n_residues = 199  # resids go 2-102 then 2-99
    expected_n_segments = 2  # res2-102 are A, 2-99 are B
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/topology/test_pdbqt.py | Python | gpl-2.0 | 1,705 | [
"MDAnalysis"
] | e13b44c89f835c5b3d742aed66c860727ac3cea8c7acba62442b1fbc73164c05 |
# #
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Set of file tools.
:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
:author: Toon Willems (Ghent University)
:author: Ward Poelmans (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
:author: Sotiris Fragkiskos (NTUA, CERN)
:author: Davide Vanzo (ACCRE, Vanderbilt University)
:author: Damian Alvarez (Forschungszentrum Juelich GmbH)
:author: Maxime Boissonneault (Compute Canada)
"""
import datetime
import difflib
import fileinput
import glob
import hashlib
import imp
import inspect
import os
import re
import shutil
import signal
import stat
import sys
import tempfile
import time
import zlib
from easybuild.base import fancylogger
from easybuild.tools import run
# import build_log must stay, to use of EasyBuildLog
from easybuild.tools.build_log import EasyBuildError, dry_run_msg, print_msg, print_warning
from easybuild.tools.config import DEFAULT_WAIT_ON_LOCK_INTERVAL, GENERIC_EASYBLOCK_PKG, build_option, install_path
from easybuild.tools.py2vs3 import HTMLParser, std_urllib, string_type
from easybuild.tools.utilities import nub, remove_unwanted_chars
# 'requests' is an optional dependency; code elsewhere can check
# HAVE_REQUESTS and fall back to std_urllib when it is not installed.
try:
    import requests
    HAVE_REQUESTS = True
except ImportError:
    HAVE_REQUESTS = False

_log = fancylogger.getLogger('filetools', fname=False)

# easyblock class prefix
EASYBLOCK_CLASS_PREFIX = 'EB_'

# character map for encoding strings
# NOTE(review): presumably consumed by string encode/decode helpers defined
# further down in this module (not visible here) — maps each special
# character to a safe "_name_" token.
STRING_ENCODING_CHARMAP = {
    r' ': "_space_",
    r'!': "_exclamation_",
    r'"': "_quotation_",
    r'#': "_hash_",
    r'$': "_dollar_",
    r'%': "_percent_",
    r'&': "_ampersand_",
    r'(': "_leftparen_",
    r')': "_rightparen_",
    r'*': "_asterisk_",
    r'+': "_plus_",
    r',': "_comma_",
    r'-': "_minus_",
    r'.': "_period_",
    r'/': "_slash_",
    r':': "_colon_",
    r';': "_semicolon_",
    r'<': "_lessthan_",
    r'=': "_equals_",
    r'>': "_greaterthan_",
    r'?': "_question_",
    r'@': "_atsign_",
    r'[': "_leftbracket_",
    r'\'': "_apostrophe_",
    r'\\': "_backslash_",
    r']': "_rightbracket_",
    r'^': "_circumflex_",
    r'_': "_underscore_",
    r'`': "_backquote_",
    r'{': "_leftcurly_",
    r'|': "_verticalbar_",
    r'}': "_rightcurly_",
    r'~': "_tilde_",
}

PATH_INDEX_FILENAME = '.eb-path-index'

# canonical names for the checksum types used as dict keys below
CHECKSUM_TYPE_MD5 = 'md5'
CHECKSUM_TYPE_SHA256 = 'sha256'
DEFAULT_CHECKSUM = CHECKSUM_TYPE_MD5

# map of checksum types to checksum functions
# (each takes a file path and returns the checksum of that file's contents)
CHECKSUM_FUNCTIONS = {
    'adler32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.adler32)),
    'crc32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.crc32)),
    CHECKSUM_TYPE_MD5: lambda p: calc_block_checksum(p, hashlib.md5()),
    'sha1': lambda p: calc_block_checksum(p, hashlib.sha1()),
    CHECKSUM_TYPE_SHA256: lambda p: calc_block_checksum(p, hashlib.sha256()),
    'sha512': lambda p: calc_block_checksum(p, hashlib.sha512()),
    'size': lambda p: os.path.getsize(p),
}
CHECKSUM_TYPES = sorted(CHECKSUM_FUNCTIONS.keys())

# map of (lowercased) archive file extensions to the shell command template
# used to unpack them (%(filepath)s / %(target)s are substituted later)
EXTRACT_CMDS = {
    # gzipped or gzipped tarball
    '.gtgz': "tar xzf %(filepath)s",
    '.gz': "gunzip -c %(filepath)s > %(target)s",
    '.tar.gz': "tar xzf %(filepath)s",
    '.tgz': "tar xzf %(filepath)s",
    # bzipped or bzipped tarball
    '.bz2': "bunzip2 -c %(filepath)s > %(target)s",
    '.tar.bz2': "tar xjf %(filepath)s",
    '.tb2': "tar xjf %(filepath)s",
    '.tbz': "tar xjf %(filepath)s",
    '.tbz2': "tar xjf %(filepath)s",
    # xzipped or xzipped tarball
    '.tar.xz': "unxz %(filepath)s --stdout | tar x",
    '.txz': "unxz %(filepath)s --stdout | tar x",
    '.xz': "unxz %(filepath)s",
    # tarball
    '.tar': "tar xf %(filepath)s",
    # zip file
    '.zip': "unzip -qq %(filepath)s",
    # iso file
    '.iso': "7z x %(filepath)s",
    # tar.Z: using compress (LZW), but can be handled with gzip so use 'z'
    '.tar.z': "tar xzf %(filepath)s",
}

# global set of names of locks that were created in this session
global_lock_names = set()
class ZlibChecksum(object):
    """
    Wrapper class for adler32 and crc32 checksums to
    match the interface of the hashlib module.
    """

    def __init__(self, algorithm):
        self.algorithm = algorithm
        # Seed with the checksum of the empty string, the same starting
        # point the zlib functions themselves use.
        self.checksum = algorithm(b'')
        self.blocksize = 64  # The same as md5/sha1

    def update(self, data):
        """Calculates a new checksum using the old one and the new data"""
        # zlib checksum functions take the running value as 2nd argument
        self.checksum = self.algorithm(data, self.checksum)

    def hexdigest(self):
        """Return string representation of the checksum.

        NOTE: despite the method name (kept for hashlib interface
        compatibility), the digits after the '0x' prefix are DECIMAL,
        not hexadecimal; this quirk is preserved deliberately so that
        previously stored checksum values still match.
        """
        masked = self.checksum & 0xffffffff
        return '0x%s' % masked
def is_readable(path):
    """Return whether file at specified location exists and is readable."""
    try:
        res = os.path.exists(path) and os.access(path, os.R_OK)
    except OSError as err:
        raise EasyBuildError("Failed to check whether %s is readable: %s", path, err)
    return res
def read_file(path, log_error=True, mode='r'):
    """
    Read contents of file at given path, in a robust way.

    :param path: location of file to read
    :param log_error: raise an error when reading fails (if False, None is returned instead)
    :param mode: mode to open the file in ('r' for text, 'rb' for binary)
    """
    contents = None
    try:
        with open(path, mode) as handle:
            contents = handle.read()
    except IOError as err:
        if log_error:
            raise EasyBuildError("Failed to read %s: %s", path, err)
    return contents
def write_file(path, data, append=False, forced=False, backup=False, always_overwrite=True, verbose=False):
    """
    Write given contents to file at given path;
    overwrites current file contents without backup by default!

    :param path: location of file
    :param data: contents to write to file
    :param append: append to existing file rather than overwrite
    :param forced: force actually writing file in (extended) dry run mode
    :param backup: back up existing file before overwriting or modifying it
    :param always_overwrite: don't require --force to overwrite an existing file
    :param verbose: be verbose, i.e. inform where backup file was created
    """
    # early exit in 'dry run' mode
    if not forced and build_option('extended_dry_run'):
        dry_run_msg("file written: %s" % path, silent=build_option('silent'))
        return

    # existing files are only clobbered when explicitly allowed, and backed up first when requested
    if os.path.exists(path):
        if not append:
            if always_overwrite or build_option('force'):
                _log.info("Overwriting existing file %s", path)
            else:
                raise EasyBuildError("File exists, not overwriting it without --force: %s", path)
        if backup:
            # back up *before* opening the file for writing, so the original contents are preserved
            backed_up_fp = back_up_file(path)
            _log.info("Existing file %s backed up to %s", path, backed_up_fp)
            if verbose:
                print_msg("Backup of %s created at %s" % (path, backed_up_fp), silent=build_option('silent'))

    # figure out mode to use for open file handle
    # cfr. https://docs.python.org/3/library/functions.html#open
    mode = 'a' if append else 'w'

    # special care must be taken with binary data in Python 3
    if sys.version_info[0] >= 3 and isinstance(data, bytes):
        mode += 'b'

    # note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
    try:
        # create parent directory if needed before writing
        mkdir(os.path.dirname(path), parents=True)
        with open(path, mode) as handle:
            handle.write(data)
    except IOError as err:
        raise EasyBuildError("Failed to write to %s: %s", path, err)
def is_binary(contents):
    """
    Check whether given bytestring represents the contents of a binary file or not.
    """
    # only byte strings are considered; a NUL byte is taken as the tell-tale sign of binary data
    if not isinstance(contents, bytes):
        return False
    return b'\00' in bytes(contents)
def resolve_path(path):
    """
    Return fully resolved path for given path.

    :param path: path that (maybe) contains symlinks
    """
    try:
        return os.path.realpath(path)
    except (AttributeError, OSError, TypeError) as err:
        raise EasyBuildError("Resolving path %s failed: %s", path, err)
def symlink(source_path, symlink_path, use_abspath_source=True):
    """
    Create a symlink at the specified path to the given path.

    :param source_path: source file path
    :param symlink_path: symlink file path
    :param use_abspath_source: resolves the absolute path of source_path
    """
    src = os.path.abspath(source_path) if use_abspath_source else source_path
    try:
        os.symlink(src, symlink_path)
    except OSError as err:
        raise EasyBuildError("Symlinking %s to %s failed: %s", src, symlink_path, err)
    _log.info("Symlinked %s to %s", src, symlink_path)
def remove_file(path):
    """Remove file at specified path."""

    # early exit in 'dry run' mode
    if build_option('extended_dry_run'):
        dry_run_msg("file %s removed" % path, silent=build_option('silent'))
        return

    try:
        # note: file may also be a broken symlink...
        # (os.path.exists returns False for broken symlinks, hence the extra islink check)
        if os.path.exists(path) or os.path.islink(path):
            os.remove(path)
    except OSError as err:
        raise EasyBuildError("Failed to remove file %s: %s", path, err)
def remove_dir(path):
    """Remove directory at specified path."""
    # early exit in 'dry run' mode
    if build_option('extended_dry_run'):
        dry_run_msg("directory %s removed" % path, silent=build_option('silent'))
        return

    if os.path.exists(path):
        ok = False
        errors = []
        # Try multiple times to cater for temporary failures on e.g. NFS mounted paths
        max_attempts = 3
        for i in range(0, max_attempts):
            try:
                shutil.rmtree(path)
                ok = True
                break
            except OSError as err:
                _log.debug("Failed to remove path %s with shutil.rmtree at attempt %d: %s" % (path, i, err))
                errors.append(err)
                # wait a bit before trying again, temporary failures may resolve themselves
                time.sleep(2)
                # make sure write permissions are enabled on entire directory
                adjust_permissions(path, stat.S_IWUSR, add=True, recursive=True)
        if ok:
            _log.info("Path %s successfully removed." % path)
        else:
            raise EasyBuildError("Failed to remove directory %s even after %d attempts.\nReasons: %s",
                                 path, max_attempts, errors)
def remove(paths):
    """
    Remove single file/directory or list of files and directories

    :param paths: path(s) to remove
    """
    # also accept a single path as argument
    if isinstance(paths, string_type):
        paths = [paths]

    _log.info("Removing %d files & directories", len(paths))

    for path in paths:
        # pick the right removal helper based on what the path points to
        if os.path.isfile(path):
            handler = remove_file
        elif os.path.isdir(path):
            handler = remove_dir
        else:
            raise EasyBuildError("Specified path to remove is not an existing file or directory: %s", path)
        handler(path)
def change_dir(path):
    """
    Change to directory at specified location.

    :param path: location to change to
    :return: previous location we were in (None if it could not be determined)
    """
    # determining the current working directory can fail if we're in a non-existing directory
    try:
        cwd = os.getcwd()
    except OSError as err:
        # fixed: log message had an unbalanced parenthesis
        _log.debug("Failed to determine current working directory (but proceeding anyway): %s", err)
        cwd = None

    try:
        os.chdir(path)
    except OSError as err:
        raise EasyBuildError("Failed to change from %s to %s: %s", cwd, path, err)

    return cwd
def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False, forced=False, change_into_dir=None):
    """
    Extract file at given path to specified directory

    :param fn: path to file to extract
    :param dest: location to extract to
    :param cmd: extract command to use (derived from filename if not specified)
    :param extra_options: extra options to pass to extract command
    :param overwrite: overwrite existing unpacked file
    :param forced: force extraction in (extended) dry run mode
    :param change_into_dir: change into resulting directory;
                            None (current default) implies True, but this is deprecated,
                            this named argument should be set to False or True explicitely
                            (in a future major release, default will be changed to False)
    :return: path to directory (in case of success)
    """
    # stay backward-compatible: not specifying change_into_dir implies True, but that is deprecated
    if change_into_dir is None:
        _log.deprecated("extract_file function was called without specifying value for change_into_dir", '5.0')
        change_into_dir = True

    if not os.path.isfile(fn) and not build_option('extended_dry_run'):
        raise EasyBuildError("Can't extract file %s: no such file", fn)

    mkdir(dest, parents=True)

    # use absolute pathnames from now on
    abs_dest = os.path.abspath(dest)

    # change working directory
    _log.debug("Unpacking %s in directory %s", fn, abs_dest)
    cwd = change_dir(abs_dest)

    if not cmd:
        # derive extract command from file extension
        cmd = extract_cmd(fn, overwrite=overwrite)
    else:
        # complete command template with filename
        cmd = cmd % fn
    if not cmd:
        raise EasyBuildError("Can't extract file %s with unknown filetype", fn)

    if extra_options:
        cmd = "%s %s" % (cmd, extra_options)

    run.run_cmd(cmd, simple=True, force_in_dry_run=forced)

    # note: find_base_dir also changes into the base dir!
    base_dir = find_base_dir()

    # if changing into obtained directory is not desired,
    # change back to where we came from (unless that was a non-existing directory)
    if not change_into_dir:
        if cwd is None:
            raise EasyBuildError("Can't change back to non-existing directory after extracting %s in %s", fn, dest)
        else:
            change_dir(cwd)

    return base_dir
def which(cmd, retain_all=False, check_perms=True, log_ok=True, log_error=True):
    """
    Return (first) path in $PATH for specified command, or None if command is not found

    :param retain_all: returns *all* locations to the specified command in $PATH, not just the first one
    :param check_perms: check whether candidate path has read/exec permissions before accepting it as a match
    :param log_ok: Log an info message where the command has been found (if any)
    :param log_error: Log a warning message when command hasn't been found
    """
    res = [] if retain_all else None

    paths = os.environ.get('PATH', '').split(os.pathsep)
    for path in paths:
        cmd_path = os.path.join(path, cmd)
        # candidate must be an actual file
        if not os.path.isfile(cmd_path):
            continue
        if log_ok:
            _log.info("Command %s found at %s", cmd, cmd_path)
        # optionally require both read and execute permissions before accepting the match
        if check_perms and not os.access(cmd_path, os.R_OK | os.X_OK):
            _log.info("No read/exec permissions for %s, so continuing search...", cmd_path)
            continue
        if retain_all:
            res.append(cmd_path)
        else:
            res = cmd_path
            break

    if not res and log_error:
        _log.warning("Could not find command '%s' (with permissions to read/execute it) in $PATH (%s)" % (cmd, paths))

    return res
def det_common_path_prefix(paths):
    """Determine common path prefix for a given list of paths."""
    if not isinstance(paths, list):
        raise EasyBuildError("det_common_path_prefix: argument must be of type list (got %s: %s)", type(paths), paths)
    elif not paths:
        return None

    def as_result(pref):
        # prefix may be empty string for relative paths with a non-common prefix
        return pref.rstrip(os.path.sep) or None

    # chop off trailing path components one by one, until what remains is a prefix of every path
    prefix = os.path.dirname(paths[0])
    while prefix != os.path.dirname(prefix):
        if all(p.startswith(prefix) for p in paths):
            return as_result(prefix)
        prefix = os.path.dirname(prefix)

    # also consider the top-level (root or empty) prefix before giving up
    if all(p.startswith(prefix) for p in paths):
        return as_result(prefix)

    return None
def is_alt_pypi_url(url):
    """Determine whether specified URL is already an alternate PyPI URL, i.e. whether it contains a hash."""
    # example: .../packages/5b/03/e135b19fadeb9b1ccb45eac9f60ca2dc3afe72d099f6bd84e03cb131f9bf/easybuild-2.7.0.tar.gz
    alt_url_regex = re.compile('/packages/[a-f0-9]{2}/[a-f0-9]{2}/[a-f0-9]{60}/[^/]+$')
    res = alt_url_regex.search(url) is not None
    _log.debug("Checking whether '%s' is an alternate PyPI URL using pattern '%s'...: %s",
               url, alt_url_regex.pattern, res)
    return res
def pypi_source_urls(pkg_name):
    """
    Fetch list of source URLs (incl. source filename) for specified Python package from PyPI, using 'simple' PyPI API.
    """
    # example: https://pypi.python.org/simple/easybuild
    # see also:
    # - https://www.python.org/dev/peps/pep-0503/
    # - https://wiki.python.org/moin/PyPISimple
    # PEP 503: normalize package name (lowercase, runs of '-', '_' and '.' collapsed into a single dash)
    simple_url = 'https://pypi.python.org/simple/%s' % re.sub(r'[-_.]+', '-', pkg_name.lower())

    tmpdir = tempfile.mkdtemp()
    urls_html = os.path.join(tmpdir, '%s_urls.html' % pkg_name)
    if download_file(os.path.basename(urls_html), simple_url, urls_html) is None:
        _log.debug("Failed to download %s to determine available PyPI URLs for %s", simple_url, pkg_name)
        res = []
    else:
        urls_txt = read_file(urls_html)

        # collected via closure by the parser below
        res = []

        # note: don't use xml.etree.ElementTree to parse HTML page served by PyPI's simple API
        # cfr. https://github.com/pypa/warehouse/issues/7886
        class HrefHTMLParser(HTMLParser):
            """HTML parser to extract 'href' attribute values from anchor tags (<a href='...'>)."""

            def handle_starttag(self, tag, attrs):
                if tag == 'a':
                    attrs = dict(attrs)
                    if 'href' in attrs:
                        res.append(attrs['href'])

        parser = HrefHTMLParser()
        parser.feed(urls_txt)

        # links are relative, transform them into full URLs; for example:
        # from: ../../packages/<dir1>/<dir2>/<hash>/easybuild-<version>.tar.gz#md5=<md5>
        # to: https://pypi.python.org/packages/<dir1>/<dir2>/<hash>/easybuild-<version>.tar.gz#md5=<md5>
        res = [re.sub('.*/packages/', 'https://pypi.python.org/packages/', x) for x in res]

    return res
def derive_alt_pypi_url(url):
    """Derive alternate PyPI URL for given URL."""
    alt_pypi_url = None

    # example input URL: https://pypi.python.org/packages/source/e/easybuild/easybuild-2.7.0.tar.gz
    # last two path components are the package name and the source filename
    pkg_name, pkg_source = url.strip().split('/')[-2:]

    cand_urls = pypi_source_urls(pkg_name)

    # md5 for old PyPI, sha256 for new PyPi (Warehouse)
    regex = re.compile('.*/%s(?:#md5=[a-f0-9]{32}|#sha256=[a-f0-9]{64})$' % pkg_source.replace('.', '\\.'), re.M)
    for cand_url in cand_urls:
        res = regex.match(cand_url)
        if res:
            # e.g.: https://pypi.python.org/packages/<dir1>/<dir2>/<hash>/easybuild-<version>.tar.gz#md5=<md5>
            # strip off the trailing '#md5=...' / '#sha256=...' fragment
            alt_pypi_url = res.group(0).split('#sha256')[0].split('#md5')[0]
            break

    if not alt_pypi_url:
        _log.debug("Failed to extract hash using pattern '%s' from list of URLs: %s", regex.pattern, cand_urls)

    return alt_pypi_url
def download_file(filename, url, path, forced=False):
    """
    Download a file from the given URL, to the specified path.

    :param filename: name of file to download (only used in log messages)
    :param url: URL to download from
    :param path: local path to download to
    :param forced: force writing the downloaded file in (extended) dry run mode
    :return: path to downloaded file, or None if the download failed
    """
    _log.debug("Trying to download %s from %s to %s", filename, url, path)

    timeout = build_option('download_timeout')
    if timeout is None:
        # default to 10sec timeout if none was specified
        # default system timeout (used is nothing is specified) may be infinite (?)
        timeout = 10
    _log.debug("Using timeout of %s seconds for initiating download" % timeout)

    # make sure directory exists
    basedir = os.path.dirname(path)
    mkdir(basedir, parents=True)

    # try downloading, three times max.
    downloaded = False
    max_attempts = 3
    attempt_cnt = 0

    # use custom HTTP header
    headers = {'User-Agent': 'EasyBuild', 'Accept': '*/*'}

    # for backward compatibility, and to avoid relying on 3rd party Python library 'requests'
    url_req = std_urllib.Request(url, headers=headers)
    used_urllib = std_urllib
    switch_to_requests = False

    while not downloaded and attempt_cnt < max_attempts:
        attempt_cnt += 1
        try:
            if used_urllib is std_urllib:
                # urllib2 (Python 2) / urllib.request (Python 3) does the right thing for http proxy setups,
                # urllib does not!
                url_fd = std_urllib.urlopen(url_req, timeout=timeout)
                status_code = url_fd.getcode()
            else:
                # fall back to 'requests' package (e.g. for SSL issues with urllib2), streaming the response
                response = requests.get(url, headers=headers, stream=True, timeout=timeout)
                status_code = response.status_code
                response.raise_for_status()
                url_fd = response.raw
                url_fd.decode_content = True
            _log.debug('response code for given url %s: %s' % (url, status_code))
            # NOTE(review): the full download is read into memory before being written out —
            # assumes downloaded files fit in memory
            write_file(path, url_fd.read(), forced=forced, backup=True)
            _log.info("Downloaded file %s from url %s to %s" % (filename, url, path))
            downloaded = True
            url_fd.close()
        except used_urllib.HTTPError as err:
            if used_urllib is std_urllib:
                status_code = err.code
            if status_code == 403 and attempt_cnt == 1:
                # a 403 on the first attempt may be resolved by retrying via the 'requests' package
                switch_to_requests = True
            elif 400 <= status_code <= 499:
                # other client errors (4xx) won't be fixed by retrying
                _log.warning("URL %s was not found (HTTP response code %s), not trying again" % (url, status_code))
                break
            else:
                _log.warning("HTTPError occurred while trying to download %s to %s: %s" % (url, path, err))
        except IOError as err:
            _log.warning("IOError occurred while trying to download %s to %s: %s" % (url, path, err))
            # SSL handshake failures with urllib2 may be resolved by switching to the 'requests' package
            error_re = re.compile(r"<urlopen error \[Errno 1\] _ssl.c:.*: error:.*:"
                                  "SSL routines:SSL23_GET_SERVER_HELLO:sslv3 alert handshake failure>")
            if error_re.match(str(err)):
                switch_to_requests = True
        except Exception as err:
            raise EasyBuildError("Unexpected error occurred when trying to download %s to %s: %s", url, path, err)

        if not downloaded and attempt_cnt < max_attempts:
            _log.info("Attempt %d of downloading %s to %s failed, trying again..." % (attempt_cnt, url, path))
            if used_urllib is std_urllib and switch_to_requests:
                if not HAVE_REQUESTS:
                    raise EasyBuildError("SSL issues with urllib2. If you are using RHEL/CentOS 6.x please "
                                         "install the python-requests and pyOpenSSL RPM packages and try again.")
                _log.info("Downloading using requests package instead of urllib2")
                used_urllib = requests

    if downloaded:
        _log.info("Successful download of file %s from url %s to path %s" % (filename, url, path))
        return path
    else:
        _log.warning("Download of %s to %s failed, done trying" % (url, path))
        return None
def create_index(path, ignore_dirs=None):
    """
    Create index for files in specified path.

    :param path: directory to index
    :param ignore_dirs: list of directory names to leave out of the index
    :return: set of file paths, relative to the indexed directory
    """
    if ignore_dirs is None:
        ignore_dirs = []

    if not os.path.exists(path):
        raise EasyBuildError("Specified path does not exist: %s", path)
    elif not os.path.isdir(path):
        raise EasyBuildError("Specified path is not a directory: %s", path)

    index = set()
    for (dirpath, dirnames, filenames) in os.walk(path, topdown=True, followlinks=True):
        # location of this directory relative to the indexed path; avoid entries starting with './'
        rel_dirpath = os.path.relpath(dirpath, path)
        if rel_dirpath == '.':
            rel_dirpath = ''

        for filename in filenames:
            index.add(os.path.join(rel_dirpath, filename))

        # do not consider (certain) hidden directories
        # note: we still need to consider e.g., .local !
        # replace list elements using [:], so os.walk doesn't process deleted directories
        # see https://stackoverflow.com/questions/13454164/os-walk-without-hidden-folders
        dirnames[:] = [d for d in dirnames if d not in ignore_dirs]

    return index
def dump_index(path, max_age_sec=None):
    """
    Create index for files in specified path, and dump it to file (alphabetically sorted).

    :param path: directory to index; the index file is created inside it
    :param max_age_sec: validity period of the index in seconds (0 means no expiration);
                        taken from the 'index_max_age' build option if not specified
    :return: path to the index file that was written
    """
    if max_age_sec is None:
        max_age_sec = build_option('index_max_age')

    index_fp = os.path.join(path, PATH_INDEX_FILENAME)
    index_contents = create_index(path)

    curr_ts = datetime.datetime.now()
    if max_age_sec == 0:
        # index without expiration date
        end_ts = datetime.datetime.max
    else:
        end_ts = curr_ts + datetime.timedelta(0, max_age_sec)

    # header lines record creation & expiration timestamps (parsed again by load_index)
    lines = [
        "# created at: %s" % str(curr_ts),
        "# valid until: %s" % str(end_ts),
    ]
    lines.extend(sorted(index_contents))
    write_file(index_fp, '\n'.join(lines), always_overwrite=False)

    return index_fp
def load_index(path, ignore_dirs=None):
    """
    Load index for specified path, and return contents (or None if no index exists).

    :param path: directory for which the index file (PATH_INDEX_FILENAME) should be loaded
    :param ignore_dirs: list of directory names; indexed files located in such directories are filtered out
    :return: set of (relative) file paths, or None if there is no (valid, non-empty) index
    """
    if ignore_dirs is None:
        ignore_dirs = []

    index_fp = os.path.join(path, PATH_INDEX_FILENAME)
    index = set()

    if build_option('ignore_index'):
        _log.info("Ignoring index for %s...", path)
    elif os.path.exists(index_fp):
        lines = read_file(index_fp).splitlines()

        valid_ts_regex = re.compile("^# valid until: (.*)", re.M)
        valid_ts = None

        for line in lines:
            # extract "valid until" timestamp, so we can check whether index is still valid
            # (only the first matching header line is considered)
            if valid_ts is None:
                res = valid_ts_regex.match(line)
            else:
                res = None

            if res:
                valid_ts = res.group(1)
                try:
                    # timestamp format as produced by str(datetime.datetime) in dump_index
                    valid_ts = datetime.datetime.strptime(valid_ts, '%Y-%m-%d %H:%M:%S.%f')
                except ValueError as err:
                    raise EasyBuildError("Failed to parse timestamp '%s' for index at %s: %s", valid_ts, path, err)

            elif line.startswith('#'):
                # other header/comment lines are skipped
                _log.info("Ignoring unknown header line '%s' in index for %s", line, path)

            else:
                # filter out files that are in an ignored directory
                path_dirs = line.split(os.path.sep)[:-1]
                if not any(d in path_dirs for d in ignore_dirs):
                    index.add(line)

        # check whether index is still valid
        if valid_ts:
            curr_ts = datetime.datetime.now()
            if curr_ts > valid_ts:
                print_warning("Index for %s is no longer valid (too old), so ignoring it...", path)
                index = None
            else:
                print_msg("found valid index for %s, so using it...", path)

    # an empty index is treated the same as no index at all
    return index or None
def find_easyconfigs(path, ignore_dirs=None):
    """
    Find .eb easyconfig files in path

    :param path: path to a file or a directory to search in
    :param ignore_dirs: list of directory names not to descend into
    :return: list of paths to found easyconfig files
    """
    # a path to an existing file is returned as-is
    if os.path.isfile(path):
        return [path]

    if ignore_dirs is None:
        ignore_dirs = []

    # walk the tree, collecting every *.eb file except the TEMPLATE.eb placeholder
    found = []
    path = os.path.abspath(path)
    for dirpath, dirnames, filenames in os.walk(path, topdown=True):
        for filename in filenames:
            if filename.endswith('.eb') and filename != 'TEMPLATE.eb':
                spec = os.path.join(dirpath, filename)
                _log.debug("Found easyconfig %s" % spec)
                found.append(spec)
        # prune ignored subdirectories in-place, so os.walk doesn't descend into them
        dirnames[:] = [d for d in dirnames if d not in ignore_dirs]

    return found
def find_glob_pattern(glob_pattern, fail_on_no_match=True):
    """Find unique file/dir matching glob_pattern (raises error if more than one match is found)"""
    # in (extended) dry run mode, just return the pattern itself
    if build_option('extended_dry_run'):
        return glob_pattern

    matches = glob.glob(glob_pattern)
    if not matches and not fail_on_no_match:
        return None
    if len(matches) != 1:
        raise EasyBuildError("Was expecting exactly one match for '%s', found %d: %s",
                             glob_pattern, len(matches), matches)
    return matches[0]
def search_file(paths, query, short=False, ignore_dirs=None, silent=False, filename_only=False, terse=False,
                case_sensitive=False):
    """
    Search for files using in specified paths using specified search query (regular expression)

    :param paths: list of paths to search in
    :param query: search query to use (regular expression); will be used case-insensitive
    :param short: figure out common prefix of hits, use variable to factor it out
    :param ignore_dirs: list of directories to ignore (default: ['.git', '.svn'])
    :param silent: whether or not to remain silent (don't print anything)
    :param filename_only: only return filenames, not file paths
    :param terse: stick to terse (machine-readable) output, as opposed to pretty-printing
    :param case_sensitive: compile the search query as a case-sensitive regex
    :return: 2-tuple of list of (variable name, common prefix) definitions and list of hits
    """
    if ignore_dirs is None:
        ignore_dirs = ['.git', '.svn']
    if not isinstance(ignore_dirs, list):
        raise EasyBuildError("search_file: ignore_dirs (%s) should be of type list, not %s",
                             ignore_dirs, type(ignore_dirs))

    # escape some special characters in query that may also occur in actual software names: +
    # do not use re.escape, since that breaks queries with genuine regex characters like ^ or .*
    query = re.sub('([+])', r'\\\1', query)

    # compile regex, case-insensitive
    try:
        if case_sensitive:
            query = re.compile(query)
        else:
            # compile regex, case-insensitive
            query = re.compile(query, re.I)
    except re.error as err:
        raise EasyBuildError("Invalid search query: %s", err)

    var_defs = []
    hits = []
    var_index = 1
    var = None
    for path in paths:
        path_hits = []
        if not terse:
            # NOTE(review): message always says 'case-insensitive', even when case_sensitive=True — confirm intent
            print_msg("Searching (case-insensitive) for '%s' in %s " % (query.pattern, path), log=_log, silent=silent)

        # use index for this path if available, create one on the fly otherwise
        path_index = load_index(path, ignore_dirs=ignore_dirs)
        if path_index is None or build_option('ignore_index'):
            if os.path.exists(path):
                _log.info("No index found for %s, creating one...", path)
                path_index = create_index(path, ignore_dirs=ignore_dirs)
            else:
                path_index = []
        else:
            _log.info("Index found for %s, so using it...", path)

        # the query is only matched against the filename, not the full path
        for filepath in path_index:
            filename = os.path.basename(filepath)
            if query.search(filename):
                if not path_hits:
                    # allocate a $CFGS<n> variable name for this path (used when 'short' is enabled)
                    var = "CFGS%d" % var_index
                    var_index += 1
                if filename_only:
                    path_hits.append(filename)
                else:
                    path_hits.append(os.path.join(path, filepath))

        path_hits = sorted(path_hits)

        if path_hits:
            common_prefix = det_common_path_prefix(path_hits)
            # only factor out the common prefix when doing so actually shortens the output
            if not terse and short and common_prefix is not None and len(common_prefix) > len(var) * 2:
                var_defs.append((var, common_prefix))
                hits.extend([os.path.join('$%s' % var, fn[len(common_prefix) + 1:]) for fn in path_hits])
            else:
                hits.extend(path_hits)

    return var_defs, hits
def dir_contains_files(path):
    """Return True if the given directory does contain any file in itself or any subdirectory"""
    # walk the tree and stop as soon as one directory with files is encountered
    for _root, _dirs, files in os.walk(path):
        if files:
            return True
    return False
def find_eb_script(script_name):
    """Find EasyBuild script with given name (in easybuild/scripts subdirectory)."""

    # location of this module is used to derive the location of the 'easybuild/scripts' directory
    filetools, eb_dir = __file__, None

    if os.path.isabs(filetools):
        eb_dir = os.path.dirname(os.path.dirname(filetools))
    else:
        # go hunting for absolute path to filetools module via sys.path;
        # we can't rely on os.path.abspath or os.path.realpath, since they leverage os.getcwd()...
        for path in sys.path:
            path = os.path.abspath(path)
            if os.path.exists(os.path.join(path, filetools)):
                eb_dir = os.path.dirname(os.path.dirname(os.path.join(path, filetools)))
                break

    if eb_dir is None:
        raise EasyBuildError("Failed to find parent directory for 'easybuild/scripts' subdirectory")

    script_loc = os.path.join(eb_dir, 'scripts', script_name)

    if not os.path.exists(script_loc):
        prev_script_loc = script_loc

        # fallback mechanism: check in location relative to location of 'eb'
        eb_path = os.getenv('EB_SCRIPT_PATH') or which('eb')
        if eb_path is None:
            _log.warning("'eb' not found in $PATH, failed to determine installation prefix")
        else:
            # installation prefix is two levels up from the resolved 'eb' script (<prefix>/bin/eb)
            install_prefix = os.path.dirname(os.path.dirname(resolve_path(eb_path)))
            script_loc = os.path.join(install_prefix, 'easybuild', 'scripts', script_name)

        if not os.path.exists(script_loc):
            raise EasyBuildError("Script '%s' not found at expected location: %s or %s",
                                 script_name, prev_script_loc, script_loc)

    return script_loc
def compute_checksum(path, checksum_type=DEFAULT_CHECKSUM):
    """
    Compute checksum of specified file.

    :param path: Path of file to compute checksum for
    :param checksum_type: type(s) of checksum ('adler32', 'crc32', 'md5' (default), 'sha1', 'sha256', 'sha512', 'size')
    """
    # look up the checksum function for the requested type
    try:
        checksum_function = CHECKSUM_FUNCTIONS[checksum_type]
    except KeyError:
        raise EasyBuildError("Unknown checksum type (%s), supported types are: %s",
                             checksum_type, CHECKSUM_FUNCTIONS.keys())

    try:
        return checksum_function(path)
    except IOError as err:
        raise EasyBuildError("Failed to read %s: %s", path, err)
    except MemoryError as err:
        # gracefully degrade rather than crash when the file is too large to checksum
        _log.warning("A memory error occurred when computing the checksum for %s: %s" % (path, err))
        return 'dummy_checksum_due_to_memory_error'
def calc_block_checksum(path, algorithm):
    """
    Calculate a checksum of a file by reading it in blocks, to limit memory usage.

    :param path: path of file to compute checksum for
    :param algorithm: checksum object with a hashlib-style update()/hexdigest() interface
    """
    # We pick a blocksize of 16 MB: it's a multiple of the internal
    # blocksize of md5/sha1 (64) and gave the best speed results
    try:
        # in hashlib, blocksize is a class parameter
        # NOTE(review): hashlib objects expose 'block_size' (not 'blocksize'), so for them this
        # raises AttributeError and the fallback below is used — confirm whether that is intended
        blocksize = algorithm.blocksize * 262144  # 2^18
    except AttributeError:
        blocksize = 16777216  # 2^24
    _log.debug("Using blocksize %s for calculating the checksum" % blocksize)

    try:
        # use a context manager so the file handle is also closed when reading fails halfway
        # (the previous implementation leaked the handle on IOError)
        with open(path, 'rb') as handle:
            for block in iter(lambda: handle.read(blocksize), b''):
                algorithm.update(block)
    except IOError as err:
        raise EasyBuildError("Failed to read %s: %s", path, err)

    return algorithm.hexdigest()
def verify_checksum(path, checksums):
    """
    Verify checksum of specified file.

    :param path: path of file to verify checksum of
    :param checksums: checksum value(s) (and type, optionally, default is MD5), e.g., 'af314', ('sha', '5ec1b')
    """
    filename = os.path.basename(path)

    # if no checksum is provided, pretend checksum to be valid, unless presence of checksums to verify is enforced
    if checksums is None:
        if build_option('enforce_checksums'):
            raise EasyBuildError("Missing checksum for %s", filename)
        else:
            return True

    # make sure we have a list of checksums
    if not isinstance(checksums, list):
        checksums = [checksums]

    for checksum in checksums:
        if isinstance(checksum, dict):
            # a dict entry maps a filename to its checksum
            if filename in checksum:
                # Set this to a string-type checksum
                checksum = checksum[filename]
            elif build_option('enforce_checksums'):
                raise EasyBuildError("Missing checksum for %s", filename)
            else:
                # Set to None and allow to fail elsewhere
                # NOTE(review): a None checksum falls through to the 'Invalid checksum spec' error
                # below rather than being skipped — confirm whether that is the intended failure mode
                checksum = None

        if isinstance(checksum, string_type):
            # if no checksum type is specified, it is assumed to be MD5 (32 characters) or SHA256 (64 characters)
            if len(checksum) == 64:
                typ = CHECKSUM_TYPE_SHA256
            elif len(checksum) == 32:
                typ = CHECKSUM_TYPE_MD5
            else:
                raise EasyBuildError("Length of checksum '%s' (%d) does not match with either MD5 (32) or SHA256 (64)",
                                     checksum, len(checksum))

        elif isinstance(checksum, tuple):
            # if checksum is specified as a tuple, it could either be specifying:
            # * the type of checksum + the checksum value
            # * a set of alternative valid checksums to consider => recursive call
            if len(checksum) == 2 and checksum[0] in CHECKSUM_FUNCTIONS:
                typ, checksum = checksum
            else:
                _log.info("Found %d alternative checksums for %s, considering them one-by-one...", len(checksum), path)
                for cand_checksum in checksum:
                    if verify_checksum(path, cand_checksum):
                        _log.info("Found matching checksum for %s: %s", path, cand_checksum)
                        return True
                    else:
                        _log.info("Ignoring non-matching checksum for %s (%s)...", path, cand_checksum)
                # NOTE(review): if none of the alternatives match, execution falls through to
                # compute_checksum below with 'typ' possibly unbound — confirm intended behavior

        else:
            raise EasyBuildError("Invalid checksum spec '%s', should be a string (MD5) or 2-tuple (type, value).",
                                 checksum)

        actual_checksum = compute_checksum(path, typ)
        _log.debug("Computed %s checksum for %s: %s (correct checksum: %s)" % (typ, path, actual_checksum, checksum))

        if actual_checksum != checksum:
            return False

    # if we land here, all checksums have been verified to be correct
    return True
def is_sha256_checksum(value):
    """Check whether provided string is a SHA256 checksum."""
    if not isinstance(value, string_type):
        _log.debug("Non-string value %s is not a SHA256 checksum", value)
        return False

    # a SHA256 checksum is a 64-character lowercase hexadecimal string
    if re.match('^[0-9a-f]{64}$', value):
        _log.debug("String value '%s' has the correct format to be a SHA256 checksum", value)
        return True
    else:
        _log.debug("String value '%s' does NOT have the correct format to be a SHA256 checksum", value)
        return False
def find_base_dir():
    """
    Try to locate a possible new base directory
    - this is typically a single subdir, e.g. from untarring a tarball
    - when extracting multiple tarballs in the same directory,
      expect only the first one to give the correct path

    NOTE: changes the current working directory as a side effect (via change_dir).
    """
    def get_local_dirs_purged():
        # e.g. always purge the log directory
        # and hidden directories
        ignoredirs = ["easybuild"]

        lst = os.listdir(os.getcwd())
        lst = [d for d in lst if not d.startswith('.') and d not in ignoredirs]
        return lst

    lst = get_local_dirs_purged()
    new_dir = os.getcwd()
    # keep descending (and changing into) directories as long as there's exactly one candidate entry
    while len(lst) == 1:
        new_dir = os.path.join(os.getcwd(), lst[0])
        if not os.path.isdir(new_dir):
            break

        change_dir(new_dir)
        lst = get_local_dirs_purged()

    # make sure it's a directory, and not a (single) file that was in a tarball for example
    while not os.path.isdir(new_dir):
        new_dir = os.path.dirname(new_dir)

    _log.debug("Last dir list %s" % lst)
    _log.debug("Possible new dir %s found" % new_dir)
    return new_dir
def find_extension(filename):
    """Find best match for filename extension."""
    # sort by length, so longest file extensions get preference (e.g. '.tar.gz' over '.gz')
    suffixes = sorted(EXTRACT_CMDS.keys(), key=len, reverse=True)
    pat = r'(?P<ext>%s)$' % '|'.join(s.replace('.', '\\.') for s in suffixes)
    res = re.search(pat, filename, flags=re.IGNORECASE)

    if res is None:
        raise EasyBuildError('Unknown file type for file %s', filename)

    return res.group('ext')
def extract_cmd(filepath, overwrite=False):
    """
    Determines the file type of file at filepath, returns extract cmd based on file suffix

    :param filepath: path to archive file
    :param overwrite: include option to overwrite existing files in the command (only supported for zip files)
    """
    filename = os.path.basename(filepath)
    ext = find_extension(filename)
    # strip the extension *suffix* to determine the target name;
    # note: filename.rstrip(ext) would be wrong here, since rstrip strips a *set* of
    # characters from the end (e.g. 'argz.gz'.rstrip('.gz') yields 'ar', not 'argz')
    target = filename[:-len(ext)]

    cmd_tmpl = EXTRACT_CMDS[ext.lower()]

    if overwrite:
        if 'unzip -qq' in cmd_tmpl:
            cmd_tmpl = cmd_tmpl.replace('unzip -qq', 'unzip -qq -o')

    return cmd_tmpl % {'filepath': filepath, 'target': target}
def is_patch_file(path):
    """Determine whether file at specified path is a patch file (based on +++ and --- lines being present)."""
    txt = read_file(path)
    # a unified diff contains both '+++ <file>' and '--- <file>' marker lines
    has_new_file_marker = re.search(r'^\+{3}\s', txt, re.M)
    has_old_file_marker = re.search(r'^-{3}\s', txt, re.M)
    return bool(has_new_file_marker and has_old_file_marker)
def det_patched_files(path=None, txt=None, omit_ab_prefix=False, github=False, filter_deleted=False):
    """
    Determine list of patched files from a patch.
    It searches for "+++ path/to/patched/file" lines to determine the patched files.
    Note: does not correctly handle filepaths with spaces.

    :param path: the path to the diff
    :param txt: the contents of the diff (either path or txt should be give)
    :param omit_ab_prefix: ignore the a/ or b/ prefix of the files
    :param github: only consider lines that start with 'diff --git' to determine list of patched files
    :param filter_deleted: filter out all files that were deleted by the patch
    """
    if github:
        # e.g. "diff --git a/path/to/file b/path/to/file"
        regex_txt = r"^diff --git (?:a/)?\S+\s*(?P<ab_prefix>b/)?(?P<file>\S+)"
    else:
        # e.g. "+++ b/path/to/file"
        regex_txt = r"^\s*\+{3}\s+(?P<ab_prefix>[ab]/)?(?P<file>\S+)"
    patched_regex = re.compile(regex_txt, re.M)

    if path is not None:
        # take into account that file may contain non-UTF-8 characters;
        # so, read a byte string, and decode to UTF-8 string (ignoring any non-UTF-8 characters);
        txt = read_file(path, mode='rb').decode('utf-8', 'replace')
    elif txt is None:
        raise EasyBuildError("Either a file path or a string representing a patch should be supplied")

    patched_files = []
    for match in patched_regex.finditer(txt):
        patched_file = match.group('file')
        prefix = match.group('ab_prefix')
        if prefix is not None and not omit_ab_prefix:
            patched_file = prefix + patched_file

        if patched_file in ['/dev/null']:
            # /dev/null entries correspond to created/deleted files, not actual patched files
            _log.debug("Ignoring patched file %s", patched_file)
            continue

        # check whether the patch marks this file as deleted
        delete_regex = re.compile(r"%s\ndeleted file" % re.escape(os.path.basename(patched_file)), re.M)
        if filter_deleted and delete_regex.search(txt):
            _log.debug("Filtering out deleted file %s", patched_file)
            continue

        patched_files.append(patched_file)

    return patched_files
def guess_patch_level(patched_files, parent_dir):
    """Guess patch level based on list of patched files and specified directory."""
    patch_level = None
    for patched_file in patched_files:
        # drop a growing number of leading path components until the remainder exists in parent_dir
        components = patched_file.split(os.path.sep)
        found_level = None
        for strip_cnt in range(len(components)):
            if os.path.isfile(os.path.join(parent_dir, *components[strip_cnt:])):
                found_level = strip_cnt
                break
        if found_level is not None:
            # first patched file that matches an existing file determines the patch level
            patch_level = found_level
            break
        _log.debug('No match found for %s, trying next patched file...' % patched_file)

    return patch_level
def apply_patch(patch_file, dest, fn=None, copy=False, level=None, use_git_am=False):
    """
    Apply a patch to source code in directory dest
    - assume unified diff created with "diff -ru old new"

    :param patch_file: path to patch file to apply
    :param dest: directory to apply the patch in
    :param fn: specific file to check for existence before patching
    :param copy: only copy the patch file into dest, don't actually apply it
    :param level: patch level to use (derived from patch contents if not specified)
    :param use_git_am: apply the patch using 'git am' rather than the 'patch' command
    :return: True on success
    """
    if build_option('extended_dry_run'):
        # skip checking of files in dry run mode
        patch_filename = os.path.basename(patch_file)
        dry_run_msg("* applying patch file %s" % patch_filename, silent=build_option('silent'))
    elif not os.path.isfile(patch_file):
        raise EasyBuildError("Can't find patch %s: no such file", patch_file)
    elif fn and not os.path.isfile(fn):
        raise EasyBuildError("Can't patch file %s: no such file", fn)
    elif not os.path.isdir(dest):
        raise EasyBuildError("Can't patch directory %s: no such directory", dest)

    # copy missing files
    if copy:
        if build_option('extended_dry_run'):
            dry_run_msg(" %s copied to %s" % (patch_file, dest), silent=build_option('silent'))
        else:
            copy_file(patch_file, dest)
            _log.debug("Copied patch %s to dir %s" % (patch_file, dest))
        # early exit, work is done after copying
        return True

    # use absolute paths
    apatch = os.path.abspath(patch_file)
    adest = os.path.abspath(dest)

    # attempt extracting the patch if it ends in .patch.gz, .patch.bz2, .patch.xz
    # split in name + extension
    apatch_root, apatch_file = os.path.split(apatch)
    apatch_name, apatch_extension = os.path.splitext(apatch_file)
    # supports only bz2, gz and xz; zip can be archives which are not supported
    if apatch_extension in ['.gz', '.bz2', '.xz']:
        # split again to get the second extension
        apatch_subname, apatch_subextension = os.path.splitext(apatch_name)
        if apatch_subextension == ".patch":
            workdir = tempfile.mkdtemp(prefix='eb-patch-')
            _log.debug("Extracting the patch to: %s", workdir)
            # extracting the patch
            apatch_dir = extract_file(apatch, workdir, change_into_dir=False)
            change_dir(apatch_dir)
            apatch = os.path.join(apatch_dir, apatch_name)

    if level is None and build_option('extended_dry_run'):
        level = '<derived>'
    elif level is None:
        # guess value for -p (patch level)
        # - based on +++ lines
        # - first +++ line that matches an existing file determines guessed level
        # - we will try to match that level from current directory
        patched_files = det_patched_files(path=apatch)

        if not patched_files:
            # without any usable "+++ <file>" lines there's no way to derive a patch level
            raise EasyBuildError("Can't guess patchlevel from patch %s: no testfile line found in patch", apatch)

        level = guess_patch_level(patched_files, adest)

        if level is None:  # level can also be 0 (zero), so don't use "not level"
            # no match
            raise EasyBuildError("Can't determine patch level for patch %s from directory %s", patch_file, adest)
        else:
            _log.debug("Guessed patch level %d for patch %s" % (level, patch_file))

    else:
        _log.debug("Using specified patch level %d for patch %s" % (level, patch_file))

    if use_git_am:
        # 'git am' expects the path(s) to patch/mailbox files as arguments;
        # note: previous version included a stray 'patch' argument here,
        # which 'git am' would interpret as a (non-existing) mailbox file
        patch_cmd = "git am %s" % apatch
    else:
        patch_cmd = "patch -b -p%s -i %s" % (level, apatch)
    out, ec = run.run_cmd(patch_cmd, simple=False, path=adest, log_ok=False, trace=False)

    if ec:
        raise EasyBuildError("Couldn't apply patch file %s. Process exited with code %s: %s", patch_file, ec, out)

    return ec == 0
def apply_regex_substitutions(path, regex_subs, backup='.orig.eb'):
    """
    Apply specified list of regex substitutions.

    :param path: path to file to patch
    :param regex_subs: list of substitutions to apply, specified as (<regexp pattern>, <replacement string>)
    :param backup: create backup of original file with specified suffix (no backup if value evaluates to False)
    """
    # only report when in 'dry run' mode
    if build_option('extended_dry_run'):
        dry_run_msg("applying regex substitutions to file %s" % path, silent=build_option('silent'))
        for regex, subtxt in regex_subs:
            dry_run_msg(" * regex pattern '%s', replacement string '%s'" % (regex, subtxt))

    else:
        _log.info("Applying following regex substitutions to %s: %s", path, regex_subs)

        # compile patterns into a separate list, rather than replacing entries of 'regex_subs' in place,
        # to avoid a surprising side effect on the list object passed in by the caller
        compiled_regex_subs = [(re.compile(regex), subtxt) for (regex, subtxt) in regex_subs]

        if backup:
            backup_ext = backup
        else:
            # no (persistent) backup file is created if empty string value is passed to 'backup' in fileinput.input
            backup_ext = ''

        try:
            # fileinput with inplace=1 redirects stdout into the file being rewritten,
            # so every (possibly substituted) line must be written back via sys.stdout
            for line_id, line in enumerate(fileinput.input(path, inplace=1, backup=backup_ext)):
                for regex, subtxt in compiled_regex_subs:
                    match = regex.search(line)
                    if match:
                        _log.info("Replacing line %d in %s: '%s' -> '%s'", (line_id + 1), path, match.group(0), subtxt)
                        line = regex.sub(subtxt, line)
                sys.stdout.write(line)
        except OSError as err:
            raise EasyBuildError("Failed to patch %s: %s", path, err)
def modify_env(old, new):
    """NO LONGER SUPPORTED: use modify_env from easybuild.tools.environment instead"""
    # stub kept only so old callers get a clear 'no longer supported' error via the logger
    _log.nosupport("moved modify_env to easybuild.tools.environment", "2.0")
def convert_name(name, upper=False):
    """
    Converts name so it can be used as variable name
    """
    # plain character remapping, no regexps required;
    # replacement strings don't contain any mapped characters, so application order is irrelevant
    for old_char, replacement in (('+', 'plus'), ('-', 'min'), ('.', '')):
        name = name.replace(old_char, replacement)

    return name.upper() if upper else name
def adjust_permissions(provided_path, permission_bits, add=True, onlyfiles=False, onlydirs=False, recursive=True,
                       group_id=None, relative=True, ignore_errors=False, skip_symlinks=None):
    """
    Change permissions for specified path, using specified permission bits

    :param provided_path: path to file or directory to adjust permissions of
    :param permission_bits: permission bits to apply (stat.S_* constants, possibly OR-ed together)
    :param add: add permissions relative to current permissions (only relevant if 'relative' is set to True)
    :param onlyfiles: only change permissions on files (not directories)
    :param onlydirs: only change permissions on directories (not files)
    :param recursive: change permissions recursively (only makes sense if path is a directory)
    :param group_id: also change group ownership to group with this group ID
    :param relative: add/remove permissions relative to current permissions (if False, hard set specified permissions)
    :param ignore_errors: ignore errors that occur when changing permissions
    (up to a maximum ratio specified by --max-fail-ratio-adjust-permissions configuration option)
    :param skip_symlinks: deprecated, no longer used (symlinks are never followed)

    Add or remove (if add is False) permission_bits from all files (if onlydirs is False)
    and directories (if onlyfiles is False) in path
    """
    if skip_symlinks is not None:
        depr_msg = "Use of 'skip_symlinks' argument for 'adjust_permissions' is deprecated "
        depr_msg += "(symlinks are never followed anymore)"
        _log.deprecated(depr_msg, '4.0')
    provided_path = os.path.abspath(provided_path)
    if recursive:
        _log.info("Adjusting permissions recursively for %s", provided_path)
        # collect the full list of paths to consider up front (top path + everything below it)
        allpaths = [provided_path]
        for root, dirs, files in os.walk(provided_path):
            paths = []
            if not onlydirs:
                paths.extend(files)
            if not onlyfiles:
                # os.walk skips symlinked dirs by default, i.e., no special handling needed here
                paths.extend(dirs)
            for path in paths:
                allpaths.append(os.path.join(root, path))
    else:
        _log.info("Adjusting permissions for %s (no recursion)", provided_path)
        allpaths = [provided_path]
    # track failures: fail_cnt counts ignored errors, failed_paths collects fatal ones
    failed_paths = []
    fail_cnt = 0
    err_msg = None
    for path in allpaths:
        try:
            # don't change permissions if path is a symlink, since we're not checking where the symlink points to
            # this is done because of security concerns (symlink may point out of installation directory)
            # (note: os.lchmod is not supported on Linux)
            if os.path.islink(path):
                _log.debug("Not changing permissions for %s, since it's a symlink", path)
            else:
                # determine current permissions
                current_perms = os.lstat(path)[stat.ST_MODE]
                _log.debug("Current permissions for %s: %s", path, oct(current_perms))
                if relative:
                    # relative permissions (add or remove)
                    if add:
                        _log.debug("Adding permissions for %s: %s", path, oct(permission_bits))
                        new_perms = current_perms | permission_bits
                    else:
                        _log.debug("Removing permissions for %s: %s", path, oct(permission_bits))
                        new_perms = current_perms & ~permission_bits
                else:
                    # hard permissions bits (not relative)
                    new_perms = permission_bits
                    _log.debug("Hard setting permissions for %s: %s", path, oct(new_perms))
                # only actually do chmod if current permissions are not correct already
                # (this is important because chmod requires that files are owned by current user)
                if new_perms == current_perms:
                    _log.debug("Current permissions for %s are already OK: %s", path, oct(current_perms))
                else:
                    _log.debug("Changing permissions for %s to %s", path, oct(new_perms))
                    os.chmod(path, new_perms)
            if group_id:
                # only change the group id if it the current gid is different from what we want
                cur_gid = os.lstat(path).st_gid
                if cur_gid == group_id:
                    _log.debug("Group id of %s is already OK (%s)", path, group_id)
                else:
                    _log.debug("Changing group id of %s to %s", path, group_id)
                    # lchown operates on the symlink itself rather than its target
                    os.lchown(path, -1, group_id)
        except OSError as err:
            if ignore_errors:
                # ignore errors while adjusting permissions (for example caused by bad links)
                _log.info("Failed to chmod/chown %s (but ignoring it): %s", path, err)
                fail_cnt += 1
            else:
                failed_paths.append(path)
                err_msg = err
    if failed_paths:
        raise EasyBuildError("Failed to chmod/chown several paths: %s (last error: %s)", failed_paths, err_msg)
    # we ignore some errors, but if there are too many, something is definitely wrong
    fail_ratio = fail_cnt / float(len(allpaths))
    max_fail_ratio = float(build_option('max_fail_ratio_adjust_permissions'))
    if fail_ratio > max_fail_ratio:
        raise EasyBuildError("%.2f%% of permissions/owner operations failed (more than %.2f%%), "
                             "something must be wrong...", 100 * fail_ratio, 100 * max_fail_ratio)
    elif fail_cnt > 0:
        _log.debug("%.2f%% of permissions/owner operations failed, ignoring that...", 100 * fail_ratio)
def patch_perl_script_autoflush(path):
    """Patch Perl script to enable autoflush, so that e.g. run_cmd_qa receives all output to answer questions."""
    # only report when in 'dry run' mode
    if build_option('extended_dry_run'):
        dry_run_msg("Perl script patched: %s" % path, silent=build_option('silent'))
        return

    txt = read_file(path)
    origpath = "%s.eb.orig" % path
    write_file(origpath, txt)
    _log.debug("Patching Perl script %s for autoflush, original script copied to %s" % (path, origpath))

    # force autoflush for Perl print buffer by injecting statements right after the shebang line
    lines = txt.split('\n')
    newtxt = '\n'.join([
        lines[0],  # shebang line
        "\nuse IO::Handle qw();",
        "STDOUT->autoflush(1);\n",  # extra newline to separate from actual script
    ] + lines[1:])

    write_file(path, newtxt)
def mkdir(path, parents=False, set_gid=None, sticky=None):
    """
    Create a directory

    Directory is the path to create

    :param parents: create parent directories if needed (mkdir -p)
    :param set_gid: set group ID bit, to make subdirectories and files inherit group
    :param sticky: set the sticky bit on this directory (a.k.a. the restricted deletion flag),
                   to avoid users can removing/renaming files in this directory
    """
    if set_gid is None:
        set_gid = build_option('set_gid_bit')
    if sticky is None:
        sticky = build_option('sticky_bit')

    if not os.path.isabs(path):
        path = os.path.abspath(path)

    # exit early if path already exists
    if not os.path.exists(path):
        _log.info("Creating directory %s (parents: %s, set_gid: %s, sticky: %s)", path, parents, set_gid, sticky)
        # set_gid and sticky bits are only set on new directories, so we need to determine the existing parent path
        existing_parent_path = os.path.dirname(path)
        try:
            if parents:
                # climb up until we hit an existing path or the empty string (for relative paths)
                while existing_parent_path and not os.path.exists(existing_parent_path):
                    existing_parent_path = os.path.dirname(existing_parent_path)
                os.makedirs(path)
            else:
                os.mkdir(path)
        except OSError as err:
            raise EasyBuildError("Failed to create directory %s: %s", path, err)

        # set group ID and sticky bits, if desired
        bits = 0
        if set_gid:
            bits |= stat.S_ISGID
        if sticky:
            bits |= stat.S_ISVTX
        if bits:
            try:
                # apply the bits to the top-most newly created directory (and everything below it)
                new_subdir = path[len(existing_parent_path):].lstrip(os.path.sep)
                new_path = os.path.join(existing_parent_path, new_subdir.split(os.path.sep)[0])
                adjust_permissions(new_path, bits, add=True, relative=True, recursive=True, onlydirs=True)
            except OSError as err:
                # fixed typo in error message: 'groud' -> 'group'
                raise EasyBuildError("Failed to set group ID/sticky bit: %s", err)
    else:
        _log.debug("Not creating existing path %s" % path)
def det_lock_path(lock_name):
    """
    Determine full path for lock with specifed name.
    """
    # default location for locks is a hidden '.locks' subdirectory of the software install path
    locks_dir = build_option('locks_dir') or os.path.join(install_path('software'), '.locks')
    return os.path.join(locks_dir, '%s.lock' % lock_name)
def create_lock(lock_name):
    """Create lock with specified name."""
    lock_path = det_lock_path(lock_name)
    _log.info("Creating lock at %s...", lock_path)

    try:
        # a directory is used as a lock, since directory creation is an atomic operation
        mkdir(lock_path, parents=True)
        global_lock_names.add(lock_name)
    except EasyBuildError as err:
        # clean up the error message a bit, get rid of the "Failed to create directory" part + quotes
        reason = str(err).split(':', 1)[1]
        reason = reason.strip().replace("'", '').replace('"', '')
        raise EasyBuildError("Failed to create lock %s: %s", lock_path, reason)

    _log.info("Lock created: %s", lock_path)
def check_lock(lock_name):
    """
    Check whether a lock with specified name already exists.

    If it exists, either wait until it's released, or raise an error
    (depending on --wait-on-lock configuration option).
    """
    lock_path = det_lock_path(lock_name)
    if os.path.exists(lock_path):
        _log.info("Lock %s exists!", lock_path)
        wait_interval = build_option('wait_on_lock_interval')
        wait_limit = build_option('wait_on_lock_limit')
        # --wait-on-lock is deprecated, should use --wait-on-lock-limit and --wait-on-lock-interval instead
        wait_on_lock = build_option('wait_on_lock')
        if wait_on_lock is not None:
            depr_msg = "Use of --wait-on-lock is deprecated, use --wait-on-lock-limit and --wait-on-lock-interval"
            _log.deprecated(depr_msg, '5.0')
            # if --wait-on-lock-interval has default value and --wait-on-lock is specified too, the latter wins
            # (required for backwards compatibility)
            if wait_interval == DEFAULT_WAIT_ON_LOCK_INTERVAL and wait_on_lock > 0:
                wait_interval = wait_on_lock
            # if --wait-on-lock-limit is not specified we need to wait indefinitely if --wait-on-lock is specified,
            # since the original semantics of --wait-on-lock was that it specified the waiting time interval (no limit)
            if not wait_limit:
                wait_limit = -1
        # wait limit could be zero (no waiting), -1 (no waiting limit) or non-zero value (waiting limit in seconds)
        if wait_limit != 0:
            wait_time = 0
            # poll until the lock disappears, or until the wait limit is exceeded (-1 means no limit)
            while os.path.exists(lock_path) and (wait_limit == -1 or wait_time < wait_limit):
                print_msg("lock %s exists, waiting %d seconds..." % (lock_path, wait_interval),
                          silent=build_option('silent'))
                time.sleep(wait_interval)
                wait_time += wait_interval
            # distinguish between "lock was released" and "we gave up waiting"
            if os.path.exists(lock_path) and wait_limit != -1 and wait_time >= wait_limit:
                error_msg = "Maximum wait time for lock %s to be released reached: %s sec >= %s sec"
                raise EasyBuildError(error_msg, lock_path, wait_time, wait_limit)
            else:
                _log.info("Lock %s was released!", lock_path)
        else:
            # wait limit of zero means: don't wait at all, fail immediately
            raise EasyBuildError("Lock %s already exists, aborting!", lock_path)
    else:
        _log.info("Lock %s does not exist", lock_path)
def remove_lock(lock_name):
    """
    Remove lock with specified name.
    """
    lock_path = det_lock_path(lock_name)
    _log.info("Removing lock %s...", lock_path)
    remove_dir(lock_path)
    # discard does nothing if the lock was not registered in this session
    global_lock_names.discard(lock_name)
    _log.info("Lock removed: %s", lock_path)
def clean_up_locks():
    """
    Clean up all still existing locks that were created in this session.
    """
    # iterate over a copy, since remove_lock mutates global_lock_names
    for name in list(global_lock_names):
        remove_lock(name)
def clean_up_locks_signal_handler(signum, frame):
    """
    Signal handler, cleans up locks & exits with received signal number.
    """
    if not build_option('silent'):
        print_warning("signal received (%s), cleaning up locks (%s)..." % (signum, ', '.join(global_lock_names)))
    clean_up_locks()

    if signum == signal.SIGINT:
        # by default, a KeyboardInterrupt is raised with SIGINT, so keep doing so
        raise KeyboardInterrupt("keyboard interrupt")

    sys.exit(signum)
def register_lock_cleanup_signal_handlers():
    """
    Register signal handler for signals that cancel the current EasyBuild session,
    so we can clean up the locks that were created first.
    """
    # signals that terminate the session and hence require lock cleanup:
    # SIGABRT, SIGINT (Ctrl-C), SIGTERM (signal 15, soft kill, e.g. cancelled/timed-out Slurm job),
    # SIGQUIT (kinda like Ctrl-C)
    for signum in (signal.SIGABRT, signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
        signal.signal(signum, clean_up_locks_signal_handler)
def expand_glob_paths(glob_paths):
    """Expand specified glob paths to a list of unique non-glob paths to only files."""
    paths = []
    for glob_path in glob_paths:
        # expand '~' and resolve the glob pattern, retaining only regular files
        matches = [p for p in glob.glob(os.path.expanduser(glob_path)) if os.path.isfile(p)]
        if not matches:
            raise EasyBuildError("No files found using glob pattern '%s'", glob_path)
        paths.extend(matches)

    return nub(paths)
def weld_paths(path1, path2):
    """Weld two paths together, taking into account overlap between tail of 1st path with head of 2nd path."""
    # normalized copy of path1, used only for suffix comparisons
    stripped_path1 = path1.rstrip(os.path.sep)

    head = path2.rstrip(os.path.sep)
    tail = ''
    parts = path2.split(os.path.sep)
    # if path2 is an absolute path, make sure it stays that way
    if parts[0] == '':
        parts[0] = os.path.sep

    # peel components off the end of path2 until its remaining head is a suffix of path1
    while parts and not stripped_path1.endswith(head):
        tail = os.path.join(parts.pop(), tail)
        # os.path.join requires a non-empty argument list
        head = os.path.join(*parts) if parts else None

    return os.path.join(path1, tail)
def path_matches(path, paths):
    """Check whether given path matches any of the provided paths."""
    if not os.path.exists(path):
        return False
    # os.path.samefile compares actual filesystem entries, so symlinked duplicates also match
    return any(os.path.exists(cand) and os.path.samefile(path, cand) for cand in paths)
def rmtree2(path, n=3):
    """Wrapper around shutil.rmtree to make it more robust when used on NFS mounted file systems."""
    # deprecated: simply forwards to remove_dir; the 'n' (retry count) argument is no longer used
    _log.deprecated("Use 'remove_dir' rather than 'rmtree2'", '5.0')
    remove_dir(path)
def find_backup_name_candidate(src_file):
    """Returns a non-existing file to be used as destination for backup files"""

    def derive_backup_path(stamp):
        # e.g. 20170817234510 on Aug 17th 2017 at 23:45:10; PID added for extra uniqueness
        return '%s_%s_%s' % (src_file, stamp.strftime('%Y%m%d%H%M%S'), os.getpid())

    timestamp = datetime.datetime.now()
    dst_file = derive_backup_path(timestamp)
    # in the unlikely event of a collision, retry with a fresh timestamp until a free name is found
    while os.path.exists(dst_file):
        _log.debug("Backup of %s at %s already found at %s, trying again in a second...", src_file, dst_file, timestamp)
        time.sleep(1)
        timestamp = datetime.datetime.now()
        dst_file = derive_backup_path(timestamp)

    return dst_file
def back_up_file(src_file, backup_extension='bak', hidden=False, strip_fn=None):
    """
    Backs up a file appending a backup extension and timestamp to it (if there is already an existing backup).

    :param src_file: file to be back up
    :param backup_extension: extension to use for the backup file (can be empty or None)
    :param hidden: make backup hidden (leading dot in filename)
    :param strip_fn: strip specified trailing substring from filename of backup
    :return: location of backed up file
    """
    fn_prefix, fn_suffix = '', ''
    if hidden:
        fn_prefix = '.'
    if backup_extension:
        fn_suffix = '.%s' % backup_extension

    src_dir, src_fn = os.path.split(src_file)
    if strip_fn and src_fn.endswith(strip_fn):
        # remove the trailing substring exactly once;
        # note: str.rstrip would be wrong here, since it strips a *set* of characters,
        # not a suffix (e.g. 'foo.eb'.rstrip('.eb') would yield 'foo' rather than just dropping '.eb')
        src_fn = src_fn[:-len(strip_fn)]

    backup_fp = find_backup_name_candidate(os.path.join(src_dir, fn_prefix + src_fn + fn_suffix))
    copy_file(src_file, backup_fp)
    _log.info("File %s backed up in %s", src_file, backup_fp)

    return backup_fp
def move_logs(src_logfile, target_logfile):
    """Move log file(s)."""
    zip_log_cmd = build_option('zip_logs')

    mkdir(os.path.dirname(target_logfile), parents=True)
    prefix_len = len(src_logfile)
    try:
        # log rotation may have produced multiple files sharing this prefix
        for app_log in glob.glob('%s*' % src_logfile):
            # retain possible suffix
            new_log_path = target_logfile + app_log[prefix_len:]

            # retain old logs
            if os.path.exists(new_log_path):
                back_up_file(new_log_path)

            # move log to target path
            move_file(app_log, new_log_path)
            _log.info("Moved log file %s to %s" % (src_logfile, new_log_path))

            if zip_log_cmd:
                run.run_cmd("%s %s" % (zip_log_cmd, new_log_path))
                _log.info("Zipped log %s using '%s'", new_log_path, zip_log_cmd)

    except (IOError, OSError) as err:
        raise EasyBuildError("Failed to move log file(s) %s* to new log file %s*: %s",
                             src_logfile, target_logfile, err)
def cleanup(logfile, tempdir, testing, silent=False):
    """
    Cleanup the specified log file and the tmp directory, if desired.

    :param logfile: path to log file to clean up
    :param tempdir: path to temporary directory to clean up
    :param testing: are we in testing mode? if so, don't actually clean up anything
    :param silent: be silent (don't print anything to stdout)
    """
    # keep everything in testing mode, or when cleanup of the tmpdir was disabled
    if testing or not build_option('cleanup_tmpdir'):
        msg = "Keeping temporary log file(s) %s* and directory %s." % (logfile, tempdir)
        print_msg(msg, log=None, silent=testing or silent)
        return

    if logfile is not None:
        try:
            # log rotation may have produced additional numbered log files
            for log in [logfile] + glob.glob('%s.[0-9]*' % logfile):
                os.remove(log)
        except OSError as err:
            raise EasyBuildError("Failed to remove log file(s) %s*: %s", logfile, err)
        print_msg("Temporary log file(s) %s* have been removed." % (logfile), log=None, silent=testing or silent)

    if tempdir is not None:
        try:
            shutil.rmtree(tempdir, ignore_errors=True)
        except OSError as err:
            raise EasyBuildError("Failed to remove temporary directory %s: %s", tempdir, err)
        print_msg("Temporary directory %s has been removed." % tempdir, log=None, silent=testing or silent)
def copytree(src, dst, symlinks=False, ignore=None):
    """DEPRECATED and removed. Use copy_dir"""
    # stub kept only to raise a deprecation error for old callers; it no longer copies anything
    _log.deprecated("Use 'copy_dir' rather than 'copytree'", '4.0')
def encode_string(name):
    """
    This encoding function handles funky software names ad infinitum, like:
      example: '0_foo+0x0x#-$__'
      becomes: '0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'
    The intention is to have a robust escaping mechanism for names like c++, C# et al

    It has been inspired by the concepts seen at, but in lowercase style:
    * http://fossies.org/dox/netcdf-4.2.1.1/escapes_8c_source.html
    * http://celldesigner.org/help/CDH_Species_01.html
    * http://research.cs.berkeley.edu/project/sbp/darcsrepo-no-longer-updated/src/edu/berkeley/sbp/misc/ReflectiveWalker.java  # noqa
    and can be extended freely as per ISO/IEC 10646:2012 / Unicode 6.1 names:
    * http://www.unicode.org/versions/Unicode6.1.0/
    For readability of >2 words, it is suggested to use _CamelCase_ style.
    So, yes, '_GreekSmallLetterEtaWithPsiliAndOxia_' *could* indeed be a fully
    valid software name; software "electron" in the original spelling anyone? ;-)
    """
    # remap each character via the encoding charmap; characters without a mapping are kept as-is
    return ''.join(STRING_ENCODING_CHARMAP.get(char, char) for char in name)
def decode_string(name):
    """Decoding function to revert result of encode_string."""
    decoded = name
    # substitute each escaped sequence back to its original character
    for char, escaped_char in STRING_ENCODING_CHARMAP.items():
        decoded = re.sub(escaped_char, char, decoded)
    return decoded
def encode_class_name(name):
    """Return encoded version of class name, with standard easyblock class prefix prepended."""
    return EASYBLOCK_CLASS_PREFIX + encode_string(name)
def decode_class_name(name):
    """Return decoded version of class name."""
    if name.startswith(EASYBLOCK_CLASS_PREFIX):
        # drop the prefix, then revert the string encoding
        return decode_string(name[len(EASYBLOCK_CLASS_PREFIX):])

    # name is not encoded, apparently
    return name
def run_cmd(cmd, log_ok=True, log_all=False, simple=False, inp=None, regexp=True, log_output=False, path=None):
    """NO LONGER SUPPORTED: use run_cmd from easybuild.tools.run instead"""
    # stub kept only so old callers get a clear 'no longer supported' error via the logger
    _log.nosupport("run_cmd was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def run_cmd_qa(cmd, qa, no_qa=None, log_ok=True, log_all=False, simple=False, regexp=True, std_qa=None, path=None):
    """NO LONGER SUPPORTED: use run_cmd_qa from easybuild.tools.run instead"""
    # stub kept only so old callers get a clear 'no longer supported' error via the logger
    _log.nosupport("run_cmd_qa was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def parse_log_for_error(txt, regExp=None, stdout=True, msg=None):
    """NO LONGER SUPPORTED: use parse_log_for_error from easybuild.tools.run instead"""
    # stub kept only so old callers get a clear 'no longer supported' error via the logger
    _log.nosupport("parse_log_for_error was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def det_size(path):
    """
    Determine total size of given filepath (in bytes).
    """
    total_size = 0
    try:
        # walk install dir to determine total size
        for dirpath, _, filenames in os.walk(path):
            for filename in filenames:
                fullpath = os.path.join(dirpath, filename)
                # guard against dangling symlinks and files removed mid-walk
                if os.path.exists(fullpath):
                    total_size += os.path.getsize(fullpath)
    except OSError as err:
        _log.warn("Could not determine install size: %s" % err)

    return total_size
def find_flexlm_license(custom_env_vars=None, lic_specs=None):
    """
    Find FlexLM license.

    Considered specified list of environment variables;
    checks for path to existing license file or valid license server specification;
    duplicate paths are not retained in the returned list of license specs.

    If no license is found through environment variables, also consider 'lic_specs'.

    :param custom_env_vars: list of environment variables to considered (if None, only consider $LM_LICENSE_FILE)
    :param lic_specs: list of license specifications
    :return: tuple with list of valid license specs found and name of first valid environment variable
    """
    valid_lic_specs = []
    lic_env_var = None

    # regex for license server spec; format: <port>@<server>
    server_port_regex = re.compile(r'^[0-9]+@\S+$')

    # always consider $LM_LICENSE_FILE
    lic_env_vars = ['LM_LICENSE_FILE']
    if isinstance(custom_env_vars, string_type):
        lic_env_vars.insert(0, custom_env_vars)
    elif custom_env_vars is not None:
        lic_env_vars = custom_env_vars + lic_env_vars

    # grab values for defined environment variables
    cand_lic_specs = {}
    for env_var in lic_env_vars:
        if env_var in os.environ:
            cand_lic_specs[env_var] = nub(os.environ[env_var].split(os.pathsep))

    # also consider provided license spec (last)
    # use None as key to indicate that these license specs do not have an environment variable associated with them
    if lic_specs:
        cand_lic_specs[None] = lic_specs

    _log.debug("Candidate license specs: %s", cand_lic_specs)

    # check for valid license specs
    # order matters, so loop over original list of environment variables to consider
    for env_var in lic_env_vars + [None]:
        # obtain list of values to consider
        # take into account that some keys may be missing, and that individual values may be None
        values = [val for val in cand_lic_specs.get(env_var, None) or [] if val]
        _log.info("Considering %s to find FlexLM license specs: %s", env_var, values)

        for value in values:

            # license files to consider
            lic_files = None
            if os.path.isfile(value):
                lic_files = [value]
            elif os.path.isdir(value):
                # consider all *.dat and *.lic files in specified directory
                lic_files = sorted(glob.glob(os.path.join(value, '*.dat')) + glob.glob(os.path.join(value, '*.lic')))
            # valid license server spec
            elif server_port_regex.match(value):
                valid_lic_specs.append(value)

            # check whether license files are readable before retaining them
            if lic_files:
                for lic_file in lic_files:
                    try:
                        # only opened to verify readability; use a context manager so the
                        # file handle is closed again right away (previous version leaked it)
                        with open(lic_file, 'r'):
                            pass
                        valid_lic_specs.append(lic_file)
                    except IOError as err:
                        _log.warning("License file %s found, but failed to open it for reading: %s", lic_file, err)

        # stop after finding valid license specs, filter out duplicates
        if valid_lic_specs:
            valid_lic_specs = nub(valid_lic_specs)
            lic_env_var = env_var
            break

    if lic_env_var:
        via_msg = '$%s' % lic_env_var
    else:
        via_msg = "provided license spec"

    _log.info("Found valid license specs via %s: %s", via_msg, valid_lic_specs)

    return (valid_lic_specs, lic_env_var)
def copy_file(path, target_path, force_in_dry_run=False):
    """
    Copy a file from specified location to specified location

    :param path: the original filepath
    :param target_path: path to copy the file to
    :param force_in_dry_run: force copying of file during dry run
    """
    if not force_in_dry_run and build_option('extended_dry_run'):
        dry_run_msg("copied file %s to %s" % (path, target_path))
    else:
        try:
            target_exists = os.path.exists(target_path)
            if target_exists and os.path.samefile(path, target_path):
                # nothing to do when source and target are the same filesystem entry
                _log.debug("Not copying %s to %s since files are identical", path, target_path)
            # if target file exists and is owned by someone else than the current user,
            # try using shutil.copyfile to just copy the file contents
            # since shutil.copy2 will fail when trying to copy over file metadata (since chown requires file ownership)
            elif target_exists and os.stat(target_path).st_uid != os.getuid():
                shutil.copyfile(path, target_path)
                _log.info("Copied contents of file %s to %s", path, target_path)
            else:
                # make sure the parent directory of the target location exists
                mkdir(os.path.dirname(target_path), parents=True)
                if os.path.exists(path):
                    shutil.copy2(path, target_path)
                elif os.path.islink(path):
                    # special care for copying broken symlinks:
                    # os.path.exists returns False for a broken symlink,
                    # so re-create an equivalent link at the target location instead
                    link_target = os.readlink(path)
                    symlink(link_target, target_path)
                _log.info("%s copied to %s", path, target_path)
        except (IOError, OSError, shutil.Error) as err:
            raise EasyBuildError("Failed to copy file %s to %s: %s", path, target_path, err)
def copy_files(paths, target_dir, force_in_dry_run=False):
    """
    Copy list of files to specified target directory (which is created if it doesn't exist yet).

    :param paths: list of files to copy
    :param target_dir: target directory to copy files into
    :param force_in_dry_run: force copying of files during dry run
    """
    if not force_in_dry_run and build_option('extended_dry_run'):
        dry_run_msg("copied files %s to %s" % (paths, target_dir))
    else:
        if os.path.exists(target_dir):
            if os.path.isdir(target_dir):
                # note: log message fixed to reflect that arbitrary files (not just easyconfigs) are copied
                _log.info("Copying files into existing directory %s...", target_dir)
            else:
                raise EasyBuildError("%s exists but is not a directory", target_dir)
        else:
            mkdir(target_dir, parents=True)

        for path in paths:
            copy_file(path, target_dir)
def copy_dir(path, target_path, force_in_dry_run=False, dirs_exist_ok=False, **kwargs):
    """
    Copy a directory from specified location to specified location

    :param path: the original directory path
    :param target_path: path to copy the directory to
    :param force_in_dry_run: force running the command during dry run
    :param dirs_exist_ok: boolean indicating whether it's OK if the target directory already exists

    shutil.copytree is used if the target path does not exist yet;
    if the target path already exists, the 'copy' function will be used to copy the contents of
    the source path to the target path

    Additional specified named arguments are passed down to shutil.copytree/copy if used.
    """
    if not force_in_dry_run and build_option('extended_dry_run'):
        dry_run_msg("copied directory %s to %s" % (path, target_path))
    else:
        try:
            if not dirs_exist_ok and os.path.exists(target_path):
                raise EasyBuildError("Target location %s to copy %s to already exists", target_path, path)

            # note: in Python >= 3.8 shutil.copytree works just fine thanks to the 'dirs_exist_ok' argument,
            # but since we need to be more careful in earlier Python versions we use our own implementation
            # in case the target directory exists and 'dirs_exist_ok' is enabled
            if dirs_exist_ok and os.path.exists(target_path):
                # if target directory already exists (and that's allowed via dirs_exist_ok),
                # we need to be more careful, since shutil.copytree will fail (in Python < 3.8)
                # if target directory already exists;
                # so, recurse via 'copy' function to copy files/dirs in source path to target path
                # (NOTE: don't use distutils.dir_util.copy_tree here, see
                # https://github.com/easybuilders/easybuild-framework/issues/3306)

                entries = os.listdir(path)

                # take into account 'ignore' function that is supported by shutil.copytree
                # (but not by 'copy_file' function used by 'copy')
                ignore = kwargs.get('ignore')
                if ignore:
                    ignored_entries = ignore(path, entries)
                    entries = [x for x in entries if x not in ignored_entries]

                # determine list of paths to copy
                paths_to_copy = [os.path.join(path, x) for x in entries]

                copy(paths_to_copy, target_path,
                     force_in_dry_run=force_in_dry_run, dirs_exist_ok=dirs_exist_ok, **kwargs)

            else:
                # if dirs_exist_ok is not enabled or target directory doesn't exist, just use shutil.copytree
                shutil.copytree(path, target_path, **kwargs)

            _log.info("%s copied to %s", path, target_path)
        except (IOError, OSError, shutil.Error) as err:
            raise EasyBuildError("Failed to copy directory %s to %s: %s", path, target_path, err)
def copy(paths, target_path, force_in_dry_run=False, **kwargs):
    """
    Copy single file/directory or list of files and directories to specified location

    :param paths: path(s) to copy
    :param target_path: target location
    :param force_in_dry_run: force running the command during dry run
    :param kwargs: additional named arguments to pass down to copy_dir
    """
    # accept a single path as well as a list of paths
    if isinstance(paths, string_type):
        paths = [paths]

    _log.info("Copying %d files & directories to %s", len(paths), target_path)

    for src in paths:
        dest = os.path.join(target_path, os.path.basename(src))
        mkdir(os.path.dirname(dest), parents=True)
        # broken symlinks are only copied as files when 'symlinks=True' is passed
        link_to_copy = os.path.islink(src) and kwargs.get('symlinks')
        if os.path.isfile(src) or link_to_copy:
            copy_file(src, dest, force_in_dry_run=force_in_dry_run)
        elif os.path.isdir(src):
            copy_dir(src, dest, force_in_dry_run=force_in_dry_run, **kwargs)
        else:
            raise EasyBuildError("Specified path to copy is not an existing file or directory: %s", src)
def get_source_tarball_from_git(filename, targetdir, git_config):
    """
    Downloads a git repository, at a specific tag or commit, recursively or not, and make an archive with it

    :param filename: name of the archive to save the code to (must be .tar.gz)
    :param targetdir: target directory where to save the archive to
    :param git_config: dictionary containing url, repo_name, recursive, and one of tag or commit
    """
    # sanity check on git_config value being passed
    if not isinstance(git_config, dict):
        raise EasyBuildError("Found unexpected type of value for 'git_config' argument: %s" % type(git_config))

    # Making a copy to avoid modifying the object with pops
    git_config = git_config.copy()
    tag = git_config.pop('tag', None)
    url = git_config.pop('url', None)
    repo_name = git_config.pop('repo_name', None)
    commit = git_config.pop('commit', None)
    recursive = git_config.pop('recursive', False)
    keep_git_dir = git_config.pop('keep_git_dir', False)

    # input validation of git_config dict: any leftover keys are unknown/unsupported
    if git_config:
        raise EasyBuildError("Found one or more unexpected keys in 'git_config' specification: %s", git_config)

    if not repo_name:
        raise EasyBuildError("repo_name not specified in git_config parameter")

    # exactly one of 'tag' or 'commit' must be specified
    if not tag and not commit:
        raise EasyBuildError("Neither tag nor commit found in git_config parameter")

    if tag and commit:
        raise EasyBuildError("Tag and commit are mutually exclusive in git_config parameter")

    if not url:
        raise EasyBuildError("url not specified in git_config parameter")

    if not filename.endswith('.tar.gz'):
        raise EasyBuildError("git_config currently only supports filename ending in .tar.gz")

    # prepare target directory and clone repository
    mkdir(targetdir, parents=True)
    targetpath = os.path.join(targetdir, filename)

    # compose 'git clone' command, and run it
    clone_cmd = ['git', 'clone']
    if tag:
        # 'git clone --branch' accepts tags as well as branch names
        clone_cmd.extend(['--branch', tag])
    if recursive:
        clone_cmd.append('--recursive')
    clone_cmd.append('%s/%s.git' % (url, repo_name))

    # clone into a scratch directory; only the resulting archive is kept
    tmpdir = tempfile.mkdtemp()
    cwd = change_dir(tmpdir)
    run.run_cmd(' '.join(clone_cmd), log_all=True, log_ok=False, simple=False, regexp=False)

    # if a specific commit is asked for, check it out
    if commit:
        checkout_cmd = ['git', 'checkout', commit]
        if recursive:
            # also sync submodules to the state matching the checked-out commit
            checkout_cmd.extend(['&&', 'git', 'submodule', 'update'])

        run.run_cmd(' '.join(checkout_cmd), log_all=True, log_ok=False, simple=False, regexp=False, path=repo_name)

    # create an archive and delete the git repo directory
    if keep_git_dir:
        # keep the .git directory in the archive (e.g. to retain version metadata)
        tar_cmd = ['tar', 'cfvz', targetpath, repo_name]
    else:
        tar_cmd = ['tar', 'cfvz', targetpath, '--exclude', '.git', repo_name]
    run.run_cmd(' '.join(tar_cmd), log_all=True, log_ok=False, simple=False, regexp=False)

    # cleanup (repo_name dir does not exist in dry run mode)
    change_dir(cwd)
    remove(tmpdir)

    return targetpath
def move_file(path, target_path, force_in_dry_run=False):
    """
    Move a file from path to target_path

    :param path: the original filepath
    :param target_path: path to move the file to
    :param force_in_dry_run: force running the command during dry run
    """
    if not force_in_dry_run and build_option('extended_dry_run'):
        dry_run_msg("moved file %s to %s" % (path, target_path))
        return

    # remove first to ensure portability (shutil.move might fail when overwriting files in some systems)
    remove_file(target_path)
    try:
        mkdir(os.path.dirname(target_path), parents=True)
        shutil.move(path, target_path)
        _log.info("%s moved to %s", path, target_path)
    except (IOError, OSError) as err:
        raise EasyBuildError("Failed to move %s to %s: %s", path, target_path, err)
def diff_files(path1, path2):
    """
    Return unified diff between two files
    """
    def as_lines(filepath):
        # re-append the newlines stripped off by split(), so difflib sees proper line endings
        return ['%s\n' % x for x in read_file(filepath).split('\n')]

    diff = difflib.unified_diff(as_lines(path1), as_lines(path2), fromfile=path1, tofile=path2)
    return ''.join(diff)
def install_fake_vsc():
    """
    Put fake 'vsc' Python package in place, to catch easyblocks/scripts that still import from vsc.* namespace
    (vsc-base & vsc-install were ingested into the EasyBuild framework for EasyBuild 4.0,
    see https://github.com/easybuilders/easybuild-framework/pull/2708)
    """
    # note: install_fake_vsc is called before parsing configuration, so avoid using functions that use build_option,
    # like mkdir, write_file, ...
    fake_vsc_topdir = os.path.join(tempfile.mkdtemp(prefix='fake_vsc_'))

    # the fake vsc/__init__.py inspects the call stack to report who did the import, then bails out
    init_contents = '\n'.join([
        'import os',
        'import sys',
        'import inspect',
        '',
        'stack = inspect.stack()',
        'filename, lineno = "UNKNOWN", "UNKNOWN"',
        '',
        'for frame in stack[1:]:',
        '    _, cand_filename, cand_lineno, _, code_context, _ = frame',
        '    if code_context:',
        '        filename, lineno = cand_filename, cand_lineno',
        '        break',
        '',
        '# ignore imports from pkgutil.py (part of Python standard library),',
        '# which may happen due to a system-wide installation of vsc-base',
        '# even if it is not actually actively used...',
        'if os.path.basename(filename) != "pkgutil.py":',
        '    error_msg = "\\nERROR: Detected import from \'vsc\' namespace in %s (line %s)\\n" % (filename, lineno)',
        '    error_msg += "vsc-base & vsc-install were ingested into the EasyBuild framework in EasyBuild v4.0\\n"',
        '    error_msg += "The functionality you need may be available in the \'easybuild.base.*\' namespace.\\n"',
        '    sys.stderr.write(error_msg)',
        '    sys.exit(1)',
    ])

    init_path = os.path.join(fake_vsc_topdir, 'vsc', '__init__.py')
    pkg_dir = os.path.dirname(init_path)
    if not os.path.exists(pkg_dir):
        os.makedirs(pkg_dir)
    with open(init_path, 'w') as handle:
        handle.write(init_contents)

    # prepend so the fake package shadows any real 'vsc' installation
    sys.path.insert(0, fake_vsc_topdir)

    return fake_vsc_topdir
def get_easyblock_class_name(path):
    """Make sure file is an easyblock and get easyblock class name"""
    # module name: filename without extension
    fn = os.path.basename(path).split('.')[0]
    # NOTE(review): 'imp' is deprecated and removed in Python 3.12;
    # migrating to importlib should be considered — confirm target Python versions
    mod = imp.load_source(fn, path)
    clsmembers = inspect.getmembers(mod, inspect.isclass)
    for cn, co in clsmembers:
        # only consider classes defined in this module itself (not imported ones)
        if co.__module__ == mod.__name__:
            ancestors = inspect.getmro(co)
            # an easyblock is any class that (indirectly) derives from a class named 'EasyBlock'
            if any(a.__name__ == 'EasyBlock' for a in ancestors):
                return cn
    # no easyblock class found in the file
    return None
def is_generic_easyblock(easyblock):
    """Return whether specified easyblock name is a generic easyblock or not."""
    # preserve falsy values (None/'') as-is, mirroring the short-circuit behaviour of 'and'
    if not easyblock:
        return easyblock
    return not easyblock.startswith(EASYBLOCK_CLASS_PREFIX)
def copy_easyblocks(paths, target_dir):
    """ Find right location for easyblock file and copy it there"""
    subdir = os.path.join('easybuild', 'easyblocks')
    # bail out early if the target repo doesn't have the expected layout
    if not os.path.exists(os.path.join(target_dir, subdir)):
        raise EasyBuildError("Could not find %s subdir in %s", subdir, target_dir)

    file_info = {
        'eb_names': [],
        'paths_in_repo': [],
        'new': [],
    }

    for path in paths:
        cls_name = get_easyblock_class_name(path)
        if not cls_name:
            raise EasyBuildError("Could not determine easyblock class from file %s" % path)

        eb_name = remove_unwanted_chars(decode_class_name(cls_name).replace('-', '_')).lower()

        # generic easyblocks live in a dedicated package; others are grouped by first letter
        pkgdir = GENERIC_EASYBLOCK_PKG if is_generic_easyblock(cls_name) else eb_name[0]

        full_target_path = os.path.join(target_dir, subdir, pkgdir, eb_name + '.py')

        file_info['eb_names'].append(eb_name)
        file_info['paths_in_repo'].append(full_target_path)
        file_info['new'].append(not os.path.exists(full_target_path))

        copy_file(path, full_target_path, force_in_dry_run=True)

    return file_info
def copy_framework_files(paths, target_dir):
    """ Find right location for framework file and copy it there"""
    file_info = {
        'paths_in_repo': [],
        'new': [],
    }

    framework_topdir = 'easybuild-framework'

    for path in [os.path.abspath(p) for p in paths]:
        dirnames = os.path.dirname(path).split(os.path.sep)
        if framework_topdir not in dirnames:
            raise EasyBuildError("Specified path '%s' does not include a '%s' directory!", path, framework_topdir)

        # collect path components below the 'easybuild-framework' directory,
        # deepest component first, to rebuild the relative location in the repo
        subdirs = []
        while dirnames[-1] != framework_topdir:
            subdirs.insert(0, dirnames.pop())

        parent_dir = os.path.join(*subdirs) if subdirs else ''
        target_path = os.path.join(target_dir, parent_dir, os.path.basename(path))

        file_info['paths_in_repo'].append(target_path)
        file_info['new'].append(not os.path.exists(target_path))
        copy_file(path, target_path)

    return file_info
| pescobar/easybuild-framework | easybuild/tools/filetools.py | Python | gpl-2.0 | 93,165 | [
"NetCDF"
] | 973ca1be28779958ca502655b038bcbe68b91c15da999628e1ac53cee62b10ee |
"""Machinery for interspersing lines of text with linked and colored regions
The typical entrypoints are es_lines() and html_line().
Within this file, "tag" means a tuple of (file-wide offset, is_start, payload).
"""
import cgi
from itertools import chain
try:
    from itertools import compress
except ImportError:
    # itertools.compress is only available in Python >= 2.7;
    # fall back to an equivalent generator built on izip for older Pythons
    from itertools import izip

    def compress(data, selectors):
        # Yield items of data whose matching selector is truthy.
        return (d for d, s in izip(data, selectors) if s)
import json
from warnings import warn
from jinja2 import Markup
from dxr.plugins import all_plugins
from dxr.utils import without_ending
class Line(object):
    """Representation of a line's beginning and ending as the contents of a tag

    Exists to motivate the balancing machinery to close all the tags at the end
    of every line (and reopen any afterward that span lines).
    """
    # Lines sort outermost among coincident tag boundaries.
    sort_order = 0

    def __repr__(self):
        return type(self).__name__ + '()'

# Singleton marker used as the payload of every line-break tag.
LINE = Line()
class RefClassIdTagger(type):
    """Metaclass which automatically generates an ``id`` attr on the class as
    a serializable class identifier.

    Having a dedicated identifier allows Ref subclasses to move or change name
    without breaking index compatibility.

    Expects a ``_plugin`` attr to use as a prefix.
    """
    def __new__(mcs, name, bases, attrs):
        # Derive the identifier by stripping a trailing 'Ref' from the class name.
        attrs['id'] = without_ending('Ref', name)
        return type.__new__(mcs, name, bases, attrs)
class Ref(object):
    """Abstract superclass for a cross-reference attached to a run of text

    Carries enough data to construct a context menu, highlight instances of
    the same symbol, and show something informative on hover.
    """
    sort_order = 1
    # 'tree' must be listed in __slots__ too: slotted instances have no
    # __dict__, so the self.tree assignment in __init__ would otherwise raise
    # AttributeError (at least for subclasses that also define __slots__).
    __slots__ = ['tree', 'menu_data', 'hover', 'qualname_hash']
    __metaclass__ = RefClassIdTagger

    def __init__(self, tree, menu_data, hover=None, qualname=None, qualname_hash=None):
        """
        :arg tree: The tree config this ref belongs to
        :arg menu_data: Arbitrary JSON-serializable data from which we can
            construct a context menu
        :arg hover: The contents of the <a> tag's title attribute. (The first
            one wins.)
        :arg qualname: A hashable unique identifier for the symbol surrounded
            by this ref, for highlighting
        :arg qualname_hash: The hashed version of ``qualname``, which you can
            pass instead of ``qualname`` if you have access to the
            already-hashed version
        """
        self.tree = tree
        self.menu_data = menu_data
        self.hover = hover
        self.qualname_hash = hash(qualname) if qualname else qualname_hash

    def es(self):
        """Return a serialization of myself to store in elasticsearch."""
        # 'plugin' is expected to be provided by subclasses (see metaclass docstring).
        ret = {'plugin': self.plugin,
               'id': self.id,
               # Smash the data into a string, because it will have a
               # different schema from subclass to subclass, and ES will freak
               # out:
               'menu_data': json.dumps(self.menu_data)}
        if self.hover:
            ret['hover'] = self.hover
        if self.qualname_hash is not None:  # could be 0
            ret['qualname_hash'] = self.qualname_hash
        return ret

    @staticmethod
    def es_to_triple(es_data, tree):
        """Convert ES-dwelling ref representation to a (start, end,
        :class:`~dxr.lines.Ref` subclass) triple.

        Return a subclass of Ref, chosen according to the ES data. Into its
        attributes "menu_data", "hover" and "qualname_hash", copy the ES
        properties of the same names, JSON-decoding "menu_data" first.

        :arg es_data: An item from the array under the 'refs' key of an ES LINE
            document
        :arg tree: The :class:`~dxr.config.TreeConfig` representing the tree
            from which the ``es_data`` was pulled
        """
        def ref_class(plugin, id):
            """Return the subclass of Ref identified by a combination of
            plugin and class ID."""
            plugins = all_plugins()
            try:
                return plugins[plugin].refs[id]
            except KeyError:
                warn('Ref subclass from plugin %s with ID %s was referenced '
                     'in the index but not found in the current '
                     'implementation. Ignored.' % (plugin, id))

        payload = es_data['payload']
        cls = ref_class(payload['plugin'], payload['id'])
        # NOTE(review): ref_class() returns None for unknown plugin/ID pairs,
        # which would make the call below raise TypeError — confirm callers
        # never hit that path, or add a guard.
        return (es_data['start'],
                es_data['end'],
                cls(tree,
                    json.loads(payload['menu_data']),
                    hover=payload.get('hover'),
                    qualname_hash=payload.get('qualname_hash')))

    def menu_items(self):
        """Return an iterable of menu items to be attached to a ref.

        Return an iterable of dicts of this form::

            {
                html: the HTML to be used as the menu item itself
                href: the URL to visit when the menu item is chosen
                title: the tooltip text given on hovering over the menu item
                icon: the icon to show next to the menu item: the name of a PNG
                    from the ``icons`` folder, without the .png extension
            }

        Typically, this pulls data out of ``self.menu_data``.
        """
        raise NotImplementedError

    def opener(self):
        """Emit the opening anchor tag for a cross reference.

        Menu item text, links, and metadata are JSON-encoded and dumped into a
        data attr on the tag. JS finds them there and creates a menu on click.
        """
        if self.hover:
            title = ' title="' + cgi.escape(self.hover, True) + '"'
        else:
            title = ''
        if self.qualname_hash is not None:
            cls = ' class="tok%i"' % self.qualname_hash
        else:
            cls = ''
        menu_items = list(self.menu_items())
        return u'<a data-menu="%s"%s%s>' % (
            cgi.escape(json.dumps(menu_items), True),
            title,
            cls)

    def closer(self):
        return u'</a>'
class Region(object):
    """A <span> tag with a CSS class, wrapped around a run of text"""
    # Regions sort innermost among coincident boundaries, since splitting
    # them across tags is harmless.
    sort_order = 2
    __slots__ = ['css_class']

    def __init__(self, css_class):
        self.css_class = css_class

    def es(self):
        # Serialization for elasticsearch is just the CSS class string.
        return self.css_class

    @classmethod
    def es_to_triple(cls, es_region):
        """Convert ES-dwelling region representation to a (start, end,
        :class:`~dxr.lines.Region`) triple."""
        start, end = es_region['start'], es_region['end']
        return start, end, cls(es_region['payload'])

    def opener(self):
        return u'<span class="%s">' % cgi.escape(self.css_class, True)

    def closer(self):
        return u'</span>'

    def __repr__(self):
        """Return a nice representation for debugging."""
        return 'Region("%s")' % self.css_class
def balanced_tags(tags):
    """Come up with a balanced series of tags which express the semantics of
    the given sorted interleaved ones.

    Return an iterable of (point, is_start, Region/Reg/Line) without any
    (pointless) zero-width tag spans. The output isn't necessarily optimal, but
    it's fast and not embarrassingly wasteful of space.
    """
    # First balance the stream (which may introduce zero-width spans at line
    # breaks and overlaps), then strip those zero-width spans back out.
    return without_empty_tags(balanced_tags_with_empties(tags))
def without_empty_tags(tags):
    """Filter zero-width tagged spans out of a sorted, balanced tag stream.

    Maintain tag order. Line break tags are considered self-closing.
    """
    pending = []  # tags buffered until the stream is balanced again
    open_count = 0
    for tag in tags:
        where, starts, what = tag
        if starts:
            pending.append(tag)
            open_count += 1
            continue

        last_where, _, last_what = pending[-1]
        if last_what is what and last_where == where:
            # This closer matches the opener on top of the buffer and the two
            # form a zero-width span: cancel them both.
            pending.pop()
        else:
            # A closer that actually encloses some text.
            pending.append(tag)
        open_count -= 1

        # Once everything opened has been closed, flush the buffer:
        if not open_count:
            for queued in pending:
                yield queued
            del pending[:]
def balanced_tags_with_empties(tags):
    """Come up with a balanced series of tags which express the semantics of
    the given sorted interleaved ones.

    Return an iterable of (point, is_start, Region/Reg/Line), possibly
    including some zero-width tag spans. Each line is enclosed within Line tags.

    :arg tags: An iterable of (offset, is_start, payload) tuples, with one
        closer for each opener but possibly interleaved. There is one tag for
        each line break, with a payload of LINE and an is_start of False. Tags
        are ordered with closers first, then line breaks, then openers.
    """
    def close(to=None):
        """Return an iterable of closers for open tags up to (but not
        including) the one with the payload ``to``."""
        # Loop until empty (if we're not going "to" anything in particular) or
        # until the corresponding opener is at the top of the stack. We check
        # that "to is None" just to surface any stack-tracking bugs that would
        # otherwise cause opens to empty too soon.
        while opens if to is None else opens[-1] is not to:
            intermediate_payload = opens.pop()
            yield point, False, intermediate_payload
            closes.append(intermediate_payload)

    def reopen():
        """Yield open tags for all temporarily closed ones."""
        while closes:
            intermediate_payload = closes.pop()
            yield point, True, intermediate_payload
            opens.append(intermediate_payload)

    opens = []  # payloads of tags which are currently open
    closes = []  # payloads of tags which we've had to temporarily close so we could close an overlapping tag
    point = 0  # current offset; close() and reopen() read it from this enclosing scope
    yield 0, True, LINE
    for point, is_start, payload in tags:
        if is_start:
            yield point, is_start, payload
            opens.append(payload)
        elif payload is LINE:
            # Close all open tags before a line break (since each line is
            # wrapped in its own <code> tag pair), and reopen them afterward.
            for t in close():  # I really miss "yield from".
                yield t

            # Since preserving self-closing linebreaks would throw off
            # without_empty_tags(), we convert to explicit closers here. We
            # surround each line with them because empty balanced ones would
            # get filtered out.
            yield point, False, LINE
            yield point, True, LINE

            for t in reopen():
                yield t
        else:
            # Temporarily close whatever's been opened between the start tag of
            # the thing we're trying to close and here:
            for t in close(to=payload):
                yield t

            # Close the current tag:
            yield point, False, payload
            opens.pop()

            # Reopen the temporarily closed ones:
            for t in reopen():
                yield t
    yield point, False, LINE
def tag_boundaries(tags):
    """Return a sequence of (offset, is_start, Region/Ref/Line) tuples.

    Basically, split the atomic tags that come out of plugins into separate
    start and end points, which can then be thrown together in a bag and sorted
    as the first step in the tag-balancing process.

    Like in Python slice notation, the offset of a tag refers to the index of
    the source code char it comes before.

    :arg tags: An iterable of (start, end, Ref) and (start, end, Region) tuples
    """
    for begin, finish, payload in tags:
        # Skip zero-length and inverted spans, which do no good and can make
        # starts sort after ends, crashing the tag balancer. Also skip None
        # and -1 endpoints, which some plugins emit erroneously; that should
        # be fixed in the plugins themselves.
        if begin is None or finish is None:
            continue
        if begin == -1 or finish == -1:
            continue
        if begin >= finish:
            continue
        yield begin, True, payload
        yield finish, False, payload
def line_boundaries(lines):
    """Return a tag for the end of each line in a string.

    :arg lines: iterable of the contents of lines in a file, including any
        trailing newline character

    Endpoints and start points are coincident: right after a (universal)
    newline.
    """
    # Running total of characters consumed == offset just past each newline.
    offset = 0
    for line_text in lines:
        offset += len(line_text)
        yield offset, False, LINE
def non_overlapping_refs(tags):
    """Yield a False for each Ref in ``tags`` that overlaps a subsequent one,
    a True for the rest.

    Assumes the incoming tags, while not necessarily well balanced, have the
    start tag come before the end tag, if both are present. (Lines are weird.)
    """
    evil_closers = set()  # payloads whose close tags must also be dropped
    current = None  # the Ref whose span we're inside, if any
    for _point, starts, data in tags:
        if not isinstance(data, Ref):
            yield True
        elif data in evil_closers:
            # The close tag of a previously rejected, misnested ref.
            evil_closers.discard(data)
            yield False
        elif current is None:
            # An opener while nothing is open: keep it.
            assert starts
            current = data
            yield True
        elif current is data:
            # The matching closer of the currently open ref.
            current = None
            yield True
        else:
            # An opener while another ref is still open: reject it.
            warn('htmlifier plugins requested overlapping <a> tags. Fix the plugins.')
            evil_closers.add(data)
            yield False
def remove_overlapping_refs(tags):
    """For any series of <a> tags that overlap each other, filter out all but
    the first.

    There's no decent way to represent that sort of thing in the UI, so we
    don't support it.

    :arg tags: A list of (point, is_start, payload) tuples, sorted by point.
        The tags do not need to be properly balanced.
    """
    # Compact the survivors into the front of the list, reusing its storage,
    # then chop off the leftover tail.
    last_index = None
    for last_index, kept in enumerate(compress(tags, non_overlapping_refs(tags))):
        tags[last_index] = kept
    if last_index is not None:
        del tags[last_index + 1:]
def nesting_order(tag):
    """Return a sorting key that places coincident Line boundaries outermost,
    then Ref boundaries, and finally Region boundaries.

    The Line bit saves some empty-tag elimination. The Ref bit saves splitting
    an <a> tag (and the attendant weird UI) for the following case::

        Ref    ____________  # The Ref should go on the outside.
        Region _____

    Other scenarios::

        Reg _______________        # Would be nice if Reg ended before Ref
        Ref  ________________      # started. We'll see about this later.

        Reg _____________________  # Works either way
        Ref _______

        Reg _____________________
        Ref           _______      # This should be fine.

        Reg         _____________  # This should be fine as well.
        Ref ____________

        Reg _____
        Ref _____                  # This is fine either way.

    Also, endpoints sort before coincident start points to save work for the
    tag balancer.

    :arg tag: A (point, is_start, payload) tuple
    """
    # Unpack inside the body: tuple parameters in the signature were removed
    # in Python 3 (PEP 3113), and callers pass the tuple either way.
    point, is_start, payload = tag
    return point, is_start, (payload.sort_order if is_start else
                             -payload.sort_order)
def finished_tags(lines, refs, regions):
    """Return an ordered iterable of properly nested tags which fully describe
    the refs and regions and their places in a file's text.

    :arg lines: iterable of lines of text of the file to htmlify.

    Benchmarking reveals that this function is O(number of tags) in practice,
    on inputs on the order of thousands of lines. On my laptop, it takes .02s
    for a 3000-line file with some pygmentize regions and some python refs.
    """
    # Plugins return unicode offsets, not byte ones.

    # Split refs and regions into start/end boundary tags, then add one
    # boundary per line break:
    boundaries = list(tag_boundaries(chain(refs, regions)))
    boundaries.extend(line_boundaries(lines))

    # Sorting is actually not a significant use of time in an actual indexing
    # run. balanced_tags() undoes this ordering, but html_lines() tolerates it.
    boundaries.sort(key=nesting_order)

    remove_overlapping_refs(boundaries)
    return balanced_tags(boundaries)
def tags_per_line(flat_tags):
    """Split tags on LINE tags, yielding the tags of one line at a time
    (no LINE tags are yielded)

    :arg flat_tags: An iterable of ordered, non-overlapping, non-empty tag
        boundaries with Line endpoints at (and outermost at) the index of the
        end of each line.
    """
    current = []
    for tag in flat_tags:
        _point, starts, what = tag
        if what is not LINE:
            current.append(tag)
        elif not starts:
            # A LINE closer ends the current line; LINE openers are dropped.
            yield current
            current = []
def es_lines(tags):
    """Yield lists of dicts, one per source code line, that can be indexed
    into the ``refs`` or ``regions`` field of the ``line`` doctype in
    elasticsearch, depending on the payload type.

    :arg tags: An iterable of ordered, non-overlapping, non-empty tag
        boundaries with Line endpoints at (and outermost at) the index of the
        end of each line.
    """
    for line in tags_per_line(tags):
        # Map each payload to its {'start': ..., 'end': ...} span on this line.
        payloads = {}
        for pos, is_start, payload in line:
            if is_start:
                payloads[payload] = {'start': pos}
            else:
                payloads[payload]['end'] = pos
        # Index objects are refs or regions. Regions' payloads are just
        # strings; refs' payloads are objects. See mappings in plugins/core.py.
        # Use items() rather than iteritems() so this works on both Python 2
        # and Python 3 (matching the compress() compatibility shim up top).
        yield [{'payload': payload.es(),
                'start': pos['start'],
                'end': pos['end']}
               for payload, pos in payloads.items()]
    # tags always ends with a LINE closer, so we don't need any additional
    # yield here to catch remnants.
def html_line(text, tags, bof_offset):
    """Return a line of Markup, interleaved with the refs and regions that
    decorate it.

    :arg tags: An ordered iterable of tags from output of finished_tags
        representing regions and refs
    :arg text: The unicode text to decorate
    :arg bof_offset: The byte position of the start of the line from the
        beginning of the file.
    """
    def pieces():
        consumed = 0
        for pos, is_start, payload in tags:
            # Convert from file-based position to line-based position.
            line_pos = pos - bof_offset
            # Text between the previous boundary and this one, escaped:
            yield cgi.escape(text[consumed:line_pos])
            consumed = line_pos
            yield payload.opener() if is_start else payload.closer()
        # The remainder of the line after the last boundary:
        yield cgi.escape(text[consumed:])

    return Markup(u''.join(pieces()))
| pombredanne/dxr | dxr/lines.py | Python | mit | 19,336 | [
"VisIt"
] | 8e15955d8559c06532ea103138e962b2aec885f8933c65fcdedcb8821f017e0f |
from OWWidget import OWWidget
from OWkNNOptimization import *
import orange, math, random
import OWGUI, orngVisFuncts, numpy
from math import sqrt
from orngScaleLinProjData import *
from orngLinProj import *
class FreeVizOptimization(OWWidget, FreeViz):
settingsList = ["stepsBeforeUpdate", "restrain", "differentialEvolutionPopSize",
"s2nSpread", "s2nPlaceAttributes", "autoSetParameters",
"forceRelation", "mirrorSymmetry", "forceSigma", "restrain", "law", "forceRelation", "disableAttractive",
"disableRepulsive", "useGeneralizedEigenvectors", "touringSpeed"]
forceRelValues = ["4 : 1", "3 : 1", "2 : 1", "3 : 2", "1 : 1", "2 : 3", "1 : 2", "1 : 3", "1 : 4"]
attractRepelValues = [(4, 1), (3, 1), (2, 1), (3, 2), (1, 1), (2, 3), (1, 2), (1, 3), (1, 4)]
    def __init__(self, parentWidget = None, signalManager = None, graph = None, parentName = "Visualization widget"):
        """Build the FreeViz optimization dialog: initialize optimization state,
        load saved settings, and construct the Main and Projections GUI tabs."""
        OWWidget.__init__(self, None, signalManager, "FreeViz Dialog", savePosition = True, wantMainArea = 0, wantStatusBar = 1)
        FreeViz.__init__(self, graph)

        self.parentWidget = parentWidget
        self.parentName = parentName
        self.setCaption("FreeViz Optimization Dialog")
        # flag polled by the optimization loop to allow user-initiated cancellation
        self.cancelOptimization = 0
        self.forceRelation = 5
        self.disableAttractive = 0
        self.disableRepulsive = 0
        self.touringSpeed = 4
        self.graph = graph

        if self.graph:
            self.graph.hideRadius = 0
            self.graph.showAnchors = 1

        # differential evolution
        self.differentialEvolutionPopSize = 100
        self.DERadvizSolver = None

        # restore persisted values for attributes listed in settingsList
        self.loadSettings()

        self.layout().setMargin(0)
        self.tabs = OWGUI.tabWidget(self.controlArea)
        self.MainTab = OWGUI.createTabPage(self.tabs, "Main")
        self.ProjectionsTab = OWGUI.createTabPage(self.tabs, "Projections")

        # ###########################
        # MAIN TAB
        OWGUI.comboBox(self.MainTab, self, "implementation", box = "FreeViz implementation", items = ["Fast (C) implementation", "Slow (Python) implementation", "LDA"])

        box = OWGUI.widgetBox(self.MainTab, "Optimization")

        self.optimizeButton = OWGUI.button(box, self, "Optimize Separation", callback = self.optimizeSeparation)
        self.stopButton = OWGUI.button(box, self, "Stop Optimization", callback = self.stopOptimization)
        self.singleStepButton = OWGUI.button(box, self, "Single Step", callback = self.singleStepOptimization)
        # emphasize the main action button; the stop button stays hidden until optimization runs
        f = self.optimizeButton.font(); f.setBold(1)
        self.optimizeButton.setFont(f)
        self.stopButton.setFont(f); self.stopButton.hide()
        self.attrKNeighboursCombo = OWGUI.comboBoxWithCaption(box, self, "stepsBeforeUpdate", "Number of steps before updating graph: ", tooltip = "Set the number of optimization steps that will be executed before the updated anchor positions will be visualized", items = [1, 3, 5, 10, 15, 20, 30, 50, 75, 100, 150, 200, 300], sendSelectedValue = 1, valueType = int)
        OWGUI.checkBox(box, self, "mirrorSymmetry", "Keep mirror symmetry", tooltip = "'Rotational' keeps the second anchor upside")

        vbox = OWGUI.widgetBox(self.MainTab, "Set anchor positions")
        hbox1 = OWGUI.widgetBox(vbox, orientation = "horizontal")
        # 3D parent widgets get a "Sphere" button instead of "Circle"
        OWGUI.button(hbox1, self, "Sphere" if "3d" in self.parentName.lower() else "Circle", callback = self.radialAnchors)
        OWGUI.button(hbox1, self, "Random", callback = self.randomAnchors)
        self.manualPositioningButton = OWGUI.button(hbox1, self, "Manual", callback = self.setManualPosition)
        self.manualPositioningButton.setCheckable(1)
        OWGUI.comboBox(vbox, self, "restrain", label="Restrain anchors:", orientation = "horizontal", items = ["Unrestrained", "Fixed Length", "Fixed Angle"], callback = self.setRestraints)

        box2 = OWGUI.widgetBox(self.MainTab, "Forces", orientation = "vertical")

        self.cbLaw = OWGUI.comboBox(box2, self, "law", label="Law", labelWidth = 40, orientation="horizontal", items=["Linear", "Square", "Gaussian", "KNN", "Variance"], callback = self.forceLawChanged)

        hbox2 = OWGUI.widgetBox(box2, orientation = "horizontal")
        hbox2.layout().addSpacing(10)

        validSigma = QDoubleValidator(self); validSigma.setBottom(0.01)
        self.spinSigma = OWGUI.lineEdit(hbox2, self, "forceSigma", label = "Kernel width (sigma) ", labelWidth = 110, orientation = "horizontal", valueType = float)
        self.spinSigma.setFixedSize(60, self.spinSigma.sizeHint().height())
        self.spinSigma.setSizePolicy(QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Fixed))

        box2.layout().addSpacing(20)

        self.cbforcerel = OWGUI.comboBox(box2, self, "forceRelation", label= "Attractive : Repulsive ",orientation = "horizontal", items=self.forceRelValues, callback = self.updateForces)
        self.cbforcebal = OWGUI.checkBox(box2, self, "forceBalancing", "Dynamic force balancing", tooltip="Normalize the forces so that the total sums of the\nrepulsive and attractive are in the above proportion.")

        box2.layout().addSpacing(20)

        self.cbDisableAttractive = OWGUI.checkBox(box2, self, "disableAttractive", "Disable attractive forces", callback = self.setDisableAttractive)
        self.cbDisableRepulsive = OWGUI.checkBox(box2, self, "disableRepulsive", "Disable repulsive forces", callback = self.setDisableRepulsive)

        box = OWGUI.widgetBox(self.MainTab, "Show anchors")
        OWGUI.checkBox(box, self, 'graph.showAnchors', 'Show attribute anchors', callback = self.parentWidget.updateGraph)
        OWGUI.qwtHSlider(box, self, "graph.hideRadius", label="Hide radius", minValue=0, maxValue=9, step=1, ticks=0, callback = self.parentWidget.updateGraph)
        self.freeAttributesButton = OWGUI.button(box, self, "Remove hidden attributes", callback = self.removeHidden)

        # PCA/PLS projections are not applicable for RadViz/SphereViz parents
        if parentName.lower() != "radviz" and parentName.lower() != "sphereviz":
            pcaBox = OWGUI.widgetBox(self.ProjectionsTab, "Principal Component Analysis")
            OWGUI.button(pcaBox, self, "Principal component analysis", callback = self.findPCAProjection)
            OWGUI.button(pcaBox, self, "Supervised principal component analysis", callback = self.findSPCAProjection)
            OWGUI.checkBox(pcaBox, self, "useGeneralizedEigenvectors", "Merge examples with same class value")
            plsBox = OWGUI.widgetBox(self.ProjectionsTab, "Partial Least Squares")
            OWGUI.button(plsBox, self, "Partial least squares", callback = self.findPLSProjection)

        box = OWGUI.widgetBox(self.ProjectionsTab, "Projection Tours")
        self.startTourButton = OWGUI.button(box, self, "Start Random Touring", callback = self.startRandomTouring)
        self.stopTourButton = OWGUI.button(box, self, "Stop Touring", callback = self.stopRandomTouring)
        self.stopTourButton.hide()
        OWGUI.hSlider(box, self, 'touringSpeed', label = "Speed: ", minValue=1, maxValue=10, step=1)
        OWGUI.rubber(self.ProjectionsTab)

        box = OWGUI.widgetBox(self.ProjectionsTab, "Signal to Noise Heuristic")
        #OWGUI.comboBoxWithCaption(box, self, "s2nSpread", "Anchor spread: ", tooltip = "Are the anchors for each class value placed together or are they distributed along the circle", items = range(11), callback = self.s2nMixAnchors)
        box2 = OWGUI.widgetBox(box, 0, orientation = "horizontal")
        OWGUI.widgetLabel(box2, "Anchor spread: ")
        OWGUI.hSlider(box2, self, 's2nSpread', minValue=0, maxValue=10, step=1, callback = self.s2nMixAnchors, labelFormat=" %d", ticks=0)
        OWGUI.comboBoxWithCaption(box, self, "s2nPlaceAttributes", "Attributes to place: ", tooltip = "Set the number of top ranked attributes to place. You can select a higher value than the actual number of attributes", items = self.attrsNum, callback = self.s2nMixAnchors, sendSelectedValue = 1, valueType = int)
        OWGUI.checkBox(box, self, 'autoSetParameters', 'Automatically find optimal parameters')
        self.s2nMixButton = OWGUI.button(box, self, "Place anchors", callback = self.s2nMixAnchorsAutoSet)

        # sync dependent controls with the (possibly just loaded) settings
        self.forceLawChanged()
        self.updateForces()
        self.cbforcebal.setDisabled(self.cbDisableAttractive.isChecked() or self.cbDisableRepulsive.isChecked())
        self.resize(320,650)
##        self.parentWidget.learnersArray[3] = S2NHeuristicLearner(self, self.parentWidget)
def startRandomTouring(self):
self.startTourButton.hide()
self.stopTourButton.show()
labels = [self.graph.anchorData[i][2] for i in range(len(self.graph.anchorData))]
newXPositions = numpy.array([x[0] for x in self.graph.anchorData])
newYPositions = numpy.array([x[1] for x in self.graph.anchorData])
step = steps = 0
self.canTour = 1
while hasattr(self, "canTour"):
if step >= steps:
oldXPositions = newXPositions
oldYPositions = newYPositions
newXPositions = numpy.random.uniform(-1, 1, len(self.graph.anchorData))
newYPositions = numpy.random.uniform(-1, 1, len(self.graph.anchorData))
m = math.sqrt(max(newXPositions**2 + newYPositions**2))
newXPositions/= m
newYPositions/= m
maxDist = max(numpy.sqrt((newXPositions - oldXPositions)**2 + (newYPositions - oldYPositions)**2))
steps = int(maxDist * 300)
step = 0
midX = newXPositions * step/steps + oldXPositions * (steps-step)/steps
midY = newYPositions * step/steps + oldYPositions * (steps-step)/steps
self.graph.anchorData = [(midX[i], midY[i], labels[i]) for i in range(len(labels))]
step += self.touringSpeed
self.graph.updateData()
if step % 10 == 0:
qApp.processEvents()
#self.graph.repaint()
def stopRandomTouring(self):
self.startTourButton.show()
self.stopTourButton.hide()
if hasattr(self, "canTour"):
delattr(self, "canTour")
# ##############################################################
# EVENTS
# ##############################################################
def setManualPosition(self):
self.parentWidget.graph.manualPositioning = self.manualPositioningButton.isChecked()
def updateForces(self):
if self.disableAttractive or self.disableRepulsive:
self.attractG, self.repelG = 1 - self.disableAttractive, 1 - self.disableRepulsive
self.cbforcerel.setDisabled(True)
self.cbforcebal.setDisabled(True)
else:
self.attractG, self.repelG = self.attractRepelValues[self.forceRelation]
self.cbforcerel.setDisabled(False)
self.cbforcebal.setDisabled(False)
self.printEvent("Updated: %i, %i" % (self.attractG, self.repelG), eventVerbosity = 1)
def forceLawChanged(self):
self.spinSigma.setDisabled(self.cbLaw.currentIndex() not in [2, 3])
def setRestraints(self):
if self.restrain:
attrList = self.getShownAttributeList()
if not attrList:
return
if "3d" in self.parentName.lower():
positions = numpy.array([x[:3] for x in self.graph.anchorData])
if self.restrain == 1:
positions = numpy.transpose(positions) * numpy.sum(positions**2, 1)**-0.5
self.graph.anchorData = [(positions[0][i], positions[1][i], positions[2][i], a) for i, a in enumerate(attrList)]
else:
self.graph.create_anchors(len(attrList), attrList)
self.graph.updateData()
self.graph.repaint()
return
positions = numpy.array([x[:2] for x in self.graph.anchorData])
if self.restrain == 1:
positions = numpy.transpose(positions) * numpy.sum(positions**2,1)**-0.5
self.graph.setAnchors(positions[0], positions[1], attrList)
#self.graph.anchorData = [(positions[0][i], positions[1][i], a) for i, a in enumerate(attrList)]
else:
r = numpy.sqrt(numpy.sum(positions**2, 1))
phi = 2*math.pi/len(r)
self.graph.anchorData = [(r[i] * math.cos(i*phi), r[i] * math.sin(i*phi), a) for i, a in enumerate(attrList)]
self.graph.updateData()
self.graph.repaint()
def setDisableAttractive(self):
if self.cbDisableAttractive.isChecked():
self.disableRepulsive = 0
self.updateForces()
def setDisableRepulsive(self):
if self.cbDisableRepulsive.isChecked():
self.disableAttractive = 0
self.updateForces()
# ###############################################################
## FREE VIZ FUNCTIONS
# ###############################################################
def randomAnchors(self):
FreeViz.randomAnchors(self)
self.graph.updateData()
self.graph.repaint()
#self.recomputeEnergy()
def radialAnchors(self):
FreeViz.radialAnchors(self)
self.graph.updateData()
self.graph.repaint()
#self.recomputeEnergy()
def removeHidden(self):
rad2 = (self.graph.hideRadius/10)**2
newAnchorData = []
shownAttrList = []
for i, t in enumerate(self.graph.anchorData):
if t[0]**2 + t[1]**2 >= rad2:
shownAttrList.append(t[2])
newAnchorData.append(t)
self.parentWidget.setShownAttributeList(shownAttrList)
self.graph.anchorData = newAnchorData
self.graph.updateData()
self.graph.repaint()
#self.recomputeEnergy()
def singleStepOptimization(self):
FreeViz.optimizeSeparation(self, 1, 1)
self.graph.potentialsBmp = None
self.graph.updateData()
def optimizeSeparation(self, steps = 10, singleStep = False):
self.optimizeButton.hide()
self.stopButton.show()
self.cancelOptimization = 0
#qApp.processEvents()
if hasattr(self.graph, 'animate_points'):
self.graph_is_animated = self.graph.animate_points
self.graph.animate_points = False
ns = FreeViz.optimizeSeparation(self, self.stepsBeforeUpdate, singleStep, self.parentWidget.distances)
self.graph.potentialsBmp = None
self.graph.updateData()
self.stopButton.hide()
self.optimizeButton.show()
def stopOptimization(self):
self.cancelOptimization = 1
if hasattr(self, 'graph_is_animated'):
self.graph.animate_points = self.graph_is_animated
# # #############################################################
# # DIFFERENTIAL EVOLUTION
# # #############################################################
# def createPopulation(self):
# if not self.graph.haveData: return
# l = len(self.graph.dataDomain.attributes)
# self.DERadvizSolver = RadvizSolver(self.parentWidget, l * 2 , self.differentialEvolutionPopSize)
# Min = [0.0] * 2* l
# Max = [1.0] * 2* l
# self.DERadvizSolver.Setup(Min, Max, 0, 0.95, 1)
#
# def evolvePopulation(self):
# if not self.graph.haveData: return
# if not self.DERadvizSolver:
# QMessageBox.critical( None, "Differential evolution", 'To evolve a population you first have to create one by pressing "Create population" button', QMessageBox.Ok)
#
# self.DERadvizSolver.Solve(5)
# solution = self.DERadvizSolver.Solution()
# self.graph.anchorData = [(solution[2*i], solution[2*i+1], self.graph.dataDomain.attributes[i].name) for i in range(len(self.graph.dataDomain.attributes))]
# self.graph.updateData([attr.name for attr in self.graph.dataDomain.attributes], 0)
# self.graph.repaint()
def findPCAProjection(self):
self.findProjection(DR_PCA, setAnchors = 1)
def findSPCAProjection(self):
if not self.graph.dataHasClass:
QMessageBox.information( None, self.parentName, 'Supervised PCA can only be applied on data with a class attribute.', QMessageBox.Ok + QMessageBox.Default)
return
self.findProjection(DR_SPCA, setAnchors = 1)
def findPLSProjection(self):
self.findProjection(DR_PLS, setAnchors = 1)
def hideEvent(self, ev):
self.stopRandomTouring() # if we were touring then stop
self.saveSettings()
OWWidget.hideEvent(self, ev)
# if autoSetParameters is set then try different values for parameters and see how good projection do we get
# if not then just use current parameters to place anchors
def s2nMixAnchorsAutoSet(self):
# check if we have data and a discrete class
if not self.graph.haveData or len(self.graph.rawData) == 0 or not self.graph.dataHasDiscreteClass:
self.setStatusBarText("No data or data without a discrete class")
return
vizrank = self.parentWidget.vizrank
if self.__class__ != FreeViz: from PyQt4.QtGui import qApp
if self.autoSetParameters:
results = {}
self.s2nSpread = 0
permutations = orngVisFuncts.generateDifferentPermutations(range(len(self.graph.dataDomain.classVar.values)))
for perm in permutations:
self.classPermutationList = perm
for val in self.attrsNum:
if self.attrsNum[self.attrsNum.index(val)-1] > len(self.graph.dataDomain.attributes): continue # allow the computations once
self.s2nPlaceAttributes = val
if not self.s2nMixAnchors(0):
return
if self.__class__ != FreeViz:
qApp.processEvents()
acc, other = vizrank.kNNComputeAccuracy(self.graph.createProjectionAsExampleTable(None, useAnchorData = 1))
if results.keys() != []: self.setStatusBarText("Current projection value is %.2f (best is %.2f)" % (acc, max(results.keys())))
else: self.setStatusBarText("Current projection value is %.2f" % (acc))
results[acc] = (perm, val)
if results.keys() == []: return
self.classPermutationList, self.s2nPlaceAttributes = results[max(results.keys())]
if self.__class__ != FreeViz:
qApp.processEvents()
if not self.s2nMixAnchors(0): # update the best number of attributes
return
results = []
anchors = self.graph.anchorData
attributeNameIndex = self.graph.attributeNameIndex
attrIndices = [attributeNameIndex[val[2]] for val in anchors]
for val in range(10):
self.s2nSpread = val
if not self.s2nMixAnchors(0):
return
acc, other = vizrank.kNNComputeAccuracy(self.graph.createProjectionAsExampleTable(attrIndices, useAnchorData = 1))
results.append(acc)
if results != []: self.setStatusBarText("Current projection value is %.2f (best is %.2f)" % (acc, max(results)))
else: self.setStatusBarText("Current projection value is %.2f" % (acc))
self.s2nSpread = results.index(max(results))
self.setStatusBarText("Best projection value is %.2f" % (max(results)))
# always call this. if autoSetParameters then because we need to set the attribute list in radviz. otherwise because it finds the best attributes for current settings
self.s2nMixAnchors()
# #############################################################################
# class that represents S2N Heuristic classifier
class S2NHeuristicClassifier(orange.Classifier):
def __init__(self, optimizationDlg, radvizWidget, data, nrOfFreeVizSteps = 0):
self.optimizationDlg = optimizationDlg
self.radvizWidget = radvizWidget
self.radvizWidget.setData(data)
self.optimizationDlg.s2nMixAnchorsAutoSet()
if nrOfFreeVizSteps > 0:
self.optimizationDlg.optimize(nrOfFreeVizSteps)
# for a given example run argumentation and find out to which class it most often fall
def __call__(self, example, returnType):
table = orange.ExampleTable(example.domain)
table.append(example)
self.radvizWidget.setSubsetData(table) # show the example is we use the widget
self.radvizWidget.handleNewSignals()
anchorData = self.radvizWidget.graph.anchorData
attributeNameIndex = self.radvizWidget.graph.attributeNameIndex
scaleFunction = self.radvizWidget.graph.scaleExampleValue
attrListIndices = [attributeNameIndex[val[2]] for val in anchorData]
attrVals = [scaleFunction(example, index) for index in attrListIndices]
table = self.radvizWidget.graph.createProjectionAsExampleTable(attrListIndices, scaleFactor = self.radvizWidget.graph.trueScaleFactor, useAnchorData = 1)
knn = self.radvizWidget.optimizationDlg.createkNNLearner(kValueFormula = 0)(table)
[xTest, yTest] = self.radvizWidget.graph.getProjectedPointPosition(attrListIndices, attrVals, useAnchorData = 1)
(classVal, prob) = knn(orange.Example(table.domain, [xTest, yTest, "?"]), orange.GetBoth)
if returnType == orange.GetBoth: return classVal, prob
else: return classVal
class S2NHeuristicLearner(orange.Learner):
def __init__(self, optimizationDlg, radvizWidget):
self.radvizWidget = radvizWidget
self.optimizationDlg = optimizationDlg
self.name = "S2N Feature Selection Learner"
def __call__(self, examples, weightID = 0, nrOfFreeVizSteps = 0):
return S2NHeuristicClassifier(self.optimizationDlg, self.radvizWidget, examples, nrOfFreeVizSteps)
#test widget appearance
if __name__=="__main__":
import sys
a=QApplication(sys.argv)
ow=FreeVizOptimization()
ow.show()
a.exec_()
| yzl0083/orange | Orange/OrangeWidgets/OWFreeVizOptimization.py | Python | gpl-3.0 | 22,281 | [
"Gaussian"
] | 78744986b448f0b2368d9fe48fdb54400ed21d5bc2ac8b218252b0c34eb05c8d |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import templates
class FunctionNamer(object):
"""Describes the interface for CallTreeTransformer's namer."""
def compiled_function_name(self, original_name, live_object=None):
"""Generate the name corresponding to the compiled version of a function.
Args:
original_name: String
live_object: Callable, the actual target function, if known.
Returns:
String.
"""
raise NotImplementedError()
class CallTreeTransformer(gast.NodeTransformer):
"""Transforms the call tree by renaming transformed symbols."""
def __init__(self, namer, uncompiled_modules):
self.namer = namer
self.uncompiled_modules = uncompiled_modules
# pylint:disable=invalid-name
def visit_FunctionDef(self, node):
self.generic_visit(node)
node.name = self.namer.compiled_function_name(node.name)
return node
def _should_compile(self, fqn):
for i in range(1, len(fqn)):
if fqn[:i] in self.uncompiled_modules:
return False
return True
def _rename_compilable_function(self, node):
assert anno.hasanno(node.func, 'live_val')
assert anno.hasanno(node.func, 'fqn')
target_obj = anno.getanno(node.func, 'live_val')
target_fqn = anno.getanno(node.func, 'fqn')
if not self._should_compile(target_fqn):
return node
new_name = self.namer.compiled_function_name(
'.'.join(target_fqn), live_object=target_obj)
node.func = gast.Name(id=new_name, ctx=gast.Load(), annotation=None)
return node
def _rename_member_function_of_known_type(self, node):
target_fqn = anno.getanno(node.func, 'type_fqn')
if not self._should_compile(target_fqn):
return node
raise NotImplementedError('Member function call (of known type).')
def _wrap_to_py_func_no_return(self, node):
args_scope = anno.getanno(node, 'args_scope')
# TODO(mdan): Properly handle varargs, kwargs, etc.
args = tuple(gast.Name(n, gast.Load(), None) for n in args_scope.used)
# pylint:disable=undefined-variable,unused-argument,function-redefined
def template(call, wrapper, args):
def wrapper(args):
call(args)
return 1
tf.py_func(wrapper, [args], [tf.int64])
# pylint:enable=undefined-variable,unused-argument,function-redefined
wrapper_name = self.namer.compiled_function_name(node.func.id)
wrapper_def, call_expr = templates.replace(
template,
call=node.func,
wrapper=gast.Name(wrapper_name, gast.Load(), None),
args=args)
anno.setanno(call_expr.value, 'args_scope', args_scope)
anno.setanno(wrapper_def, 'skip_processing', True)
return (wrapper_def, call_expr)
def _function_is_compilable(self, target_obj):
# TODO(mdan): This is just a placeholder. Implement.
return not isinstance(target_obj, types.BuiltinFunctionType)
def visit_Expr(self, node):
if isinstance(node.value, gast.Call):
if anno.hasanno(node.value.func, 'live_val'):
target_obj = anno.getanno(node.value.func, 'live_val')
if not self._function_is_compilable(target_obj):
if anno.hasanno(node.value.func, 'fqn'):
target_fqn = anno.getanno(node.value.func, 'fqn')
if not self._should_compile(target_fqn):
return node
node = self._wrap_to_py_func_no_return(node.value)
return node
# Only the case of py_func with no return value is special.
# Everything else is processed by visit_Call.
self.visit(node.value)
else:
self.generic_visit(node)
return node
def visit_Call(self, node):
self.generic_visit(node)
if anno.hasanno(node.func, 'live_val'):
target_obj = anno.getanno(node.func, 'live_val')
if self._function_is_compilable(target_obj):
node = self._rename_compilable_function(node)
else:
raise NotImplementedError('py_func with return values')
elif anno.hasanno(node.func, 'type_fqn'):
node = self._rename_member_function_of_known_type(node)
else:
raise NotImplementedError(
'Member function call (of unknown type): %s.' % node.func.id)
return node
# pylint:enable=invalid-name
def transform(node, namer, uncompiled_modules):
"""Transform function call to the compiled counterparts.
Args:
node: AST to transform.
namer: FunctionNamer-like.
uncompiled_modules: set of string tuples, each tuple represents the fully
qualified name of a package containing functions that will not be
compiled.
Returns:
A tuple (node, new_names):
node: The transformed AST
new_names: set(string), containing any newly-generated names
"""
transformer = CallTreeTransformer(namer, uncompiled_modules)
node = transformer.visit(node)
return node
| jwlawson/tensorflow | tensorflow/contrib/py2tf/convert/call_trees.py | Python | apache-2.0 | 5,766 | [
"VisIt"
] | d06826a687647906bd0ad03fd1c6e2cd48113e6161b183856f74e4c08886f50b |
# Copyright (C) 2016-2019 The ESPResSo project
# Copyright (C) 2014 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Define the espressomd package
# Initialize MPI, start the main loop on the slaves
from . import _init
from .system import System
from .code_info import features, all_features
from .cuda_init import gpu_available
class FeaturesError(Exception):
def __init__(self, missing_features_list):
message = "Missing features " + ", ".join(missing_features_list)
super().__init__(message)
def has_features(*args):
"""Tests whether a list of features is a subset of the compiled-in features"""
if len(args) == 1 and not isinstance(
args[0], str) and hasattr(args[0], "__iter__"):
check_set = set(args[0])
else:
check_set = set(args)
if not check_set < all_features():
raise RuntimeError(
"'{}' is not a feature".format(','.join(check_set - all_features())))
return check_set <= set(features())
def missing_features(*args):
"""Returns a list of the missing features in the argument"""
if len(args) == 1 and not isinstance(
args[0], str) and hasattr(args[0], "__iter__"):
return set(args[0]) - set(features())
return set(args) - set(features())
def assert_features(*args):
"""Raises an exception when a list of features is not a subset of the compiled-in features"""
if not has_features(*args):
raise FeaturesError(missing_features(*args))
| KaiSzuttor/espresso | src/python/espressomd/__init__.py | Python | gpl-3.0 | 2,126 | [
"ESPResSo"
] | 0d31ea316a05598b170e8444becf90473777a4fa6298b8cd1723f97fe2171735 |
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program PRANK.
"""
from __future__ import print_function
from Bio.Application import _Option, _Switch, AbstractCommandline
class PrankCommandline(AbstractCommandline):
"""Command line wrapper for the multiple alignment program PRANK.
http://www.ebi.ac.uk/goldman-srv/prank/prank/
Example:
--------
To align a FASTA file (unaligned.fasta) with the output in aligned
FASTA format with the output filename starting with "aligned" (you
can't pick the filename explicitly), no tree output and no XML output,
use:
>>> from Bio.Align.Applications import PrankCommandline
>>> prank_cline = PrankCommandline(d="unaligned.fasta",
... o="aligned", # prefix only!
... f=8, # FASTA output
... notree=True, noxml=True)
>>> print(prank_cline)
prank -d=unaligned.fasta -o=aligned -f=8 -noxml -notree
You would typically run the command line with prank_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Citations:
----------
Loytynoja, A. and Goldman, N. 2005. An algorithm for progressive
multiple alignment of sequences with insertions. Proceedings of
the National Academy of Sciences, 102: 10557--10562.
Loytynoja, A. and Goldman, N. 2008. Phylogeny-aware gap placement
prevents errors in sequence alignment and evolutionary analysis.
Science, 320: 1632.
Last checked against version: 081202
"""
def __init__(self, cmd="prank", **kwargs):
OUTPUT_FORMAT_VALUES = list(range(1, 18))
self.parameters = [
# ################# input/output parameters: ##################
# -d=sequence_file
_Option(["-d", "d"],
"Input filename",
filename=True,
is_required=True),
# -t=tree_file [default: no tree, generate approximate NJ tree]
_Option(["-t", "t"], "Input guide tree filename",
filename=True),
# -tree="tree_string" [tree in newick format; in double quotes]
_Option(["-tree", "tree"],
"Input guide tree as Newick string"),
# -m=model_file [default: HKY2/WAG]
_Option(["-m", "m"],
"User-defined alignment model filename. Default: "
"HKY2/WAG"),
# -o=output_file [default: 'output']
_Option(["-o", "o"],
"Output filenames prefix. Default: 'output'\n "
"Will write: output.?.fas (depending on requested "
"format), output.?.xml and output.?.dnd",
filename=True),
# -f=output_format [default: 8]
_Option(["-f", "f"],
"Output alignment format. Default: 8 FASTA\n"
"Option are:\n"
"1. IG/Stanford 8. Pearson/Fasta\n"
"2. GenBank/GB 11. Phylip3.2\n"
"3. NBRF 12. Phylip\n"
"4. EMBL 14. PIR/CODATA\n"
"6. DNAStrider 15. MSF\n"
"7. Fitch 17. PAUP/NEXUS",
checker_function=lambda x: x in OUTPUT_FORMAT_VALUES),
_Switch(["-noxml", "noxml"],
"Do not output XML files "
"(PRANK versions earlier than v.120626)"),
_Switch(["-notree", "notree"],
"Do not output dnd tree files "
"(PRANK versions earlier than v.120626)"),
_Switch(["-showxml", "showxml"],
"Output XML files (PRANK v.120626 and later)"),
_Switch(["-showtree", "showtree"],
"Output dnd tree files (PRANK v.120626 and later)"),
_Switch(["-shortnames", "shortnames"],
"Truncate names at first space"),
_Switch(["-quiet", "quiet"],
"Reduce verbosity"),
# ###################### model parameters: ######################
# +F [force insertions to be always skipped]
# -F [equivalent]
_Switch(["-F", "+F", "F"],
"Force insertions to be always skipped: same as +F"),
# -dots [show insertion gaps as dots]
_Switch(["-dots", "dots"],
"Show insertion gaps as dots"),
# -gaprate=# [gap opening rate; default: dna 0.025 / prot 0.0025]
_Option(["-gaprate", "gaprate"],
"Gap opening rate. Default: dna 0.025 prot 0.0025",
checker_function=lambda x: isinstance(x, float)),
# -gapext=# [gap extension probability; default: dna 0.5 / prot 0.5]
_Option(["-gapext", "gapext"],
"Gap extension probability. Default: dna 0.5 "
"/ prot 0.5",
checker_function=lambda x: isinstance(x, float)),
# -dnafreqs=#,#,#,# [ACGT; default: empirical]
_Option(["-dnafreqs", "dnafreqs"],
"DNA frequencies - 'A,C,G,T'. eg '25,25,25,25' as a quote "
"surrounded string value. Default: empirical",
checker_function=lambda x: isinstance(x, bytes)),
# -kappa=# [ts/tv rate ratio; default:2]
_Option(["-kappa", "kappa"],
"Transition/transversion ratio. Default: 2",
checker_function=lambda x: isinstance(x, int)),
# -rho=# [pur/pyr rate ratio; default:1]
_Option(["-rho", "rho"],
"Purine/pyrimidine ratio. Default: 1",
checker_function=lambda x: isinstance(x, int)),
# -codon [for DNA: use empirical codon model]
# Assuming this is an input file as in -m
_Option(["-codon", "codon"],
"Codon model filename. Default: empirical codon model"),
# -termgap [penalise terminal gaps normally]
_Switch(["-termgap", "termgap"],
"Penalise terminal gaps normally"),
# ############### other parameters: ################################
# -nopost [do not compute posterior support; default: compute]
_Switch(["-nopost", "nopost"],
"Do not compute posterior support. Default: compute"),
# -pwdist=# [expected pairwise distance for computing guidetree;
# default: dna 0.25 / prot 0.5]
_Option(["-pwdist", "pwdist"],
"Expected pairwise distance for computing guidetree. "
"Default: dna 0.25 / prot 0.5",
checker_function=lambda x: isinstance(x, float)),
_Switch(["-once", "once"],
"Run only once. Default: twice if no guidetree given"),
_Switch(["-twice", "twice"],
"Always run twice"),
_Switch(["-skipins", "skipins"],
"Skip insertions in posterior support"),
_Switch(["-uselogs", "uselogs"],
"Slower but should work for a greater number of sequences"),
_Switch(["-writeanc", "writeanc"],
"Output ancestral sequences"),
_Switch(["-printnodes", "printnodes"],
"Output each node; mostly for debugging"),
# -matresize=# [matrix resizing multiplier]
# Doesn't specify type but Float and Int work
_Option(["-matresize", "matresize"],
"Matrix resizing multiplier",
checker_function=lambda x: isinstance(x, float) or
isinstance(x, int)),
# -matinitsize=# [matrix initial size multiplier]
# Doesn't specify type but Float and Int work
_Option(["-matinitsize", "matinitsize"],
"Matrix initial size multiplier",
checker_function=lambda x: isinstance(x, float) or
isinstance(x, int)),
_Switch(["-longseq", "longseq"],
"Save space in pairwise alignments"),
_Switch(["-pwgenomic", "pwgenomic"],
"Do pairwise alignment, no guidetree"),
# -pwgenomicdist=# [distance for pairwise alignment; default: 0.3]
_Option(["-pwgenomicdist", "pwgenomicdist"],
"Distance for pairwise alignment. Default: 0.3",
checker_function=lambda x: isinstance(x, float)),
# -scalebranches=# [scale branch lengths; default: dna 1 / prot 2]
_Option(["-scalebranches", "scalebranches"],
"Scale branch lengths. Default: dna 1 / prot 2",
checker_function=lambda x: isinstance(x, int)),
# -fixedbranches=# [use fixed branch lengths]
# Assume looking for a float
_Option(["-fixedbranches", "fixedbranches"],
"Use fixed branch lengths of input value",
checker_function=lambda x: isinstance(x, float)),
# -maxbranches=# [set maximum branch length]
# Assume looking for a float
_Option(["-maxbranches", "maxbranches"],
"Use maximum branch lengths of input value",
checker_function=lambda x: isinstance(x, float)),
# -realbranches [disable branch length truncation]
_Switch(["-realbranches", "realbranches"],
"Disable branch length truncation"),
_Switch(["-translate", "translate"],
"Translate to protein"),
_Switch(["-mttranslate", "mttranslate"],
"Translate to protein using mt table"),
# ##################### other: ####################
_Switch(["-convert", "convert"],
"Convert input alignment to new format. Do "
"not perform alignment")
]
AbstractCommandline.__init__(self, cmd, **kwargs)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| zjuchenyuan/BioWeb | Lib/Bio/Align/Applications/_Prank.py | Python | mit | 10,550 | [
"Biopython"
] | 57fa5b68cf8a6a17ff64253f3ed41ab5dfc6dd108cfd2ce12d1c3d2cd047d0dd |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import mock
from stoqlib.api import api
from stoqlib.domain.costcenter import CostCenterEntry
from stoqlib.domain.stockdecrease import StockDecrease
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.wizards.salewizard import PaymentMethodStep
from stoqlib.gui.wizards.stockdecreasewizard import StockDecreaseWizard
from stoqlib.lib.parameters import sysparam
class TestStockDecreaseWizard(GUITest):
@mock.patch('stoqlib.gui.wizards.stockdecreasewizard.'
'StockDecreaseWizard._receipt_dialog')
def test_wizard(self, receipt_dialog):
branch = api.get_current_branch(self.store)
storable = self.create_storable(branch=branch, stock=1)
sellable = storable.product.sellable
wizard = StockDecreaseWizard(self.store)
step = wizard.get_current_step()
self.assertFalse(step.create_payments.get_visible())
self.assertNotSensitive(wizard, ['next_button'])
step.reason.update('text')
self.assertSensitive(wizard, ['next_button'])
self.check_wizard(wizard, 'start-stock-decrease-step')
self.click(wizard.next_button)
step = wizard.get_current_step()
self.assertNotSensitive(wizard, ['next_button'])
step.barcode.set_text(sellable.barcode)
step.sellable_selected(sellable)
step.quantity.update(1)
self.click(step.add_sellable_button)
self.check_wizard(wizard, 'decrease-item-step')
self.assertSensitive(wizard, ['next_button'])
module = 'stoqlib.gui.events.StockDecreaseWizardFinishEvent.emit'
with mock.patch(module) as emit:
with mock.patch.object(self.store, 'commit'):
self.click(wizard.next_button)
self.assertEquals(emit.call_count, 1)
args, kwargs = emit.call_args
self.assertTrue(isinstance(args[0], StockDecrease))
self.assertEquals(receipt_dialog.call_count, 1)
# Assert wizard decreased stock.
self.assertEquals(storable.get_balance_for_branch(branch), 0)
@mock.patch('stoqlib.gui.wizards.stockdecreasewizard.yesno')
def test_wizard_create_payment(self, yesno):
yesno.return_value = False
sysparam.set_bool(self.store, 'CREATE_PAYMENTS_ON_STOCK_DECREASE', True)
till = self.create_till()
till.open_till()
branch = api.get_current_branch(self.store)
storable = self.create_storable(branch=branch, stock=1)
sellable = storable.product.sellable
wizard = StockDecreaseWizard(self.store)
step = wizard.get_current_step()
self.assertTrue(step.create_payments.get_visible())
step.create_payments.update(True)
step.reason.update('reason')
self.check_wizard(wizard, 'start-stock-decrease-step-create-payments')
self.assertSensitive(wizard, ['next_button'])
self.click(wizard.next_button)
step = wizard.get_current_step()
step.barcode.set_text(sellable.barcode)
step.sellable_selected(sellable)
step.quantity.update(1)
self.click(step.add_sellable_button)
self.click(wizard.next_button)
step = wizard.get_current_step()
self.assertTrue(isinstance(step, PaymentMethodStep))
@mock.patch('stoqlib.gui.wizards.stockdecreasewizard.yesno')
def test_wizard_with_cost_center(self, yesno):
sysparam.set_bool(self.store, 'CREATE_PAYMENTS_ON_STOCK_DECREASE', True)
yesno.return_value = False
branch = api.get_current_branch(self.store)
storable = self.create_storable(branch=branch, stock=1)
sellable = storable.product.sellable
cost_center = self.create_cost_center()
wizard = StockDecreaseWizard(self.store)
entry = self.store.find(CostCenterEntry,
cost_center=wizard.model.cost_center)
self.assertEquals(len(list(entry)), 0)
step = wizard.get_current_step()
step.reason.update('test')
step.cost_center.select(cost_center)
self.check_wizard(wizard, 'stock-decrease-with-cost-center')
self.click(wizard.next_button)
step = wizard.get_current_step()
step.barcode.set_text(sellable.barcode)
step.sellable_selected(sellable)
step.quantity.update(1)
self.click(step.add_sellable_button)
with mock.patch.object(self.store, 'commit'):
self.click(wizard.next_button)
self.assertEquals(wizard.model.cost_center, cost_center)
entry = self.store.find(CostCenterEntry,
cost_center=wizard.model.cost_center)
self.assertEquals(len(list(entry)), 1)
| andrebellafronte/stoq | stoqlib/gui/test/test_stockdecreasewizard.py | Python | gpl-2.0 | 5,583 | [
"VisIt"
] | 4d3887e60f4e472e754c3dfab4da12987f83c18dad1448d23c9dabf38d3dd8be |
from __future__ import annotations
import copy
import logging
import math
import libtbx
from dxtbx.model.experiment_list import Experiment, ExperimentList
from dials.algorithms.indexing import DialsIndexError, DialsIndexRefineError
from dials.algorithms.indexing.indexer import Indexer
from dials.algorithms.indexing.known_orientation import IndexerKnownOrientation
from dials.algorithms.indexing.lattice_search import BasisVectorSearch, LatticeSearch
from dials.algorithms.indexing.nave_parameters import NaveParameters
from dials.array_family import flex
from dials.util.multi_dataset_handling import generate_experiment_identifiers
logger = logging.getLogger(__name__)
def calc_2D_rmsd_and_displacements(reflections):
displacements = flex.vec2_double(
reflections["xyzobs.px.value"].parts()[0],
reflections["xyzobs.px.value"].parts()[1],
) - flex.vec2_double(
reflections["xyzcal.px"].parts()[0], reflections["xyzcal.px"].parts()[1]
)
rmsd = math.sqrt(flex.mean(displacements.dot(displacements)))
return rmsd, displacements
def plot_displacements(reflections, predictions, experiments):
rmsd, displacements = calc_2D_rmsd_and_displacements(predictions)
from matplotlib import pyplot as plt
plt.figure()
for cv in displacements:
plt.plot([cv[0]], [-cv[1]], "r.")
plt.title(f" {len(displacements)} spots, r.m.s.d. {rmsd:5.2f} pixels")
plt.axes().set_aspect("equal")
plt.show()
plt.close()
plt.figure()
sz1, sz2 = experiments[0].detector[0].get_image_size()
for item, cv in zip(predictions, displacements):
plt.plot([item["xyzcal.px"][0]], [sz1 - item["xyzcal.px"][1]], "r.")
plt.plot([item["xyzobs.px.value"][0]], [sz1 - item["xyzobs.px.value"][1]], "g.")
plt.plot(
[item["xyzcal.px"][0], item["xyzcal.px"][0] + 10.0 * cv[0]],
[sz1 - item["xyzcal.px"][1], sz1 - item["xyzcal.px"][1] - 10.0 * cv[1]],
"r-",
)
plt.xlim([0, experiments[0].detector[0].get_image_size()[0]])
plt.ylim([0, experiments[0].detector[0].get_image_size()[1]])
plt.title(f" {len(displacements)} spots, r.m.s.d. {rmsd:5.2f} pixels")
plt.axes().set_aspect("equal")
plt.show()
plt.close()
def e_refine(params, experiments, reflections, graph_verbose=False):
# Stills-specific parameters we always want
assert params.refinement.reflections.outlier.algorithm in (
None,
"null",
), "Cannot index, set refinement.reflections.outlier.algorithm=null" # we do our own outlier rejection
from dials.algorithms.refinement.refiner import RefinerFactory
refiner = RefinerFactory.from_parameters_data_experiments(
params, reflections, experiments
)
refiner.run()
ref_sel = refiner.selection_used_for_refinement()
assert ref_sel.count(True) == len(reflections)
if not graph_verbose:
return refiner
RR = refiner.predict_for_reflection_table(reflections)
plot_displacements(reflections, RR, experiments)
return refiner
class StillsIndexer(Indexer):
"""Class for indexing stills"""
def __init__(self, reflections, experiments, params=None):
if params.refinement.reflections.outlier.algorithm in ("auto", libtbx.Auto):
# The stills_indexer provides its own outlier rejection
params.refinement.reflections.outlier.algorithm = "null"
super().__init__(reflections, experiments, params)
def index(self):
# most of this is the same as dials.algorithms.indexing.indexer.indexer_base.index(), with some stills
# specific modifications (don't re-index after choose best orientation matrix, but use the indexing from
# choose best orientation matrix, also don't use macrocycles) of refinement after indexing.
# 2017 update: do accept multiple lattices per shot
experiments = ExperimentList()
while True:
self.d_min = self.params.refinement_protocol.d_min_start
max_lattices = self.params.multiple_lattice_search.max_lattices
if max_lattices is not None and len(experiments) >= max_lattices:
break
if len(experiments) > 0:
cutoff_fraction = (
self.params.multiple_lattice_search.recycle_unindexed_reflections_cutoff
)
d_spacings = 1 / self.reflections["rlp"].norms()
d_min_indexed = flex.min(d_spacings.select(self.indexed_reflections))
min_reflections_for_indexing = cutoff_fraction * len(
self.reflections.select(d_spacings > d_min_indexed)
)
crystal_ids = self.reflections.select(d_spacings > d_min_indexed)["id"]
if (crystal_ids == -1).count(True) < min_reflections_for_indexing:
logger.info(
"Finish searching for more lattices: %i unindexed reflections remaining.",
min_reflections_for_indexing,
)
break
n_lattices_previous_cycle = len(experiments)
# index multiple lattices per shot
if len(experiments) == 0:
new = self.find_lattices()
generate_experiment_identifiers(new)
experiments.extend(new)
if len(experiments) == 0:
raise DialsIndexError("No suitable lattice could be found.")
else:
try:
new = self.find_lattices()
generate_experiment_identifiers(new)
experiments.extend(new)
except Exception as e:
logger.info("Indexing remaining reflections failed")
logger.debug(
"Indexing remaining reflections failed, exception:\n" + str(e)
)
# reset reflection lattice flags
# the lattice a given reflection belongs to: a value of -1 indicates
# that a reflection doesn't belong to any lattice so far
self.reflections["id"] = flex.int(len(self.reflections), -1)
self.index_reflections(experiments, self.reflections)
if len(experiments) == n_lattices_previous_cycle:
# no more lattices found
break
if (
not self.params.stills.refine_candidates_with_known_symmetry
and self.params.known_symmetry.space_group is not None
):
self._apply_symmetry_post_indexing(
experiments, self.reflections, n_lattices_previous_cycle
)
# discard nearly overlapping lattices on the same shot
if self._check_have_similar_crystal_models(experiments):
break
self.indexed_reflections = self.reflections["id"] > -1
if self.d_min is None:
sel = self.reflections["id"] <= -1
else:
sel = flex.bool(len(self.reflections), False)
lengths = 1 / self.reflections["rlp"].norms()
isel = (lengths >= self.d_min).iselection()
sel.set_selected(isel, True)
sel.set_selected(self.reflections["id"] > -1, False)
self.unindexed_reflections = self.reflections.select(sel)
reflections_for_refinement = self.reflections.select(
self.indexed_reflections
)
if len(self.params.stills.isoforms) > 0:
logger.info("")
logger.info("#" * 80)
logger.info("Starting refinement")
logger.info("#" * 80)
logger.info("")
isoform_experiments = ExperimentList()
isoform_reflections = flex.reflection_table()
# Note, changes to params after initial indexing. Cannot use tie to target when fixing the unit cell.
self.all_params.refinement.reflections.outlier.algorithm = "null"
self.all_params.refinement.parameterisation.crystal.fix = "cell"
self.all_params.refinement.parameterisation.crystal.unit_cell.restraints.tie_to_target = (
[]
)
for expt_id, experiment in enumerate(experiments):
reflections = reflections_for_refinement.select(
reflections_for_refinement["id"] == expt_id
)
reflections["id"] = flex.int(len(reflections), 0)
refiners = []
for isoform in self.params.stills.isoforms:
iso_experiment = copy.deepcopy(experiment)
crystal = iso_experiment.crystal
if (
isoform.lookup_symbol
!= crystal.get_space_group().type().lookup_symbol()
):
logger.info(
"Crystal isoform lookup_symbol %s does not match isoform %s lookup_symbol %s",
crystal.get_space_group().type().lookup_symbol(),
isoform.name,
isoform.lookup_symbol,
)
continue
crystal.set_B(isoform.cell.fractionalization_matrix())
logger.info("Refining isoform %s", isoform.name)
refiners.append(
e_refine(
params=self.all_params,
experiments=ExperimentList([iso_experiment]),
reflections=reflections,
graph_verbose=False,
)
)
if len(refiners) == 0:
raise DialsIndexError(
"No isoforms had a lookup symbol that matched"
)
positional_rmsds = [
math.sqrt(P.rmsds()[0] ** 2 + P.rmsds()[1] ** 2)
for P in refiners
]
logger.info(
"Positional rmsds for all isoforms:" + str(positional_rmsds)
)
minrmsd_mm = min(positional_rmsds)
minindex = positional_rmsds.index(minrmsd_mm)
logger.info(
"The smallest rmsd is %5.1f um from isoform %s",
1000.0 * minrmsd_mm,
self.params.stills.isoforms[minindex].name,
)
if self.params.stills.isoforms[minindex].rmsd_target_mm is not None:
logger.info(
"Asserting %f < %f",
minrmsd_mm,
self.params.stills.isoforms[minindex].rmsd_target_mm,
)
assert (
minrmsd_mm
< self.params.stills.isoforms[minindex].rmsd_target_mm
)
logger.info(
"Acceptable rmsd for isoform %s.",
self.params.stills.isoforms[minindex].name,
)
if len(self.params.stills.isoforms) == 2:
logger.info(
"Rmsd gain over the other isoform %5.1f um.",
1000.0 * abs(positional_rmsds[0] - positional_rmsds[1]),
)
R = refiners[minindex]
# Now one last check to see if direct beam is out of bounds
if self.params.stills.isoforms[minindex].beam_restraint is not None:
from scitbx import matrix
refined_beam = matrix.col(
R.get_experiments()[0]
.detector[0]
.get_beam_centre_lab(experiments[0].beam.get_s0())[0:2]
)
known_beam = matrix.col(
self.params.stills.isoforms[minindex].beam_restraint
)
logger.info(
"Asserting difference in refined beam center and expected beam center %f < %f",
(refined_beam - known_beam).length(),
self.params.stills.isoforms[minindex].rmsd_target_mm,
)
assert (
refined_beam - known_beam
).length() < self.params.stills.isoforms[
minindex
].rmsd_target_mm
# future--circle of confusion could be given as a separate length in mm instead of reusing rmsd_target
experiment = R.get_experiments()[0]
experiment.crystal.identified_isoform = self.params.stills.isoforms[
minindex
].name
isoform_experiments.append(experiment)
reflections["id"] = flex.int(len(reflections), expt_id)
isoform_reflections.extend(reflections)
experiments = isoform_experiments
reflections_for_refinement = isoform_reflections
if self.params.refinement_protocol.mode == "repredict_only":
from dials.algorithms.indexing.nave_parameters import NaveParameters
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictorFactory,
)
refined_experiments, refined_reflections = (
experiments,
reflections_for_refinement,
)
ref_predictor = ExperimentsPredictorFactory.from_experiments(
experiments,
force_stills=True,
spherical_relp=self.all_params.refinement.parameterisation.spherical_relp_model,
)
ref_predictor(refined_reflections)
refined_reflections["delpsical2"] = (
refined_reflections["delpsical.rad"] ** 2
)
for expt_id in range(len(refined_experiments)):
refls = refined_reflections.select(
refined_reflections["id"] == expt_id
)
nv = NaveParameters(
params=self.all_params,
experiments=refined_experiments[expt_id : expt_id + 1],
reflections=refls,
refinery=None,
graph_verbose=False,
)
experiments[expt_id].crystal = nv()
ref_predictor = ExperimentsPredictorFactory.from_experiments(
experiments,
force_stills=True,
spherical_relp=self.all_params.refinement.parameterisation.spherical_relp_model,
)
ref_predictor(refined_reflections)
elif self.params.refinement_protocol.mode is None:
refined_experiments, refined_reflections = (
experiments,
reflections_for_refinement,
)
else:
try:
refined_experiments, refined_reflections = self.refine(
experiments, reflections_for_refinement
)
except Exception as e:
s = str(e)
if len(experiments) == 1:
raise DialsIndexRefineError(e)
logger.info("Refinement failed:")
logger.info(s)
del experiments[-1]
break
self._unit_cell_volume_sanity_check(experiments, refined_experiments)
self.refined_reflections = refined_reflections.select(
refined_reflections["id"] > -1
)
for i, expt in enumerate(self.experiments):
ref_sel = self.refined_reflections.select(
self.refined_reflections["imageset_id"] == i
)
ref_sel = ref_sel.select(ref_sel["id"] >= 0)
for i_expt in set(ref_sel["id"]):
refined_expt = refined_experiments[i_expt]
expt.detector = refined_expt.detector
expt.beam = refined_expt.beam
expt.goniometer = refined_expt.goniometer
expt.scan = refined_expt.scan
refined_expt.imageset = expt.imageset
if not (
self.all_params.refinement.parameterisation.beam.fix == "all"
and self.all_params.refinement.parameterisation.detector.fix == "all"
):
# Experimental geometry may have changed - re-map centroids to
# reciprocal space
self.reflections.map_centroids_to_reciprocal_space(self.experiments)
# update for next cycle
experiments = refined_experiments
self.refined_experiments = refined_experiments
if self.refined_experiments is None:
raise DialsIndexRefineError("None of the experiments could refine.")
# discard experiments with zero reflections after refinement
id_set = set(self.refined_reflections["id"])
if len(id_set) < len(self.refined_experiments):
filtered_refined_reflections = flex.reflection_table()
for i in range(len(self.refined_experiments)):
if i not in id_set:
del self.refined_experiments[i]
for old, new in zip(sorted(id_set), range(len(id_set))):
subset = self.refined_reflections.select(
self.refined_reflections["id"] == old
)
subset["id"] = flex.int(len(subset), new)
filtered_refined_reflections.extend(subset)
self.refined_reflections = filtered_refined_reflections
if len(self.refined_experiments) > 1:
from dials.algorithms.indexing.compare_orientation_matrices import (
rotation_matrix_differences,
)
logger.info(
rotation_matrix_differences(self.refined_experiments.crystals())
)
logger.info("Final refined crystal models:")
for i, crystal_model in enumerate(self.refined_experiments.crystals()):
n_indexed = 0
for _ in experiments.where(crystal=crystal_model):
n_indexed += (self.reflections["id"] == i).count(True)
logger.info("model %i (%i reflections):", i + 1, n_indexed)
logger.info(crystal_model)
if (
"xyzcal.mm" in self.refined_reflections
): # won't be there if refine_all_candidates = False and no isoforms
self._xyzcal_mm_to_px(self.experiments, self.refined_reflections)
def experiment_list_for_crystal(self, crystal):
experiments = ExperimentList()
for imageset in self.experiments.imagesets():
experiments.append(
Experiment(
imageset=imageset,
beam=imageset.get_beam(),
detector=imageset.get_detector(),
goniometer=imageset.get_goniometer(),
scan=imageset.get_scan(),
crystal=crystal,
)
)
return experiments
def choose_best_orientation_matrix(self, candidate_orientation_matrices):
logger.info("*" * 80)
logger.info("Selecting the best orientation matrix")
logger.info("*" * 80)
class CandidateInfo(libtbx.group_args):
pass
candidates = []
params = copy.deepcopy(self.all_params)
for icm, cm in enumerate(candidate_orientation_matrices):
if icm >= self.params.basis_vector_combinations.max_refine:
break
# Index reflections in P1
sel = self.reflections["id"] == -1
refl = self.reflections.select(sel)
experiments = self.experiment_list_for_crystal(cm)
self.index_reflections(experiments, refl)
indexed = refl.select(refl["id"] >= 0)
indexed = indexed.select(indexed.get_flags(indexed.flags.indexed))
# If target symmetry supplied, try to apply it. Then, apply the change of basis to the reflections
# indexed in P1 to the target setting
if (
self.params.stills.refine_candidates_with_known_symmetry
and self.params.known_symmetry.space_group is not None
):
new_crystal, cb_op = self._symmetry_handler.apply_symmetry(cm)
if new_crystal is None:
logger.info("Cannot convert to target symmetry, candidate %d", icm)
continue
cm = new_crystal.change_basis(cb_op)
experiments = self.experiment_list_for_crystal(cm)
if not cb_op.is_identity_op():
indexed["miller_index"] = cb_op.apply(indexed["miller_index"])
if params.indexing.stills.refine_all_candidates:
try:
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d initial outlier identification",
icm,
)
acceptance_flags = self.identify_outliers(
params, experiments, indexed
)
# create a new "indexed" list with outliers thrown out:
indexed = indexed.select(acceptance_flags)
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d refinement before outlier rejection",
icm,
)
R = e_refine(
params=params,
experiments=experiments,
reflections=indexed,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
# try to improve the outcome with a second round of outlier rejection post-initial refinement:
acceptance_flags = self.identify_outliers(
params, ref_experiments, indexed
)
# insert a round of Nave-outlier rejection on top of the r.m.s.d. rejection
nv0 = NaveParameters(
params=params,
experiments=ref_experiments,
reflections=indexed,
refinery=R,
graph_verbose=False,
)
nv0()
acceptance_flags_nv0 = nv0.nv_acceptance_flags
indexed = indexed.select(acceptance_flags & acceptance_flags_nv0)
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d after positional and delta-psi outlier rejection",
icm,
)
R = e_refine(
params=params,
experiments=ref_experiments,
reflections=indexed,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
nv = NaveParameters(
params=params,
experiments=ref_experiments,
reflections=indexed,
refinery=R,
graph_verbose=False,
)
crystal_model = nv()
assert (
len(crystal_model) == 1
), "$$$ stills_indexer::choose_best_orientation_matrix, Only one crystal at this stage"
crystal_model = crystal_model[0]
# Drop candidates that after refinement can no longer be converted to the known target space group
if (
not self.params.stills.refine_candidates_with_known_symmetry
and self.params.known_symmetry.space_group is not None
):
(
new_crystal,
cb_op_to_primitive,
) = self._symmetry_handler.apply_symmetry(crystal_model)
if new_crystal is None:
logger.info(
"P1 refinement yielded model diverged from target, candidate %d",
icm,
)
continue
rmsd, _ = calc_2D_rmsd_and_displacements(
R.predict_for_reflection_table(indexed)
)
except Exception as e:
logger.info(
"Couldn't refine candidate %d, %s: %s",
icm,
e.__class__.__name__,
str(e),
)
else:
logger.info(
"$$$ stills_indexer::choose_best_orientation_matrix, candidate %d done",
icm,
)
candidates.append(
CandidateInfo(
crystal=crystal_model,
green_curve_area=nv.green_curve_area,
ewald_proximal_volume=nv.ewald_proximal_volume(),
n_indexed=len(indexed),
rmsd=rmsd,
indexed=indexed,
experiments=ref_experiments,
)
)
else:
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictorFactory,
)
ref_predictor = ExperimentsPredictorFactory.from_experiments(
experiments,
force_stills=True,
spherical_relp=params.refinement.parameterisation.spherical_relp_model,
)
rmsd, _ = calc_2D_rmsd_and_displacements(ref_predictor(indexed))
candidates.append(
CandidateInfo(
crystal=cm,
n_indexed=len(indexed),
rmsd=rmsd,
indexed=indexed,
experiments=experiments,
)
)
if len(candidates) == 0:
raise DialsIndexError("No suitable indexing solution found")
logger.info("**** ALL CANDIDATES:")
for i, XX in enumerate(candidates):
logger.info("\n****Candidate %d %s", i, XX)
cc = XX.crystal
if hasattr(cc, "get_half_mosaicity_deg"):
logger.info(
" half mosaicity %5.2f deg.", (cc.get_half_mosaicity_deg())
)
logger.info(" domain size %.0f Ang.", (cc.get_domain_size_ang()))
logger.info("\n**** BEST CANDIDATE:")
results = flex.double([c.rmsd for c in candidates])
best = candidates[flex.min_index(results)]
logger.info(best)
if params.indexing.stills.refine_all_candidates:
if best.rmsd > params.indexing.stills.rmsd_min_px:
raise DialsIndexError(f"RMSD too high, {best.rmsd:f}")
if len(candidates) > 1:
for i in range(len(candidates)):
if i == flex.min_index(results):
continue
if best.ewald_proximal_volume > candidates[i].ewald_proximal_volume:
logger.info(
"Couldn't figure out which candidate is best; picked the one with the best RMSD."
)
best.indexed["entering"] = flex.bool(best.n_indexed, False)
return best.crystal, best.n_indexed
def identify_outliers(self, params, experiments, indexed):
if not params.indexing.stills.candidate_outlier_rejection:
return flex.bool(len(indexed), True)
logger.info("$$$ stills_indexer::identify_outliers")
refiner = e_refine(params, experiments, indexed, graph_verbose=False)
RR = refiner.predict_for_reflection_table(indexed)
px_sz = experiments[0].detector[0].get_pixel_size()
class Match:
pass
matches = []
for item in RR.rows():
m = Match()
m.x_obs = item["xyzobs.px.value"][0] * px_sz[0]
m.y_obs = item["xyzobs.px.value"][1] * px_sz[1]
m.x_calc = item["xyzcal.px"][0] * px_sz[0]
m.y_calc = item["xyzcal.px"][1] * px_sz[1]
m.miller_index = item["miller_index"]
matches.append(m)
import iotbx.phil
from rstbx.phil.phil_preferences import indexing_api_defs
hardcoded_phil = iotbx.phil.parse(input_string=indexing_api_defs).extract()
from rstbx.indexing_api.outlier_procedure import OutlierPlotPDF
# comment this in if PDF graph is desired:
# hardcoded_phil.indexing.outlier_detection.pdf = "outlier.pdf"
# new code for outlier rejection inline here
if hardcoded_phil.indexing.outlier_detection.pdf is not None:
hardcoded_phil.__inject__(
"writer", OutlierPlotPDF(hardcoded_phil.indexing.outlier_detection.pdf)
)
# execute Sauter and Poon (2010) algorithm
from rstbx.indexing_api import outlier_detection
od = outlier_detection.find_outliers_from_matches(
matches, verbose=True, horizon_phil=hardcoded_phil
)
if hardcoded_phil.indexing.outlier_detection.pdf is not None:
od.make_graphs(canvas=hardcoded_phil.writer.R.c, left_margin=0.5)
hardcoded_phil.writer.R.c.showPage()
hardcoded_phil.writer.R.c.save()
return od.get_cache_status()
def refine(self, experiments, reflections):
acceptance_flags = self.identify_outliers(
self.all_params, experiments, reflections
)
# create a new "reflections" list with outliers thrown out:
reflections = reflections.select(acceptance_flags)
R = e_refine(
params=self.all_params,
experiments=experiments,
reflections=reflections,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
# try to improve the outcome with a second round of outlier rejection post-initial refinement:
acceptance_flags = self.identify_outliers(
self.all_params, ref_experiments, reflections
)
# insert a round of Nave-outlier rejection on top of the r.m.s.d. rejection
nv0 = NaveParameters(
params=self.all_params,
experiments=ref_experiments,
reflections=reflections,
refinery=R,
graph_verbose=False,
)
nv0()
acceptance_flags_nv0 = nv0.nv_acceptance_flags
reflections = reflections.select(acceptance_flags & acceptance_flags_nv0)
R = e_refine(
params=self.all_params,
experiments=ref_experiments,
reflections=reflections,
graph_verbose=False,
)
ref_experiments = R.get_experiments()
nv = NaveParameters(
params=self.all_params,
experiments=ref_experiments,
reflections=reflections,
refinery=R,
graph_verbose=False,
)
nv()
rmsd, _ = calc_2D_rmsd_and_displacements(
R.predict_for_reflection_table(reflections)
)
matches = R.get_matches()
xyzcal_mm = flex.vec3_double(len(reflections))
xyzcal_mm.set_selected(matches["iobs"], matches["xyzcal.mm"])
reflections["xyzcal.mm"] = xyzcal_mm
reflections.set_flags(matches["iobs"], reflections.flags.used_in_refinement)
reflections["entering"] = flex.bool(len(reflections), False)
if self.all_params.indexing.stills.set_domain_size_ang_value is not None:
for exp in ref_experiments:
exp.crystal.set_domain_size_ang(
self.all_params.indexing.stills.set_domain_size_ang_value
)
if self.all_params.indexing.stills.set_mosaic_half_deg_value is not None:
for exp in ref_experiments:
exp.crystal.set_half_mosaicity_deg(
self.all_params.indexing.stills.set_mosaic_half_deg_value
)
return ref_experiments, reflections
"""Mixin class definitions that override the dials indexing class methods specific to stills"""
class StillsIndexerKnownOrientation(IndexerKnownOrientation, StillsIndexer):
pass
class StillsIndexerBasisVectorSearch(StillsIndexer, BasisVectorSearch):
pass
class StillsIndexerLatticeSearch(StillsIndexer, LatticeSearch):
    """Lattice-search indexing for still images.

    Pure mixin combination: ``StillsIndexer`` comes first in the MRO so
    its stills-specific method overrides take precedence over the
    ``LatticeSearch`` strategy implementation. No additional behaviour
    is defined here.
    """

    pass
| dials/dials | algorithms/indexing/stills_indexer.py | Python | bsd-3-clause | 34,055 | [
"CRYSTAL"
] | aab859a5a4302f81cc8a8a3cc906a895b8cc922db7226bc995ed4803c1432ff6 |
# -*- coding: utf-8 -*-
{
"'Cancel' will indicate an asset log entry did not occur": "'cancelar' irá indicar que a entrada de log de ativo não ocorreu",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Um local que especifica a área geográfica dessa região. Este pode ser um local a partir da hierarquia local, ou um "grupo local", ou um local que tem um limite para a área.',
"Acronym of the organization's name, eg. IFRC.": 'Acrônimo do nome da organização, por exemplo, FICV.',
"Authenticate system's Twitter account": 'Sistema de Autenticação para conta de Twitter',
"Can't import tweepy": 'Não pode importar tweepy',
"Caution: doesn't respect the framework rules!": 'Cuidado: não respeitar as regras de enquadramento!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatar a lista de valores de atributos & o valor RGB a ser usado para esses como o objeto JSON, Exemplo: {Red: '#FF0000, Green: '#00FF00', Yellow: '#FFFF00'}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Se selecionado, esta localização do ativo será atualizado sempre que a localização da pessoa é atualizada.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Se esta configuração representa uma região para o menu regiões, dê-lhe um nome a ser utilizado no menu. O nome de uma configuração pessoal do mapa será configurado para o nome do usuário.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Se esse campo for Preenchido, então, um usuário que especificar esta organização quando se registrar será designado como um agente desta organização a menos que seu domínio não corresponde ao campo de domínio.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Se isso for ticado, se tornará a base geográfica do usuário e, consequentemente onde este aparece no mapa.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Se você não vê o Hospital na lista, você pode incluir um novo clicando no link 'Criar Hospital'.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Se você não vê o escritório na lista, você pode incluir um novo clicando no link 'Criar escritório'.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Se voce não vê a Organização na lista, voce poderá adicionar uma nova clicando no link "Criar Organização"',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Em vez de sincronizar automaticamente com outros pares pela rede, voce também pode sincronizar com arquivos, o que é necessário quando não há rede. Você pode utilizar esta página para importar dados de sincronização de arquivos e também exportar dados para arquivos de Sincronização. Clique no link à direita para ir para esta página.',
"Level is higher than parent's": 'Nível superior ao dos pais',
"Need a 'url' argument!": "Precisa de um argumento ' url!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "opcional O nome da coluna de geometria. Em PostGIS padroniza para 'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": 'Nível dos pais deve ser maior que o nível do registro. Nível do Pai é',
"Password fields don't match": 'Os campos de senha não são iguais.',
"Phone number to donate to this organization's relief efforts.": 'Número de telefone para doar ao serviço de assistência social desta organização',
"Please come back after sometime if that doesn't help.": 'Por favor, volte após algum tempo se isso não ajuda.',
"Quantity in %s's Inventory": 'Quantidade de %s do Inventário',
"Select a Room from the list or click 'Create Room'": "Escolha uma sala da lista ou clique 'Criar sala'",
"Select a person in charge for status 'assigned'": "Selecione uma pessoa responsável para status 'DESIGNADO'",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Selecione isto se todas as localidades especificas precisarem de um pai no nível mais alto da hierarquia. Por exemplo, se 'distrito' é a menor divisão na hierarquia e, em seguida, todos os locais específicos seriam obrigados a ter um distrito como um pai.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Selecione isto se todos os locais específicos de uma posição pai na hierarquia do local. Isso pode ajudar na configuração de uma "região" representando uma área afetada.',
"Sorry, things didn't get done on time.": 'Desculpe ! As tarefas não foram concluídas em tempo útil.',
"Sorry, we couldn't find that page.": 'Desculpe, não foi possível localizar essa página.',
"System's Twitter account updated": 'DO SISTEMA Chilreiam conta ATUALIZADO',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "O doador(s) para este projeto. Vários valores podem ser selecionados ao manter pressionado a chave 'control'",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'A URL do arquivo de imagem. Se voce não fizer o upload de um arquivo de imagem, então voce deverá especificar sua localização aqui.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para pesquisar por nome, digite qualquer do primeiro, meio ou últimos nomes, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Para procurar um corpo, digite o número da ID do corpo. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os organismos.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Para procurar um hospital, digite qualquer um dos nomes ou IDs do hospital, ou o nome da organização ou Acrônimo, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os hospitais.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Para procurar um hospital, digite qualquer um dos nomes ou IDs do hospital, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os hospitais.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Para procurar um local, digite o nome. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os locais.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para procurar por uma pessoa, digite qualquer do primeiro, meio ou últimos nomes e/ou um número de ID de uma pessoa, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.",
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para procurar por uma pessoa, digite ou o primeiro nome, ou o nome do meio ou sobrenome, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Para procurar por uma avaliação, digite qualquer parte o número da permissão da avaliação. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as avaliações.",
"Type the first few characters of one of the Person's names.": 'Digite os primeiros caracteres de um dos nomes da pessoa.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Fazer atualizacao de um arquivo de imagem aqui. Se voce não fizer o upload de um arquivo de imagem, então voce deverá especificar sua localização no campo URL',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'Visualizar/Alterar a base de dados directamente ( cuidado : não cumpre com as regras da infraestrutura ! ) ).',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Quando Sincronizando dados com outros, os conflitos acontecem em casos onde dois (ou mais) grupos desejam sincronizar informações que os dois tenham modificado, ou seja, informações conflitantes. Módulo de sincronização tenta resolver esses conflitos automaticamente mas em alguns casos isso não consegue. Nesses casos, cabe a si resolver esses conflitos manualmente, clique no link à direita para ir para esta página.',
"You haven't made any calculations": 'Não fez quaisquer cálculos.',
"couldn't be parsed so NetworkLinks not followed.": 'Não pôde ser analisado então o NetworkLinks não seguiu.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Inclui um GroundOverlay ou ScreenOverlay que não são ainda suportados em OpenLayuers, portanto poderá não funcionar na totalidade.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "field1=\'newvalue\'". Não é possível atualizar ou excluir os resultados de uma junção',
'# of International Staff': '# De equipe internacional',
'# of National Staff': '# De equipe nacional',
'# of Vehicles': '# De Veículos',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nSe o tipo de pedido é "%(type)s", digite a %(type)s na próxima tela.',
'%(system_name)s - Verify Email': '%(system_name)s - Verificar E-Mail',
'%s Create a new site or ensure that you have permissions for an existing site.': '%s Cria um novo site ou garante que você tenha permissões para um site existente.',
'%s rows deleted': '%s linhas excluídas',
'%s rows updated': '%s linhas atualizadas',
'& then click on the map below to adjust the Lat/Lon fields': 'Em seguida selecione o mapa abaixo para ajustar os campos Lat/Lon',
'* Required Fields': '* campos obrigatórios',
'0-15 minutes': '0-15 minutos',
'1 Assessment': '1 Avaliação',
'1 location, shorter time, can contain multiple Tasks': '1 Local, menos tempo, pode conter várias Tarefas',
'1-3 days': '1 a 3 dias',
'15-30 minutes': '15 a 30 minutos',
'2 different options are provided here currently:': '2 opções diferentes são fornecidos aqui atualmente:',
'2x4 Car': 'Carro 2x4',
'30-60 minutes': '30-60 minutos',
'4-7 days': '4-7 Dias',
'4x4 Car': 'Carro 4x4',
'8-14 days': '8-14 Dias',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Um marcador assinalado para um local individual é configurado se há a necessidade de substituir um marcador assinalado para o Recurso Classe.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Um documento de referência como um arquivo, URL ou contacto pessoal para verificar esses dados. Pode inserir as primeiras letras do nome dum documento para chegar a esse documento.',
'A brief description of the group (optional)': 'Uma descrição breve do grupo (opcional)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Um ficheiro descarregado de um GPS contendo uma série de pontos geográficos em formato XML.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Um ficheiro em formato GPX retirado de um GPS cujas datas e horas podem ser correlacionadas com as de fotografias para localização num mapa.',
'A library of digital resources, such as photos, documents and reports': 'Uma biblioteca de recursos digitais, como fotos, documentos e relatórios',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Um grupo local pode ser usado para definir a extensão de uma área afetada, se não cair dentro de uma região administrativa.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'Um grupo de localização é um conjunto de locais (muitas vezes, um conjunto de regiões administrativas que representam uma área Combinada). Membros locais são adicionados em grupos locais aqui. Grupos locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades locais abrangidas no grupo. Um grupo local pode ser usado para definir a extensão de uma área afetada, se não cair dentro de uma região administrativa. Grupos local pode ser utilizado no menu Regiões.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Um grupo de localização é um conjunto de locais (muitas vezes, um conjunto de regiões administrativas que representam uma área Combinada).',
'A location group must have at least one member.': 'Um grupo de localização deve ter, pelo menos, um membro.',
'A survey series with id %s does not exist. Please go back and create one.': 'Id% não foi encontrado na pesquisa. Por favor voltar e crie um.',
'ABOUT THIS MODULE': 'SOBRE ESTE MÓDULO',
'ACCESS DATA': 'Dados de Acesso',
'ANY': 'Todos',
'API is documented here': 'API está documentado aqui',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Rápida Avaliação modificado para a Nova Zelândia',
'Abbreviation': 'Abreviatura',
'Ability to Fill Out Surveys': 'Capacidade para preencher Inquéritos',
'Ability to customize the list of details tracked at a Shelter': 'Capacidade de Customizar a lista de detalhes rastreados em um Abrigo',
'Ability to customize the list of human resource tracked at a Shelter': 'Capacidade de Customizar a lista de recursos humanos Rastreados em um Abrigo',
'Ability to customize the list of important facilities needed at a Shelter': 'Capacidade de Customizar a lista das instalações importante necessária em um Abrigo',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Capacidade para visualizar resultados de Concluída e/ou parcialmente preenchido Pesquisas',
'About Sahana': 'Sobre Sahana',
'About': 'sobre',
'Access denied': 'Acesso negado',
'Access to Shelter': 'Acesso a Abrigo',
'Access to education services': 'Acesso a serviços de educação',
'Accessibility of Affected Location': 'Acessibilidade do Local Afectado',
'Account Registered - Please Check Your Email': 'Conta registrada - verifique seu e-mail',
'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Conta registrada, mas registro pende aprovação - por favor aguarde até confirmação ser recebida.',
'Acronym': 'Iniciais',
'Actionable by all targeted recipients': 'Acionáveis por todos os destinatários de destino',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Acionáveis apenas pelos participantes exercício designado; Identificação do excercício deve aparecer em',
'Actioned?': 'Acionado?',
'Actions taken as a result of this request.': 'Ações tomadas como resultado desse pedido.',
'Actions': 'Ações',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Ativar eventos dos templates de cenário para alocação adequada de recursos (humanos, ativos e equipamentos)',
'Active Problems': 'Problemas ativos',
'Active': 'ativo',
'Activities matching Assessments:': 'Atividades correspondentes a Avaliações:',
'Activities of boys 13-17yrs before disaster': 'Atividades de garotos 13-17 anos antes do desastre',
'Activities of boys 13-17yrs now': 'Atividades de garotos 13-17yrs agora',
'Activities of boys <12yrs before disaster': 'Atividades de garotos <12 anos antes do desastre',
'Activities of boys <12yrs now': 'Atividades de garotos <12 anos agora',
'Activities of children': 'Atividades de crianças',
'Activities of girls 13-17yrs before disaster': 'Atividades de meninas 13-17yrs antes de desastres',
'Activities of girls 13-17yrs now': 'Atividades de meninas 13-17yrs agora',
'Activities of girls <12yrs before disaster': 'Atividades de meninas <12yrs antes de desastres',
'Activities of girls <12yrs now': 'Agora atividades de meninas de menos de 12 anos',
'Activities': 'Atividades',
'Activity Added': 'Atividade Incluída',
'Activity Deleted': 'Atividade Apagada',
'Activity Details': 'Detalhes da Atividade',
'Activity Report': 'Relatório de atividades',
'Activity Reports': 'Relatório de Atividades',
'Activity Type': 'Tipo de atividade',
'Activity Updated': 'Atividade Atualizada',
'Activity': 'atividade',
'Add Activity Type': 'Incluir tipo de atividade',
'Add Address': 'Incluir Endereço',
'Add Alternative Item': 'Incluir item alternativo',
'Add Assessment Summary': 'Incluir Avaliação De Resumo',
'Add Assessment': 'Incluir Avaliação',
'Add Asset Log Entry - Change Label': 'Incluir recurso de entrada de entrada - trocar a Etiqueta',
'Add Availability': 'Incluir Disponibilidade',
'Add Baseline Type': 'Incluir Linha De Tipo',
'Add Baseline': 'Incluir Linha',
'Add Bundle': 'Incluir Pacote Configurável',
'Add Camp Service': 'Incluir acampamento de serviço',
'Add Camp Type': 'Incluir tipo de acampamento',
'Add Camp': 'Incluir acampamento',
'Add Certificate for Course': 'Incluir Certificado de Curso',
'Add Certification': 'Adicionar Certificação',
'Add Competency': 'incluir competência',
'Add Contact': 'Criar contato',
'Add Contact Information': 'Incluir informações de contato',
'Add Credential': 'Incluir referência',
'Add Credentials': 'Incluir Referências',
'Add Disaster Victims': 'Incluir Vítimas de Desastre',
'Add Distribution.': 'Incluir distribuição.',
'Add Donor': 'Incluir doador',
'Add Flood Report': 'Incluir Relatório Enchente',
'Add Group Member': 'Incluir Membro do Grupo',
'Add Human Resource': 'Incluir Recurso Humano',
'Add Identity': 'Incluir Identidade',
'Add Image': 'Incluir Imagem',
'Add Impact Type': 'Incluir Tipo De Impacto',
'Add Impact': 'Adicionar Impacto',
'Add Inventory Item': 'Inclúir item de inventário',
'Add Item to Catalog': 'Incluir Item no Catálogo',
'Add Item to Commitment': 'Incluir Item no Compromisso',
'Add Item to Inventory': 'Incluir Item de Inventário',
'Add Item to Request': 'Incluir Item para pedido',
'Add Item to Shipment': 'Adicionar Item para Embarque',
'Add Item': 'Incluir item',
'Add Job Role': 'Incluir tarefa Função',
'Add Key': 'Incluir Chave',
'Add Kit': 'Adicionar Kit',
'Add Level 1 Assessment': 'Incluir nível de Avaliação 1',
'Add Level 2 Assessment': 'Incluir nível de Avaliação 2',
'Add Log Entry': 'Incluir Entrada de Log',
'Add Member': 'Incluir Membro',
'Add Membership': 'Incluir Associação',
'Add Message': 'Incluir Mensagem',
'Add Mission': 'Incluir Missão',
'Add Need Type': 'Adicionar o tipo Necessário',
'Add Need': 'Incluir o necessário',
'Add New Assessment Summary': 'Incluir novo Resumo de Avaliação',
'Add New Baseline Type': 'Incluir novo tipo de linha de base',
'Add New Baseline': 'Incluir nova linha de base',
'Add New Budget': 'Adicionar Novo Orçamento',
'Add New Bundle': 'Incluir Novo Pacote',
'Add New Camp Service': 'Inlcuir Novo Campo de Serviço',
'Add New Camp Type': 'Incluir Novo Campo de Tipo',
'Add New Camp': 'Incluir novo Campo',
'Add New Cluster Subsector': 'Adicionar novo subgrupo',
'Add New Cluster': 'Adicionar novo grupo',
'Add New Commitment Item': 'Incluir novo item de compromisso',
'Add New Document': 'Incluir Novo Documento',
'Add New Donor': 'Adicionar novo doador',
'Add New Entry': 'Incluir Nova Entrada',
'Add New Event': 'Adicionar novo evento',
'Add New Flood Report': 'Adicionar novo relatório de cheias',
'Add New Human Resource': 'Incluir novos recursos humanos',
'Add New Image': 'Adicionar nova imagem',
'Add New Impact Type': 'Incluir novo Tipo De Impacto',
'Add New Impact': 'Adicionar novo impacto',
'Add New Inventory Item': 'Incluir novo Item De Inventário',
'Add New Item to Kit': 'Incluir novo Item de Kit',
'Add New Key': 'Adicionar Nova Chave',
'Add New Level 1 Assessment': 'Incluir novo nível 1 avaliação',
'Add New Level 2 Assessment': 'Incluir novo nível 2 avaliação',
'Add New Member': 'Incluir Novo Membro',
'Add New Membership': 'Incluir novo membro',
'Add New Need Type': 'Incluir novo Tipo Necessário',
'Add New Need': 'Adicionar novas necessidades',
'Add New Note': 'Adicionar NOVA NOTA',
'Add New Population Statistic': 'Incluir nova População De Estatística',
'Add New Problem': 'Incluir novo Problema',
'Add New Rapid Assessment': 'Incluir nova Avaliação Rápida',
'Add New Received Item': 'Incluir novo Item Recebido',
'Add New Record': 'Incluir Novo Registro',
'Add New Request Item': 'Incluir novo Item de Pedido',
'Add New Request': 'Incluir novo pedido',
'Add New River': 'Incluir novo Rio',
'Add New Role to User': 'Incluir nova função para o usuário',
'Add New Scenario': 'Adicionar Novo cenário',
'Add New Sent Item': 'Incluir novo Item Enviado',
'Add New Setting': 'Adicionar Nova Configuração',
'Add New Solution': 'Adicionar nova solução',
'Add New Staff Type': 'Incluir novo tipo de equipe.',
'Add New Staff': 'Adicionar Nova Equipe',
'Add New Subsector': 'Incluir novo Subsector',
'Add New Survey Answer': 'Incluir nova resposta na pesquisa.',
'Add New Survey Question': 'Incluir nova pergunta na pesquisa.',
'Add New Survey Section': 'Incluir nova seção na pesquisa.',
'Add New Survey Series': 'Incluir nova série na pesquisa.',
'Add New Survey Template': 'Incluir novo Modelo De Pesquisa',
'Add New Team': 'Adicionar nova equipe',
'Add New Ticket': 'Incluir nova permissão',
'Add New Track': 'Adicionar Nova Pista',
'Add New User to Role': 'Adicionar Novo usuário para Função',
'Add New': 'Incluir novo',
'Add Note': 'Incluir nota',
'Add Peer': 'Incluír Par',
'Add Person': 'incluir pessoa',
'Add Photo': 'Incluir Foto',
'Add Population Statistic': 'Incluir População Estatística',
'Add Position': 'Adicionar Posição',
'Add Problem': 'Adicionar Problema',
'Add Question': 'Adicionar Pergunta',
'Add Rapid Assessment': 'Adicionar Avaliação Rápida',
'Add Record': 'Incluir Registro',
'Add Reference Document': 'Incluir documento de referência',
'Add Report': 'Incluir Relatório',
'Add Request': 'Incluir Pedido',
'Add Section': 'Incluir Secção',
'Add Setting': 'Adicionar Definição',
'Add Skill Equivalence': 'Incluir equivalência de habilidades',
'Add Skill Provision': 'Incluir provisão de habilidades',
'Add Solution': 'Incluir Solução',
'Add Staff Type': 'Incluir tipo de equipe',
'Add Staff': 'Incluir equipe',
'Add Subscription': 'Incluir Assinatura',
'Add Subsector': 'Incluir Subsetor',
'Add Survey Answer': 'Incluir resposta de pesquisa',
'Add Survey Question': 'Adicionar pergunta da pesquisa',
'Add Survey Section': 'Incluir seção da pesquisa',
'Add Survey Series': 'Incluir série da pesquisa',
'Add Survey Template': 'Incluir Modelo De Pesquisa',
'Add Team Member': 'Incluir membro',
'Add Team': 'Incluir equipe',
'Add Ticket': 'Adicionar Bilhete',
'Add Training': 'Incluir Treinamento',
'Add Unit': 'Incluir Unidade',
'Add Volunteer Availability': 'Incluir disponibilidade do voluntário',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Adicionar um documento de referência como um arquivo, URL ou contacto pessoal para verificar esses dados. Se você não inserir um documento de referência, seu e-mail será exibido no lugar.',
'Add a Volunteer': 'Incluir um Voluntário',
'Add a new certificate to the catalog.': 'Incluir um novo certificado no catálogo.',
'Add a new competency rating to the catalog.': 'Adicionar uma classificação nova competência para o catálogo.',
'Add a new course to the catalog.': 'Adicionar um novo rumo para o catálogo.',
'Add a new job role to the catalog.': 'Incluir uma função nova tarefa para o catálogo.',
'Add a new skill provision to the catalog.': 'Incluir uma disposição nova habilidade para o catálogo.',
'Add a new skill to the catalog.': 'Incluir uma nova habilidade para o catálogo.',
'Add a new skill type to the catalog.': 'Incluir um tipo novo de hailidade para o catálogo.',
'Add new Group': 'Adicionar novo grupo',
'Add new Individual': 'Incluir novo indivíduo',
'Add new project.': 'Adicionar novo projeto.',
'Add new staff role.': 'Incluir função de novos funcionários.',
'Add staff members': 'Incluir membros da equipe',
'Add to Bundle': 'Incluir no Pacote Configurável',
'Add to budget': 'Incluir no orçamento',
'Add volunteers': 'Incluir voluntários',
'Add': 'incluir',
'Add/Edit/Remove Layers': 'Incluir/editar/remover camadas',
'Added to Group': 'Associação incluído',
'Added to Team': 'Associação incluído',
'Additional Beds / 24hrs': 'Camas adicionais / 24 horas',
'Address Details': 'Detalhes do Endereço',
'Address Type': 'Tipo de Endereço',
'Address added': 'Endereço incluído',
'Address deleted': 'Endereço excluído',
'Address updated': 'Endereço actualizado',
'Address': 'endereços',
'Addresses': 'Endereços',
'Adequate food and water available': 'Comida e água adequado disponível',
'Adequate': 'adequar',
'Admin Email': 'email do administrador',
'Admin Name': 'nome do administrador',
'Admin Tel': 'Telefone do administrador',
'Administration': 'administração',
'Admissions/24hrs': 'admissões/24 horas',
'Adolescent (12-20)': 'adolescente (12-20)',
'Adolescent participating in coping activities': 'Adolescente participando em actividades de superação',
'Adult (21-50)': 'Adulto (21-50)',
'Adult ICU': 'UTI para adultos',
'Adult Psychiatric': 'Psiquiátrico para adultos',
'Adult female': 'Mulher adulta',
'Adult male': 'Homem adulto',
'Adults in prisons': 'Adultos em prisões',
'Advanced:': 'Avançado:',
'Advisory': 'Aconselhamento',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Depois de pressionar o botão será mostrado um conjunto de dois elementos, um de cada vez. Por favor selecione a uma solução de cada par de sua preferência sobre o outro.',
'Age Group': 'Grupo etário',
'Age group does not match actual age.': 'Grupo etário não corresponde à idade real.',
'Age group': 'Grupo etário',
'Aggravating factors': 'Fatores agravantes',
'Agriculture': 'Agricultura',
'Air Transport Service': 'Serviço de Transporte Aéreo',
'Aircraft Crash': 'Despenho de Avião',
'Aircraft Hijacking': 'Sequestro de Avião',
'Airport Closure': 'Encerramento de Aeroporto',
'Airspace Closure': 'Encerramento de Espaço Aéreo',
'Alcohol': 'álcool',
'Alert': 'Alertar',
'All Inbound & Outbound Messages are stored here': 'Todas as mensagens enviadas e recebidas são armazenados aqui',
'All Resources': 'Todos os recursos',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Todos os dados fornecidos pelos Sahana Software Foundation a partir deste site é licenciado sob uma Licença Atribuição Comuns criativos. No entanto, nem todos os dados se origina aqui. Por favor consulte o campo de origem de cada entrada.',
'All': 'Tudo',
'Allowed to push': 'Permissão para pressionar',
'Allows a Budget to be drawn up': 'Permite que um orçamento seja estabelecido',
'Allows authorized users to control which layers are available to the situation map.': 'Permite usuários autorizados a controlar quais camadas estão disponíveis no mapa de situação.',
'Alternative Item Details': 'Detalhes do Item alternativo',
'Alternative Item added': 'Item alternativo incluído',
'Alternative Item deleted': 'Item alternativo excluído',
'Alternative Item updated': 'Item Alternativo atualizado',
'Alternative Item': 'Item Alternativo',
'Alternative Items': 'Itens alternativos',
'Alternative places for studying': 'Locais alternativos para estudo',
'Ambulance Service': 'Serviço de Ambulância',
'An asset must be assigned to a person, site OR location.': 'Um ATIVO deve ser designado a uma pessoa, local ou site.',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Um sistema de admissão, um sistema de gestão de depósitos, tracking and commodity, gestão da cadeia de fornecimentos, aquisições de ativos e outros e os recursos de gerenciamento de recurso.',
'An item which can be used in place of another item': 'Um item que pode ser utilizado no lugar de outro item',
'Analysis of Completed Surveys': 'Análise das Pesquisas Concluídas',
'Animal Die Off': 'Morte Animal',
'Animal Feed': 'Alimentação Animal',
'Answer Choices (One Per Line)': 'Resposta opções (Um por linha)',
'Antibiotics available': 'Antibióticos disponíveis',
'Antibiotics needed per 24h': 'Antibióticos necessário por H',
'Apparent Age': 'Idade aparente',
'Apparent Gender': 'Género aparente',
'Application Deadline': 'Prazo Final da aplicação',
'Applications': 'Requisições',
'Approve': 'Aprovar',
'Approved': 'aprovado',
'Approver': 'Aprovador',
'Arctic Outflow': 'Árctico Exfluxo',
'Area': 'Área',
'Areas inspected': 'Inspeccionados áreas',
'Assessment Details': 'Detalhes da Avaliação',
'Assessment Reported': 'Avaliação Relatada',
'Assessment Summaries': 'Sumário de Avaliações',
'Assessment Summary Details': 'Detalhes do sumário de avaliação',
'Assessment Summary added': 'Anexado sumário de avaliações',
'Assessment Summary deleted': 'Avaliação de resumo apagado',
'Assessment Summary updated': 'Sumário de avaliação atualizado',
'Assessment added': 'Avaliação incluída',
'Assessment admin level': 'Avaliação de nível administrativo',
'Assessment deleted': 'Avaliação excluída',
'Assessment timeline': 'sequência temporal de avaliação',
'Assessment updated': 'Avaliação atualizada',
'Assessment': 'Avaliação',
'Assessments Needs vs. Activities': 'Necessidades de Avaliações vs. Atividades',
'Assessments and Activities': 'Avaliações e Atividades',
'Assessments': 'avaliações',
'Assessments:': 'Avaliações',
'Assessor': 'Avaliador',
'Asset Assigned': 'Ativo Designado',
'Asset Assignment Details': 'Detalhes da Designação de Recursos',
'Asset Assignment deleted': 'Designação De ativo excluído',
'Asset Assignment updated': 'Atribuição de Ativo atualizada',
'Asset Assignments': 'Designações de Ativo',
'Asset Details': 'Detalhes do Ativo',
'Asset Log Details': 'Detalhes do Log de ativos',
'Asset Log Empty': 'Log de Ativos vazio',
'Asset Log Entry Added - Change Label': 'Adicionada uma entrada no Log de ativos -Alterar Etiqueta',
'Asset Log Entry deleted': 'Apagada uma entrada no Log de ativos',
'Asset Log Entry updated': 'Atualizada uma entrada no Log de Ativos',
'Asset Log': 'Log de ATIVOS',
'Asset Management': 'gerenciamento de recursos',
'Asset Number': 'número do recurso',
'Asset added': 'Ativo Incluído',
'Asset deleted': 'ativo excluído',
'Asset removed': 'Ativo Removido',
'Asset updated': 'recurso atualizado',
'Asset': 'Recurso',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Os ativos são recursos que não são consumíveis e serão devolvidos, portanto precisam de rastreamento.',
'Assets': 'recursos',
'Assign Asset': 'designar recurso',
'Assign Group': 'Designar Grupo',
'Assign Staff': 'Atribuir Equipe',
'Assign to Org.': 'Designar para Org.',
'Assign to Organization': 'Atribuir para Organização',
'Assign to Person': 'Atribuir uma Pessoa',
'Assign to Site': 'Atribuir um Site',
'Assign': 'Designar',
'Assigned By': 'Designado por',
'Assigned To': 'Designado Para',
'Assigned to Organization': 'Designado para Organização',
'Assigned to Person': 'Designado para a Pessoa',
'Assigned to Site': 'Designado para o Site',
'Assigned to': 'Designado para',
'Assigned': 'Designado',
'Assignments': 'Designações',
'At/Visited Location (not virtual)': 'Em/Visitou Local (não virtual)',
'Attend to information sources as described in <instruction>': 'Participar de fontes de informação, conforme descrito em<instruction>',
'Attribution': 'Atribuição',
'Author': 'autor',
'Availability': 'Disponibilidade',
'Available Alternative Inventories': 'Alternativas de Inventário disponíveis',
'Available Alternative Inventory Items': 'Itens alternativos de Inventário disponíveis',
'Available Beds': 'camas disponíveis',
'Available Inventories': 'Inventários disponíveis',
'Available Inventory Items': 'Itens de inventário disponíveis',
'Available Messages': 'Mensagens disponíveis',
'Available Records': 'Registros disponíveis',
'Available databases and tables': 'Banco de Dados e Tabelas disponíveis',
'Available for Location': 'Disponível para locação',
'Available from': 'disponível de',
'Available in Viewer?': 'Disponível no visualizador?',
'Available until': 'Disponível até',
'Avoid the subject event as per the <instruction>': 'Evitar o assunto do evento de acordo com a <instruction>',
'Background Color for Text blocks': 'Cor de segundo plano para blocos de texto',
'Background Color': 'Cor de Plano de Fundo',
'Baldness': 'Calvície',
'Bank/micro finance': 'banco/micro finanças',
'Barricades are needed': 'Barricadas são necessárias',
'Base Layer?': 'Camada De Base?',
'Base Location': 'Local da Base',
'Base Site Set': 'Conjunto de Site básico',
'Baseline Data': 'Dados básicos',
'Baseline Number of Beds': 'Numero de camadas base de camas',
'Baseline Type Details': 'Detalhes de Tipo de Linha Base',
'Baseline Type added': 'Tipo de Linha Base adicionado',
'Baseline Type deleted': 'Tipo de Linha Base removido',
'Baseline Type updated': 'Tipo de Linha Base actualizado',
'Baseline Type': 'Tipo de Linha Base',
'Baseline Types': 'Tipos de Linha Base',
'Baseline added': 'Camada Base incluída',
'Baseline deleted': 'Camada Base Excluída',
'Baseline number of beds of that type in this unit.': 'Numero de camadas base de camas desse tipo nesta unidade.',
'Baseline updated': 'Linha Base actulizada',
'Baselines Details': 'Detalhes de Camadas Base',
'Baselines': 'Camadas Base',
'Basic Assessment Reported': 'Avaliação Básica Relatada',
'Basic Assessment': 'Avaliação Básica',
'Basic Details': 'Detalhes Básicos',
'Basic reports on the Shelter and drill-down by region': 'Relatórios básicos sobre o Abrigo e abertura por região',
'Baud rate to use for your modem - The default is safe for most cases': 'Taxa de transmissão para ser usada pelo seu modem - O padrão é seguro para a maioria dos casos',
'Baud': 'Transmissão',
'Beam': 'feixe',
'Bed Capacity per Unit': 'Capacidade cama por Unidade',
'Bed Capacity': 'Capacidade de leitos',
'Bed Type': 'Tipo de cama',
'Bed type already registered': 'Tipo de cama já registrado',
'Below ground level': 'Abaixo do nível do solo',
'Beneficiary Type': 'Tipo de beneficiário',
'Biological Hazard': 'Risco Biológico',
'Biscuits': 'Biscoitos',
'Blizzard': 'Nevasca',
'Blood Type (AB0)': 'Tipo sanguíneo (AB0)',
'Blowing Snow': 'Soprando neve',
'Boat': 'Barco',
'Bodies found': 'Corpos encontrados',
'Bodies recovered': 'corpos recuperados',
'Body Recovery Request': 'Pedido de recuperação de corpos',
'Body Recovery Requests': 'Pedidos de recuperação de corpos',
'Body': 'corpo',
'Bomb Explosion': 'Explosão de bomba',
'Bomb Threat': 'Ameaça de bomba',
'Bomb': 'Bomba',
'Border Color for Text blocks': 'Cor da borda para blocos de texto',
'Bounding Box Insets': 'Delimitadora Inserções Caixa',
'Bounding Box Size': 'CAIXA delimitadora Tamanho',
'Brand Details': 'Detalhes da Marca',
'Brand added': 'Marca incluída',
'Brand deleted': 'Marca excluída',
'Brand updated': 'marca atualizada',
'Brand': 'Marca',
'Brands': 'marcas',
'Bricks': 'Tijolos',
'Bridge Closed': 'PONTE FECHADA',
'Bucket': 'Balde',
'Buddhist': 'Budista',
'Budget Details': 'Detalhes de Orçamento',
'Budget Updated': 'Orçamento Atualizado',
'Budget added': 'Orçamento incluído',
'Budget deleted': 'Orçamento excluído',
'Budget updated': 'Orçamento atualizado',
'Budget': 'Orçamento',
'Budgeting Module': 'Módulo de Orçamento',
'Budgets': 'Orçamentos',
'Buffer': 'buffer',
'Bug': 'erro',
'Building Assessments': 'Avaliações de construção',
'Building Collapsed': 'Edifício Desmoronado',
'Building Name': 'Nome do edifício',
'Building Safety Assessments': 'Regras de Segurança do Edifício',
'Building Short Name/Business Name': 'Nome curto/Nome completo do Edifício',
'Building or storey leaning': 'Edifício ou andar em inclinação',
'Built using the Template agreed by a group of NGOs working together as the': 'Construído de acordo com o formulário acordado por um grupo de ONGs',
'Bulk Uploader': 'Carregador em massa',
'Bundle Contents': 'Conteúdo do Pacote',
'Bundle Details': 'Detalhes do Pacote',
'Bundle Updated': 'Pacote configurável ATUALIZADO',
'Bundle added': 'Pacote incluído',
'Bundle deleted': 'Pacote Excluído',
'Bundle updated': 'Pacote atualizado',
'Bundle': 'Pacote',
'Bundles': 'Pacotes',
'Burn ICU': 'Queimar ICU',
'Burn': 'Gravar',
'Burned/charred': 'Queimados/carbonizados',
'By Facility': 'Por Facilidade',
'By Inventory': 'Por Inventário',
'By Person': 'Por pessoa',
'By Site': 'Por Site',
'CBA Women': 'CBA Mulheres',
'CSS file %s not writable - unable to apply theme!': 'Arquivo CSS %s não é gravável - Impossível aplicar o tema!',
'Calculate': 'calcular',
'Camp Coordination/Management': 'Campo Coordenação/gestão',
'Camp Details': 'Detalhes do Alojamento',
'Camp Service Details': 'Detalhe do Serviço de Campo',
'Camp Service added': 'Serviço de Alojamento incluído',
'Camp Service deleted': 'Serviço de Alojamento excluído',
'Camp Service updated': 'Serviço de campo atualizado',
'Camp Service': 'Serviço de Alojamento',
'Camp Services': 'Serviço de campo',
'Camp Type Details': 'Detalhes do tipo de campo',
'Camp Type added': 'Tipo de Campo incluso.',
'Camp Type deleted': 'Tipo de campo excluído.',
'Camp Type updated': 'Tipo De acampamento atualizado',
'Camp Type': 'Tipo de Campo',
'Camp Types and Services': 'Tipos e serviços de acampamentos',
'Camp Types': 'TIPOS DE acampamento',
'Camp added': 'Alojamento incluído',
'Camp deleted': 'Alojamento excluído',
'Camp updated': 'Acampamento atualizado',
'Camp': 'Acampamento',
'Camps': 'Alojamentos',
'Can only disable 1 record at a time!': 'Pode desativar apenas 1 registro por vez!',
'Cancel Log Entry': 'Cancelar Registro De Entrada',
'Cancel Shipment': 'Cancelar Embarque',
'Cancel': 'Cancelar',
'Canceled': 'cancelado',
'Candidate Matches for Body %s': 'Candidato Corresponde ao Corpo %s',
'Canned Fish': 'Conservas de Peixe',
'Cannot be empty': 'Não pode ser vazio',
'Cannot disable your own account!': 'Voce não pode desativar sua própria conta!',
'Capacity (Max Persons)': 'Capacidade (Máximo De pessoas)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'CAPTURA informações sobre grupos Desastre Vítima (Turistas, passageiros, Famílias, etc. ).',
'Capture Information on each disaster victim': 'Informações de captura em cada vítima Desastre',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturando informações organizacionais de uma organização de ajuda e todos os projetos têm na região',
'Capturing the projects each organization is providing and where': 'Capturando os projetos que cada organização está fornecendo e onde',
'Cardiology': 'Cardiologia',
'Cassava': 'Mandioca',
'Casual Labor': 'Trabalho Casual',
'Casualties': 'Acidentes',
'Catalog Details': 'Detalhes do Catálogo',
'Catalog Item added': 'Item incluído no catálogo',
'Catalog Item deleted': 'Catálogo de Item excluído',
'Catalog Item updated': 'Item do catálogo de atualização',
'Catalog Item': 'Item do catálogo de',
'Catalog Items': 'Itens do Catálogo',
'Catalog added': 'Catálogo Incluído',
'Catalog deleted': 'Catálogo excluído',
'Catalog updated': 'Catálogo Atualizado',
'Catalog': 'catálogo',
'Catalogs': 'Catálogos',
'Categories': 'Categorias',
'Category': 'Categoria',
'Ceilings, light fixtures': 'Tetos, luminarias',
'Central point to record details on People': 'Ponto Central para registrar detalhes sobre pessoas',
'Certificate Catalog': 'Catálogo de Certificados',
'Certificate Details': 'Detalhes do Certificado',
'Certificate Status': 'Status do Certificado',
'Certificate added': 'Certificado incluído',
'Certificate deleted': 'Certificado Removido',
'Certificate updated': 'Certificado Actualizado',
'Certificates': 'Certificados',
'Certification Details': 'Detalhes da Certificação',
'Certification added': 'Certificação incluída',
'Certification deleted': 'Certificação excluída',
'Certification updated': 'Certificação atualizada',
'Certification': 'Certificação',
'Certifications': 'Certificações',
'Certifying Organization': 'Certificação da Organização',
'Change Password': 'Alterar Senha',
'Check Request': 'Verificar Pedido',
'Check for errors in the URL, maybe the address was mistyped.': 'Verifique se há erros na URL, talvez o endereço foi digitado incorretamente.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Verifique se a URL está apontando para um diretório em vez de uma página da Web.',
'Check outbox for the message status': 'Outbox para verificar o status da mensagem',
'Check to delete': 'Verificar para Excluir',
'Check': 'Verifique',
'Check-in': 'Registrar Entrada',
'Check-out': 'Registrar Saída',
'Checked': 'verificado',
'Checklist created': 'Lista de verificação criada',
'Checklist deleted': 'Lista de verificação excluída',
'Checklist of Operations': 'Lista de Verificação das Operações',
'Checklist updated': 'Lista de verificação atualizado',
'Checklist': 'lista de verificação',
'Chemical Hazard': 'Risco Químico',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Ameaça ou ataque Químico, Biológico, Radiológico, Nuclear ou de alto concentração Explosiva',
'Chicken': 'Frango',
'Child (2-11)': 'Criança (2-11)',
'Child (< 18 yrs)': 'Criança (< 18 anos)',
'Child Abduction Emergency': 'Emergência de Rapto De Criança',
'Child headed households (<18 yrs)': 'Famílias chefiadas por Filho (<18 anos)',
'Child': 'Criança',
'Children (2-5 years)': 'Crianças (2 a 5 anos)',
'Children (5-15 years)': 'Crianças (5 a 15 anos)',
'Children (< 2 years)': 'Crianças (< 2 anos)',
'Children in adult prisons': 'Crianças nas prisões para adultos',
'Children in boarding schools': 'Crianças em internatos',
'Children in homes for disabled children': 'Crianças em lares para crianças deficientes',
'Children in juvenile detention': 'Crianças em detenção juvenil',
'Children in orphanages': 'Crianças nos orfanatos',
'Children living on their own (without adults)': 'Crianças vivendo por conta própria (sem adultos)',
'Children not enrolled in new school': 'Crianças não matriculadas em Nova Escola',
'Children orphaned by the disaster': 'Crianças órfãs pela catástrofe',
'Children separated from their parents/caregivers': 'Crianças SEPARADAS de seus pais/responsáveis',
'Children that have been sent to safe places': 'Crianças que foram enviadas para locais seguros',
'Children who have disappeared since the disaster': 'Crianças que desapareceram desde o desastre',
'Chinese (Taiwan)': 'Chinês (Taiwan)',
'Cholera Treatment Capability': 'Capacidade de Tratamento da Cólera',
'Cholera Treatment Center': 'Centro de Tratamento de Cólera',
'Cholera Treatment': 'Tratamento da cólera',
'Cholera-Treatment-Center': 'Centro de tratamento de cólera',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Escolha uma nova alocação baseada na nova avaliação e julgamento do time. Condições severas que afetem o prédio inteiro são base para uma colocação INSEGURA. Grave localizada e no geral condições moderadas podem exigir um USO RESTRITO. Local INSPECCIONADO cartaz na entrada principal. Coloque todos os outros cartazes em cada entrada importante.',
'Christian': 'Cristão',
'Church': 'Igreja',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Circunstâncias do desaparecimento, outras vítimas/testemunhas quais viram pela última vez a pessoa desaparecida viva.',
'City': 'CIDADE',
'Civil Emergency': 'Emergência Civil',
'Cladding, glazing': 'Revestimentos, vidros',
'Click on the link %(url)s to reset your password': 'Clique no link %(url)s para Reconfigurar sua senha',
'Click on the link %(url)s to verify your email': 'Clique no link %(url)s para verificar seu e-mail',
'Clinical Laboratory': 'Laboratório clínico',
'Clinical Operations': 'operações clinicas',
'Clinical Status': 'estado clínico',
'Closed': 'fechado',
'Clothing': 'vestuário',
'Cluster Details': 'Detalhes do Grupo',
'Cluster Distance': 'Distância entre Grupos',
'Cluster Subsector Details': 'Detalhes do sub-setor do cluster',
'Cluster Subsector added': 'Subsector de Grupos incluído',
'Cluster Subsector deleted': 'Subsector de Grupos removido',
'Cluster Subsector updated': 'Sub-setores do cluster atualizado',
'Cluster Subsector': 'Subsector de Grupos',
'Cluster Subsectors': 'Sub-setores do cluster',
'Cluster Threshold': 'Limite do Cluster',
'Cluster added': 'adicionar agrupamento',
'Cluster deleted': 'Grupo removido',
'Cluster updated': 'Cluster atualizado',
'Cluster': 'agrupamento',
'Cluster(s)': 'Grupo(s)',
'Clusters': 'clusters',
'Code': 'Código',
'Cold Wave': 'onda fria',
'Collapse, partial collapse, off foundation': 'Reduzir, reduzir parciais, off foundation',
'Collective center': 'Centro coletivo',
'Color for Underline of Subheadings': 'Cor para Sublinhar de Subposições',
'Color of Buttons when hovering': 'Cor dos botões quando erguidos',
'Color of bottom of Buttons when not pressed': 'Cor da parte inferior dos botões quando não for pressionado',
'Color of bottom of Buttons when pressed': 'Cor da parte de baixo dos botões quando pressionados',
'Color of dropdown menus': 'Cor de menus DROP-',
'Color of selected Input fields': 'Cor dos campos de entrada selecionados',
'Color of selected menu items': 'cor dos ítens selecionados do menu',
'Column Choices (One Per Line': 'Coluna de opções (uma por linha)',
'Columns, pilasters, corbels': 'Colunas, pilastras , cavaletes',
'Combined Method': 'Método combinado',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Volte mais tarde. Todos que visitam este site esta, provavelmente, enfrentando o mesmo problema que você.',
'Come back later.': 'Volte mais tarde.',
'Comments': 'Comentários',
'Commercial/Offices': 'Comercial/Escritórios',
'Commit Date': 'Commit Data',
'Commit from %s': 'Consolidação de %s',
'Commit': 'Consolidar',
'Commiting a changed spreadsheet to the database': 'Consolidando uma planilha alterada no banco de dados',
'Commitment Added': 'Compromisso Incluído',
'Commitment Canceled': 'Compromisso cancelado',
'Commitment Details': 'Detalhes do compromisso',
'Commitment Item Details': 'Detalhes do item de compromisso',
'Commitment Item added': 'Item de compromisso incluído',
'Commitment Item deleted': 'Item do compromisso excluído',
'Commitment Item updated': 'Compromisso Item atualizado',
'Commitment Item': 'Item do compromisso',
'Commitment Items': 'Itens compromisso',
'Commitment Status': 'Empenhamento Status',
'Commitment Updated': 'Compromisso Atualizado',
'Commitment': 'Comprometimento',
'Commitments': 'Compromissos',
'Committed By': 'Cometido por',
'Committed': 'Comprometido',
'Committing Inventory': 'Confirmando Inventário',
'Communication problems': 'Problemas de Comunicação',
'Community Centre': 'Comunidade Centro',
'Community Health Center': 'Centro Comunitário de Saúde',
'Community Member': 'Membro da Comunidade',
'Competencies': 'Competências',
'Competency Details': 'Competência Detalhes',
'Competency Rating Catalog': 'Catálogo de Classificação de Competências',
'Competency Rating Details': 'Detalhes da classificação de competências',
'Competency Rating added': 'Classificação de Habilidades incluída',
'Competency Rating deleted': 'Classificação de competência excluída',
'Competency Rating updated': 'Atualização da classificação de competências',
'Competency Ratings': 'Classificação de competências',
'Competency added': 'Competência incluída',
'Competency deleted': 'Competência excluído',
'Competency updated': 'Competência atualizada',
'Competency': 'Competência',
'Complete': 'Concluir',
'Completed': 'Concluído',
'Complexion': 'Compleição',
'Compose': 'Redigir',
'Compromised': 'Comprometida',
'Concrete frame': 'Quadro concreto',
'Concrete shear wall': 'Muro de corteconcreto',
'Condition': 'Condição',
'Configurations': 'Configurações',
'Configure Run-time Settings': 'Configurar as configurações de tempo de execução',
'Confirm Shipment Received': 'Confirmar Remessa Recebida',
'Confirmed': 'Confirmado',
'Confirming Organization': 'Confirmando Organização',
'Conflict Details': 'Detalhes Do conflito',
'Conflict Resolution': 'Resolução de Conflito',
'Consignment Note': 'NOTA REMESSA',
'Constraints Only': 'Somente restrições',
'Consumable': 'Consumível',
'Contact Data': 'Dados contato',
'Contact Details': 'Detalhes do contato',
'Contact Info': 'Informações de Contato',
'Contact Information Added': 'Informação de contato incluída',
'Contact Information Deleted': 'Informação de contato excluída',
'Contact Information Updated': 'Informações de contato atualizadas',
'Contact Information': 'Informações de Contato',
'Contact Method': 'Método de Contato',
'Contact Name': 'Nome do contato',
'Contact Person': 'Pessoa de Contato',
'Contact Phone': 'Telefone para Contato',
'Contact details': 'Detalhes do contato',
'Contact information added': 'Informações de contato incluídas',
'Contact information deleted': 'Informações de contato excluídas',
'Contact information updated': 'Informações de contato atualizadas',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Pessoa(s) a contactar em caso de notícias ou mais perguntas (se for diferente da pessoa que reportou). Incluir número de telefone, endereço e correio electrónico se disponível.',
'Contact us': 'Fale Conosco',
'Contact': 'contato',
'Contacts': 'contatos',
'Contents': 'Conteúdo',
'Contributor': 'Contribuidor',
'Conversion Tool': 'Ferramenta de Conversão',
'Cooking NFIs': 'Cozinhando NFIs',
'Cooking Oil': 'Cozinhando Óleo',
'Coordinate Conversion': 'COORDENAR a Conversão',
'Coping Activities': 'Atividades de lida',
'Copy': 'copiar',
'Corn': 'Milho',
'Cost Type': 'Tipo de custo',
'Cost per Megabyte': 'Custo por megabyte',
'Cost per Minute': 'Custo por Minuto',
'Country of Residence': 'País de Residência',
'Country': 'País',
'County': 'Município',
'Course Catalog': 'Catálogo de Cursos',
'Course Certificate Details': 'Detalhes do Certificado do Curso',
'Course Certificate added': 'Certificado do Curso adicionado',
'Course Certificate deleted': 'Certificado do Curso excluído',
'Course Certificate updated': 'Certificado do Curso atualizado',
'Course Certificates': 'Certificados de Curso',
'Course Details': 'Detalhes do curso',
'Course added': 'Curso incluído',
'Course deleted': 'Curso excluído',
'Course updated': 'Curso atualizado',
'Course': 'Curso',
'Courses': 'Cursos',
'Create & manage Distribution groups to receive Alerts': 'Criar & gerenciar grupos de distribuição de receber alertas',
'Create Activity Report': 'Criar Relatório de atividade',
'Create Activity Type': 'Criar tipo de atividade',
'Create Activity': 'Criar Atividade',
'Create Assessment': 'Criar Avaliação',
'Create Asset': 'Criar Ativo',
'Create Bed Type': 'Criar Tipo De Cama',
'Create Brand': 'Criar Marca',
'Create Budget': 'Criar Orçamento',
'Create Catalog Item': 'Criar Item de Catálogo',
'Create Catalog': 'Criar Catálogo',
'Create Certificate': 'Criar certificado',
'Create Checklist': 'Criar Lista de Verificação',
'Create Cholera Treatment Capability Information': 'Criar Informação sobre capacidade para tratamento de cólera',
'Create Cluster Subsector': 'Criar Subsetor de Cluster',
'Create Cluster': 'Criar cluster',
'Create Competency Rating': 'Criar Classificação da Competência',
'Create Contact': 'Criar contato',
'Create Course': 'Criar curso',
'Create Dead Body Report': 'Criar Relatório de Cadáver',
'Create Event': 'Criar Evento',
'Create Facility': 'Criar Recurso',
'Create Feature Layer': 'Criar camada de recurso',
'Create Group Entry': 'Criar Grupo De Entrada',
'Create Group': 'Criar Grupo',
'Create Hospital': 'Criar Hospital',
'Create Identification Report': 'Criar Identificação Relatório',
'Create Impact Assessment': 'Criar Avaliação de Impacto',
'Create Incident Report': 'Criar relatório de incidente',
'Create Incident': 'Criar Incidente',
'Create Item Category': 'Criar categoria de item',
'Create Item Pack': 'Criar pacote de itens',
'Create Item': 'Criar novo item',
'Create Kit': 'Criar novo Kit',
'Create Layer': 'Criar Camada',
'Create Location': 'Criar Local',
'Create Map Profile': 'Criar Mapa de configuração',
'Create Marker': 'Criar Marcador',
'Create Member': 'Criar Membro',
'Create Mobile Impact Assessment': 'Criar Avaliação de Impacto Movel',
'Create Office': 'Criar Escritório',
'Create Organization': 'Criar Organização',
'Create Personal Effects': 'Criar efeitos pessoais',
'Create Project': 'Criar projeto',
'Create Projection': 'Criar Projeção',
'Create Rapid Assessment': 'Criar Avaliação Rápida',
'Create Report': 'Criar Relatório',
'Create Request': 'Criar Pedido',
'Create Resource': 'Criar Recurso',
'Create River': 'Criar Rio',
'Create Role': 'Criar Função',
'Create Room': 'Criar Sala',
'Create Scenario': 'Criar cenário',
'Create Sector': 'Criar Sector',
'Create Service Profile': 'Criar Perfil de Serviço',
'Create Shelter Service': 'Criar Serviço de Abrigo',
'Create Shelter Type': 'Criar Tipo de Abrigo',
'Create Shelter': 'Criar Abrigo',
'Create Skill Type': 'Criar Tipo de Habilidade',
'Create Skill': 'Criar Habilidade',
'Create Staff Member': 'Criar membro da equipe',
'Create Status': 'Criar Status',
'Create Task': 'Criar Tarefa',
'Create Theme': 'Criar Tema',
'Create User': 'Criar Usuário',
'Create Volunteer': 'Criar Voluntário',
'Create Warehouse': 'Criar Armazém',
'Create a Person': 'Criar uma pessoa',
'Create a group entry in the registry.': 'Criar uma entrada de grupo no registro.',
'Create, enter, and manage surveys.': 'Criar, digitar e gerenciar pesquisas.',
'Creation of Surveys': 'Criação de Pesquisas',
'Credential Details': 'Detalhes da Credencial',
'Credential added': 'Credencial incluída',
'Credential deleted': 'Credencial Excluída',
'Credential updated': 'Credencial ATUALIZADA',
'Credentialling Organization': 'Organização acreditada',
'Credentials': 'credenciais',
'Credit Card': 'Cartão de crédito',
'Crime': 'crime',
'Criteria': 'Critério',
'Currency': 'moeda',
'Current Entries': 'Entradas Atuais',
'Current Group Members': 'Membros do Grupo atual',
'Current Identities': 'Identidades atuais',
'Current Location': 'Posição Atual',
'Current Log Entries': 'Entradas de Log atuais',
'Current Memberships': 'Participações atuais',
'Current Notes': 'Notes atual',
'Current Records': 'Registros atuais',
'Current Registrations': 'Registros atuais',
'Current Status': 'Status atual',
'Current Team Members': 'Os atuais membros da equipe',
'Current Twitter account': 'Conta atual no Twitter',
'Current community priorities': 'Atuais prioridades da comunidade',
'Current general needs': 'Atuais necessidades gerais',
'Current greatest needs of vulnerable groups': 'Maiores necessidades atuais dos grupos vulneráveis',
'Current health problems': 'Problemas de saúde atuais',
'Current number of patients': 'Número atual de pacientes',
'Current problems, categories': 'Problemas atuais, categorias',
'Current problems, details': 'Problemas atuais, detalhes',
'Current request': 'Pedido atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'Currently no Certifications registered': 'Nenhuma certificação registrada atualmente',
'Currently no Competencies registered': 'Nenhuma competência registrada atualmente',
'Currently no Course Certificates registered': 'Nenhum Curso Certificado registrado atualmente',
'Currently no Credentials registered': 'Nenhuma credencial registrada atualmente',
'Currently no Missions registered': 'Nenhuma missão registrada atualmente',
'Currently no Skill Equivalences registered': 'Nenhuma equivelência de habilidade registrada atualmente',
'Currently no Trainings registered': 'Atualmente não há treinamentos registrados',
'Currently no entries in the catalog': 'Nenhuma entrada no catálogo atualmente',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Bnaco de Dados customizado de Recursos (por exemplo, nada definido como recurso no Sahana)',
'DNA Profile': 'Perfil de DNA',
'DNA Profiling': 'Perfil de DNA',
'DVI Navigator': 'Navegador DVI',
'Dam Overflow': 'Barragem ESTOURO',
'Damage': 'dano',
'Dangerous Person': 'Pessoa perigosa',
'Dashboard': 'Painel',
'Data uploaded': 'Dados carregados',
'Data': 'Dados',
'Database': 'DATABASE',
'Date Available': 'Data Disponível',
'Date Received': 'Data do recebimento',
'Date Requested': 'Data do pedido',
'Date Required': 'Necessária',
'Date Sent': 'Data de Envio',
'Date Until': 'Data Até',
'Date and Time': 'Data e Hora',
'Date and time this report relates to.': 'Data e hora relacionadas a este relatório.',
'Date of Birth': 'Data de Nascimento',
'Date of Latest Information on Beneficiaries Reached': 'Data da última informação sobre Beneficiários Alcançado',
'Date of Report': 'Data do relatório',
'Date': 'Data',
'Date/Time of Find': 'Pesquisa de data/hora',
'Date/Time of disappearance': 'Data/hora do desaparecimento',
'Date/Time when found': 'Data/hora quando foi encontrado',
'Date/Time when last seen': 'Data/ hora em que foi visto pela última vez',
'Date/Time': 'data/hora',
'De-duplicator': 'Anti duplicador',
'Dead Body Details': 'Detalhes do Cadáver',
'Dead Body Reports': 'Relatórios de Cadáver',
'Dead Body': 'Cadáver',
'Dead body report added': 'Relatório de cadaver incluso.',
'Dead body report deleted': 'Relatório de cadáver excluído.',
'Dead body report updated': 'Relatório de cadáver atualizado',
'Deaths in the past 24h': 'Mortes nas últimas 24 horas',
'Deaths/24hrs': 'Mortes/24hrs',
'Decimal Degrees': 'Graus decimais',
'Decision': 'DECISÃO',
'Decomposed': 'Decomposto',
'Default Height of the map window.': 'Altura Padrão da janela do mapa.',
'Default Map': 'Mapa padrão',
'Default Marker': 'Marcador Padrão',
'Default Width of the map window.': 'Padrão de largura da janela do mapa.',
'Default synchronization policy': 'Política de sincronização de padrão',
'Defecation area for animals': 'Área de defecação para animais',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Cenários De definir para alocação adequado de recursos (humanos, Ativos & instalações).',
'Defines the icon used for display of features on handheld GPS.': 'Define o ícone utilizado para exibição de recursos no GPS portátil.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Define o ícone utilizado para exibição de recursos no mapa interativo & exportações KML.',
'Defines the marker used for display & the attributes visible in the popup.': 'Define o marcador utilizado para exibir & os atributos visíveis no pop-up.',
'Degrees must be a number between -180 and 180': 'Os graus devem ser um número entre -180 e 180',
'Dehydration': 'Desidratação',
'Delete Alternative Item': 'EXCLUIR Item Alternativo',
'Delete Assessment Summary': 'Excluir Resumo da Avaliação',
'Delete Assessment': 'Excluir Avaliação',
'Delete Asset Assignment': 'Excluir o recurso designado',
'Delete Asset Log Entry': 'EXCLUIR recurso de entrada de Log',
'Delete Asset': 'Excluir Ativo',
'Delete Baseline Type': 'apagar tipo de linha base',
'Delete Baseline': 'apagar linha base',
'Delete Brand': 'apagar marca',
'Delete Budget': 'apagar orçamento',
'Delete Bundle': 'apagar pacote',
'Delete Catalog Item': 'apagar item do catálogo',
'Delete Catalog': 'Excluir o Catálogo',
'Delete Certificate': 'Excluir Certificado',
'Delete Certification': 'Excluir Certificação',
'Delete Cluster Subsector': 'EXCLUIR Cluster Subsector',
'Delete Cluster': 'Exclui Cluster',
'Delete Commitment Item': 'Excluir Item de Compromisso',
'Delete Commitment': 'Excluir Compromisso',
'Delete Competency Rating': 'Excluir Classificação da Competência',
'Delete Competency': 'Excluir Competência',
'Delete Contact Information': 'Excluir Informações de Contato',
'Delete Course Certificate': 'Excluir Certificado do Curso',
'Delete Course': 'Excluir Curso',
'Delete Credential': 'Excluir Credencial',
'Delete Document': 'Excluir documento',
'Delete Donor': 'Excluir Dador',
'Delete Entry': 'Excluir Entrada',
'Delete Event': 'Excluir Evento',
'Delete Feature Layer': 'Excluir Camada de Componentes',
'Delete Group': 'Excluir Grupo',
'Delete Hospital': 'Excluir Hospital',
'Delete Image': 'Excluir Imagem',
'Delete Impact Type': 'Excluir Tipo De Impacto',
'Delete Impact': 'Excluir Impacto',
'Delete Incident Report': 'Excluir Relatório de Incidentes',
'Delete Inventory Item': 'Excluir Item De Inventário',
'Delete Item Category': 'Excluir categoria de Itens',
'Delete Item Pack': 'Excluir Pacote de Itens',
'Delete Item': 'Excluir Item',
'Delete Job Role': 'Excluir Cargo',
'Delete Key': 'Tecla de exclusão',
'Delete Kit': 'Excluir Kit',
'Delete Layer': 'Excluir Camada',
'Delete Level 1 Assessment': 'Excluir Nível 1 Avaliação',
'Delete Level 2 Assessment': 'Excluir Nível 2 Avaliação',
'Delete Location': 'Excluir locação',
'Delete Map Profile': 'Excluir Mapa de configuração',
'Delete Marker': 'Excluir Marcador',
'Delete Membership': 'Excluir membro',
'Delete Message': 'Excluir mensagem',
'Delete Mission': 'Excluir Missão',
'Delete Need Type': 'Excluir tipos de necessidades',
'Delete Need': 'Excluir necessidades',
'Delete Office': 'Excluir escritório',
'Delete Organization': 'Excluir organização',
'Delete Peer': 'Excluir par',
'Delete Person': 'excluir pessoa',
'Delete Photo': 'Excluir Foto',
'Delete Population Statistic': 'Excluir População Estatística',
'Delete Position': 'Excluir Posição',
'Delete Project': 'Excluir Projeto',
'Delete Projection': 'Excluir Projeção',
'Delete Rapid Assessment': 'Excluir Avaliação Rápida',
'Delete Received Item': 'Excluir Item Recebido',
'Delete Received Shipment': 'Excluir Embarque Recebido',
'Delete Record': 'Excluir Registro',
'Delete Report': 'Excluir Relatório',
'Delete Request Item': 'Excluir item de solicitação',
'Delete Request': 'Excluir Solicitação',
'Delete Resource': 'Excluir Recurso',
'Delete Room': 'Excluir Sala',
'Delete Scenario': 'Excluir Cenário',
'Delete Section': 'Excluir seção',
'Delete Sector': 'Excluir Setor',
'Delete Sent Item': 'Excluir Item Enviado',
'Delete Sent Shipment': 'Excluir Embarque Enviado',
'Delete Service Profile': 'Excluir perfil de serviço',
'Delete Setting': 'Excluir Definição',
'Delete Skill Equivalence': 'Excluir equivalência de habilidade',
'Delete Skill Provision': 'Excluir Provisão de Habilidade',
'Delete Skill Type': 'Excluir Tipo de Habilidade',
'Delete Skill': 'Excluir habilidade',
'Delete Staff Type': 'Excluir Tipo De Equipe',
'Delete Status': 'Excluir Posição/Estado',
'Delete Subscription': 'Excluir assinatura',
'Delete Subsector': 'Excluir subsetor',
'Delete Survey Answer': 'Excluir reposta da pesquisa',
'Delete Survey Question': 'Excluir pergunta da pesquisa',
'Delete Survey Section': 'Excluir seção da pesquisa',
'Delete Survey Series': 'Excluir série da pesquisa',
'Delete Survey Template': 'Excluir modelo da pesquisa',
'Delete Training': 'Excluir Treinamento',
'Delete Unit': 'Excluir Unidade',
'Delete User': 'Excluir usuário',
'Delete Volunteer': 'Excluir Voluntário',
'Delete Warehouse': 'Excluir Armazém',
'Delete from Server?': 'Excluir do Servidor?',
'Delete': 'Excluir',
'Delphi Decision Maker': 'tomador de decisão Delphi',
'Demographic': 'Demográfico',
'Demonstrations': 'Demonstrações',
'Dental Examination': 'Exame Dentário',
'Dental Profile': 'Perfil Dentário',
'Describe the condition of the roads to your hospital.': 'Descreva as condições da estrada até o seu hospital.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Descreva o procedimento ao qual este registro está relacionado (Ex: "exame médico")',
'Description of Contacts': 'Descrição dos Contatos',
'Description of defecation area': 'Descrição da área de defecação',
'Description of drinking water source': 'Descrição da fonte de água potável',
'Description of sanitary water source': 'Descrição da fonte de água sanitária',
'Description of water source before the disaster': 'Descrição da fonte de água antes do desastre',
'Description': 'Descrição',
'Descriptive Text (e.g., Prose, etc)': 'Texto Descritivo (por exemplo, Prosa, etc.)',
'Desire to remain with family': 'O desejo de permanecer com a família',
'Destination': 'destino',
'Destroyed': 'Destruído',
'Details field is required!': 'Campo de detalhes é obrigatório!',
'Details': 'detalhes',
'Dialysis': 'Diálise',
'Diaphragms, horizontal bracing': 'Diafragmas, interditará horizontal',
'Diarrhea': 'Diarréia',
'Dignitary Visit': 'Visita de Dignatários',
'Direction': 'Endereço',
'Disable': 'Desativar',
'Disabled participating in coping activities': 'Deficiente participando de enfrentamento',
'Disabled': 'desativado',
'Disabled?': 'Desativado?',
'Disaster Victim Identification': 'Identificação de Vítima de Desastre',
'Disaster Victim Registry': 'Registro de Vítima de Desastre',
'Disaster clean-up/repairs': 'Desastre limpeza/reparos',
'Discharge (cusecs)': 'Vazão (cusecs)',
'Discharges/24hrs': 'Descargas/24horas',
'Discussion Forum on item': 'Fórum de discussão do item',
'Discussion Forum': 'Fórum de Discussão',
'Disease vectors': 'Vectores doença',
'Dispensary': 'Dispensário',
'Displaced Populations': 'Populações deslocadas',
'Displaced': 'Deslocadas',
'Display Polygons?': 'exibir Polígonos?',
'Display Routes?': 'Exibir Rotas?',
'Display Tracks?': 'exibir Trilhas?',
'Display Waypoints?': 'Exibir Rota?',
'Distance between defecation area and water source': 'Distância entre área de esgoto e fonte de água',
'Distance from %s:': 'Distância de %s:',
'Distance(Kms)': 'Distância(Kms)',
'Distribution groups': 'Grupos de distribuição',
'Distribution': 'Distribuição de',
'District': 'Distrito',
'Do you really want to delete these records?': 'Você realmente deseja excluir esses registros?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Você deseja cancelar este carregamento que foi recebido? Os itens serão removidos do inventário. Esta ação não pode ser desfeita!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Você deseja cancelar esse carregamento enviado? Os itens serão retornados para o inventário. Esta ação não pode ser desfeita!',
'Do you want to receive this shipment?': 'Você deseja receber esse carregamento?',
'Do you want to send these Committed items?': 'Você deseja enviar esses itens Consolidados?',
'Do you want to send this shipment?': 'Você deseja enviar este carregamento?',
'Document Details': 'Detalhes do Documento',
'Document Scan': 'Scanear Documento',
'Document added': 'Documento incluído',
'Document deleted': 'Documento excluído',
'Document updated': 'Documento Atualizado',
'Document': 'documento',
'Documents and Photos': 'Documentos e Fotos',
'Documents': 'Documentos',
'Does this facility provide a cholera treatment center?': 'Esta facilidade proporciona um centro de tratamento da cólera?',
'Doing nothing (no structured activity)': 'Fazendo nada (sem atividade estruturada)',
'Dollars': 'dólares',
'Domain': 'domínio',
'Domestic chores': 'Afazeres domésticos',
'Donated': 'Doado',
'Donation Certificate': 'Certificado de doaçao',
'Donation Phone #': 'Número de Telefone de doaçao',
'Donor Details': 'Doador Detalhes',
'Donor added': 'Doador incluído',
'Donor deleted': 'Doador excluído',
'Donor updated': 'Doador ATUALIZADO',
'Donor': 'Dador',
'Donors Report': 'Relatório de Doadores',
'Donors': 'Doadores',
'Door frame': 'Quadro de porta',
'Download PDF': 'Fazer download do PDF',
'Draft': 'rascunho',
'Drainage': 'Drenagem',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Elaborar um orçamento para Equipe & Equipamento de vários locais.',
'Drill Down by Group': 'Detalhar por grupo',
'Drill Down by Incident': 'Detalhar por incidente',
'Drill Down by Shelter': 'Detalhar por abrigo',
'Driving License': 'Carteira de Motorista',
'Drought': 'Seca',
'Drugs': 'Drogas',
'Dug Well': 'Poço Escavado',
'Duplicate?': 'Duplicado?',
'Duration': 'Duração',
'Dust Storm': 'Tempestade de Poeira',
'Dwelling': 'Habitação',
'Dwellings': 'Habitações',
'EMS Reason': 'Razão EMS',
'ER Status Reason': 'Razão ER Status',
'Early Recovery': 'Início De Recuperação',
'Earthquake': 'Terremotos',
'Edit Activity': 'Editar Atividade',
'Edit Address': 'Editar Endereço',
'Edit Alternative Item': 'Editar Item Alternativo',
'Edit Application': 'Editar Aplicação',
'Edit Assessment Summary': 'Editar resumo da avaliação',
'Edit Assessment': 'Editar avaliação',
'Edit Asset Assignment': 'Editar designação do recurso',
'Edit Asset Log Entry': 'EDITAR ENTRADA DE Log de ATIVOs',
'Edit Asset': 'Editar recurso',
'Edit Baseline Type': 'Editar tipo de base de avaliação',
'Edit Baseline': 'Editar base de avaliação',
'Edit Brand': 'Editar marca',
'Edit Budget': 'Editar orçamento',
'Edit Bundle': 'Editar Pacote',
'Edit Camp Service': 'EDITAR Serviço de acampamento',
'Edit Camp Type': 'Editar Tipo de Campo',
'Edit Camp': 'EDITAR acampamento',
'Edit Catalog Item': 'Editar item do catálogo',
'Edit Catalog': 'Editar catálogo',
'Edit Certificate': 'Editar Certificado',
'Edit Certification': 'Editar Certificação',
'Edit Cluster Subsector': 'Editar subgrupo',
'Edit Cluster': 'Editar grupo',
'Edit Commitment Item': 'Editar Item De Compromisso',
'Edit Commitment': 'Editar compromisso',
'Edit Competency Rating': 'Editar Classificação da Competência',
'Edit Competency': 'Editar Competência',
'Edit Contact Information': 'Editar Informações de Contato',
'Edit Contact': 'Editar Contato',
'Edit Contents': 'Editar Conteúdo',
'Edit Course Certificate': 'Editar Certificado de Curso',
'Edit Course': 'Editar Curso',
'Edit Credential': 'Editar Credencial',
'Edit Dead Body Details': 'Editar Detalhes do Cadáver',
'Edit Description': 'Editar Descrição',
'Edit Details': 'Editar detalhes',
'Edit Disaster Victims': 'Editar vítimas do desastre',
'Edit Document': 'Editar documento',
'Edit Donor': 'Editar Doador',
'Edit Email Settings': 'Editar As Configurações De E-Mail',
'Edit Entry': 'Editar Entrada',
'Edit Event': 'Editar evento',
'Edit Facility': 'Editar recurso',
'Edit Feature Layer': 'Editar Recurso Camada',
'Edit Flood Report': 'Editar Relatório de Enchente',
'Edit Gateway Settings': 'Editar Configurações de Gateway',
'Edit Group': 'Grupo de edição',
'Edit Hospital': 'Editar Hospital',
'Edit Human Resource': 'Editar Recursos Humanos',
'Edit Identification Report': 'Editar Relatório de identificação',
'Edit Identity': 'Editar Identidade',
'Edit Image Details': 'Editar Detalhes da Imagem',
'Edit Image': 'Editar Imagem',
'Edit Impact Type': 'Editar Tipo De Impacto',
'Edit Impact': 'Editar Impacto',
'Edit Incident Report': 'Editar Relatório de Incidente',
'Edit Inventory Item': 'Editar Item De Inventário',
'Edit Item Category': 'Editar Item de categoria',
'Edit Item Pack': 'Editar Pacote de Itens',
'Edit Item': 'Editar Item',
'Edit Job Role': 'Editar cargo',
'Edit Key': 'Editar Tecla',
'Edit Kit': 'Editar Kit',
'Edit Layer': 'Editar Camada',
'Edit Level %d Locations?': 'Editar Locais de Nível %d?',
'Edit Level 1 Assessment': 'Editar Avaliação Nível 1',
'Edit Level 2 Assessment': 'Editar nível 2 de acesso',
'Edit Location': 'Local de edição',
'Edit Log Entry': 'EDITAR ENTRADA DE Log',
'Edit Map Profile': 'Editar Mapa de configuração',
'Edit Map Services': 'Editar mapa de serviços',
'Edit Marker': 'Marcador de Edição',
'Edit Membership': 'Editar inscrição',
'Edit Message': 'Editar mensagem',
'Edit Messaging Settings': 'Editar Configurações De Mensagens',
'Edit Mission': 'Editar Missão',
'Edit Modem Settings': 'Editar Configurações Do Modem',
'Edit Need Type': 'Editar tipo de necessidade',
'Edit Need': 'Ediçao Necessária',
'Edit Note': 'Editar nota',
'Edit Office': 'Escritório de edição',
'Edit Options': 'Opções de edição',
'Edit Organization': 'Organizar edições',
'Edit Parameters': 'Parametros de edição',
'Edit Peer Details': 'Detalhes do par editado',
'Edit Person Details': 'Editar detalhes pessoais',
'Edit Personal Effects Details': 'Editar detalhes de objectos pessoais',
'Edit Photo': 'Editar Foto',
'Edit Population Statistic': 'Editar Estatística da População',
'Edit Position': 'Editar Posição',
'Edit Problem': 'Editar Problema',
'Edit Project': 'Editar Projecto',
'Edit Projection': 'Editar Projeção',
'Edit Rapid Assessment': 'Editar Rápida Avaliação',
'Edit Received Item': 'Editar Item Recebido',
'Edit Received Shipment': 'Editar Embarque Recebido',
'Edit Record': 'Editar Registro',
'Edit Registration Details': 'Editar Detalhes De Registro',
'Edit Registration': 'Editar Registro',
'Edit Report': 'Editar Relatório',
'Edit Request Item': 'Editar Item Pedido',
'Edit Request': 'Editar Pedido',
'Edit Resource': 'Editar Recurso',
'Edit River': 'EDITAR RIO',
'Edit Role': 'Editar Função',
'Edit Room': 'Editar Sala',
'Edit Scenario': 'Editar cenário',
'Edit Sector': 'Editar Setor',
'Edit Sent Item': 'Editar Item Enviado',
'Edit Setting': 'Editar Definição',
'Edit Settings': 'Editar Configurações',
'Edit Shelter Service': 'Editar Serviço de Abrigo',
'Edit Shelter Type': 'EDITAR Tipo De Abrigo',
'Edit Shelter': 'EDITAR ABRIGO',
'Edit Skill Equivalence': 'Editar Equivalência de Habilidade',
'Edit Skill Provision': 'Editar Habilidade de Fornecimento',
'Edit Skill Type': 'editar tipo de competência',
'Edit Skill': 'editar competência',
'Edit Solution': 'editar solução',
'Edit Staff Member Details': 'Editar detalhes do membro da equipe',
'Edit Staff Type': 'EDITAR Tipo De Equipe',
'Edit Staff': 'editar pessoal',
'Edit Subscription': 'Editar assinatura',
'Edit Subsector': 'EDITAR Subsector',
'Edit Survey Answer': 'Editar resposta da pesquisa',
'Edit Survey Question': 'Editar pergunta da pesquisa',
'Edit Survey Section': 'EDITAR Seção de Pesquisa',
'Edit Survey Series': 'EDITAR Pesquisa de Série',
'Edit Survey Template': 'EDITAR MODELO DE PESQUISA',
'Edit Task': 'Editar Tarefa',
'Edit Team': 'Editar equipe',
'Edit Theme': 'Editar tema',
'Edit Themes': 'EDITAR TEMAs',
'Edit Ticket': 'EDITAR Bilhete',
'Edit Track': 'EDITAR RASTREAMENTO',
'Edit Training': 'Editar Treinamento',
'Edit Tropo Settings': 'Editar Configurações Tropo',
'Edit User': 'Editar Usuário',
'Edit Volunteer Availability': 'Editar Disponibilidade de Voluntário',
'Edit Volunteer Details': 'Editar Detalhes de Voluntário',
'Edit Warehouse': 'Editar Armazém',
'Edit current record': 'Editar Registro Atual',
'Edit message': 'Editar mensagem',
'Edit the Application': 'Editar a Aplicação',
'Edit': 'Editar',
'Editable?': 'Editável?',
'Education materials received': 'Materiais de educação recebido',
'Education materials, source': 'materiais de Educação, origem',
'Education': 'Educação',
'Effects Inventory': 'Inventário de efeitos',
'Eggs': 'Ovos',
'Either a shelter or a location must be specified': 'Um abrigo ou um local deve ser especificado',
'Either file upload or document URL required.': 'Um arquivo de upload ou URL do documento são necessários.',
'Either file upload or image URL required.': 'Um arquivo de upload ou URL de imagem são necessárias.',
'Elderly person headed households (>60 yrs)': 'Chefes de Familia de idade avançada (>60 anos)',
'Electrical': 'Elétrico',
'Electrical, gas, sewerage, water, hazmats': 'Elétrica, gás, esgotos, água, hazmats',
'Elevated': 'Elevado',
'Elevators': 'Elevadores',
'Email Address': 'Endereço de e-mail',
'Email Settings': 'Configurações de e-mail',
'Email settings updated': 'As configurações de e-mail atualizado',
'Email': 'E-mail',
'Embalming': 'Embalsamento',
'Embassy': 'Embaixada',
'Emergency Capacity Building project': 'Plano de emergência de capacidade dos prédios',
'Emergency Department': 'Departamento de Emergência',
'Emergency Shelter': 'Abrigo de Emergência',
'Emergency Support Facility': 'Recurso De Suporte de emergência',
'Emergency Support Service': 'Suporte do Serviço de Emergência',
'Emergency Telecommunications': 'Emergência De Telecomunicações',
'Enable/Disable Layers': 'Ativar/Desativar Camadas',
'Enabled': 'Habilitado',
'End Date': 'Data de encerramento',
'End date should be after start date': 'Data Final deve ser maior do que a data de início',
'End date': 'Data de Término',
'End of Period': 'Fim de Período',
'English': 'Inglês',
'Enter Coordinates:': 'Entre as coordenadas:',
'Enter a GPS Coord': 'Digite uma Coordada GPS',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Digite um nome para a planilha que está fazendo Upload (obrigatório).',
'Enter a new support request.': 'Digite um pedido novo de suporte.',
'Enter a unique label!': 'Digite um rótulo exclusivo!',
'Enter a valid date before': 'Digite uma data válida antes de',
'Enter a valid email': 'Insira um email válido',
'Enter a valid future date': 'Digite uma data futura válida',
'Enter some characters to bring up a list of possible matches': 'Digite alguns caracteres para trazer uma lista de correspondências possíveis',
'Enter some characters to bring up a list of possible matches.': 'Digite alguns caracteres para trazer uma lista de correspondências possíveis.',
'Enter tags separated by commas.': 'Insira as tags separadas por vírgulas.',
'Enter the same password as above': 'Digite a mesma senha acima',
'Entered': 'Inserido',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Digitar um número de telefone é opcional, mas ao fazer isto permite a voçe se registrar para receber mensagens SMS.',
'Entry deleted': 'Entrada removida',
'Environment': 'Ambiente do',
'Equipment': 'Equipamento',
'Error encountered while applying the theme.': 'Erro encontrado ao aplicar o tema.',
'Error in message': 'Erro na mensagem',
"Error logs for '%(app)s'": 'Registro de erros de "%(app)s"',
'Error: no such record': 'Erro: nenhum registro',
'Errors': 'Erros',
'Est. Delivery Date': 'Est. Data de entrega',
'Estimated # of households who are affected by the emergency': '# estimado das famílias que são afetados pela emergência',
'Estimated # of people who are affected by the emergency': '# estimado de pessoas que são afetados pela emergência',
'Estimated Overall Building Damage': 'Dano total de construção estimado',
'Estimated total number of people in institutions': 'Número total estimado de pessoas em instituições',
'Evacuating': 'abandono',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Valide as informações desta mensagem. (Este valor NÃO deve ser utilizado em aplicações de aviso público.)',
'Event Details': 'Detalhes do evento',
'Event added': 'Evento incluído',
'Event deleted': 'Evento excluído',
'Event updated': 'Evento atualizado',
'Event': 'Evento',
'Events': 'eventos',
'Example': 'Exemplo:',
'Exceeded': 'Excedido',
'Excellent': 'Excelente',
'Exclude contents': 'Excluir conteúdo',
'Excreta disposal': 'Eliminação de dejetos',
'Execute a pre-planned activity identified in <instruction>': 'Executar uma atividade pré-planejada identificada em <instruction>',
'Exercise': 'Excercício',
'Exercise?': 'Exercício ?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': "Exercícios significa que todas as telas têm uma marca d'água & todas as comunicações têm um prefixo.",
'Existing Placard Type': 'Cartaz existente Tipo',
'Existing food stocks': 'Estoques de alimentos existente',
'Existing location cannot be converted into a group.': 'Local Existente não pode ser convertido em um grupo.',
'Exits': 'Saídas',
'Experience': 'Experiência',
'Expiry Date': 'Data de expiração',
'Explosive Hazard': 'Perigo explosivo',
'Export Data': 'Exportar dados.',
'Export Database as CSV': 'Exportar o banco de dados como CSV',
'Export in GPX format': 'Exportar no formato GPX',
'Export in KML format': 'Exportar no formato KML',
'Export in OSM format': 'Exportar no formato OSM',
'Export in PDF format': 'Exportar no formato PDF',
'Export in RSS format': 'Exportar no formato RSS',
'Export in XLS format': 'Exportar no formato XLS',
'Export': 'Exportar',
'Exterior Only': 'Exterior Apenas',
'Exterior and Interior': 'Exterior e Interior',
'Eye Color': 'Cor dos Olhos',
'Facial hair, color': 'Cabelo Facial, cor',
'Facial hair, type': 'Pelos faciais, tipo',
'Facial hear, length': 'Pelos faciais, comprimento',
'Facilities': 'Instalações',
'Facility Details': 'Detalhes da Instalação',
'Facility Operations': 'Facilidades nas Operações',
'Facility Status': 'Status da Instalação',
'Facility Type': 'Tipo de Instalação',
'Facility added': 'Instalação incluída',
'Facility or Location': 'Instalação ou Local',
'Facility removed': 'Recurso removido',
'Facility updated': 'Recurso atualizado',
'Facility': 'Instalação',
'Fail': 'Falha',
'Failed!': 'Falha!',
'Fair': 'Razoável',
'Falling Object Hazard': 'Queda Objeto Risco',
'Families/HH': 'Famílias/HH',
'Family tarpaulins received': 'lonas de familia recebidas',
'Family tarpaulins, source': 'lonas de familia, fuente',
'Family': 'Familia',
'Family/friends': 'Família/amigos',
'Farmland/fishing material assistance, Rank': 'TERRAS/assistência de material de Pesca, posição',
'Fatalities': 'Fatalidades',
'Fax': 'Número do Fax',
'Feature Layer Details': 'Recurso Camada Detalhes',
'Feature Layer added': 'Recurso Camada incluída',
'Feature Layer deleted': 'Recurso Camada excluído',
'Feature Layer updated': 'Recurso Camada atualizada',
'Feature Layers': 'Camadas recurso',
'Feature Namespace': 'Espaço De recurso',
'Feature Request': 'Pedido de Componente',
'Feature Type': 'Tipo de Componente',
'Features Include': 'Componentes Incluidos',
'Female headed households': 'Famílias chefiadas por mulheres',
'Female': 'Sexo Feminino',
'Few': 'Poucos',
'Field Hospital': 'Hospital de Campanha',
'Field': 'Campo',
'File': 'arquivo',
'Fill in Latitude': 'Preencher na Latitude',
'Fill in Longitude': 'Preencher na Longitude',
'Filter Field': 'Filtro de Campo',
'Filter Value': 'Filtro de Valor',
'Filter': 'Filtro',
'Find All Matches': 'Localizar todos os equivalentes',
'Find Dead Body Report': 'Localizar Relatório de Cadáver',
'Find Hospital': 'Localizar Hospital',
'Find Person Record': 'Localizar registro de pessoa',
'Find Volunteers': 'Localizar Voluntários',
'Find a Person Record': 'Localizar um Registro de Pessoa',
'Find': 'Localizar',
'Finder': 'Localizador',
'Fingerprint': 'Impressão digital',
'Fingerprinting': 'Impressões digitais',
'Fingerprints': 'Impressões Digitais',
'Finish': 'Terminar',
'Finished Jobs': 'Tarefa Terminada',
'Fire suppression and rescue': 'Supressão e salvamento de incêndio',
'Fire': 'Fogo',
'First Name': 'Primeiro Nome',
'First name': 'Primeiro Nome',
'Fishing': 'Pesca',
'Flash Flood': 'Enchente',
'Flash Freeze': 'congelar o momento',
'Flexible Impact Assessments': 'Flexibilidade no Impacto de avaliações',
'Flood Alerts show water levels in various parts of the country': 'Os alertas de inundação mostram o nível da água em várias partes do país',
'Flood Alerts': 'Alertas de Enchente',
'Flood Report Details': 'Detalhes do Relatório de Inundação',
'Flood Report added': 'Relatório de Inundação incluído',
'Flood Report deleted': 'Relatório de Inundação removido',
'Flood Report updated': 'Relatório de Inundação actualizado',
'Flood Report': 'Relatório de Inundação',
'Flood Reports': 'Relatórios de Inundação',
'Flood': 'Enchente',
'Flow Status': 'posição de fluxo',
'Focal Point': 'Ponto Central',
'Fog': 'Nevoeiro',
'Food Supply': 'Alimentação',
'Food assistance': 'Ajuda alimentar',
'Footer file %s missing!': 'Arquivo de rodapé %s ausente!',
'Footer': 'Rodapé',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Para um país este seria o código ISO2, para uma cidade, este seria o codigo do aeroporto (UNE/Locode).',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Para cada parceiro de sincronização, há uma tarefa de sincronização padrão que é executada após um intervalo de tempo especificado. Você também pode configurar mais tarefas de sincronização que podem ser customizadas de acordo com as suas necessidades. Clique no link à direita para começar.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Para segurança reforçada, é recomendável digitar um nome de usuário e senha, e notificar os administradores de outras máquinas em sua organização para incluir esse usuário e senha no UUID em Sincronização -> Parceiros De Sincronização',
'For live help from the Sahana community on using this application, go to': 'Para ajuda ao vivo da comunidade do Sahana sobre como utilizar esse aplicativo, vá para',
'For messages that support alert network internal functions': 'Para mensagens que suportam funções internas de alertas de rede',
'For more details on the Sahana Eden system, see the': 'Para obter mais detalhes sobre o sistema Sahana Eden, consulte o',
'For more information, see': 'Para obter mais informações, consulte o',
'For other types, the next screen will allow you to enter the relevant details...': 'Para outros tipos, a próxima tela permitirá que você digite os detalhes relevantes.',
'For': 'Por',
'Forest Fire': 'Incêndios florestais',
'Formal camp': 'Acampamento formal',
'Format': 'Formato',
'Forms': 'formulários',
'Found': 'localizado',
'Foundations': 'Fundações',
'Freezing Drizzle': 'Garoa gelada',
'Freezing Rain': 'Chuva Gelada',
'Freezing Spray': 'Spray Gelado',
'French': 'Francês',
'Friday': 'sexta-feira',
'From Inventory': 'A partir do Inventário',
'From Location': 'Do Local',
'From Organization': 'Da Organização',
'From Person': 'Da Pessoa',
'From': 'De',
'Frost': 'Geada',
'Fulfil. Status': 'Encher. Status',
'Fulfillment Status': 'Status de preenchimento',
'Full beard': 'Barba completa',
'Full': 'Cheio',
'Fullscreen Map': 'Mapa em tela cheia',
'Functions available': 'Funções disponíveis',
'Funding Organization': 'Financiar a Organização',
'Further Action Recommended': 'Mais Acção Recomendada',
'GIS Reports of Shelter': 'Relatórios GIS de abrigos',
'GIS integration to view location details of the Shelter': 'Integration GIS para visualizar detalhes do local do Abrigo',
'GPS Marker': 'Marcador De GPS',
'GPS Track File': 'Rastrear Arquivo GPS',
'GPS Track': 'Rastrear por GPS',
'GPX Track': 'GPX RASTREAR',
'GRN Status': 'Status GRN',
'GRN': 'NRG',
'Gale Wind': 'Temporal',
'Gap Analysis Map': 'Mapa de Análise de Falhas',
'Gap Analysis Report': 'Relatório de Análise de Falhas',
'Gap Analysis': 'Análise de Falhas',
'Gap Map': 'Mapa de Falhas',
'Gap Report': 'Relatório de Falhas',
'Gateway Settings': 'Configurações de Gateway',
'Gateway settings updated': 'Configurações de Gateway atualizadas',
'Gateway': 'Gateway',
'Gender': 'Sexo',
'General Comment': 'Comentário Geral',
'General Medical/Surgical': 'Médico/Cirúrgico Geral',
'General emergency and public safety': 'Geral de emergência e segurança pública',
'General information on demographics': 'Informações gerais sobre demografia',
'General': 'geral',
'Generator': 'Gerador',
'Geocode': 'Geocodificar',
'Geocoder Selection': 'Seleção De geocodificador',
'Geometry Name': 'Nome da geometria',
'Geophysical (inc. landslide)': 'Geofísica (inc. deslizamento)',
'Geotechnical Hazards': 'RISCOS geotécnicos',
'Geotechnical': 'Geotécnica',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo não disponíveis no módulo a execução Python- é necessário instalar para saída PDF!',
'Geraldo not installed': 'Geraldo não instalado',
'Get incoming recovery requests as RSS feed': 'Obter pedidos recebidos de recuperação como feed RSS',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Fornecer uma descrição breve da imagem, por exemplo, o que pode ser visto no local da imagem (opcional).',
'Give information about where and when you have seen them': 'Fornecer informações sobre onde e quando você os viu',
'Global Messaging Settings': 'Configurações Globais de Menssagem',
'Go to Request': 'Ir para Pedido',
'Go': 'ir',
'Goatee': 'Barbicha',
'Good Condition': 'Boa Condição',
'Good': 'Válido',
'Goods Received Note': 'Nota de Recebimento de Mercadorias',
'Government UID': 'GOVERNO UID',
'Government building': 'Prédios Públicos',
'Government': 'Governamental',
'Grade': 'Grau',
'Greek': 'grego',
'Green': 'verde',
'Ground movement, fissures': 'Movimento do solo terrestre, fissuras',
'Ground movement, settlement, slips': 'Movimento do solo terrestre, assentamentos, escorregões',
'Group Description': 'Descrição do Grupo',
'Group Details': 'Detalhes do grupo',
'Group Member added': 'Membro do grupo incluído',
'Group Members': 'membros do grupo',
'Group Memberships': 'Associados do Grupo',
'Group Name': 'Nome do grupo',
'Group Title': 'Título do grupo',
'Group Type': 'Tipo de grupo',
'Group added': 'Grupo adicionado',
'Group deleted': 'Grupo Excluído',
'Group description': 'Descrição do Grupo',
'Group updated': 'GRUPO ATUALIZADO',
'Group': 'Grupo',
'Groups removed': 'Grupos Removido',
'Groups': 'Grupos do',
'Guest': 'Convidado',
'HR Data': 'Dados de RH',
'HR Manager': 'Responsável de RH',
'Hail': 'granizo',
'Hair Color': 'Cor do Cabelo',
'Hair Length': 'Comprimento do cabelo',
'Hair Style': 'Estilo do Cabelo',
'Has additional rights to modify records relating to this Organization or Site.': 'Tem direitos adicionais para modificar os registros relativos a esta organização ou site.',
'Has data from this Reference Document been entered into Sahana?': 'Os dados deste documento de referência foi digitado no Sahana?',
'Has only read-only access to records relating to this Organization or Site.': 'Tem apenas acesso de leitura para os registros relativos a esta organização ou site.',
'Has the Certificate for receipt of the shipment been given to the sender?': 'O certificado de recepção do carregamento foi dado para o remetente?',
'Has the GRN (Goods Received Note) been completed?': 'O GRN (nota de mercadorias recebidas) foi concluído?',
'Hazard Pay': 'Pagar Risco',
'Hazardous Material': 'Material perigoso',
'Hazardous Road Conditions': 'Estradas em Condições de Risco',
'Header Background': 'Fundo do Cabeçalho',
'Header background file %s missing!': 'Arquivo de Cabeçalho de Base %s ausente!',
'Headquarters': 'Matriz',
'Health care assistance, Rank': 'Assistência Saúde, Classificação',
'Health center with beds': 'Centro de saúde com camas',
'Health center without beds': 'Centro de saúde sem camas',
'Health center': 'Centro de Saúde',
'Health services status': 'Situação dos serviços de saúde',
'Health': 'Saúde',
'Healthcare Worker': 'Profissional de Saúde',
'Heat Wave': 'Onda de calor',
'Heat and Humidity': 'Calor e Umidade',
'Height (cm)': 'Altura (cm)',
'Height (m)': 'Altura (m)',
'Height': 'Altura',
'Help': 'Ajuda',
'Helps to monitor status of hospitals': 'Ajuda para monitorar status de hospitais',
'Helps to report and search for Missing Persons': 'Ajuda a reportar e procurar pessoas desaparecidas.',
'Helps to report and search for missing persons': 'Ajuda a reportar e procurar pessoas desaparecidas.',
'Here are the solution items related to the problem.': 'Aqui estão as soluções relacionadas ao problema.',
'Heritage Listed': 'Património Listado',
'Hierarchy Level %d Name': 'Hierarquia Nível %d Nome',
'Hierarchy Level 0 Name (e.g. Country)': 'Hierarquia Nível 0 Nome (por exemplo, País)',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierarquia Nível 0 nome (por exemplo País)',
'Hierarchy Level 1 Name (e.g. Province)': 'Hierarquia Nível 1 Nome (por exemplo, Província)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarquia Nível 1 nome (por exemplo, Estado ou Província)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarquia de Nível 2 Nome (por exemplo, Região ou Município)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarquia Nível 3 Nome (por exemplo, Cidade / Municipio / Vila)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarquia de Nível 4 Nome (por exemplo, Bairro)',
'Hierarchy Level 5 Name': 'Nome de Nível 5 na Hierarquia',
'High Water': "d'água alta",
'High': 'Alta',
'History': 'História',
'Hit the back button on your browser to try again.': 'Clique no ícone de voltar em seu navegador para tentar novamente.',
'Holiday Address': 'Endereço durante Feriado',
'Home Address': 'Endereço Residencial',
'Home Country': 'País natal',
'Home Crime': 'Crime Doméstico',
'Home': 'Residência',
'Hospital Details': 'Detalhes do Hospital',
'Hospital Status Report': 'Relatório de Status do Hospital',
'Hospital information added': 'Informações do hospital inclusas.',
'Hospital information deleted': 'Informações do hospital excluídas',
'Hospital information updated': 'informações do Hospital atualizadas',
'Hospital status assessment.': 'Avaliação de status do Hospital.',
'Hospitals': 'Hospitais',
'Hot Spot': 'ponto de acesso',
'Hour': 'Hora',
'Hours': 'Horas',
'Household kits received': 'Kits caseiros recebidos',
'Household kits, source': 'Kit de família, origem',
'How does it work?': 'Como funciona?',
'How is this person affected by the disaster? (Select all that apply)': 'Como esta pessoa é afetada pelo desastre? (selecione todos que se aplicam)',
'How long will the food last?': 'Quanto tempo irá durar a comida?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Quantos rapazes (0-17 anos) estão Mortos devido à crise',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Quantos rapazes (0-17 anos) estão Feridos devido à crise',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Quantos rapazes (0-17 anos) estão Desaparecidos devido à crise',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Quantas garotas (0-17 anos) morreram devido à crise',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Quantas garotas (0-17 anos) estão feridas devido à crise',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Quantas garotas (0-17 anos) estão perdidas devido à crise',
'How many Men (18 yrs+) are Dead due to the crisis': 'Quantos homens (18 anos+) estão mortos devido à crise',
'How many Men (18 yrs+) are Injured due to the crisis': 'Quantos homens (18 anos +) são feridos devido à crise',
'How many Men (18 yrs+) are Missing due to the crisis': 'Quantos homens (18 anos +) estão ausentes devido à crise',
'How many Women (18 yrs+) are Dead due to the crisis': 'Quantas mulheres (+18 anos) estão mortas devido à crise',
'How many Women (18 yrs+) are Injured due to the crisis': 'Quantas mulheres (+18 anos) estão feridas devido à crise',
'How many Women (18 yrs+) are Missing due to the crisis': 'Quantas mulheres acima de 18 anos estão ausentes devido à crise',
'How many days will the supplies last?': 'Quantos dias irão durar os abastecimentos?',
'How many new cases have been admitted to this facility in the past 24h?': 'Quantos novos casos tenham sido admitidos a esta facilidade nas últimas 24 horas?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Como muitos dos pacientes com a doença morreram nas últimas 24 horas nesta unidade?',
'How many patients with the disease are currently hospitalized at this facility?': 'Quantos pacientes com a doença estão atualmente internados nesta instalação?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Quanto detalhe é visto. Um nível alto de Zoom mostra muitos detalhes, mas não uma grande área. Um nível de Zoom baixo significa ver uma grande área, mas não com um alto nível de detalhe.',
'Human Resource Details': 'Detalhes de Recursos Humanos',
'Human Resource Management': 'Gerenciamento de recursos humanos',
'Human Resource added': 'Recurso humano adicionado',
'Human Resource removed': 'Recursos Humanos removido',
'Human Resource updated': 'Recursos Humanos atualizado',
'Human Resource': 'Recursos humanos',
'Human Resources Management': 'Gerenciamento de Recursos Humanos',
'Human Resources': 'Recursos Humanos',
'Humanitarian NGO': 'ONG humanitária',
'Hurricane Force Wind': 'Furacão Força Vento',
'Hurricane': 'Furacão',
'Hygiene NFIs': 'Higiene NFIs',
'Hygiene kits received': 'Kits de higiene recebido',
'Hygiene kits, source': 'Kits de higiene, origem',
'Hygiene practice': 'Prática de higiene',
'Hygiene problems': 'PROBLEMAS DE HIGIENE',
'Hygiene': 'Higiene',
'I am available in the following area(s)': 'Estou disponível na(s) seguinte(s) área(s)',
'ID Tag Number': 'Número da Etiqueta de Identificação',
'ID Tag': 'Etiqueta de Identificação',
'ID type': 'Tipo de ID',
'Ice Pressure': 'Pressão de gelo',
'Iceberg': 'Icebergue',
'Identification Report': 'Identificação Relatório',
'Identification Reports': 'Relatórios de Identificação',
'Identification Status': 'Status da Identificação',
'Identified as': 'Identificado como',
'Identified by': 'Identificado por',
'Identity Details': 'Detalhes da identidade',
'Identity added': 'Identidade incluída',
'Identity deleted': 'Identidade excluída',
'Identity updated': 'Identidade atualizada',
'Identity': 'Identidade',
'If Staff have login accounts then they are given access to edit the details of the': 'Se o pessoal tiver contas de login, então lhes é dado acesso para editar os detalhes do',
'If a ticket was issued then please provide the Ticket ID.': 'Se um bilhete foi emitido então por favor forneça o ID do bilhete.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Se um usuário verifica que eles possuem um endereço de email com este domínio, o campo Aprovador é utilizado para determinar se e por quem aprovação adicional é necessária.',
'If it is a URL leading to HTML, then this will downloaded.': 'Se for uma URL levando a HTML, então este será baixado.',
'If neither are defined, then the Default Marker is used.': 'Se nem são definidos, então o Marcador Padrão é utilizado.',
'If no marker defined then the system default marker is used': 'Se nenhum marcador definido, o marcador padrão do sistema é utilizada',
'If no, specify why': 'Se não, especifique por que',
'If none are selected, then all are searched.': 'Se nenhuma for selecionada, então todos são procurados.',
'If the location is a geographic area, then state at what level here.': 'Se o local é uma área geográfica, então defina em que nível aqui.',
'If the request is for type "Other", you should enter a summary of the request here.': 'Se o pedido for para o tipo \\ " Outro", você deve digitar um resumo do pedido aqui.',
'If the request type is "Other", please enter request details here.': 'Se o tipo de pedido é "other", por favor, digite aqui detalhes do pedido.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Se esse campo for preenchido, o usuário de um específico Domain será automaticamente registrado como funcionário desta organização.',
'If this is set to True then mails will be deleted from the server after downloading.': 'Se isso for ajustado para “True”, as correspondências serão deletadas do servidor depois que o downloading for feito.',
'If this record should be restricted then select which role is required to access the record here.': 'Se esse registro deve ser restrito, selecione qual regra é necessária para acessar o record aqui.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Se esse registro deve ser restrito, selectione qual (is) regra (s) serão permitidas para assessá-lo aqui.',
'If yes, specify what and by whom': 'Se SIM, Especifique o quê e por quem',
'If yes, which and how': 'Se sim, quais e como',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Se você não inserir um documento de referência, seu e-mail será exibido para permitir que esses dados sejam verificados.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'Se voce conhecer o Geonames ID desta localização então voce poderá inserí-lo aqui.',
'If you know what the OSM ID of this location is then you can enter it here.': 'Se voce conhecer o OSM ID desta localização, então voce pode inserí-lo aqui.',
'If you need to add a new document then you can click here to attach one.': 'Se houver necessidade de incluir um novo documento então voce poderá clicar aqui para anexá-lo.',
'If you want several values, then separate with': 'Se voce deseja varios valores, separe com',
'If you would like to help, then please': 'Se você gostaria de ajudar, então por favor',
'Illegal Immigrant': 'Imigrante Ilegal',
'Image Details': 'Detalhes da Imagem',
'Image Tags': 'Imagem Tags',
'Image Type': 'Tipo de Imagem',
'Image Upload': 'Fazer atualizacao Da imagem',
'Image added': 'Imagem Adicionada',
'Image deleted': 'Imagem excluída',
'Image updated': 'Imagem atualizada',
'Image': 'Imagem',
'Imagery': 'Imagens',
'Images': 'Imagens',
'Impact Assessments': 'Avaliações de impacto',
'Impact Details': 'Detalhes de impacto',
'Impact Type Details': 'Detalhes dos tipos de impacto',
'Impact Type added': 'Tipo de impacto incluído',
'Impact Type deleted': 'Tipo de impacto excluído',
'Impact Type updated': 'Atualização dos tipos de impacto',
'Impact Type': 'Tipo de impacto',
'Impact Types': 'Tipos de impactos',
'Impact added': 'Impacto incluído',
'Impact deleted': 'Impacto excluído',
'Impact updated': 'Atualização de impacto',
'Impacts': 'Impactos',
'Import & Export Data': 'Importar & Exportar Dados',
'Import Data': 'Importar Dados',
'Import Jobs': 'Importar Tarefas',
'Import and Export': 'Importação e Exportação',
'Import from Ushahidi Instance': 'Importação da Instância Ushahidi',
'Import if Master': 'Importar se Mestre',
'Import multiple tables as CSV': 'Importar tabelas multiplas como CSV',
'Import': 'Importação',
'Import/Export': 'Importar/Exportar',
'Important': 'Importante',
'Importantly where there are no aid services being provided': 'Importante onde não há serviços de apoio a ser prestado',
'Importing data from spreadsheets': 'Importar dados de planilhas',
'Improper decontamination': 'Descontaminação Imprópria',
'Improper handling of dead bodies': 'Manipulação inadequada de cadáveres',
'In Catalogs': 'Em Catálogos',
'In Inventories': 'Em Inventários',
'In Process': 'Em Processo',
'In Progress': 'Em Progresso',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Maximize o ajuste da janela para preenche-la toda, desta forma não será necessário configurar para uso de fonte grande.',
'Inbound Mail Settings': 'Definições de correio de entrada',
'Incident Categories': 'Categorias Incidente',
'Incident Report Details': 'Detalhes do relatório de incidentes',
'Incident Report added': 'Relatório de Incidente incluído',
'Incident Report deleted': 'Relatório de Incidente excluído',
'Incident Report updated': 'Relatório de incidente atualizado',
'Incident Report': 'Relatório de Incidente',
'Incident Reporting System': 'Sistema de relatórios de incidentes',
'Incident Reporting': 'Relatório de incidentes',
'Incident Reports': 'Relatório de incidentes',
'Incident': 'Incidente',
'Incidents': 'incidentes',
'Incoming Shipment canceled': 'Chegada da encomenda cancelada',
'Incoming Shipment updated': 'Chegada de encomenda actualizada.',
'Incoming': 'Entrada',
'Incomplete': 'Incompleto',
'Individuals': 'Individuais',
'Industrial Crime': 'Crime Industrial',
'Industry Fire': 'Indústria Fogo',
'Infant (0-1)': 'Criança (0-1)',
'Infectious Disease (Hazardous Material)': 'Doenças infecciosas (Material perigoso)',
'Infectious Disease': 'Doença INFECCIOSA',
'Infectious Diseases': 'Doenças infecciosas',
'Infestation': 'Infestação',
'Informal Leader': 'Líder Informal',
'Informal camp': 'Acampamento Informal',
'Information gaps': 'problemas de informação',
'Infusion catheters available': 'Cateteres de infusão disponível',
'Infusion catheters need per 24h': 'Cateteres infusão necessário por 24 H',
'Infusion catheters needed per 24h': 'Cateteres infusão necessário por H',
'Infusions available': 'Infusões disponíveis',
'Infusions needed per 24h': 'Infusões necessário por 24H',
'Inspected': 'Inspecionado',
'Inspection Date': 'Data de Inspeção',
'Inspection date and time': 'Data e hora de inspeção',
'Inspection time': 'Hora da inspeção',
'Inspector ID': 'ID do Inspetor',
'Instant Porridge': 'Mingau Instantâneo',
'Institution': 'Instituição',
'Insufficient vars: Need module, resource, jresource, instance': 'Variaveis insuficientes: necessario modulo, recurso, jrecurso, instância',
'Insufficient': 'insuficiente',
'Intake Items': 'Itens de admissão',
'Intergovernmental Organization': 'Organização Intergovernamental',
'Interior walls, partitions': 'Do Interior das paredes, partições',
'Internal State': 'Estado Interno',
'International NGO': 'ONG internacional',
'International Organization': 'Organização Internacional',
'Interview taking place at': 'Entrevista em',
'Invalid Query': 'Consulta inválida',
'Invalid request!': 'Pedido inválido!',
'Invalid ticket': 'Bilhete Inválido',
'Invalid': 'Inválido',
'Inventories': 'Inventários.',
'Inventory Item Details': 'Detalhes do Item de inventário',
'Inventory Item added': 'Item incluído no inventário',
'Inventory Item deleted': 'Item do inventário excluído',
'Inventory Item updated': 'Item de Inventário atualizado',
'Inventory Item': 'Item do inventário',
'Inventory Items Available for Request Item': 'Itens de inventário disponíveis para Pedir um Item',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Itens de invenrário incluem ambos suprimentos consumíveis & aqueles que se transformarão em Ativos no seu destino.',
'Inventory Items': 'Itens do Inventário',
'Inventory Management': 'Gerenciamento de Inventário',
'Inventory functionality is available for:': 'Inventário de funcionalidades esta disponível para:',
'Inventory of Effects': 'Inventário de Efeitos',
'Inventory': 'Inventário',
'Is editing level L%d locations allowed?': 'É permitido editar o nível dos locais L%d?',
'Is it safe to collect water?': 'É seguro coletar água?',
'Is this a strict hierarchy?': 'Esta é uma hierarquia rigorosa?',
'Issuing Authority': 'Autoridade emissora',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que está fornecendo em cada região.',
'Item Added to Shipment': 'Item Incluído para Embarque',
'Item Catalog Details': 'Detalhes do item do catálogo',
'Item Categories': 'Categorias do Item',
'Item Category Details': 'Detalhes da categoria de item',
'Item Category added': 'Categoria de item incluída',
'Item Category deleted': 'Categoria de item excluída',
'Item Category updated': 'Atualização da categoria de item',
'Item Category': 'Categoria do Item',
'Item Details': 'Detalhes do item',
'Item Pack Details': 'Detalhes do pacote de itens',
'Item Pack added': 'Pacote de itens',
'Item Pack deleted': 'Pacote de itens excluído',
'Item Pack updated': 'Itens de Pacote atualizados',
'Item Packs': 'Item de Pacotes',
'Item added to Inventory': 'Itens adicionados ao Inventário',
'Item added to shipment': 'Item incluído para embarque',
'Item added': 'Item incluído',
'Item already in Bundle!': 'Item já no pacote configurável!',
'Item already in Kit!': 'Item já no Kit!',
'Item already in budget!': 'Item já no Orçamento!',
'Item deleted': 'Item Excluído',
'Item removed from Inventory': 'Item removido do Inventário',
'Item updated': 'Item atualizado',
'Items in Category can be Assets': 'itens na categoria podem ser ativos',
'Items': 'Itens',
'Japanese': 'japonês',
'Jerry can': 'Jerry pode',
'Jew': 'Judeu',
'Job Market': 'Mercado de trabalho',
'Job Role Catalog': 'Catalogo de Funçao de trabalho',
'Job Role Details': 'Detalhes da Função',
'Job Role added': 'funçao de trabalho inclusa',
'Job Role deleted': 'Funçao de trabalho excluida',
'Job Role updated': 'Função actualizada',
'Job Role': 'Função de trabalho',
'Job Roles': 'Funções',
'Job Title': 'Título do Cargo',
'Jobs': 'Tarefas',
'Journal Entry Details': 'Detalhes da Entrada de Diário',
'Journal entry added': 'Entrada de diário incluída',
'Journal entry deleted': 'Entrada de diário removida',
'Journal entry updated': 'Entrada de diário atualizado',
'Journal': 'Diário',
'Key Details': 'Detalhes da Chave',
'Key added': 'Chave adicionada',
'Key deleted': 'Chave removida',
'Key updated': 'Chave actualizada',
'Key': 'Tecla',
'Keys': 'Teclas',
'Kit Contents': 'Conteúdo Kit',
'Kit Details': 'Detalhes do Kit',
'Kit Updated': 'Kit de Atualização',
'Kit added': 'Pacote adicionado',
'Kit deleted': 'Kit excluído',
'Kit updated': 'Kit de atualização',
'Kit': 'kit',
'Known Identities': 'Identidades conhecido',
'Known incidents of violence against women/girls': 'Incidentes de violência conhecidos contra mulheres/garotas',
'Known incidents of violence since disaster': 'Incidentes de violência conhecidos desde o desastre',
'LICENSE': 'LICENÇA',
'Lack of material': 'Falta de material',
'Lack of school uniform': 'Falta de uniforme escolar',
'Lack of supplies at school': 'Falta de suprimentos na escola',
'Lack of transport to school': 'Falta de transporte escolar',
'Lactating women': 'Mulheres lactantes',
'Landslide': 'Deslizamento',
'Language': 'Linguagem',
'Last Name': 'sobrenome',
'Last known location': 'Último local conhecido',
'Last synchronization time': 'Horário da última sincronização',
'Last updated by': 'Última atualização por',
'Last updated on': 'Última Atualização em',
'Last updated': 'Última atualização',
'Latitude is North-South (Up-Down).': 'Latitude é sentido norte-sul (emcima-embaixo).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude é zero na linha do Equador, positiva no hemisfério norte e negativa no hemisfério sul.',
'Latitude of Map Center': 'Latitude DO MAPA Centro',
'Latitude of far northern end of the region of interest.': 'Latitude do extremo Norte longe do Região de interesse.',
'Latitude of far southern end of the region of interest.': 'Latitude da extremidade sul longe do Região de interesse.',
'Latitude should be between': 'Latitude deve estar entre',
'Latrines': 'Privadas',
'Law enforcement, military, homeland and local/private security': 'Execução da lei militar, interna e segurança local/privada',
'Layer Details': 'Detalhes de Camada',
'Layer added': 'Camada incluída',
'Layer deleted': 'Camada excluída',
'Layer updated': 'Camada atualizada',
'Layer': 'Camada',
'Layers updated': 'Camadas atualizadas',
'Layers': 'Camadas',
'Layout': 'Modelo',
'Leader': 'guia',
'Legend Format': 'Formato da Legenda',
'Length (m)': 'Comprimento (m)',
'Level 1 Assessment Details': 'Detalhes da Avaliação Nível 1',
'Level 1 Assessment added': 'Avaliação Nível 1 incluído',
'Level 1 Assessment deleted': 'Avaliação Nível 1 excluído',
'Level 1 Assessment updated': 'Avaliação Nível 1 atualizada',
'Level 1 Assessments': 'Avaliações Nível 1',
'Level 1': 'Nível 1',
'Level 2 Assessment Details': 'Nível 2 de avaliação Detalhado',
'Level 2 Assessment added': 'Nível 2 avaliação incluído',
'Level 2 Assessment deleted': 'Nível 2 de avaliação excluído',
'Level 2 Assessment updated': 'Nível 2 de avaliação atualizada',
'Level 2 Assessments': 'Nível 2 de Avaliações',
'Level 2 or detailed engineering evaluation recommended': 'Nível 2 ou engenharia detalhada de avaliação recomendado',
'Level 2': 'nível 2',
'Level': 'Nível',
'Library support not available for OpenID': 'Apoio de biblioteca não está disponível para OpenID',
'LineString': 'cadeia-de-linhas',
'List / Add Baseline Types': 'Lista / Incluir Linha de Tipos',
'List / Add Impact Types': 'Lista / Incluir Tipos de Impacto',
'List / Add Services': 'Lista / Incluir Serviços',
'List / Add Types': 'Lista / Incluir Tipos',
'List Activities': 'listar atividades',
'List All Assets': 'Lista todos os ativos',
'List All Catalog Items': 'Lista todos os Itens Do Catálogo',
'List All Commitments': 'Lista todos os compromissos',
'List All Entries': 'Listar todas as entradas',
'List All Item Categories': 'Lista todos os itens Categorias',
'List All Memberships': 'Listar Todas As Associações',
'List All Received Shipments': 'Lista todas as transferências Recebidas',
'List All Records': 'Lista todos os registros',
'List All Reports': 'Listar todos os Relatórios',
'List All Requested Items': 'Lista Todos Os itens solicitados',
'List All Requests': 'Lista Todos Os Pedidos',
'List All Sent Shipments': 'Listar todos os embarques enviados',
'List All': 'Mostrar Tudo',
'List Alternative Items': 'Listar Itens Alternativos',
'List Assessment Summaries': 'Listar Resumos das Avaliações',
'List Assessments': 'Listar as Avaliações',
'List Asset Assignments': 'Listar Atribuições de Ativos',
'List Assets': 'Listar Ativos',
'List Availability': 'Listar Disponibilidade',
'List Baseline Types': 'Lista de Tipos De Linha',
'List Baselines': 'Lista de Linhas',
'List Brands': 'Lista de Marcas',
'List Budgets': 'Listar Orçamentos',
'List Bundles': 'Listar Pacotes',
'List Camp Services': 'Listar Serviços de Acampamento',
'List Camp Types': 'Listar Tipos de Acampamentos',
'List Camps': 'Listar Acampamentos',
'List Catalog Items': 'Lista de Itens Do Catálogo',
'List Catalogs': 'Listar catálogos',
'List Certificates': 'Listar certificados',
'List Certifications': 'Listar certificações',
'List Checklists': 'Lista Listas de Verificação.',
'List Cluster Subsectors': 'Lista Subsetores de Cluster',
'List Clusters': 'Lista Clusters',
'List Commitment Items': 'Lista Itens de Compromisso',
'List Commitments': 'Lista Compromissos',
'List Competencies': 'Listar competencias',
'List Competency Ratings': 'Listar classificações de competencias',
'List Conflicts': 'Lista Conflitos',
'List Contact Information': 'Listar informações do contato',
'List Contacts': 'Listar contatos',
'List Course Certificates': 'Listar certificados de cursos',
'List Courses': 'Listar Cursos',
'List Credentials': 'Listar credenciais',
'List Current': 'Lista Atual',
'List Documents': 'Listar documentos',
'List Donors': 'Listar doadores',
'List Events': 'Lista de Eventos',
'List Facilities': 'Lista de Facilidades',
'List Feature Layers': 'Listar Camadas de Recursos',
'List Flood Reports': 'Listar Relatórios de Inundações',
'List Groups': 'Listar grupos',
'List Groups/View Members': 'Listar Grupos/visualizar membros',
'List Hospitals': 'Listar de Hospitais',
'List Human Resources': 'Lista de Recursos Humanos',
'List Identities': 'Lista de Identidades',
'List Images': 'Lista de Imagens',
'List Impact Assessments': 'Lista de Avaliações De Impacto',
'List Impact Types': 'Lista de Tipos De Impacto',
'List Impacts': 'Lista de impactos',
'List Incident Reports': 'Lista de relatórios de incidentes',
'List Inventory Items': 'Listar ítens de inventário',
'List Item Categories': 'Listar categorias de ítens',
'List Item Packs': 'Lista pacotes de itens',
'List Items in Inventory': 'Lista de Itens no inventário',
'List Items': 'Listar itens',
'List Job Roles': 'Listar cargos',
'List Keys': 'Listar Chaves',
'List Kits': 'LISTAR Kits',
'List Layers': 'Listar Camadas',
'List Level 1 Assessments': 'Listar avaliações nível 1',
'List Level 1 assessments': 'Listar avaliação nível 1',
'List Level 2 Assessments': 'Listar avaliações nível 2',
'List Level 2 assessments': 'Listar avaliações nível 2',
'List Locations': 'Listar Localizações',
'List Log Entries': 'Listar as entradas de log',
'List Map Profiles': 'Listar configurações de mapa',
'List Markers': 'Listar marcadores',
'List Members': 'Lista de membros',
'List Memberships': 'Lista de associados',
'List Messages': 'Listar Mensagens',
'List Missing Persons': 'Lista de pessoas desaparecidas',
'List Missions': 'Listar Missões',
'List Need Types': 'Listar tipos de necessidades',
'List Needs': 'Lista de Necessidades',
'List Notes': 'Lista de Notas',
'List Offices': 'Lista de Escritórios',
'List Organizations': 'Listar Organizações',
'List Peers': 'LISTA DE PARES',
'List Personal Effects': 'Lista de objetos pessoais',
'List Persons': 'LISTA DE PESSOAS',
'List Photos': 'Lista de Fotos',
'List Population Statistics': 'Lista das Estatisticas da População',
'List Positions': 'Lista de Posições',
'List Problems': 'Lista de Problemas',
'List Projections': 'Lista de Projeções',
'List Projects': 'Listar Projectos',
'List Rapid Assessments': 'Listar Avaliações Rápidas',
'List Received Items': 'Listar Elementos Recebidos',
'List Received Shipments': 'Listar Carga Recebida',
'List Records': 'Listar Registros',
'List Registrations': 'Listar Registrações',
'List Reports': 'Relatórios de Listas',
'List Request Items': 'Pedido de Itens de lista',
'List Requests': 'LISTA DE PEDIDOS',
'List Resources': 'Listar Recursos',
'List Rivers': 'Lista de Rios',
'List Roles': 'Listar Funções',
'List Rooms': 'Listar Salas',
'List Scenarios': 'Listar cenários',
'List Sections': 'lista de Seções',
'List Sectors': 'Lista de Sectores',
'List Sent Items': 'Os itens da lista Enviada',
'List Sent Shipments': 'Embarques lista Enviada',
'List Service Profiles': 'Lista de serviços Perfis',
'List Settings': 'Lista de configurações',
'List Shelter Services': 'Lista de serviços de abrigo',
'List Shelter Types': 'Lista de Tipos De Abrigo',
'List Shelters': 'Lista de Abrigos',
'List Skill Equivalences': 'LISTA DE HABILIDADE Equivalências',
'List Skill Provisions': 'Listar suprimento de habilidades',
'List Skill Types': 'Lista de Tipos De Habilidade',
'List Skills': 'LISTA DE HABILIDADES',
'List Solutions': 'Listar Soluções',
'List Staff Members': 'Listar funcionários',
'List Staff Types': 'Listar Tipos De Equipe',
'List Staff': 'Listar Pessoal',
'List Status': 'Listar Status',
'List Subscriptions': 'Lista de Assinaturas',
'List Subsectors': 'Listar Subsetores',
'List Support Requests': 'Listar Pedidos de Suporte',
'List Survey Answers': 'Listar Respostas de Pesquisa',
'List Survey Questions': 'Listar Perguntas da Pesquisa',
'List Survey Sections': 'Listar Seções da Pesquisa',
'List Survey Series': 'Listar Séries de Pesquisa',
'List Survey Templates': 'Listar Modelos de Pesquisa',
'List Tasks': 'Lista de Tarefas',
'List Teams': 'Lista de Equipes',
'List Themes': 'Lista de Temas',
'List Tickets': 'lista de Bilhetes',
'List Tracks': 'Rastreia lista',
'List Trainings': 'Listar Treinamentos',
'List Units': 'Lista de Unidades',
'List Users': 'Mostrar usuários',
'List Volunteers': 'Mostrar Voluntários',
'List Warehouses': 'Mostrar Depósitos',
'List all': 'Mostrar tudo',
'List available Scenarios': 'Listar Cenários Disponíveis',
'List of Items': 'Lista de Itens',
'List of Missing Persons': 'Lista de pessoas desaparecidas',
'List of Peers': 'Lista de pares',
'List of Reports': 'Lista de Relatórios',
'List of Requests': 'Lista de Pedidos',
'List of Spreadsheets uploaded': 'Lista de Folhas de Cálculo transferidas',
'List of Spreadsheets': 'Lista de Folhas de Cálculo',
'List of Volunteers for this skill set': 'Lista de Voluntários para este conjunto de competências',
'List of Volunteers': 'Lista de Voluntários',
'List of addresses': 'Lista de endereços',
'List unidentified': 'Lista não identificada',
'List': 'Listar',
'List/Add': 'Lista/incluir',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Lista "quem está fazendo o que & aonde". Permite a agências humanitárias coordenar suas atividades',
'Live Help': 'Ajuda ao vivo',
'Livelihood': 'Subsistência',
'Load Cleaned Data into Database': 'Carregue Informações Claras no Banco de Dados',
'Load Raw File into Grid': 'Carregamento de arquivo bruto na Grid',
'Loading': 'Carregando',
'Local Name': 'Nome local',
'Local Names': 'Nomes locais',
'Location 1': 'Local 1',
'Location 2': 'Local 2',
'Location Details': 'Detalhes da Localização',
'Location Hierarchy Level 0 Name': 'Nivel Local de hierarquia 0 nome',
'Location Hierarchy Level 1 Name': 'Nivel local de hierarquia 1 nome',
'Location Hierarchy Level 2 Name': 'Nivel local de hierarquia 2 nome',
'Location Hierarchy Level 3 Name': 'Hierarquia local Nível 3 Nome',
'Location Hierarchy Level 4 Name': 'Hierarquia local Nível 4 Nome',
'Location Hierarchy Level 5 Name': 'Hierarquia local Nível 5 Nome',
'Location added': 'Local incluído',
'Location cannot be converted into a group.': 'Local não pode ser convertido em um grupo.',
'Location deleted': 'Localidade excluída',
'Location details': 'Detalhes do Local',
'Location group cannot be a parent.': 'Localização de grupo não pode ser um pai.',
'Location group cannot have a parent.': 'Localização de grupo não tem um pai.',
'Location groups can be used in the Regions menu.': 'Grupos local pode ser utilizado no menu Regiões.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Grupos locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades locais abrangidas no grupo.',
'Location updated': 'Local atualizado',
'Location': 'Localização',
'Location:': 'Localização:',
'Locations of this level need to have a parent of level': 'Locais de esse nível precisa ter um pai de nível',
'Locations': 'Localizações',
'Lockdown': 'BLOQUEIO',
'Log Entry Details': 'detalhes da entrada de registro',
'Log entry added': 'Entrada de Log incluída',
'Log entry deleted': 'Entrada de Log Excluída',
'Log entry updated': 'Entrada de Log de atualização',
'Log': 'registro',
'Login': 'login',
'Logistics Management System': 'Sistema de Gestão de Logística',
'Logistics': 'Logística',
'Logo file %s missing!': 'Arquivo de logotipo %s ausente!',
'Logo': 'Logotipo',
'Logout': 'Deslogar',
'Long Text': 'Texto Longo',
'Longitude is West - East (sideways).': 'Longitude é Oeste - Leste (lateral).',
'Longitude is West-East (sideways).': 'Longitude é leste-oeste (direções).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no primeiro meridiano (Greenwich Mean Time) e é positivo para o leste, em toda a Europa e Ásia. Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no primeiro meridiano (por meio de Greenwich, Reino Unido) e é positivo para o leste, em toda a Europa e Ásia. Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.',
'Longitude of Map Center': 'Longitude do Centro do Mapa',
'Longitude of far eastern end of the region of interest.': 'Longitude longe do Oeste no final da região de interesse.',
'Longitude of far western end of the region of interest.': 'Longitude de oeste longínquo no final da Região de interesse.',
'Longitude should be between': 'Longitude deve estar entre',
'Looting': 'Saques',
'Lost Password': 'Senha Perdida',
'Lost': 'Perdido',
'Low': 'Baixo',
'Magnetic Storm': 'Tempestade magnética',
'Major Damage': 'Grandes danos',
'Major expenses': 'Despesas principais',
'Major outward damage': 'Danos exteriores principais',
'Make Commitment': 'Ter obrigação',
'Make New Commitment': 'Fazer Novo Compromisso',
'Make Request': 'Fazer Pedido',
'Make preparations per the <instruction>': 'Fazer Preparações por',
'Male': 'masculino',
'Manage Relief Item Catalogue': 'Gerenciar Catálogo de Item de Alívio',
'Manage Users & Roles': 'GERENCIAR Usuários & Funções',
'Manage Warehouses/Sites': 'GERENCIAR Armazéns/Sites',
'Manage Your Facilities': 'Gerenciar suas instalações',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Gerenciar pedidos de suprimentos, patrimônio, pessoal ou outros recursos. Corresponde aos estoques onde os suprimentos são solicitados.',
'Manage requests of hospitals for assistance.': 'GERENCIAR Pedidos de hospitais para obter assistência.',
'Manage volunteers by capturing their skills, availability and allocation': 'GERENCIAR voluntários por captura sua capacidade, Alocação e disponibilidade',
'Manage': 'Gerenciar',
'Manager': 'Gerente',
'Managing Office': 'Gerenciando Office',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obrigatório. Em GeoServer, este é o nome Da Camada. No getCapabilities WFS, este é o nome da parte FeatureType após os dois pontos (:).',
'Mandatory. The URL to access the service.': 'Obrigatório. A URL para acessar o serviço.',
'Manual Synchronization': 'Sincronização Manual',
'Many': 'Muitos',
'Map Center Latitude': 'Latitude do Centro do Mapa',
'Map Center Longitude': 'Longitude do centro do mapa',
'Map Profile Details': 'Detalhes de configuração de mapa',
'Map Profile added': 'Configuração de mapa incluído',
'Map Profile deleted': 'Configuração de mapa excluído',
'Map Profile removed': 'Configuração de mapa removido',
'Map Profile updated': 'Configuração de mapa atualizada',
'Map Profile': 'Configuração de Mapa',
'Map Profiles': 'Configuracões de mapa',
'Map Height': 'Altura do Mapa',
'Map Service Catalog': 'Catálogo do serviço de mapas',
'Map Settings': 'Configurações do Mapa',
'Map Viewing Client': 'Cliente de visualização do mapa',
'Map Width': 'Largura do mapa',
'Map Zoom': 'Zoom do mapa',
'Map of Hospitals': 'Mapa de Hospitais',
'Map': 'Mapa',
'Marine Security': 'Segurança da marina',
'Marital Status': 'Estado Civil',
'Marker Details': 'Detalhes do Marcador',
'Marker added': 'Marcador incluído',
'Marker deleted': 'Marcador removido',
'Marker updated': 'Marcador atualizado',
'Marker': 'Marcador',
'Markers': 'Marcadores',
'Master Message Log to process incoming reports & requests': 'Log de Mensagem Principal para processar relatórios de entrada e pedidos',
'Master Message Log': 'Mensagem de Log principal',
'Match Percentage': 'Porcentagem de correspondência',
'Match Requests': 'Corresponder Pedidos',
'Match percentage indicates the % match between these two records': 'Porcentagem idêntica indica a % idêntica entre estes dois registros.',
'Match?': 'Combina?',
'Matching Catalog Items': 'Catálogo de itens correspondentes',
'Matching Items': 'Itens correspondentes',
'Matching Records': 'Registros de correspondência',
'Matrix of Choices (Multiple Answers)': 'Matrix de Opções (Respostas Múltiplas)',
'Matrix of Choices (Only one answer)': 'Matrix de Opções (Apenas uma resposta)',
'Matrix of Text Fields': 'Matriz de campos de texto',
'Max Persons per Dwelling': 'Máx. Pessoas por Habitação',
'Maximum Location Latitude': 'Latitude máxima local',
'Maximum Location Longitude': 'Longitude máxima local',
'Medical and public health': 'Saúde Médica e Pública',
'Medium': 'Médio',
'Megabytes per Month': 'Megabytes por mês',
'Member removed from Group': 'Associação Excluída',
'Members': 'membros',
'Membership Details': 'Detalhes de Associação',
'Membership updated': 'Associação ATUALIZADO',
'Membership': 'Membresia',
'Memberships': 'Parcelas',
'Message Details': 'deatlhes de mesagens',
'Message Variable': 'Mensagem variável',
'Message added': 'Mensagem incluída',
'Message deleted': 'Mensagem Excluída',
'Message field is required!': 'Campo mensagem é obrigatório!',
'Message updated': 'Mensagem atualizada',
'Message variable': 'Mensagem variável',
'Message': 'Mensagem',
'Messages': 'Mensagens',
'Messaging settings updated': 'Configurações de mensagens atualizadas',
'Messaging': 'sistema de mensagens',
'Meteorite': 'Meteorito',
'Meteorological (inc. flood)': 'Meteorológico (inc. Enchente)',
'Method used': 'Método utilizado',
'Middle Name': 'Nome do meio',
'Migrants or ethnic minorities': 'Imigrantes ou minorias étnicas',
'Military': 'Militares',
'Minimum Bounding Box': 'Caixa Delimitadora Mínima',
'Minimum Location Latitude': 'Mínimo Latitude de Localidade',
'Minimum Location Longitude': 'Longitude de Localização Mínima',
'Minimum shift time is 6 hours': 'tempo mínimo de Shift é de 6 horas',
'Minor Damage': 'Dano secundário',
'Minor/None': 'Secundária/Nenhum',
'Minorities participating in coping activities': 'Minorias participando de atividades de enfrentamento',
'Minute': 'Minuto',
'Minutes must be a number between 0 and 60': 'Minutos devem ser um número entre 0 e 60',
'Minutes per Month': 'Minutos por Mês',
'Minutes should be a number greater than 0 and less than 60': 'Minutos devem ser um número maior que 0 e menor que 60',
'Miscellaneous': 'Variados',
'Missing Person Details': 'Detalhes da pessoa perdida',
'Missing Person Registry': 'Registro de Pessoas Desaparecidas',
'Missing Person Reports': 'Relatórios da pessoa desaparecida',
'Missing Person': 'Pessoa desaparecida',
'Missing Persons Registry': 'Registro de pessoas desaparecidas',
'Missing Persons Report': 'Relatório de pessoas desaparecidas',
'Missing Persons': 'Pessoas desaparecidas',
'Missing Report': 'Relatório de desaparecimento',
'Missing Senior Citizen': 'Cidadão sênior desaparecido',
'Missing Vulnerable Person': 'Pessoa vulnerável desaparecida',
'Missing': 'Perdido',
'Mission Details': 'Detalhes da Missão',
'Mission Record': 'Registro da Missão',
'Mission added': 'Missão incluída',
'Mission deleted': 'Missão excluída',
'Mission updated': 'Missão atualizada',
'Missions': 'Missões',
'Mobile Basic Assessment': 'Taxação básica móvel',
'Mobile Phone': 'Telefone celular',
'Mobile': 'telefone celular',
'Mode': 'modo',
'Model/Type': 'Modelo/Tipo',
'Modem Settings': 'Configurações do Modem',
'Modem settings updated': 'Configurações de modem atualizadas',
'Moderate': 'Moderado',
'Moderator': 'Moderador',
'Modify Information on groups and individuals': 'Modificar Informações sobre grupos e pessoas',
'Modifying data in spreadsheet before importing it to the database': 'Modificando dados na planilha antes de importá-los para o banco de dados',
'Module disabled!': 'Módulo desativado!',
'Module provides access to information on current Flood Levels.': 'Módulo fornece acesso a informações na atual Onda níveis.',
'Module': 'Módulo',
'Monday': 'segunda-feira',
'Monthly Cost': 'Custo mensal',
'Monthly Salary': 'Salário mensal',
'Months': 'meses',
'Morgue Status': 'Situação do necrotério',
'Morgue Units Available': 'Unidades disponíveis no necrotério',
'Mosque': 'Mesquita',
'Motorcycle': 'Motocicleta',
'Moustache': 'Bigode',
'MultiPolygon': 'multipolygon',
'Multiple Choice (Multiple Answers)': 'Múltipla escolha (Várias Respostas)',
'Multiple Choice (Only One Answer)': 'Múltipla Escolha (Apenas uma resposta)',
'Multiple Matches': 'Múltiplas Correspondências',
'Multiple Text Fields': 'Vários campos de texto',
'Multiple': 'Múltiplos',
'Muslim': 'Muçulmano',
'Must a location have a parent location?': 'Um local deve ter uma posição pai?',
'My Current function': 'Minha função Atual',
'My Tasks': 'Minhas tarefas',
'N/A': 'n/d',
'NO': 'NÃO',
'NZSEE Level 1': 'NZSEE Nível 1',
'NZSEE Level 2': 'NZSEE Nível 2',
'Name and/or ID': 'Nome E/OU ID',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'O nome do arquivo (& sub OPCIONAL-path) localizado no estáticamente que deve ser utilizado para o segundo plano do Cabeçalho.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Nome do arquivo (e sub-caminho opcional) localizado estático que deveria ser utilizado para a imagem superior esquerda.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Nome do arquivo (e sub-caminho opcional) localizado nas visualizações que deve ser utilizado no rodapé.',
'Name of the person in local language and script (optional).': 'Nome da pessoa no idioma local e script local (opcional).',
'Name or Job Title': 'Nome ou cargo',
'Name': 'nome',
'Name, Org and/or ID': 'Nome, organização e/ou ID.',
'Name/Model/Type': 'Nome/Modelo/Tipo',
'Names can be added in multiple languages': 'Nomes podem ser adicionados em múltiplos idiomas',
'National ID Card': 'Cartão de ID Nacional',
'National NGO': 'Nacional ONG',
'National': 'Nacional',
'Nationality of the person.': 'Nacionalidade da pessoa.',
'Nationality': 'Nacionalidade',
'Nautical Accident': 'Acidente Náutico',
'Nautical Hijacking': 'Sequestro Náutico',
'Need Type Details': 'Tipo precisa de Detalhes',
'Need Type added': 'Precisa de tipo incluído',
'Need Type deleted': 'Precisa de Tipo excluído',
'Need Type updated': 'Tipo de necessidade atualizada',
'Need Type': 'Precisa de Tipo',
'Need Types': 'Tipos de necessidade',
'Need added': 'Necessidade incluída',
'Need deleted': 'Necessidade excluída',
'Need to be logged-in to be able to submit assessments': 'Precisa estar conectado ao programa para conseguir submeter avaliações',
'Need to configure Twitter Authentication': 'Precisa configurar a autenticação do Twitter',
'Need to specify a Budget!': 'É necessário especificar um orçamento!',
'Need to specify a Kit!': 'É necessário especificar um Kit!',
'Need to specify a Resource!': 'É necessário especificar um recurso!',
'Need to specify a bundle!': 'É necessário especificar um pacote!',
'Need to specify a group!': 'É necessário especificar um grupo!',
'Need to specify a location to search for.': 'É necessário especificar um local para procurar.',
'Need to specify a role!': 'Será necessário especificar um papel!',
'Need to specify a table!': 'Será necessário especificar uma tabela!',
'Need to specify a user!': 'Será necessário especificar um usuário!',
'Need updated': 'Precisa de atualização',
'Needs Details': 'detalhes necessarios',
'Needs Maintenance': 'Necessita Manutenção',
'Needs to reduce vulnerability to violence': 'Necessidade de reduzir a vulnerabilidade à violência.',
'Needs': 'necessidades',
'Negative Flow Isolation': 'NEGATIVO Fluxo ISOLAMENTO',
'Neighborhood': 'Bairro',
'Neighbouring building hazard': 'Risco de construção vizinhos',
'Neonatology': 'Neonatologia',
'Network': 'rede',
'Neurology': 'Neurologia',
'New Assessment reported from': 'Nova Avaliação relatada a partir de',
'New Certificate': 'Novo Certificado',
'New Checklist': 'Nova Verificação',
'New Entry': 'Nova Entrada',
'New Event': 'Novo Evento',
'New Item Category': 'Nova Categoria de Ítem',
'New Job Role': 'Novo Papel',
'New Location Group': 'Novo Grupo de Locais',
'New Location': 'Novo Local',
'New Peer': 'Novo Par',
'New Record': 'Novo Registro',
'New Request': 'Nova Requisição',
'New Scenario': 'Novo Cenário',
'New Skill': 'Nova Habilidade',
'New Solution Choice': 'Escolha nova solução',
'New Staff Member': 'Novo membro da equipe',
'New Support Request': 'Novo pedido de suporte',
'New Synchronization Peer': 'Novo par de sincronização',
'New Team': 'Nova equipe',
'New Training Course': 'Novo Curso de Treinamento',
'New Volunteer': 'Novo Voluntário',
'New cases in the past 24h': 'Novos casos nas últimas 24H',
'New': 'Novo(a)',
'News': 'Notícias',
'Next': 'Seguinte',
'No Activities Found': 'Não há actividades',
'No Alternative Items currently registered': 'Nenhum item alternativo atualmente registrado',
'No Assessment Summaries currently registered': 'Nenhum Sumário De Avaliação actualmente registrado',
'No Assessments currently registered': 'Nenhuma Avaliação actualmente registrada',
'No Asset Assignments currently registered': 'Nenhum ativo designado encontra-se atualmente registrado',
'No Assets currently registered in this event': 'Sem ativos atualmente registrados neste evento',
'No Assets currently registered in this scenario': 'Sem ativos atualmente registrados neste cenário',
'No Assets currently registered': 'Sem Ativos registrados atualmente',
'No Baseline Types currently registered': 'Nenhum tipo de base line registrado atualmente',
'No Baselines currently registered': 'Nenhuma linha base registrada atualmente',
'No Brands currently registered': 'Sem Marcas atualmente registrado',
'No Budgets currently registered': 'Nenhum Dos Orçamentos registrados atualmente',
'No Bundles currently registered': 'Nenhum pacote atualmente registrado',
'No Camp Services currently registered': 'Nenhum serviço de acampamento atualmente registrado',
'No Camp Types currently registered': 'Nenhum tipo de acampamento atualmente registrado',
'No Camps currently registered': 'Sem Acampamentos atualmente registrados',
'No Catalog Items currently registered': 'Nenhum itens do catálogo registrado atualmente',
'No Catalogs currently registered': 'Nenhum catálogo atualmente registrado',
'No Checklist available': 'Checklist não disponível',
'No Cluster Subsectors currently registered': 'Nenhum sub-setor de cluster registrado atualmente',
'No Clusters currently registered': 'Nenhum Cluster registrado atualmente',
'No Commitment Items currently registered': 'Nenhum Item de Compromisso registrado atualmente',
'No Commitments': 'Sem Compromissos',
'No Credentials currently set': 'Nenhuma credencial atualmente configurada',
'No Details currently registered': 'Nenhum detalhes registrado atualmente',
'No Documents found': 'Nenhum Documento encontrado',
'No Donors currently registered': 'Sem doadores registrados atualmente',
'No Events currently registered': 'Não há eventos atualmente registrados',
'No Facilities currently registered in this event': 'Não há Recursos atualmente registrado nesse evento',
'No Facilities currently registered in this scenario': 'Não há recursos atualmente registrados neste cenário',
'No Feature Layers currently defined': 'Nenhuma Camada de Componentes atualmente definidos',
'No Flood Reports currently registered': 'Nenhum relatório de Inundação atualmente registrado',
'No Groups currently defined': 'Não há Grupos definidos atualmente',
'No Groups currently registered': 'Nenhum Grupo atualmente registrado',
'No Hospitals currently registered': 'Nenhum hospital atualmente registrado',
'No Human Resources currently registered in this event': 'Nao há recursos humanos atualmente registrados nesse evento',
'No Human Resources currently registered in this scenario': 'Sem recursos humanos atualmente registrados neste cenário',
'No Identification Report Available': 'Nenhum Relatório de Identificação Disponível',
'No Identities currently registered': 'Nenhuma Identidade atualmente registrada',
'No Image': 'Nenhuma Imagem',
'No Images currently registered': 'Nenhuma Imagem atualmente registrada',
'No Impact Types currently registered': 'Nenhum tipo de impacto atualmente registrado',
'No Impacts currently registered': 'Nenhum Impacto atualmente registrado',
'No Incident Reports currently registered': 'Nenhum relatório de incidente registrado atualmente',
'No Incoming Shipments': 'Nenhum Embarque de Entrada',
'No Inventory Items currently registered': 'Nenhum Item de Inventário registrado atualmente',
'No Item Categories currently registered': 'Nenhuma Categoria de Item atualmente registrada',
'No Item Packs currently registered': 'Nenhum Pacote de Itens atualmente registrado',
'No Items currently registered in this Inventory': 'Sem itens registrados atualmente neste inventário',
'No Items currently registered': 'Nenhum item registrado atualmente',
'No Keys currently defined': 'Nenhuma chave definida no momento',
'No Kits currently registered': 'Nenhum kit registrado no momento',
'No Level 1 Assessments currently registered': 'Nenhuma avaliação nível 1 registrada no momento',
'No Level 2 Assessments currently registered': 'Nenhum nível 2 Avaliações atualmente registrado',
'No Locations currently available': 'Locais Não disponíveis atualmente',
'No Locations currently registered': 'Locais Não registrados atualmente',
'No Map Profiles currently defined': 'Nenhuma configuração de Mapa estão atualmente definidos',
'No Map Profiles currently registered in this event': 'Nenhuma configuração de Mapa esta atualmente registrado nesse evento',
'No Map Profiles currently registered in this scenario': 'Nenhuma configuração de Mapa está atualmente registrado neste cenário',
'No Markers currently available': 'Não há marcadores atualmente disponíveis',
'No Match': 'Sem correspondência',
'No Matching Catalog Items': 'Nenhum Item de Catálogo Correspondente',
'No Matching Items': 'Sem itens correspondentes',
'No Matching Records': 'Sem registros correspondentes',
'No Members currently registered': 'Sem membros registrados atualmente',
'No Memberships currently defined': 'Sem Associações definidas atualmente',
'No Messages currently in Outbox': 'Nenhuma mensagem na Caixa de saída',
'No Need Types currently registered': 'Sem necessidade, Tipos atualmente registrados',
'No Needs currently registered': 'Sem necessidade, atualmente registrado',
'No Offices currently registered': 'Nenhum Escritório registrado atualmente',
'No Offices found!': 'Menhum Escritório localizado!',
'No Organizations currently registered': 'Nenhuma Organização atualmente registrada',
'No People currently registered in this camp': 'Nenhuma pessoa registrada atualmente neste campo',
'No People currently registered in this shelter': 'Nenhuma pessoa registrada atualmente neste abrigo',
'No Persons currently registered': 'Nenhuma pessoa atualmente registrada',
'No Persons currently reported missing': 'nenhuma pessoa reportada atualmente como perdida',
'No Persons found': 'Nenhuma pessoa localizada',
'No Photos found': 'Nenhuma Foto localizada',
'No Picture': 'Nenhuma imagem',
'No Population Statistics currently registered': 'Nenhuma estatística populacional atualmente registrada',
'No Presence Log Entries currently registered': 'Nenhuma entrada no log Presença atualmente registrado',
'No Problems currently defined': 'Nenhum Problema atualmente definido',
'No Projections currently defined': 'Nenhuma projeção atualmente definida',
'No Projects currently registered': 'Nenhum projeto atualmente registrado',
'No Rapid Assessments currently registered': 'Nenhuma Tributação Rápida atualmente registrada',
'No Received Items currently registered': 'Nenhum item recebido atualmente registrado',
'No Received Shipments': 'Entregas/Despachos não recebidos',
'No Records currently available': 'Registros atualmente não disponíveis',
'No Request Items currently registered': 'Não há items de Pedidos registados',
'No Requests': 'Não há pedidos',
'No Rivers currently registered': 'Não Rios atualmente registrado',
'No Roles currently defined': 'Nenhumas funções atualmente definidas',
'No Rooms currently registered': 'Nenhuma sala atualmente registrada',
'No Scenarios currently registered': 'Nenhum cenário atualmente registrado',
'No Sections currently registered': 'Sem seções atualmente registradas',
'No Sectors currently registered': 'setores nao atualmente registrados',
'No Sent Items currently registered': 'Nenhum item Enviado atualmente registrado',
'No Sent Shipments': 'Nenhum carregamento enviado',
'No Settings currently defined': 'configuraçoes atualmente nao definida',
'No Shelter Services currently registered': 'nenhum serviço de abrigo atualmente registrado',
'No Shelter Types currently registered': 'Nenhum tipo de abrigo registrado atualmente',
'No Shelters currently registered': 'abrigos atualmente nao registrados',
'No Solutions currently defined': 'Sem Soluções actualmente definidas',
'No Staff Types currently registered': 'Sem Tipos de Funcionários actualmente registrados',
'No Staff currently registered': 'Sem Funcionários actualmente registrados',
'No Subscription available': 'Nenhuma assinatura disponível',
'No Subsectors currently registered': 'Nenhum sub setor atualmente registrado',
'No Support Requests currently registered': 'Nenhum suporte a pedido atualmente registrado',
'No Survey Answers currently entered.': 'Nenhuma resposta de pesquisa atualmente inscrita.',
'No Survey Answers currently registered': 'Nenhuma resposta a pesquisa atualmente registrada',
'No Survey Questions currently registered': 'Nenhuma pergunta de pesquisa atualmente registrada',
'No Survey Sections currently registered': 'Nenhuma seção de pesquisa atualmente registrada',
'No Survey Series currently registered': 'Nenhuma série de pesquisa atualmente registrada',
'No Survey Template currently registered': 'Nenhum Modelo de Pesquisa atualmente registrado',
'No Tasks with Location Data': 'Nenhuma tarefa com local de dados',
'No Teams currently registered': 'Nenhuma equipe atualmente registrada',
'No Themes currently defined': 'Nenhum Tema atualmente definido',
'No Tickets currently registered': 'Sem ingressos atualmente registrados',
'No Tracks currently available': 'nenhum rastreamento atualmente disponível',
'No Users currently registered': 'Nenhum Usuário actualmente registrado',
'No Volunteers currently registered': 'Nenhum Voluntário actualmente registrado',
'No Warehouses currently registered': 'Nenhum Armazém actualmente registrado',
'No access at all': 'Nenhum acesso',
'No access to this record!': 'Não há acesso a esta entrada!',
'No action recommended': 'Nenhuma acção recomendada',
'No conflicts logged': 'Nenhum conflito registrado',
'No contact information available': 'Nenhuma informações de contato disponível',
'No contacts currently registered': 'Nenhum contato atualmente registrado',
'No data in this table - cannot create PDF!': 'Nenhum dado nesta tabela- PDF não pode ser criado!',
'No databases in this application': 'Nenhum banco de dados neste aplicativo',
'No dead body reports available': 'Nenhum relatório de óbito disponível',
'No entries found': 'Nenhum artigo encontrado',
'No entries matching the query': 'Nenhuma entrada correspondente a consulta',
'No entry available': 'Nenhuma entrada disponível',
'No location known for this person': 'Nenhum local conhecido para essa pessoa',
'No locations found for members of this team': 'Locais não localizado para membros deste equipe',
'No log entries matching the query': 'Nenhuma entrada de log correspondente a consulta',
'No messages in the system': 'Nenhuma mensagem no sistema',
'No notes available': 'Notas não disponíveis',
'No peers currently registered': 'Não há pares registrados atualmente',
'No pending registrations found': 'Não foram encontrados registros pendentes',
'No pending registrations matching the query': 'Não foram encontrados registros pendentes correspondentes à consulta efetuada',
'No person record found for current user.': 'Nenhum registro de pessoa localizado para o usuário atual.',
'No problem group defined yet': 'Nenhum grupo problema definido ainda',
'No records matching the query': 'Sem registros correspondentes a consulta',
'No report available.': 'Nenhum Relatório disponível.',
'No reports available.': 'Não há relatórios disponíveis.',
'No reports currently available': 'Não há relatórios disponíveis actualmente',
'No requests found': 'Não foram foram encontrados pedidos',
'No resources currently reported': 'Recursos não reportados actualmente',
'No service profile available': 'Nenhum perfil de serviço disponível',
'No skills currently set': 'Não há habilidades atualmente configuradas',
'No staff members currently registered': 'Nenhum membro da equipe atualmente registrado',
'No staff or volunteers currently registered': 'Nenhum funcionário ou voluntário atualmente registrado',
'No status information available': 'Informação não está disponível',
'No synchronization': 'Sem Sincronização',
'No tasks currently registered': 'Nenhuma tarefa atualmente registrada',
'No template found!': 'Nenhum modelo localizado!',
'No units currently registered': 'Nenhuma unidade actualmente registrada',
'No volunteer availability registered': 'Sem disponibilidade de voluntário registrada',
'No volunteers currently registered': 'Nenhum Voluntário actualmente registrado',
'No': 'Não',
'Non-structural Hazards': 'Riscos não-estruturais',
'None (no such record)': 'Nenhum (sem registro )',
'None': 'Nenhum',
'Noodles': 'Macarrão',
'Not Applicable': 'Não se aplica',
'Not Authorised!': 'Não Autorizado!',
'Not Possible': 'Impossível',
'Not Set': 'não configurado',
'Not Authorized': 'Não autorizado',
'Not installed or incorrectly configured.': 'Não instalado ou Configurado Incorretamente.',
'Not yet a Member of any Group': 'Sem Associações registradas atualmente',
'Note Details': 'Detalhes da Nota',
'Note Status': 'Status da Nota',
'Note Type': 'Tipo de nota',
'Note added': 'Nota Incluída',
'Note deleted': 'NOTA Excluída',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Observer que essa lista mostra apenas voluntários ativos. Para ver todas as pessoas registradas no sistema, procure a partir deste ecrã em vez de',
'Note updated': 'Nota atualizada',
'Note': 'Nota',
'Notes': 'Observações',
'Notice to Airmen': 'Aviso ao piloto',
'Number of Columns': 'Número de colunas',
'Number of Patients': 'Número de Pacientes',
'Number of Rows': 'Número de Linhas',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Número de camas adicionais de tipo esperado tornar disponível nesta unidade nas próximas 24 horas.',
'Number of alternative places for studying': 'Número de locais alternativos para estudar',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Número de camas disponíveis/livre desse tipo nesta unidade no momento do relatório.',
'Number of deaths during the past 24 hours.': 'Número de mortes durante as últimas 24 horas.',
'Number of discharged patients during the past 24 hours.': 'Número de pacientes Descarregados durante as últimas 24 horas.',
'Number of doctors': 'Número de médicos',
'Number of in-patients at the time of reporting.': 'Número de pacientes internos na hora do relatório.',
'Number of newly admitted patients during the past 24 hours.': 'Número de pacientes admitidos durante as últimas 24 horas.',
'Number of non-medical staff': 'Número de funcionários não-médico',
'Number of nurses': 'Número de enfermeiras',
'Number of private schools': 'Número de escolas privadas',
'Number of public schools': 'Número de escolas públicas',
'Number of religious schools': 'Número de escolas religiosas',
'Number of residential units not habitable': 'Unidades de número residencial não habitáveis',
'Number of residential units': 'Número de unidades residenciais',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Número de leitos vagos/disponíveis nesse hospital. Atualizado automaticamente a partir de relatórios diários.',
'Number of vacant/available units to which victims can be transported immediately.': 'Número de unidades vagas/disponíveis em que vítimas podem ser transportadas imediatamente.',
'Number or Label on the identification tag this person is wearing (if any).': 'Número ou código na etiqueta de identificação que a pessoa está usando (se houver).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Número ou código utilizado para marcar o local de localização, por exemplo, código de bandeira, grade de coordenadas, número de referência do site ou similar (se disponível)',
'Number': 'número',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Número/percentagem da população afetada que é uma mulher entre 0 e 5 anos',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Número/percentagem da população afetadas do sexo feminino entre 13 e 17 anos',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Número/percentagem da população afetada que é Mulher com 18-25 anos',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Número/percentagem da população afetada que é Mulher com 26-60 anos',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Número/percentagem da população afetada que é Mulher com 6-12 anos',
'Number/Percentage of affected population that is Female & Aged 61+': 'Número/percentagem da população afetada que é Mulher > 61 anos',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Número/percentagem da população afetada que é Homem com 0-5 anos',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Número/percentagem da população afetada que é Homem com 13-17 anos',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Número/percentagem da população afetada que é Homem com 18-25 anos',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Número/percentagem de população afetada que é do sexo masculino & Idade 26-60',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Número/percentagem de população afectada que é do sexo masculino & Idade 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Número/percentagem da população afetada que é do sexo masculino & Idade 61+',
'Nursery Beds': 'Camas de berçario',
'Nutrition problems': 'Problemas nutricionais',
'Nutrition': 'Nutrição',
'OR Reason': 'Ou Razão',
'OR Status Reason': 'Ou razão do status',
'OR Status': 'Ou Status',
'OR a site OR a location': 'OU um site OU um local',
'Observer': 'observador',
'Obsolete': 'Obsoleto',
'Obstetrics/Gynecology': 'Obstetrícia/Ginecologia',
'Office Address': 'Endereço do escritório',
'Office Details': 'Detalhes do Escritório.',
'Office Phone': 'Telefone do escritório',
'Office added': 'Escritório',
'Office deleted': 'Escritório excluído',
'Office updated': 'Escritório atualizado',
'Office': 'escritório',
'Offices & Warehouses': 'Escritórios & Armazéns',
'Offices': 'Escritórios',
'Offline Sync (from USB/File Backup)': 'Off-line (Sync a partir do USB/arquivo de Backup)',
'Offline Sync': 'Sincronização desconectada.',
'Older people as primary caregivers of children': 'Pessoas mais velhas como responsáveis primárias de crianças',
'Older people in care homes': 'Pessoas mais velhas em casas de cuidados',
'Older people participating in coping activities': 'Pessoas idosas participando de atividades de enfrentamento',
'Older person (>60 yrs)': 'Idosos (>60 anos)',
'On by default? (only applicable to Overlays)': 'Por padrão? (apenas aplicável para Sobreposições)',
'On by default?': 'Por padrão?',
'One Time Cost': 'Custo Único',
'One time cost': 'Custo único',
'One-time costs': 'Custos únicos',
'One-time': 'Único',
'Oops! Something went wrong...': 'Oops! Algo deu errado...',
'Oops! something went wrong on our side.': 'Oops! algo deu errado do nosso lado.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opacidade (1 para opaco, 0 para totalmente transparente)',
'Open area': 'Abrir área',
'Open recent': 'Abrir recente',
'Open': 'Abrir',
'Operating Rooms': 'Salas operacionais',
'Optional link to an Incident which this Assessment was triggered by.': 'Link opcional para um incidente que esta avaliação foi desencadeada por.',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'opcional Se você desejar apresenta o estilo com base nos valores de um atributo, Selecione o atributo a ser utilizado aqui.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'opcional Em GeoServer, esta é a área de trabalho Namespace URI (não o nome!). Dentro do getCapabilities WFS, este é parte do nome FeatureType antes dos dois pontos (:).',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'optional. Em GeoServer, este é o espaço de Nomes URI. No getCapabilities WFS, este é o nome da parte FeatureType antes de os dois pontos (:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'opcional O nome de um elemento cujo conteúdo deve ser uma URL de um arquivo de imagem para Popups.',
'Optional. The name of an element whose contents should be put into Popups.': 'opcional O nome de um elemento cujo conteúdo deve ser adicionado em Popups.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'opcional O nome do esquema. Em Geoserver isto tem o formato http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Options': 'opções',
'Organization Details': 'Detalhes da Organização',
'Organization Registry': 'Registro de Organização',
'Organization added': 'Organização incluída',
'Organization deleted': 'Organização excluída',
'Organization updated': 'Organização atualizada',
'Organization': 'Organização',
'Organizations': 'Organizações',
'Origin of the separated children': 'Origem das crianças separadas',
'Origin': 'Origem',
'Other (describe)': 'Outros (descreva)',
'Other (specify)': 'Outros motivos (especifique)',
'Other Evidence': 'outras evidencias',
'Other Faucet/Piped Water': 'Outras Torneiras /Agua Encanada',
'Other Isolation': 'Outro Isolamento',
'Other Name': 'outro nome',
'Other activities of boys 13-17yrs before disaster': 'Outras atividades de garotos 17-13anos antes do desastre',
'Other activities of boys 13-17yrs': 'Outras atividades de garotos 13-17anos',
'Other activities of boys <12yrs before disaster': 'Outras atividades de garotos <12anos antes do desastre',
'Other activities of boys <12yrs': 'Outras atividades de garotos <12 anos',
'Other activities of girls 13-17yrs before disaster': 'Outras atividades de meninas 13-17anos antes do desastre',
'Other activities of girls 13-17yrs': 'Outras atividades de meninas 13-17anos',
'Other activities of girls<12yrs before disaster': 'Outras atividades de garotas<12anos antes do desastre',
'Other activities of girls<12yrs': 'Outras atividades de garotas<12anos',
'Other alternative infant nutrition in use': 'Nutrição infantil alternativa em uso',
'Other alternative places for study': 'Outros locais alternativos para estudo',
'Other assistance needed': 'Outra assistência necessária',
'Other assistance, Rank': 'Outra assistência, Número',
'Other current health problems, adults': 'Outros problemas actuais de saúde, adultos',
'Other current health problems, children': 'Outros problemas actuais de saúde, crianças',
'Other events': 'outros eventos',
'Other factors affecting school attendance': 'Outros fatores que afetam a frequencia escolar',
'Other major expenses': 'outras despesas importantes',
'Other non-food items': 'Outros itens não alimentícios',
'Other recommendations': 'Outras recomendações',
'Other residential': 'Outros residentes',
'Other school assistance received': 'Assistência de outra escola recebida',
'Other school assistance, details': 'Assistência de outra escola, detalhes',
'Other school assistance, source': 'Assistência de outra escola, origem',
'Other settings can only be set by editing a file on the server': 'Outras configurações só podem ser definidas editando um arquivo no servidor',
'Other side dishes in stock': 'Pratos outro lado em ações',
'Other types of water storage containers': 'Outros tipos de recipientes de armazenamento de água',
'Other ways to obtain food': 'Outras maneiras de obter alimentos',
'Other': 'outro',
'Outbound Mail settings are configured in models/000_config.py.': 'Definições de correio de saída são configurados em modelos/000_config..py',
'Outbox': 'Caixa de Saída',
'Outgoing SMS Handler': 'Saída do Manipulador SMS',
'Outgoing SMS handler': 'Manipulador de SMS de saída',
'Overall Hazards': 'Riscos gerais',
'Overhead falling hazard': 'Risco de queda sobrecarga',
'Overland Flow Flood': 'Por via terrestre Fluxo de Enchente',
'Owned Resources': 'Recursos Próprios',
'PAHO UID': 'OPS UID',
'PIN number': 'Número do pino',
'PIN': 'alfinete',
'PL Women': 'Mulheres PL',
'Pack': 'Pacote',
'Packs': 'Pacotes',
'Parameters': 'Parâmetros de Monitoramento',
'Parapets, ornamentation': 'Passarelas, ornamentação',
'Parent Office': 'Escritório Principal',
'Parent needs to be of the correct level': 'Pai precisa ser do nível correto',
'Parent needs to be set for locations of level': 'Principal precisa ser configurado para locais de nível',
'Parent needs to be set': 'Principal precisa ser configurado',
'Parent': 'parent',
'Parents/Caregivers missing children': 'Pais/cuidadores de crianças desaparecidas',
'Partial': 'Parcial',
'Participant': 'Participante',
'Pashto': 'Pachto',
'Pass': 'Passou',
'Passport': 'passaporte',
'Password': 'senha',
'Path': 'Caminho',
'Pathology': 'Patologia',
'Patients': 'Pacientes',
'Pediatric ICU': 'UTI Pediatrica',
'Pediatric Psychiatric': 'Psiquiátrico Pediátra',
'Pediatrics': 'Pediatria',
'Peer Details': 'Detalhes do Membro',
'Peer Registration Details': 'Detalhes de Registro do Par',
'Peer Registration Request': 'Requerido Registro do Par',
'Peer Registration': 'Registro de par',
'Peer Type': 'Por Tipo',
'Peer UID': 'Por UID',
'Peer added': 'Membro adicionado',
'Peer deleted': 'Membro excluído',
'Peer not allowed to push': 'Peer não permitido para envio',
'Peer registration request added': 'Registro Requerido do Par adicionado',
'Peer registration request deleted': 'Registro requerido do par excluído',
'Peer registration request updated': 'Registro requerido do par atualizado',
'Peer updated': 'PAR ATUALIZADO',
'Peer': 'Membro',
'Peers': 'Pares',
'Pending Requests': 'PEDIDOS PENDENTES',
'Pending': 'pendente',
'People Needing Food': 'Pessoas precisando de alimento',
'People Needing Shelter': 'Pessoas precisando de abrigo',
'People Needing Water': 'Pessoas precisando de água',
'People Trapped': 'Pessoas presas',
'People': 'pessoas',
'Performance Rating': 'Classificação da Performance',
'Person 1': 'Pessoa 1',
'Person 1, Person 2 are the potentially duplicate records': 'Pessoa 1, Pessoa 2 são os registros potencialmente duplicados',
'Person 2': 'Pessoa 2',
'Person De-duplicator': 'Anti-duplicador de Pessoas',
'Person Details': 'Detalhes Pessoais',
'Person Finder': 'Buscador de pessoas',
'Person Registry': 'Registro De Pessoa',
'Person added to Group': 'Membro do grupo incluído',
'Person added to Team': 'Membro do grupo incluído',
'Person added': 'Pessoa Incluída',
'Person deleted': 'Pessoa removida',
'Person details updated': 'Detalhes pessoais actualizados',
'Person interviewed': 'Pessoa entrevistada',
'Person missing': 'Pessoa perdida',
'Person reporting': 'Pessoa relatando',
'Person who has actually seen the person/group.': 'Pessoa que tenha realmente visto a pessoa/Grupo.',
'Person': 'pessoa',
'Person/Group': 'Pessoa/Grupo',
'Personal Data': 'Dados pessoais',
'Personal Effects Details': 'Detalhes dos Efeitos Pessoais',
'Personal Effects': 'Efeitos pessoal',
'Personal Map': 'Mapa De Pessoal',
'Personal Profile': 'Perfil pessoal',
'Personal impact of disaster': 'Impacto de desastre pessoal',
'Personal': 'Pessoal',
'Persons in institutions': 'Pessoas em instituições',
'Persons with disability (mental)': 'Pessoas com deficiência (mental)',
'Persons with disability (physical)': 'Pessoas com deficiência (física)',
'Persons': 'Pessoas',
'Phone 1': 'Telefone 1',
'Phone 2': 'Telefone 2',
'Phone': 'telefone',
'Phone/Business': 'Telefone comercial',
'Phone/Emergency': 'Telefone de emergência',
'Phone/Exchange (Switchboard)': 'Telefone/Câmbio (Central)',
'Phone/Exchange': 'Telefone/Exchange',
'Photo Details': 'Foto com detalhes',
'Photo Taken?': 'Foto tomada?',
'Photo added': 'Foto adicionada (ou incluída)',
'Photo deleted': 'Foto deletada (apagada, excluída em definitivo)',
'Photo updated': 'Foto ATUALIZADA',
'Photo': 'foto',
'Photograph': 'Fotografia ou Arte Fotográfica',
'Photos': 'fotos, imagens fotográficas',
'Physical Description': 'Descrição física',
'Physical Safety': 'Segurança Física',
'Picture upload and finger print upload facility': 'Fazer upload de imagem e impressão dedo upload facility',
'Picture': 'Imagem',
'Place of Recovery': 'Local de recuperação',
'Place': 'Local',
'Places for defecation': 'Locais para a defecação',
'Places the children have been sent to': 'Lugares que as crianças foram enviadas para',
'Planner': 'Planejador',
'Playing': 'Reproduzindo',
'Please correct all errors.': 'Por favor CORRIJA todos os erros.',
'Please enter a First Name': 'Por favor insira um primeiro nome',
'Please enter a first name': 'Por favor insira um primeiro nome',
'Please enter a person': 'Insira uma pessoa',
'Please enter a site OR a location': 'Por favor digite um site ou um local',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Por favor Digite as primeiras letras do Pessoa/Grupo para o AutoCompletar.',
'Please enter the recipient': 'Por favor Digite o destinatário',
'Please fill this!': 'Por favor preencha isso!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': 'Por favor Forneça a URL da página que você está fazendo referência à, uma descrição do que você esperava que acontecesse & O que realmente aconteceu. Se um bilhete foi emitido então por favor forneça o ID do bilhete.',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Por favor Forneça a URL da página que você está fazendo referência à, uma descrição do que você esperava que acontecesse & O que realmente aconteceu.',
'Please report here where you are:': 'Por favor informe aqui onde você está:',
'Please select another level': 'Por favor selecione outro nível',
'Please select': 'Por favor Selecione',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Por favor inscrever-se com seu celular como isso nos permite lhe enviar mensagens de texto. Por favor inclua código de Área total.',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Por favor especifique quaisquer problemas e obstáculos com a manipulação correcta da doença, em detalhes (em números, se for o caso). Pode também dar sugestões - a situação pode ser melhorada.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, incluindo um histórico do registro se ele estiver sendo atualizado.',
'Please use this field to record any additional information, including any Special Needs.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, incluindo quaisquer necessidades especiais.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, como IDs de instância Ushahidi. Incluir o histórico do registo se este fôr actualizado.',
'Pledge Support': 'Suporte da promessa',
'Point': 'Ponto',
'Poisoning': 'Envenenamento',
'Poisonous Gas': 'Gás venenoso',
'Police': 'Polícia',
'Pollution and other environmental': 'Poluição ambiental e outras',
'Polygon reference of the rating unit': 'Polígono de referência da unidade de classificação',
'Polygon': 'Polígono',
'Poor': 'Pobre',
'Population Statistic Details': 'População Estatística Detalhes',
'Population Statistic added': 'População Estatística incluída',
'Population Statistic deleted': 'População Estatística excluído',
'Population Statistic updated': 'População De Estatística atualizada',
'Population Statistics': 'Estatísticas De população',
'Population and number of households': 'população e número de residentes',
'Population': 'População',
'Popup Fields': 'Pop-up Campos',
'Popup Label': 'Rótulo do pop-up',
'Porridge': 'mingau',
'Port Closure': 'Porta Encerramento',
'Port': 'porta',
'Portuguese (Brazil)': 'Português (Brasil)',
'Portuguese': 'Português',
'Position Catalog': 'Catálogo de posições',
'Position Details': 'detalhamento do cargo',
'Position added': 'Cargo inserido',
'Position deleted': 'Cargo excluído',
'Position updated': 'Posição atualizada',
'Position': 'Posição',
'Positions': 'cargos',
'Postcode': 'Código Postal',
'Poultry restocking, Rank': 'Reabastecimento de aves domésticas, posição',
'Poultry': 'Aves',
'Pounds': 'Libras',
'Power Failure': 'Falha de Energia',
'Powered by Sahana Eden': 'Desenvolvido pela Sahana Eden',
'Pre-cast connections': 'Conexões-cast pré',
'Preferred Name': 'Nome Preferido',
'Pregnant women': 'Mulheres grávidas',
'Preliminary': 'Preliminar',
'Presence Condition': 'Condição de Presença',
'Presence Log': 'Log de Presença',
'Presence': 'Presença',
'Previous': 'Anterior',
'Primary Name': 'Nome Principal',
'Primary Occupancy': 'Principal Ocupação',
'Priority from 1 to 9. 1 is most preferred.': 'Prioridade de 1 a 9. 1 é preferível',
'Priority': 'priority',
'Private': 'Privado',
'Problem Administration': 'Gestão de problema',
'Problem Details': 'Detalhes do Problema',
'Problem Group': 'Grupo do Problema',
'Problem Title': 'Título do Problema',
'Problem added': 'Problema incluído',
'Problem connecting to twitter.com - please refresh': 'Problema ao conectar-se ao twitter.com, tente novamente',
'Problem deleted': 'Problema Excluído',
'Problem updated': 'Problema Atualizado',
'Problem': 'Problema do',
'Problems': 'Problemas',
'Procedure': 'Procedimento',
'Process Received Shipment': 'Processo recebeu embarque',
'Process Shipment to Send': 'Processar remessa a enviar',
'Profile': 'profile',
'Project Details': 'Detalhes do Projeto',
'Project Status': 'Status do Projeto',
'Project Tracking': 'Acompanhamento do Projeto',
'Project added': 'Projeto incluído',
'Project deleted': 'Projeto Excluído',
'Project has no Lat/Lon': 'Projeto não possui Latitude/Longitude',
'Project updated': 'Projeto ATUALIZADO',
'Project': 'projeto',
'Projection Details': 'Detalhes da Projeção',
'Projection added': 'Projeção incluída',
'Projection deleted': 'Projeção excluída',
'Projection updated': 'Projecção atualizada',
'Projection': 'Projeção',
'Projections': 'projeções',
'Projects': 'projetos',
'Property reference in the council system': 'Referência de propriedade no sistema do conselho',
'Protected resource': 'Recurso protegido',
'Protection': 'Protecção',
'Provide Metadata for your media files': 'Fornecer Metadados para os seus ficheiros media',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Fornecer um retrato opcional de todo o edifício ou áreas danificadas. Pontos danos indicar.',
'Proxy-server': 'Servidor Proxy',
'Psychiatrics/Adult': 'Psiquiatras/Adulto',
'Psychiatrics/Pediatric': 'Psiquiatras/Pediátrica',
'Public Event': 'Evento público',
'Public and private transportation': 'Transporte Público e Privado',
'Public assembly': 'Assembléia Pública',
'Public': 'Público',
'Pull tickets from external feed': 'Pull de bilhetes alimentação externa',
'Purchase Date': 'Data de aquisição',
'Push tickets to external system': 'BILHETES Push PARA sistema externo',
'Pyroclastic Flow': 'Pyroclastic FLuxo',
'Pyroclastic Surge': 'Pyroclastic Aumento',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Módulo Serial Python não disponíveis no a execução Python-isto tem de instalar para ativar o Modem',
'Python needs the ReportLab module installed for PDF export': 'O módulo de ReportLab não disponíveis na execução Python - isto requer a instalação para a entrega em PDF!',
'Quantity Committed': 'Quantidade Comprometida',
'Quantity Fulfilled': 'Quantidade Preenchida',
'Quantity in Transit': 'Quantidade em Trânsito',
'Quantity': 'Quantidade',
'Quarantine': 'Quarentena',
'Queries': 'Buscas',
'Query': 'Busca',
'Queryable?': 'Consultável?',
'RC frame with masonry infill': 'Quadro de RC com aterros de alvenaria',
'RECORD A': 'Registro A',
'RECORD B': 'REGISTRO B',
'Race': 'Corrida',
'Radio Callsign': 'Rádio Chamada',
'Radiological Hazard': 'Risco Radiológico',
'Radiology': 'Radiologia',
'Railway Accident': 'Acidente Ferroviário',
'Railway Hijacking': 'Sequestro Ferroviário',
'Rain Fall': 'Queda de Chuva',
'Rapid Assessment Details': 'Rápida Avaliação Detalhes',
'Rapid Assessment added': 'Rapid Avaliação incluído',
'Rapid Assessment deleted': 'Rápida Avaliação excluído',
'Rapid Assessment updated': 'Rapid avaliação atualizada',
'Rapid Assessment': 'Avaliação Rápida',
'Rapid Assessments & Flexible Impact Assessments': 'Rapid Avaliações & Flexível Impacto Avaliações',
'Rapid Assessments': 'Rapid Avaliações',
'Rapid Close Lead': 'Fechamento Lead rápido',
'Rapid Data Entry': 'Entrada de dados rápida',
'Rating Scale': 'Escala de avaliação',
'Raw Database access': 'Acesso bruto a Base de dados',
'Read-Only': 'somente para leitura',
'Read-only': 'somente para leitura',
'Receive Items': 'Aceitar itens',
'Receive New Shipment': 'Receber Novos Embarques',
'Receive Shipment': 'Receber carregamento',
'Receive this shipment?': 'Receber esse embarque?',
'Receive': 'Receber',
'Received By Person': 'Recebido Por Pessoa',
'Received By': 'Recebido Por',
'Received Item Details': 'Detalhes do item recebido',
'Received Item deleted': 'Recebido item excluído',
'Received Item updated': 'Item recebido atualizado',
'Received Shipment Details': 'Lista de remessa de mercadorias/produtos',
'Received Shipment canceled and items removed from Inventory': 'Recebido carregamento cancelado e itens removidos do inventário',
'Received Shipment canceled': 'Remessa de produtos cancelada',
'Received Shipment updated': 'Carregamento Recebido Atualizado',
'Received Shipments': 'Carregamento de produtos recebido',
'Received': 'Recebido',
'Receiving and Sending Items': 'Receber e enviar Itens',
'Recipient': 'destinatário',
'Recipients': 'destinatários',
'Recommendations for Repair and Reconstruction or Demolition': 'Recomendações para reparo e reconstrução ou demolição',
'Record Details': 'Detalhes do Registro',
'Record Saved': 'Registro Gravado',
'Record added': 'Registro incluído',
'Record any restriction on use or entry': 'Registro de qualquer restrição à utilização ou entrada',
'Record deleted': 'Registro excluído',
'Record last updated': 'Último registro atualizado',
'Record not found!': 'Registro não encontrado!',
'Record not found': 'Registro não encontrado',
'Record updated': 'registro atualizado',
'Record': 'registro',
'Recording and Assigning Assets': 'Ativos de Gravação e Designação',
'Records': 'Registros',
'Recovery Request added': 'Pedido de recuperação adicionado',
'Recovery Request deleted': 'Pedido de recuperação apagado',
'Recovery Request updated': 'Pedido de recuperação atualizado',
'Recovery Request': 'pedido de recuperação',
'Recovery Requests': 'Pedidos de recuperação',
'Recovery': 'recuperação',
'Recruitment': 'Recrutamento',
'Recurring Cost': 'Custo recorrente',
'Recurring cost': 'Custo recorrente',
'Recurring costs': 'Custos recorrentes',
'Recurring': 'Recorrente',
'Red Cross / Red Crescent': 'Cruz Vermelha / Red Crescent',
'Red': 'vermelho',
'Reference Document': 'Documento de referência',
'Refresh Rate (seconds)': 'Taxa de Atualização (Segundos)',
'Region Location': 'Localizaçao da regiao',
'Regional': 'regional',
'Regions': 'Regiões',
'Register Person into this Camp': 'Registrar Pessoa neste Acampamento',
'Register Person into this Shelter': 'REGISTRAR PESSOA PARA ESTE Abrigo',
'Register Person': 'REGISTRAR PESSOA',
'Register them as a volunteer': 'Registrá-los como voluntários',
'Register': 'registro',
'Registered People': 'Pessoas Registradas',
'Registered users can': 'Os usuários registrados podem',
'Registration Details': 'Detalhes da Inscrição',
'Registration added': 'Inscrição adicionada',
'Registration entry deleted': 'Inscrição excluída',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Registro ainda está pendente de aprovação do Aprovador (%s) - Por favor, aguarde até a confirmação recebida.',
'Registration updated': 'Inscrição atualizada',
'Registration': 'Inscrição',
'Rehabilitation/Long Term Care': 'Reabilitação/Cuidados de Longo Termo',
'Reinforced masonry': 'Alvenaria reforçada',
'Rejected': 'Rejeitado',
'Relief Team': 'Equipe de socorro',
'Relief': 'Alivio',
'Religion': 'Religião',
'Religious Leader': 'Líder religioso',
'Religious': 'Religiosas',
'Relocate as instructed in the <instruction>': 'Relocalizar conforme instruído no',
'Remove Asset from this event': 'Remover ativo deste evento',
'Remove Asset from this scenario': 'Remover ativo deste cenário',
'Remove Facility from this event': 'Remover recurso deste evento',
'Remove Facility from this scenario': 'Remover recurso deste cenário',
'Remove Human Resource from this event': 'Remover recursos humanos a partir deste evento',
'Remove Human Resource from this scenario': 'Remover recursos humanos a partir deste cenário',
'Remove Item from Inventory': 'Remover Item do Inventário',
'Remove Map Profile from this event': 'Remover Mapa de configuração a partir deste evento',
'Remove Map Profile from this scenario': 'Remover Mapa de configuração a partir deste cenário',
'Remove Person from Group': 'Excluir membro',
'Remove Person from Team': 'Excluir membro',
'Remove this asset from this event': 'Remover este recurso a partir deste evento',
'Remove this asset from this scenario': 'Remover este recurso deste cenário',
'Remove': 'Remover',
'Removed from Group': 'Associação Excluída',
'Removed from Team': 'Associação Excluída',
'Repair': 'REPARO',
'Repaired': 'Reparado',
'Repeat your password': 'REPITA sua senha',
'Replace if Master': 'Substituir se Principal',
'Replace if Newer': 'Substituir se o Mais Recente',
'Replace': 'TROCAR',
'Report Another Assessment...': 'Adicionar Outro Relatório De Avaliação....',
'Report Details': 'Detalhes do Relatório',
'Report Resource': 'Reportar Recursos',
'Report Types Include': 'Tipos de relatório incluem',
'Report added': 'Relatório incluído',
'Report deleted': 'Relatório removido',
'Report my location': 'Relate meu local',
'Report the contributing factors for the current EMS status.': 'Reportar os factores que contribuem para a situação EMS actual.',
'Report the contributing factors for the current OR status.': 'Reportar os factores que contribuem para a situação OR actual.',
'Report them as found': 'Reportar como encontrados',
'Report them missing': 'Reportar como perdidos',
'Report updated': 'Relatório atualizado',
'Report': 'Relatório',
'Reporter Name': 'Nome do Relator',
'Reporter': 'Relator',
'Reporting on the projects in the region': 'Relatórios sobre os projetos na região',
'Reports': 'Relatórios',
'Request Added': 'Pedido Incluído',
'Request Canceled': 'Pedido Cancelado',
'Request Details': 'Detalhes do Pedido',
'Request From': 'Pedido De',
'Request Item Details': 'Detalhes do item de pedido',
'Request Item added': 'Item incluído no pedido',
'Request Item deleted': 'Item de pedido excluído',
'Request Item from Available Inventory': 'PEDIDO DE Item de Inventário Disponível',
'Request Item updated': 'Pedido actualizado',
'Request Item': 'Item de pedido',
'Request Items': 'Itens de pedido',
'Request Status': 'Status do Pedido',
'Request Type': 'Tipo de Pedido',
'Request Updated': 'Solicitação atualizada',
'Request added': 'Pedido adicionado',
'Request deleted': 'Solicitação excluída',
'Request for Role Upgrade': 'Pedido de upgrade de função',
'Request updated': 'Pedido actualizado',
'Request': 'Pedido',
'Request, Response & Session': 'Pedido, Resposta & Sessão',
'Requested By Facility': 'Solicitado Pela Instalação',
'Requested By Site': 'Solicitado Por Site',
'Requested By': 'Solicitado Por',
'Requested From': 'Solicitada a Partir de',
'Requested Items': 'Itens solicitados',
'Requested by': 'Solicitado Por',
'Requested on': 'Em solicitada',
'Requested': 'solicitado',
'Requester': 'Solicitante',
'Requests Management': 'Gerenciamento de Pedidos',
'Requests': 'Pedidos',
'Requires Login!': 'É necessário fazer login!',
'Rescue and recovery': 'Resgate e recuperação',
'Reset Password': 'restabelecer senha',
'Reset': 'Restaurar',
'Resolve Conflict': 'Resolver Conflito',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolva link que levará até uma nova tela que ajudará a resolver esses registros duplicados e atualizar o banco de dados.',
'Resolve': 'Resolver',
'Resource Details': 'Detalhes do recurso',
'Resource added': 'Recurso incluído',
'Resource deleted': 'Recurso Excluído',
'Resource updated': 'Recurso atualizado',
'Resource': 'Recurso',
'Resources': 'Recursos',
'Respiratory Infections': 'Infecções respiratórias',
'Response': 'Resposta',
'Restricted Access': 'Acesso Restrito',
'Restricted Use': 'Uso restrito',
'Results': 'results',
'Retail Crime': 'Crime a varejo',
'Retrieve Password': 'Recuperar Senha',
'Return to Request': 'Retornar ao pedido',
'Return': 'Retorno',
'Returned From': 'Retornado a partir de',
'Returned Status': 'Retornado Status',
'Returned': 'Retornado',
'Review Incoming Shipment to Receive': 'Revisão da Remessa de Entrada para Receber',
'Rice': 'Arroz',
'Riot': 'Motim',
'River Details': 'Detalhes do Rio',
'River added': 'Rio adicionado',
'River deleted': 'Rio deletado',
'River updated': 'Rio atualizado',
'River': 'Rio',
'Rivers': 'Rios',
'Road Accident': 'Acidente na rua/estrada',
'Road Closed': 'Rua/Estrada fechada',
'Road Conditions': 'Condições da Estrada',
'Road Delay': 'Atraso de Estrada',
'Road Hijacking': 'Sequestro de Estrada',
'Road Usage Condition': 'Condição de Uso de Estrada',
'Role Details': 'Detalhes da Função',
'Role Required': 'Função requerida',
'Role Updated': 'Funções atualizadas',
'Role added': 'Regra incluída',
'Role deleted': 'Função excluída',
'Role updated': 'Funções atualizadas',
'Role': 'Função',
'Role-based': 'Baseada em regra',
'Roles Permitted': 'Funções Permitidas',
'Roles': 'Funções',
'Roof tile': 'Telhado lado a lado',
'Roofs, floors (vertical load)': 'Telhados, pisos (carga vertical)',
'Room Details': 'Detalhes da sala',
'Room added': 'Sala incluída',
'Room deleted': 'Sala excluída',
'Room updated': 'Sala atualizada',
'Room': 'Sala',
'Rooms': 'Salas',
'Roster': 'Lista',
'Row Choices (One Per Line)': 'Opções da linha (Um por linha)',
'Rows in table': 'Linhas na tabela',
'Rows selected': 'Linhas Selecionadas',
'Run Functional Tests': 'Executar testes funcionais',
'Run Interval': 'Intervalo de execução',
'Running Cost': 'Custo corrente',
'Safe environment for vulnerable groups': 'Ambiente seguro para grupos vulneráveis',
'Safety Assessment Form': 'Formulário de avaliação de segurança',
'Safety of children and women affected by disaster?': 'Segurança das crianças e mulheres afetadas pela catástrofe?',
'Sahana Administrator': 'Sahana AdmiNistrador',
'Sahana Blue': 'Sahana Azul',
'Sahana Community Chat': 'Sahana COMUNIDADE de BATE-PAPO',
'Sahana Eden <=> Other': 'Sahana Eden <=> Outros',
'Sahana Eden <=> Sahana Eden': 'Sahana Éden <=> Sahana Éden',
'Sahana Eden Humanitarian Management Platform': 'plataforma de gerenciamento humanitário Sahana Éden',
'Sahana Eden Website': 'SITE Sahana Éden',
'Sahana Green': 'Sahana Verde',
'Sahana access granted': 'Acesso Sahana CONCEDIDO',
'Salted Fish': 'Peixe Salgado',
'Sanitation problems': 'Problemas de saneamento',
'Satellite Office': 'Escritório experimental',
'Satellite': 'satélite',
'Saturday': 'SAturday',
'Save': 'armazenar',
'Saved.': 'armazenado.',
'Saving...': 'Guardando...',
'Scale of Results': 'Nível de Resultados',
'Scenario Details': 'Detalhes do Cenário',
'Scenario added': 'Cenário incluído',
'Scenario deleted': 'Cenário excluído',
'Scenario updated': 'Cenário atualizado',
'Scenario': 'Cenário',
'Scenarios': 'Cenários',
'Schedule': 'Horário',
'Schema': 'Esquema',
'School Closure': 'Encerramento Escolar',
'School Lockdown': 'Bloqueio escolar',
'School Teacher': 'Professor de escola',
'School activities': 'Actividades escolares',
'School assistance': 'Assistência escolar',
'School attendance': 'Presença escolar',
'School destroyed': 'Escola Destruída',
'School heavily damaged': 'Escola fortemente danificada',
'School tents received': 'Tendas da escola recebidas',
'School tents, source': 'Tendas de escolha, origem',
'School used for other purpose': 'Escola utilizada para outros fins',
'School': 'Escola',
'School/studying': 'Escola/estudando',
'Schools': 'Escolas',
'Search Activities': 'procurar atividades',
'Search Activity Report': 'Relatório de pesquisa de atividades',
'Search Addresses': 'procurar endereços',
'Search Alternative Items': 'Procurar itens alternativos',
'Search Assessment Summaries': 'Procura De Avaliação De RESUMOS',
'Search Assessments': 'Avaliações de procura',
'Search Asset Assignments': 'Procurar ATIVO Designações',
'Search Asset Log': 'Procurar log de ativo',
'Search Assets': 'Procurar Recursos',
'Search Baseline Type': 'Procurar Typo de Base',
'Search Baselines': 'Procurar Bases',
'Search Brands': 'Procurar Marcas',
'Search Budgets': 'Procura Orçamentos',
'Search Bundles': 'PACOTES Configuráveis de procura',
'Search Camp Services': 'Procurar Serviços de Acampamento',
'Search Camp Types': 'Procurar Tipos De Acampamento',
'Search Camps': 'Procurar acampamentos',
'Search Catalog Items': 'Itens de procura De Catálogo',
'Search Catalogs': 'Procurar nos Catálogos',
'Search Certificates': 'Procurar Certificados',
'Search Certifications': 'Procurar Certificações',
'Search Checklists': 'Listas De procura',
'Search Cluster Subsectors': 'Procura De Cluster Subsectores',
'Search Clusters': 'Clusters de procura',
'Search Commitment Items': 'Itens de procura Compromisso',
'Search Commitments': 'Compromissos de procura',
'Search Competencies': 'Procurar Competências',
'Search Competency Ratings': 'Procurar Indices de Competência',
'Search Contact Information': 'Procurar informações de contato',
'Search Contacts': 'Buscar contatos',
'Search Course Certificates': 'procura Certificados de Curso',
'Search Courses': 'Procurar Cursos',
'Search Credentials': 'Credenciais de busca',
'Search Documents': 'Pesquisar documentos',
'Search Donors': 'Procura de Doadores',
'Search Entries': 'Pesquisar Entradas',
'Search Events': 'Pesquisar Eventos',
'Search Facilities': 'Pesquisar Instalações',
'Search Feature Layers': 'Pesquisar camadas do dispositivo',
'Search Flood Reports': 'Pesquisar relatórios de inundação',
'Search Groups': 'Buscar Grupos',
'Search Human Resources': 'Pesquise recursos humanos.',
'Search Identity': 'Buscar Identidade',
'Search Images': 'Procurar Imagens',
'Search Impact Type': 'Procurar Tipo de Impacto',
'Search Impacts': 'Procurar Impactos',
'Search Incident Reports': 'Procurar Relatórios de Incidentes',
'Search Inventory Items': 'Procurar Entradas De Inventário',
'Search Inventory items': 'Procurar Entradas De Inventário',
'Search Item Categories': 'Buscar categorias de Item',
'Search Item Packs': 'Buscar pocotes de itens',
'Search Items': 'Buscar Itens',
'Search Job Roles': 'Pesquise papéis de trabalho',
'Search Keys': 'Procurar chaves',
'Search Kits': 'Procurar kits',
'Search Layers': 'Procurar camadas',
'Search Level 1 Assessments': 'Procurar Avaliações Nível 1',
'Search Level 2 Assessments': 'Procurar Avaliações Nível 2',
'Search Locations': 'Procurar Localidades',
'Search Log Entry': 'Procura de entrada de Log',
'Search Map Profiles': 'Pesquise mapa de configurações.',
'Search Markers': 'Marcadores De procura',
'Search Members': 'Procurar Membro',
'Search Membership': 'Procurar filiação',
'Search Memberships': 'Pesquisar Associações',
'Search Missions': 'Procurar Missões',
'Search Need Type': 'Procura Precisa De Tipo',
'Search Needs': 'Procura precisa',
'Search Notes': 'Notes procura',
'Search Offices': 'Escritórios de procura',
'Search Organizations': 'Pesquisar Organizações',
'Search Peer': 'PROCURA Par',
'Search Personal Effects': 'Procura objetos pessoais',
'Search Persons': 'Buscar Membros',
'Search Photos': 'Procura Fotos',
'Search Population Statistics': 'Procurar Estatística de População',
'Search Positions': 'Procura de Posições',
'Search Problems': 'Procura de Problemas',
'Search Projections': 'Projeções de procura',
'Search Projects': 'Procura de Projetos',
'Search Rapid Assessments': 'Procura de Avaliações Rápidas',
'Search Received Items': 'Procura de Itens Recebidos',
'Search Received Shipments': 'Embarques de procura Recebidos',
'Search Records': 'registros de procura',
'Search Registations': 'Procurar Registros',
'Search Registration Request': 'Pedido de registro de procura',
'Search Report': 'Procurar Relatório',
'Search Reports': 'Procurar Relatórios',
'Search Request Items': 'Pedido de procura de Itens',
'Search Request': 'pedido de pesquisa',
'Search Requested Items': 'Procura de itens solicitados',
'Search Requests': 'Procura de solicitações',
'Search Resources': 'Pesquisa de recursos',
'Search Rivers': 'Procurar Rios',
'Search Roles': 'Pesquisa de papéis',
'Search Rooms': 'Procurar Salas',
'Search Scenarios': 'Procurar cenários',
'Search Sections': 'As Seções de procura',
'Search Sectors': 'Procurar Setores',
'Search Sent Items': 'Procurar Itens Enviados',
'Search Sent Shipments': 'Procurar Despachos Enviados',
'Search Service Profiles': 'Procurar Perfis de Serviço',
'Search Settings': 'Definições de Pesquisa',
'Search Shelter Services': 'Procura Abrigo de serviços',
'Search Shelter Types': 'Procura tipos de Abrigo',
'Search Shelters': 'Procurar Abrigos',
'Search Skill Equivalences': 'Procurar equivalencias de habilidades',
'Search Skill Provisions': 'Procurar Disposições de habilidade',
'Search Skill Types': 'Pesquisar Tipos de Habilidades',
'Search Skills': 'Pesquisar Habilidades',
'Search Solutions': 'Pesquisar Soluções',
'Search Staff Types': 'Busca de tipo de pessoal',
'Search Staff or Volunteer': 'Procurar Funcionário ou Voluntário',
'Search Staff': 'Busca de pessoal',
'Search Status': 'Busca de status',
'Search Subscriptions': 'Busca de assinaturas',
'Search Subsectors': 'Buscar subsetores',
'Search Support Requests': 'Pedidos de suporte a pesquisa',
'Search Tasks': 'Tarefa de Pesquisa',
'Search Teams': 'Times de pesquisa',
'Search Themes': 'Temas de pesquisa',
'Search Tickets': 'Buscar Bilhetes',
'Search Tracks': 'Procurar Trilhas',
'Search Trainings': 'Buscar Treinamentos',
'Search Twitter Tags': 'Procurar Twitter Tags',
'Search Units': 'Procura Unidades',
'Search Users': 'Procurar Usuários',
'Search Volunteer Availability': 'Buscar Disponibilidade para Voluntáriado',
'Search Volunteers': 'Procura Voluntários',
'Search Warehouses': 'Procurar Depósitos',
'Search and Edit Group': 'Procurar e editar GRUPO',
'Search and Edit Individual': 'Procurar e Editar Individual',
'Search for Staff or Volunteers': 'Pesquise por funcionários ou voluntários',
'Search for a Location by name, including local names.': 'Pesquisar local por nome, incluindo nomes locais.',
'Search for a Person': 'Procurar Pessoa',
'Search for a Project': 'Procurar Projecto',
'Search for a shipment by looking for text in any field.': 'Procurar carga fazendo uma pesquisa de texto em qualquer campo.',
'Search for a shipment received between these dates': 'Procurar carga recebida entre estas datas',
'Search for an Organization by name or acronym': 'Procurar por uma Organização por nome ou iniciais',
'Search for an Organization by name or acronym.': 'Procurar por uma organização por nome ou iniciais.',
'Search for an asset by text.': 'Pesquisar um recurso por texto.',
'Search for an item by category.': 'Procurar por categoria.',
'Search for an item by text.': 'Procurar por texto.',
'Search for asset by country.': 'Procurar bens por país.',
'Search for office by country.': 'Procurar escritórios por país.',
'Search for office by organization.': 'Procurar escritórios por organização.',
'Search for office by text.': 'Procura por texto do gabinete.',
'Search for warehouse by country.': 'Pesquise por depósito por país.',
'Search for warehouse by organization.': 'Pesquise por depósito por organização.',
'Search for warehouse by text.': 'Pesquise por depósito via campo-texto.',
'Search here for a person record in order to:': 'Buscar aqui por um registro de pessoa a fim de:',
'Search messages': 'Mensagens de Procura',
'Search': 'Pesquisar',
'Searching for different groups and individuals': 'Procurar diferentes grupos e indivíduos',
'Secondary Server (Optional)': 'Servidor secundário (opcional)',
'Seconds must be a number between 0 and 60': 'Segundos deve ser um número entre 0 e 60',
'Section Details': 'Seção Detalhes',
'Section deleted': 'Seção excluído',
'Section updated': 'Seção atualizada',
'Sections': 'Seções',
'Sector Details': 'Detalhes do Setor',
'Sector added': 'Sector incluído',
'Sector deleted': 'Sector apagado',
'Sector updated': 'Setor atualizado',
'Sector': 'setor',
'Sector(s)': 'Setor(es)',
'Sectors': 'Setores',
'Security Status': 'Status de Segurança',
'Security problems': 'Problemas de Segurança',
'See All Entries': 'Ver todas as entradas',
'See all': 'Ver tudo',
'See unassigned recovery requests': 'Consulte Pedidos de recuperação designado',
'Seen': 'Visto',
'Select Items from the Request': 'Selecionar itens do pedido',
'Select Items from this Inventory': 'Selecionar itens a partir deste Inventário',
'Select Organization': 'Selecionar Organização',
'Select a location': 'Selecionar um local',
'Select a question from the list': 'Selecione uma pergunta a partir da lista',
'Select a range for the number of total beds': 'Selecione um intervalo para o número de camas total',
'Select all that apply': 'Selecione todas as que se applicam',
'Select an Organization to see a list of offices': 'Selecione uma organização para ver uma lista de escritórios',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Selecione as sobreposições de avaliação e actividades relacionadas com cada necessidade para identificar as lacunas.',
'Select the person assigned to this role for this project.': 'Selecione a pessoa designada para essa função neste projeto.',
'Select to show this configuration in the Regions menu.': 'Selecione para mostrar essa configuração no menu regiões.',
'Select': 'select',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selecione se vau utilizar um Modem, Tropo ou outro Gateway para enviar SMS',
'Send Alerts using Email &/or SMS': 'Envio de alertas usando e-mail e/ou SMS',
'Send Commitment as Shipment': 'Enviar compromisso como carregamento',
'Send New Shipment': 'Enviar nova remessa',
'Send Notification': 'Enviar notificação',
'Send Shipment': 'Enviar Carregamento',
'Send a message to this person': 'Enviar uma mensagem para esta pessoa',
'Send a message to this team': 'Enviar uma mensagem para essa equipe',
'Send from %s': 'Enviar de %s',
'Send message': 'Enviar mensagem',
'Send new message': 'Enviar nova mensagem',
'Send': 'Envie',
'Sends & Receives Alerts via Email & SMS': 'Envia & Recebe Alertas via E-Mail & SMS',
'Senior (50+)': 'Sênior (50+)',
'Sent By Person': 'Enviado Por Pessoa',
'Sent By': 'Enviado Por',
'Sent Item Details': 'Detalhes do Item enviado',
'Sent Item deleted': 'Enviado Item excluído',
'Sent Item updated': 'Enviado Item atualizado',
'Sent Shipment Details': 'Enviado Detalhes de Embarque',
'Sent Shipment canceled and items returned to Inventory': 'Enviado Carregamento cancelado e itens retornado ao Inventário',
'Sent Shipment canceled': 'Enviado Carregamento cancelado',
'Sent Shipment updated': 'Enviado Embarque atualizado',
'Sent Shipments': 'Remessas Enviadas',
'Sent': 'Enviadas',
'Separated children, caregiving arrangements': 'Crianças separadas, arranjos de cuidado',
'Serial Number': 'Numero de série',
'Series': 'serie',
'Server': 'servidor',
'Service Catalog': 'Catálogo de Serviços',
'Service or Facility': 'Serviço ou facilidade',
'Service profile added': 'Perfil de serviço adicionado',
'Service profile deleted': 'Perfil de serviço Excluído',
'Service profile updated': 'Perfil de serviço atualizado',
'Service': 'serviço',
'Services Available': 'Serviços Disponíveis',
'Services': 'Serviços',
'Set Base Site': 'Definir base de dados do site',
'Set By': 'Definido por',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Configure como True para permitir que este nível da hierarquia do local possa ser editado por usuários que não sejam administradores.',
'Setting Details': 'Detalhes de ajuste',
'Setting added': 'Configuração adicionada',
'Setting deleted': 'Configuração Excluída',
'Setting updated': 'Configuração atualizada',
'Settings updated': 'Ajustes atualizados',
'Settings were reset because authenticating with Twitter failed': 'As configurações foram redefinidas porque a autenticação com Twitter falhou',
'Settings which can be configured through the web interface are available here.': 'As configurações que podem ser definidas através da interface da web estão disponíveis aqui.',
'Settings': 'Ajustes',
'Severe': 'Severo',
'Severity': 'Gravidade',
'Share a common Marker (unless over-ridden at the Feature level)': 'Compartilhar um marcador comum (a menos que sobrescrito no nível de Componente)',
'Shelter & Essential NFIs': 'Abrigo & NFIs Essenciais',
'Shelter Details': 'Detalhes de Abrigo',
'Shelter Name': 'Nome de Abrigo',
'Shelter Registry': 'Registro de Abrigo',
'Shelter Service Details': 'Detalhes do serviço de abrigo',
'Shelter Service added': 'Serviço de Abrigo incluído',
'Shelter Service deleted': 'Serviço de Abrigo excluído',
'Shelter Service updated': 'Atualização de serviços de abrigo',
'Shelter Service': 'Serviço de Abrigo',
'Shelter Services': 'Serviços de abrigo',
'Shelter Type Details': 'Detalhes do tiipo de abrigo',
'Shelter Type added': 'Tipo de abrigo incluído',
'Shelter Type deleted': 'Tipo de abrigo excluído',
'Shelter Type updated': 'Abrigos Tipo De atualização',
'Shelter Type': 'Tipo de abrigo',
'Shelter Types and Services': 'Abrigo Tipos e serviços',
'Shelter Types': 'Tipos De abrigo',
'Shelter added': 'Abrigo incluído',
'Shelter deleted': 'Abrigo excluído',
'Shelter updated': 'Abrigo atualizado',
'Shelter': 'Abrigo',
'Shelter/NFI Assistance': 'Abrigo/ Assistência NFI',
'Shelters': 'Abrigos',
'Shipment Created': 'Embarque Criado',
'Shipment Items received by Inventory': 'Itens de Remessa recebidos pelo Inventário',
'Shipment Items sent from Inventory': 'Itens de Remessa enviados pelo Inventário',
'Shipment Items': 'Itens de Carregamento',
'Shipment to Send': 'Carga para Enviar',
'Shipments To': 'Remessas Para',
'Shipments': 'Remessas',
'Shooting': 'Tiroteio',
'Short Assessment': 'Curta Avaliação',
'Short Description': 'Breve Descrição',
'Show Checklist': 'Mostrar Lista De Verificação',
'Show Details': 'Mostrar detalhes',
'Show Map': 'Mostrar Mapa',
'Show Region in Menu?': 'Mostrar Região no Menu?',
'Show on Map': 'Mostrar no mapa',
'Show on map': 'Mostrar no mapa',
'Sign-up as a volunteer': 'Inscrever-se como um voluntário',
'Sign-up for Account': 'Inscrever-se para conta',
'Sign-up succesful - you should hear from us soon!': 'Sua inscriçao foi feita com sucesso - aguarde notícias em breve!',
'Sindhi': 'Sindi',
'Site Administration': 'Administração do site',
'Site or Location': 'Sítio ou Local',
'Site': 'site',
'Sites': 'sites',
'Situation Awareness & Geospatial Analysis': 'Situação Reconhecimento & Geoespaciais Análise',
'Situation': 'Situação',
'Sketch': 'Esboço',
'Skill Catalog': 'Catálogo de Conhecimentos',
'Skill Details': 'Detalhes das habilidades',
'Skill Equivalence Details': 'Detalhes da Equivalência de Habilidade',
'Skill Equivalence added': 'Equivalência de Habilidade incluída',
'Skill Equivalence deleted': 'Equivalência de Habilidade excluída',
'Skill Equivalence updated': 'Equivalência de Habilidade atualizada',
'Skill Equivalence': 'Equivalência de Conhecimentos',
'Skill Equivalences': 'Equivalências de habilidade',
'Skill Provision Catalog': 'Catálogo de habilidades disponível',
'Skill Provision Details': 'Detalhes de habilidades disponível',
'Skill Provision added': 'Provisão de Habilidade incluída',
'Skill Provision deleted': 'Catalogo de habilidades excluído',
'Skill Provision updated': 'Catálogo de habilidades atualizado',
'Skill Provision': 'Provisão de Habilidade',
'Skill Provisions': 'Habilidades disponíveis',
'Skill Status': 'Status da Habilidade',
'Skill TYpe': 'Tipo de habilidade',
'Skill Type Catalog': 'Catálogo de tipos de habilidades',
'Skill Type Details': 'Detalhes do tipo de habilidade',
'Skill Type added': 'Tipo de habilidade incluído',
'Skill Type deleted': 'Tipo de habilidade excluído',
'Skill Type updated': 'Tipo de habilidade atualizado',
'Skill Types': 'Tipos de habilidade',
'Skill added': 'Habilidade incluída',
'Skill deleted': 'Habilidade Excluída',
'Skill updated': 'Habilidade ATUALIZADA',
'Skill': 'QUALIFICAÇÃO',
'Skill/Training': 'Habilidades/Treinamento',
'Skills Catalog': 'Catálogo de habilidades',
'Skills Management': 'Gerenciamento das Habilidades',
'Skills': 'Habilidades',
'Skype ID': 'ID DO Skype',
'Slightly Damaged': 'Ligeiramente Danificado',
'Slope failure, debris': 'falha de inclinação, destroços',
'Small Trade': 'Pequeno Comércio',
'Smoke': 'Fumaça',
'Snapshot Report': 'Relatório de snapshot',
'Snapshot': 'snapshot',
'Snow Fall': 'Queda de neve , nevasca',
'Snow Squall': 'Rajada de neve',
'Soil bulging, liquefaction': 'abaulamento do solo, liquefação',
'Solid waste': 'Resíduos sólidos',
'Solution Details': 'Detalhes da Solução',
'Solution Item': 'Item de Solução',
'Solution added': 'Solução adicionada',
'Solution deleted': 'Solução excluída',
'Solution updated': 'Solução atualizada',
'Solution': 'Solução',
'Solutions': 'Soluções',
'Some': 'Algum',
'Sorry that location appears to be outside the area of the Parent.': 'Desculpe ! Essa localização está fora da área do Pai.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Desculpe ! Essa localização parece estar fora da área suportada por esta implementação.',
'Sorry, I could not understand your request': 'Desculpe, eu não pude entender o seu pedido',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Desculpe, apenas usuários com o perfil MapAdmin tem permissão para criar locais dos grupos.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Desculpe, apenas usuários com o perfil MapAdmin tem permissão para editar estes locais',
'Sorry, something went wrong.': 'Desculpe, algo deu errado.',
'Sorry, that page is forbidden for some reason.': 'Desculpe ! Esta página tem acesso restrito por alguma razão.',
'Sorry, that service is temporary unavailable.': 'Desculpe ! Este serviço está indisponível temporariamente.',
'Sorry, there are no addresses to display': 'Desculpe ! Não há endereços para visualizar.',
'Source ID': 'ID de origem',
'Source Time': 'Origem do tempo',
'Source': 'source',
'Sources of income': 'Fontes de rendimento',
'Space Debris': 'Destroços Espaciais',
'Spanish': 'espanhol',
'Special Ice': 'Gelo Especial',
'Special Marine': 'Marinha especial',
'Specialized Hospital': 'Hospital especializado.',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Área específica (exemplo: edifício/quarto) com a localização de onde essa pessoa/grupo é visto.',
'Specific locations need to have a parent of level': 'Locais específicos precisam ter um nível paterno.',
'Specify a descriptive title for the image.': 'Especifique um título descritivo para a imagem.',
'Specify the bed type of this unit.': 'Especifique o tipo de cama dessa unidade.',
'Specify the number of available sets': 'Especificar o número de conjuntos disponíveis',
'Specify the number of available units (adult doses)': 'Especifique o número de unidades disponíveis (doses para adultos)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Especificar o número de unidades disponíveis (litros) de Ringer-Lactato ou soluções equivalentes',
'Specify the number of sets needed per 24h': 'Especificar o número de conjuntos necessários por 24h',
'Specify the number of units (adult doses) needed per 24h': 'Especificar o número de unidades (doses para adultos) necessário por 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Especificar o número de unidades (litros) de Ringer-Lactato ou soluções equivalentes necessárias para 24h',
'Spherical Mercator?': 'Mapa Mercator Esférico?',
'Spreadsheet Importer': 'PLANILHA IMPORTADOR',
'Spreadsheet uploaded': 'Planilha transferido por UPLOAD',
'Spring': 'Primavera',
'Squall': 'Rajada',
'Staff & Volunteers': 'Colaboradores & Voluntários',
'Staff 2': 'Equipe 2',
'Staff Details': 'Equipe Detalhes',
'Staff ID': 'ID da equipe',
'Staff List': 'Lista de pessoal',
'Staff Member Details': 'Detalhes de membro da equipe',
'Staff Members': 'Membros da equipe',
'Staff Record': 'Registro de pessoal',
'Staff Type Details': 'Equipe Tipo Detalhes',
'Staff Type added': 'Equipe tipo incluído',
'Staff Type deleted': 'Tipo De equipe excluído',
'Staff Type updated': 'Equipe Tipo De atualização',
'Staff Types': 'Tipos de equipe',
'Staff added': 'Equipe incluída',
'Staff and Volunteers': 'Funcionários e Voluntários',
'Staff deleted': 'Equipe excluída',
'Staff member added': 'Membro da equipe incluído',
'Staff member updated': 'Membro da equipe atualizado',
'Staff present and caring for residents': 'Equipe presente e cuidando de moradores',
'Staff updated': 'Equipe atualizado',
'Staff': 'Equipe',
'Staff2': 'staff2',
'Staffing': 'Equipe',
'Stairs': 'Escadas',
'Start Date': 'Data do início',
'Start date': 'Data Inicial',
'Start of Period': 'Início do Período',
'State': 'Status',
'Stationery': 'Papel de Carta',
'Status Report': 'Relatório de status',
'Status Updated': 'Status atualizado',
'Status added': 'Estado adicionado',
'Status deleted': 'Estado excluído',
'Status of clinical operation of the facility.': 'Estado da operação clínica da instalação.',
'Status of general operation of the facility.': 'Estado da operação geral da instalação.',
'Status of morgue capacity.': 'Estado da capacidade da morgue.',
'Status of operations of the emergency department of this hospital.': 'Estado das operações do Departamento de Emergência deste hospital.',
'Status of security procedures/access restrictions in the hospital.': 'Estado dos procedimentos de segurança/Restrições de Acesso no hospital.',
'Status of the operating rooms of this hospital.': 'Status das salas de operação deste hospital.',
'Status updated': 'Status atualizado',
'Steel frame': 'Estrutura de aço',
'Stolen': 'Roubado',
'Store spreadsheets in the Eden database': 'Arquivar as planilhas no banco de dados Eden',
'Storeys at and above ground level': 'Andares ao nível do solo e acima',
'Storm Force Wind': 'Tempestade Força Vento',
'Storm Surge': 'ressaca',
'Stowaway': 'Penetra',
'Street Address': 'Endereço residencial',
'Strong Wind': 'vento forte',
'Structural Hazards': 'riscos estruturais',
'Structural': 'estrutural',
'Style Field': 'Estilo do Campo',
'Style Values': 'Estilo dos Valores',
'Sub-type': 'Subtipo',
'Subject': 'assunto',
'Submission successful - please wait': 'envio bem sucedido - por favor aguarde',
'Submission successful - please wait...': 'envio bem sucedido - por favor aguarde...',
'Submit New (full form)': 'Submeter Novo (formulário completo)',
'Submit New (triage)': 'Submeter novo (triagem)',
'Submit New': 'Submeter Novamente',
'Submit a request for recovery': 'envie um pedido de recuperação',
'Submit new Level 1 assessment (full form)': 'Submeter novo nível 1 de avaliação (formulário completo)',
'Submit new Level 1 assessment (triage)': 'Submeter novo nível 1 de avaliação (triagem)',
'Submit new Level 2 assessment': 'Submeter novo nível 2 de avaliação',
'Subscription Details': 'Detalhes da Assinatura',
'Subscription added': 'Assinatura Incluída',
'Subscription deleted': 'Assinatura Excluída',
'Subscription updated': 'Assinatura ATUALIZADO',
'Subscriptions': 'assinaturas',
'Subsector Details': 'Detalhes de subsetor',
'Subsector added': 'Subsetor incluído',
'Subsector deleted': 'Subsetor excluído',
'Subsector updated': 'Subsetor atualizado',
'Subsector': 'Subsetor',
'Subsectors': 'Subsetores',
'Subsistence Cost': 'custo de subsistencia',
'Suburb': 'Subúrbio',
'Suggest not changing this field unless you know what you are doing.': 'Sugerimos não alterar esse campo a menos que você saiba o que está fazendo.',
'Summary by Administration Level': 'Resumo por Nível de Administração',
'Summary': 'Sumário',
'Sunday': 'Domingo',
'Supplies': 'Suprimentos',
'Support Request': 'Pedido de Suporte',
'Support Requests': 'Pedidos de Suporte',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Suporta a tomada de decisão de grandes grupos de Especialistas em Gestão de Crises ajudando os grupos a criar listas de classificados.',
'Sure you want to delete this object?': 'Tem certeza que você quer excluir este objeto?',
'Surgery': 'Cirurgia',
'Survey Answer Details': 'Detalhes da Resposta da Pesquisa',
'Survey Answer added': 'Incluído Resposta da Pesquisa',
'Survey Answer deleted': 'Excluído a Resposta da Pesquisa',
'Survey Answer updated': 'Resposta da Pesquisa atualizada',
'Survey Answer': 'Resposta da Pesquisa',
'Survey Module': 'Módulo de Pesquisa',
'Survey Name': 'Nome da Pesquisa',
'Survey Question Details': 'Detalhes da Pergunta de Pesquisa',
'Survey Question Display Name': 'Nome da pergunta de pesquisa',
'Survey Question added': 'Pergunta de pesquisa incluída',
'Survey Question deleted': 'Pergunta de pesquisa excluída',
'Survey Question updated': 'Pergunta de pesquisa atualizada',
'Survey Question': 'Questão de Pesquisa de Opinião',
'Survey Section Details': 'Detalhes de Seção de Pesquisa',
'Survey Section Display Name': 'Seção de pesquisa do nome de exibição',
'Survey Section added': 'Seção de Pesquisa incluída',
'Survey Section deleted': 'Seção de Pesquisa excluída',
'Survey Section updated': 'Seção de pesquisa atualizada',
'Survey Section': 'Seção da Pesquisa de Opinião',
'Survey Series Details': 'Série de Pesquisa Detalhes',
'Survey Series Name': 'Nome de Série de Pesquisa',
'Survey Series added': 'Série de Pesquisa incluída',
'Survey Series deleted': 'Série de Pesquisa excluída',
'Survey Series updated': 'Série de Pesquisa atualizada',
'Survey Series': 'Série de Pesquisa',
'Survey Template Details': 'Definir detalhes do formulário',
'Survey Template added': 'Modelo de Pesquisa incluído',
'Survey Template deleted': 'Modelo de Pesquisa excluído',
'Survey Template updated': 'Definição de formulário actualizada',
'Survey Template': 'Modelo de Pesquisa de Opinião',
'Survey Templates': 'Definir formulários',
'Symbology': 'Simbologia',
'Sync Conflicts': 'Conflitos de Sincronização',
'Sync History': 'Histórico de Sincronização',
'Sync Now': 'Sincronizar Agora',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'PARCEIROS DE Sincronização são instâncias ou PARES (SahanaEden, SahanaAgasti, Ushahidi, etc. ) que você deseja a informação de sincronização com. Clique no link sobre o direito de ir a página em que você pode incluir parceiros de sincronização, procurar por parceiros de sincronização e Modificá-las.',
'Sync Partners': 'Sincronizar parceiros',
'Sync Pools': 'Conjuntos de Sincronização',
'Sync Schedule': 'Planejamento de Sincronização',
'Sync Settings': 'Configurações de Sincronização',
'Sync process already started on': 'Processo de Sincronização já iniciado em',
'Synchronisation': 'Sincronização',
'Synchronization Conflicts': 'Conflitos de Sincronização',
'Synchronization Details': 'Detalhes de Sincronização',
'Synchronization History': 'Histórico de Sincronização',
'Synchronization Peers': 'Parceiros de Sincronização',
'Synchronization Settings': 'Configurações de sincronização',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sincronização permite compartilhar dados que você tenha com outros e Atualizar seu próprio banco de dados com informações recentes de outros parceiros. Esta página fornece informações sobre como utilizar os recursos de sincronização de Sahana Éden',
'Synchronization not configured.': 'Sincronização não Configurada.',
'Synchronization settings updated': 'Configurações de sincronização atualizadas',
'Synchronization': 'Sincronização',
'Syncronisation History': 'Histórico De Sincronização',
'Take shelter in place or per <instruction>': 'Abrigue-se no local ou por',
'Task Details': 'Detalhes da Tarefa',
'Task List': 'Lista de tarefas',
'Task Status': 'Status da tarefa',
'Task added': 'Task Inclusa',
'Task deleted': 'Tarefa excluída',
'Task updated': 'Tarefa atualizada',
'Tasks': 'Tarefas',
'Team Description': 'Descrição da Equipe',
'Team Details': 'Detalhes da Equipe',
'Team ID': 'ID da Equipe',
'Team Id': 'Id da Equipe',
'Team Leader': 'Líder de Equipe',
'Team Member added': 'Membro da equipe incluído',
'Team Members': 'Membros da equipe',
'Team Name': 'Nome da equipe',
'Team Type': 'Tipo de equipe',
'Team added': 'Equipe incluída',
'Team deleted': 'Equipe excluída',
'Team updated': 'Equipa actualizada',
'Team': 'Equipe',
'Teams': 'Equipes',
'Technical testing only, all recipients disregard': 'Apenas teste técnico, todos os recipientes ignorem',
'Telecommunications': 'Telecomunicações',
'Telephone': 'Telefone',
'Telephony': 'Telefonia',
'Temp folder %s not writable - unable to apply theme!': 'PASTA Temp%s não gravável-impossível aplicar tema!',
'Template file %s not readable - unable to apply theme!': 'Arquivo de modelo %s não é legível - impossível aplicar tema!',
'Templates': 'modelos',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Termo para o 5º nível de divisão administrativa nacional (por exemplo, uma subdivisão de código postal ou de zona de votação). Este nível não é frequentemente utilizado.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termo para o 4º nível de divisão administrativa nacional(por exemplo, vila, bairro ou distrito).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Prazo para a principal divisão administrativa dentro do país (i.e. Estado ou Distrito).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Prazo para a Secundária divisão administrativa dentro do país (por exemplo, Bairro ou Município).',
'Term for the secondary within-country administrative division (e.g. District).': 'Prazo para a Secundária divisão administrativa dentro do país (i.e. Bairro).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Prazo para o 3ᵉʳ nível de divisão administrativa dentro do país (por exemplo, Cidade ou Municipio).',
'Term for the top-level administrative division (i.e. Country).': 'Prazo para a divisão administrativa de nível superior (por exemplo País).',
'Term for the top-level administrative division (typically Country).': 'Prazo para a divisão administrativa de nível superior (geralmente País).',
'Territorial Authority': 'Autoridade territoriais',
'Terrorism': 'Terrorismo',
'Tertiary Server (Optional)': 'Servidor terciário (opcional)',
'Text Color for Text blocks': 'Cor de texto para os blocos de texto',
'Text before each Text Field (One per line)': 'Texto antes de cada campo de texto (um por linha)',
'Text': 'texto',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Obrigado para validar seu e-mail. Sua conta de usuário ainda está pendente para aprovação pelo administrador do Sistema (%s). você receberá uma notificação por e-mail quando sua conta esteja ativada.',
'Thanks for your assistance': 'Obrigado por sua ajuda',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'O "query" é uma condição como "db.table1.field1==\'value\'". Algo como "db.table1.field1 == db.table2.field2" resulta em uma junção SQL.',
'The Area which this Site is located within.': 'A área que este Site está localizado',
'The Assessments module allows field workers to send in assessments.': 'O Modulo Avaliações permite aos trabalhadores de campo que enviem avaliações.',
'The Author of this Document (optional)': 'O autor deste documento (opcional)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'O módulo avaliações De Construção permite a segurança edifício a ser avaliada, por exemplo, depois de um terremoto.',
'The Camp this Request is from': 'O Alojamento neste pedido é de',
'The Camp this person is checking into.': 'O Alojamento que esta pessoa está se registrando.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local atual do Usuário/Grupo, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'O endereço de e-mail para onde os pedidos de aprovação são enviados (normalmente seria um correio de Grupo ao invés de um individual). Se o campo estiver em branco, os pedidos são aprovados automaticamente se o domínio corresponder.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'O Sistema de Comunicação de Incidentes permite o Público em Geral reportar incidentes & ter esses rastreados.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'A Localização da Pessoa vem do, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local que a pessoa vai, que pode ser genérico (para Relatórios) ou preciso (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.',
'The Media Library provides a catalog of digital media.': 'A Biblioteca de mídias fornece um catálogo de mídia digital.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'O módulo de mensagens é o hub de comunicação principal do sistema Sahana. É utilizado para enviar alertas e/ou mensagens utilizando o SMS & e-mail para diferentes grupos e indivíduos antes, durante e após um desastre.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'O registro Da Organização mantém controle de todos as organizações de apoio que trabalham na área.',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'O registro da Organização mantém controle de todas organizações de ajuda trabalhando numa região de desastre. Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que está fornecendo em cada região.',
'The Person currently filling this Role.': 'A pessoa atualmente preenchendo esta função.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'O módulo acompanhamento do projeto permite a criação de atividades para preencher Lacunas nas avaliações de necessidades.',
'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'O sistema De Gerenciamento De Pedidos é um repositório online central em todas as organizações de ajuda, trabalhadores de assistência, agentes do governo e sites de acampamento para a equipe de refugiados pode coordenar o fornecimento da ajuda com seu pedido. Ela permite que usuários aloquem os recursos disponíveis para suprir as demandas de forma efetiva e eficiente.',
'The Role this person plays within this hospital.': 'A Função desta pessoa neste hospital.',
'The Role to which this Role reports.': 'A função à qual essa função responde.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'O Registro de Abrigos rastreia todos os abrigos e armazena detalhes básicos sobre eles. Ele colabora com outros módulos para rastrear as pessoas associadas com um abrigo, os serviços disponíveis etc.',
'The Shelter this Request is from (optional).': 'O Abrigo do qual este Pedido provém (opcional).',
'The Shelter this Request is from': 'O Abrigo do qual este Pedido provém',
'The Shelter this person is checking into.': 'O Abrigo no qual esta pessoa está dando entrada.',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'A URL para o GetCapabilities de um serviço WMS cujas camadas você deseja acessíveis através do mapa.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'A URL para a página do GetCapabilities de um Web Map Service (WMS), cujas camadas que você deseja disponíveis através do painel do navegador no Mapa.',
'The URL of your web gateway without the post parameters': 'A URL de seu gateway da web sem os parâmetros post',
'The URL to access the service.': 'A URL para acessar o serviço.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'O Idenfificador Único (UUID) conforme designado pelo governo para esta filial.',
'The asset must be assigned to a site OR location.': 'O ativo deve ser assinalado para um site ou local.',
'The attribute which is used for the title of popups.': 'O atributo que é usado para o título de popups.',
'The attribute within the KML which is used for the title of popups.': 'O Atributo dentro do KML que é utilizado para o título dos pop-ups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'O Atributo(s) no KML que são utilizados para o corpo dos pop-ups. ( utilizar um espaço entre atributos )',
'The body height (crown to heel) in cm.': 'A altura do corpo (cabeça até o calcanhar) em cm.',
'The contact person for this organization.': 'A pessoa de contato nessa organização.',
'The country the person usually lives in.': 'O país que a pessoa vive habitualmente',
'The default Organization for whom this person is acting.': 'A Organização padrão para quem esta pessoa está atuando.',
'The default Organization for whom you are acting.': 'A Organização padrão para quem você está atuando.',
'The duplicate record will be deleted': 'O registro duplicado será excluído',
'The first or only name of the person (mandatory).': 'O primeiro nome ou único nome da pessoa (obrigatório).',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'O formulário da URL é http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service que representa o caminho da URL para o WMS.',
'The language you wish the site to be displayed in.': 'O idioma que você deseja que o site seja exibido.',
'The last known location of the missing person before disappearance.': 'A última localização conhecida da pessoa desaparecida antes do desaparecimento.',
'The list of Brands are maintained by the Administrators.': 'A lista de Marcas serão mantidas pelos administradores.',
'The list of Catalogs are maintained by the Administrators.': 'A lista de catálogos é mantida pelos administradores.',
'The list of Item categories are maintained by the Administrators.': 'A lista de categorias dos itens são mantidas pelos administradores.',
'The map will be displayed initially with this latitude at the center.': 'O mapa será exibido inicialmente com esta latitude no centro.',
'The map will be displayed initially with this longitude at the center.': 'O mapa será exibido inicialmente com esta longitude no centro.',
'The minimum number of features to form a cluster.': 'O número mínimo de recursos para formar um cluster.',
'The name to be used when calling for or directly addressing the person (optional).': 'O nome a ser usado ao chamar por ou diretamente endereçar a pessoa (opcional).',
'The next screen will allow you to detail the number of people here & their needs.': 'A próxima tela permitirá que você detalhe o número de pessoas aqui e as suas necessidades.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'O número de unidades de medida dos Itens alternativos é igual a uma unidade de medida do Item',
'The number of pixels apart that features need to be before they are clustered.': 'O número de separado de pixels de funcionalidades tem que ser antes que eles sejam agrupados.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'O número de títulos em torno do mapa visível para fazer download. Zero significa que a primeira página carrega mais rápido, números maiores que zero significam que as paginas seguintes são mais rápida.',
'The person at the location who is reporting this incident (optional)': 'A pessoa no local que está relatando este incidente (opcional)',
'The person reporting the missing person.': 'A pessoa que reporta o desaparecimento de alguém.',
'The post variable containing the phone number': 'A variavel post contendo o numero de telefone',
'The post variable on the URL used for sending messages': 'A variável post no URL é utilizada para enviar mensagens',
'The post variables other than the ones containing the message and the phone number': 'As variáveis post diferentes das que contém a mensagem e o número de telefone',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'A porta serial no qual o modem está conectado-/dev/ttyUSB0, etc. No linux e com1, com2, etc. No Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'O servidor não recebeu uma resposta oportuna de outro servidor que ele estava acessando para preencher o pedido pelo navegador.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'O servidor recebeu uma resposta incorreta a partir de outro servidor que ele estava acessando para preencher o pedido pelo navegador.',
'The site where this position is based.': 'O local onde esta posição se baseia.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'O pessoal responsável pelas Instalações pode fazer pedidos de assistência. Compromissos podem ser feitos em relação a esses pedidos, no entanto os pedidos permanecem abertos até que o solicitante confirme que o pedido foi concluído.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'O acontecimento já não representa uma ameaça ou preocupação e a ação a ser tomada é descrita em<instruction>',
'The time at which the Event started.': 'O momento em que o evento começou.',
'The title of the WMS Browser panel in the Tools panel.': 'O título do painel do navegador WMS em ferramentas.',
'The token associated with this application on': 'O token associado a este aplicativo em',
'The unique identifier which identifies this instance to other instances.': 'O indentificador único diferencia esta instância de outras.',
'The way in which an item is normally distributed': 'O modo em que um item é normalmente distribuído',
'The weight in kg.': 'O peso em quilogramas.',
'The': 'O',
'Theme Details': 'Detalhes do Tema',
'Theme added': 'Tema incluído',
'Theme deleted': 'Tema excluído',
'Theme updated': 'Tema atualizado',
'Theme': 'Tema',
'Themes': 'Temas',
'There are errors': 'Há erros',
'There are insufficient items in the Inventory to send this shipment': 'não há itens suficientes no armazém para o envio desse carregamento',
'There are multiple records at this location': 'Há vários registros neste local',
'There are not sufficient items in the Inventory to send this shipment': 'não há itens suficientes no inventário para enviar esse carregamento',
'There is no address for this person yet. Add new address.': 'Não há endereço para esta pessoa ainda. Adicionar novo endereço.',
'These are settings for Inbound Mail.': 'Estas são as configurações para Correio de entrada.',
'These are the Incident Categories visible to normal End-Users': 'Estes são as Categorias de incidentes visíveis para usuários finais normais.',
'These need to be added in Decimal Degrees.': 'estas precisam ser incluídas em graus decimais.',
'They': 'Eles',
'This Group has no Members yet': 'Sem membros registrados atualmente',
'This Team has no Members yet': 'Sem membros registrados atualmente',
'This appears to be a duplicate of': 'Isto parece ser duplicado de',
'This file already exists on the server as': 'Este arquivo já existe no servidor como',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Isso é apropriado se esse nível estiver em construção. Para evitar modificação acidental após esse nível estar concluído, pode ser configurado como False.',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Este é o caminho para a transferência de dados entre máquinas que mantém a integridade referencial.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Este é o caminho para a transferência de dados entre máquinas, pois mantém a integridade referencial... dados duplicados devem ser removidos manualmente primeiro!',
'This level is not open for editing.': 'Este nível não é aberto para edição.',
'This might be due to a temporary overloading or maintenance of the server.': 'Isso pode ser devido a uma sobrecarga temporária ou manutenção do servidor.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Este módulo permite que itens de inventário sejam Solicitados & Enviados entre os Inventários das instalações.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Este módulo permite que você planeje cenários para os Exercícios & Eventos. Você pode alocar apropriado recursos (humanos, Ativos e Recursos) para que estes possam ser mobilizados facilmente.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Esta página mostra as logs das sincronizações passadas. Clique no link abaixo para ir para essa página.',
'This screen allows you to upload a collection of photos to the server.': 'Esta tela permite que você faça upload de um conjunto de fotografias para o servidor.',
'This setting can only be controlled by the Administrator.': 'Esta definicão só pode ser controlado pelo administrador.',
'This shipment has already been received.': 'Este carregamento já foi recebido.',
'This shipment has already been sent.': 'Este carregamento já foi enviado.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Este carregamento não foi recebido-ele não foi cancelado porque ainda pode ser editado.',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Este carregamento não foi enviado- ele não foi cancelado porque ainda pode ser editado.',
'This shipment will be confirmed as received.': 'Este carregamento será confirmado como recebido.',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'Esse valor inclui um pequeno valor de distância fora dos pontos. Sem isto, os pontos mais afastados estariam na caixa delimitadora, e podem não estar visíveis.',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'Este valor fornece uma largura e altura minimas em graus para a região mostrada. Sem isto, um mapa que mostre um ponto único não mostraria nenhuma extensão ao redor desse ponto. Depois que o mapa for exibido, pode ser ampliado, conforme desejado.',
'Thunderstorm': 'Trovoada',
'Thursday': 'Quinta-feira',
'Ticket Details': 'Detalhes do bilhete',
'Ticket ID': 'ID do Bilhete',
'Ticket added': 'Bilhete incluído',
'Ticket deleted': 'Bilhete removido',
'Ticket updated': 'Bilhete atualizado',
'Ticket': 'Bilhete',
'Ticketing Module': 'Módulo de bilhetes',
'Tickets': 'Bilhetes',
'Tilt-up concrete': 'Inclinar concreto',
'Timber frame': 'Quadro de madeira',
'Timeline Report': 'Relatório de períodos de tempo',
'Timeline': 'Prazo',
'Title to show for the Web Map Service panel in the Tools panel.': 'Título para mostrar o painel de serviço de Mapa da Web no painel de Ferramentas.',
'Title': 'título',
'To Location': 'Localidade de destino',
'To Person': 'Para Pessoa',
'To begin the sync process, click the button on the right =>': 'Para iniciar o processo de Sincronização, clique no botão à direita.',
'To begin the sync process, click this button =>': 'Para iniciar o processo de Sincronização, clique neste botão.',
'To create a personal map configuration, click': 'Para criar uma configuração do mapa pessoal, clique',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Para editar OpenStreetMap, você precisa editar as configurações do OpenStreetMap em models/000_config.py',
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Para pesquisar por título, digite qualquer parte do título. Pode utilizar o % como um substituto para qualquer caracter.',
'To variable': 'Para variável',
'To': 'para',
'Tools': 'ferramentas',
'Tornado': 'tornado',
'Total # of Target Beneficiaries': 'Nº Total de Beneficiários De Destino',
'Total # of households of site visited': 'Nº Total de famílias de site Visitado',
'Total Beds': 'Total de Camas',
'Total Beneficiaries': 'Total de Beneficiários',
'Total Cost per Megabyte': 'Custo Total por Megabyte',
'Total Cost per Minute': 'Custo Total por Minuto',
'Total Monthly Cost': 'Custo Total mensal',
'Total Monthly Cost:': 'Custo Total mensal:',
'Total Monthly': 'Total Mensal',
'Total One-time Costs': 'Total Um tempo de Custos',
'Total Persons': 'Totalizar Pessoas',
'Total Recurring Costs': 'Totalizar Custos Recorrentes',
'Total Unit Cost': 'Total do custo unitário',
'Total Unit Cost:': 'Custo Unitário Total:',
'Total Units': 'Total de unidades',
'Total gross floor area (square meters)': 'Total de área bruta (metros quadrados)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Número Total de leitos neste hospital. Atualizado automaticamente a partir de relatórios diários.',
'Total number of houses in the area': 'Número Total de casas na área',
'Total number of schools in affected area': 'Número Total de escolas em área afetada',
'Total population of site visited': 'Totalizar População do site Visitado',
'Totals for Budget:': 'Total para Orçamento',
'Totals for Bundle:': 'Total do Pacote',
'Totals for Kit:': 'Totais para Kit',
'Tourist Group': 'Grupo turístico',
'Town': 'Urbano',
'Traces internally displaced people (IDPs) and their needs': 'Rastreia pessoas deslocadas internamente (PDI) e suas necessidades',
'Tracing': 'Rastreio',
'Track Details': 'Detalhes do restraio',
'Track deleted': 'Rastreio excluído',
'Track updated': 'Rastreamento atualizado',
'Track uploaded': 'Rastreamento enviado',
'Track with this Person?': 'RASTREAR com esta pessoa?',
'Track': 'Rastrear',
'Tracking of Projects, Activities and Tasks': 'Rastreamento de projetos, atividades e tarefas',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Rastreamento de informações básicas sobre a localização, instalações e tamanho dos abrigos',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Rastreia o local, distribuição, capacidade e discriminação da vítima em Abrigos',
'Traffic Report': 'Relatório de tráfego',
'Training Course Catalog': 'Catálogo de cursos de treinamento',
'Training Details': 'Detalhes do treinamento',
'Training added': 'Treinamento incluído',
'Training deleted': 'Treinamento excluído',
'Training updated': 'Treinamento atualizado',
'Training': 'Treinamento',
'Trainings': 'Treinamentos',
'Transit Status': 'Status do Transito',
'Transit': 'Trânsito',
'Transition Effect': 'Efeito de Transição',
'Transparent?': 'TRANSPARENTE?',
'Transportation assistance, Rank': 'Assistência de transporte, Classificação',
'Trauma Center': 'Centro de traumas',
'Travel Cost': 'Custo da Viagem',
'Tropical Storm': 'Tempestade Tropical',
'Tropo Messaging Token': 'Sinal de Mensagem Tropo',
'Tropo Settings': 'Configurações do Tropo',
'Tropo Voice Token': 'Sinal de Voz Tropo',
'Tropo settings updated': 'Configurações Tropo Atualizadas',
'Tropo': 'Tropo',
'Truck': 'Caminhão',
'Try checking the URL for errors, maybe it was mistyped.': 'Tente verificar se existem erros na URL, talvez tenha sido um erro de digitação',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Tente apertar o botão atualizar/recarregar ou tente a URL a partir da barra de endereços novamente',
'Try refreshing the page or hitting the back button on your browser.': 'Tente atualizar a página ou apertar o botão voltar em seu navegador.',
'Tuesday': 'Terça-feira',
'Twitter ID or #hashtag': 'ID Twitter ou #hashtag',
'Twitter Settings': 'Configurações do Twitter',
'Type of Construction': 'Tipo de Construção',
'Type of water source before the disaster': 'Tipo de fonte de água antes do desastre',
'Type': 'Tipo',
'UID': 'UID',
'UN': 'ONU',
'URL': 'Localizador-Padrão de Recursos',
'Un-Repairable': 'Irreparável',
'Unable to parse CSV file!': 'Não é possível analisar Arquivo CSV!',
'Understaffed': 'Com falta de pessoal',
'Unidentified': 'Não identificado',
'Unit Cost': 'Custo por unidade',
'Unit added': 'Unidade incluída',
'Unit deleted': 'Unidade Excluída',
'Unit of Measure': 'Unidade de medida',
'Unit updated': 'Unidade Atualizados',
'Units': 'Unidades',
'Unknown Peer': 'Peer desconhecido',
'Unknown type of facility': 'Tipo desconhecido de instalação',
'Unknown': 'Desconhecido',
'Unreinforced masonry': 'Alvenaria obras',
'Unresolved Conflicts': 'Conflitos não resolvidos',
'Unsafe': 'Inseguro',
'Unselect to disable the modem': 'Desmarcar para desativar o modem',
'Unsent': 'não enviado',
'Unsupported data format!': 'Formato de dados não Suportado!',
'Unsupported method!': 'Método não Suportado!',
'Update Activity Report': 'Atualizar Relatório de atividade',
'Update Cholera Treatment Capability Information': 'Atualizar informações de capacidade de tratamento de Cólera',
'Update Request': 'Atualizar Pedido',
'Update Service Profile': 'Atualizar Perfil de Serviço',
'Update Status': 'Status da Atualização',
'Update Task Status': 'Atualizar Status da Tarefa',
'Update Unit': 'Atualizar Unidade',
'Update if Master': 'Atualizar se for o principal',
'Update if Newer': 'Atualizar se Mais Recente',
'Update your current ordered list': 'Atualize a sua lista ordenada atual',
'Update': 'atualização',
'Updated By': 'Atualizado por',
'Upload Photos': 'Fazer Upload de Fotos',
'Upload Spreadsheet': 'Fazer atualizacao de Planilha',
'Upload Track': 'Pista de carregamento',
'Upload a Spreadsheet': 'Fazer Upload de uma planilha',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Fazer Upload de um arquivo de imagem (bmp, gif, jpeg ou png), máx. 300x300 pixels!',
'Upload an image file here.': 'Fazer atualizacao de um arquivo de imagem aqui.',
'Upload an image, such as a photo': 'Fazer Upload de uma imagem, como uma foto',
'Urban Fire': 'Incêndio urbano',
'Urban area': 'Zona Urbana',
'Urgent': 'Urgente',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilize (...)&(...) para e, (...)|(...) ou para, e ~(...) para não para construir consultas mais complexas.',
'Use Geocoder for address lookups?': 'Utiliza Geocodificador para consultas de endereços?',
'Use default': 'usar o padrão',
'Use these links to download data that is currently in the database.': 'Use estes links para fazer o download de dados actualmente na base de dados.',
'Used by IRS & Assess': 'Utilizado pelos módulos IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Utilizado em onHover De Dicas & Cluster Popups para diferenciar entre tipos.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Utilizado para construir onHover Dicas & primeiro campo também utilizado no Popups Cluster para diferenciar entre os registros.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Usado para verificar latitude de locais inseridos é razoável. Pode ser utilizado para filtrar listas de recursos que possuem locais.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Usado para verificar que longitude de locais inserido é razoável. Pode ser utilizado para filtrar listas de recursos que possuem locais.',
'Used to import data from spreadsheets into the database': 'Para importar dados utilizada a partir de planilhas no banco de dados',
'Used within Inventory Management, Request Management and Asset Management': 'Utilizado no gerenciamento de inventário, gerenciamento de Pedido e gerenciamento de ativos',
'User Account has been Disabled': 'Conta de Usuário foi Desativado',
'User Details': 'Detalhes do Usuário',
'User Management': 'gerenciamento do usuário',
'User Profile': 'Perfil do Utilizador',
'User Requests': 'Pedidos do Utilizador',
'User Updated': 'Utilizador actualizado',
'User added': 'Usuário Incluído',
'User already has this role': 'Usuário já tem essa função',
'User deleted': 'Usuário Excluído',
'User updated': 'Utilizador actualizado',
'User': 'usuário',
'Username': 'Nome de usuário',
'Users removed': 'Utilizadores removidos',
'Users': 'usuários',
'Uses the REST Query Format defined in': 'Utiliza o formato de consulta REST definido em',
'Utilities': 'Serviços Públicos',
'Utility, telecommunication, other non-transport infrastructure': 'Serviços Públicos, telecomunicações, outra infra-estrutura não-transporte',
'Vacancies': 'Vagas',
'Value': 'Valor',
'Various Reporting functionalities': 'Diversas funcionalidades de relatório',
'Vehicle Crime': 'Roubo/Furto de veículo',
'Vehicle Types': 'Tipos de veículo',
'Vehicle': 'veículo',
'Verification Status': 'Status de verificação',
'Verified?': 'Verificado?',
'Verify password': 'Verificar senha',
'Very Good': 'Muito bom',
'Very High': 'muito alto',
'View Alerts received using either Email or SMS': 'Visualizar alertas utilizando quer o correio electrónico quer SMS.',
'View All': 'Visualizar todos',
'View Error Tickets': 'Ver bilhetes de erro',
'View Fullscreen Map': 'Visualização Inteira Mapa',
'View Image': 'Visualizar imagem',
'View Items': 'Ver itens',
'View On Map': 'Visualizar no mapa',
'View Outbox': 'Visualização Outbox',
'View Picture': 'Visualização de imagem',
'View Settings': 'Ver Configurações',
'View Tickets': 'Visualizar Bilhetes',
'View and/or update their details': 'Visualizar e/ou actualizar os seus detalhes',
'View or update the status of a hospital.': 'VISUALIZAR ou atualizar o status de um hospital.',
'View pending requests and pledge support.': 'Visualizar pedidos pendentes e suporte promessa.',
'View the hospitals on a map.': 'Visualizar os hospitais em um mapa.',
'View/Edit the Database directly': 'Visualizar/Editar o banco de dados diretamente',
'Village Leader': 'Líder da Aldeia',
'Village': 'Vila',
'Visible?': 'Visível?',
'Visual Recognition': 'Reconhecimento visual',
'Volcanic Ash Cloud': 'Nuvem de cinzas vulcânicas',
'Volcanic Event': 'Evento vulcânico',
'Volunteer Availability': 'Disponibilidade de Voluntário',
'Volunteer Details': 'Detalhes do voluntário',
'Volunteer Information': 'Voluntário Informações',
'Volunteer Management': 'Gestão de voluntário',
'Volunteer Project': 'Projeto voluntário',
'Volunteer Record': 'Voluntário Registro',
'Volunteer Request': 'Pedido voluntário',
'Volunteer added': 'Voluntário incluído',
'Volunteer availability added': 'Disponibilidade de voluntário incluída',
'Volunteer availability deleted': 'Disponibilidade de voluntário excluída',
'Volunteer availability updated': 'Disponibilidade de voluntário atualizada',
'Volunteer deleted': 'Voluntário excluído',
'Volunteer details updated': 'Atualização dos detalhes de voluntários',
'Volunteer updated': 'Voluntário atualizado',
'Volunteers List': 'Voluntários Lista',
'Volunteers were notified!': 'Voluntários foram notificados!',
'Volunteers': 'Voluntários',
'Vote': 'voto',
'Votes': 'votos',
'WASH': 'WASH (Água, Saneamento e Higiene)',
'WMS Browser Name': 'WMS Nome do Navegador',
'WMS Browser URL': 'WMS Navegador URL',
'Walking Only': 'Apenas andando',
'Wall or other structural damage': 'Parede ou outros danos estruturais',
'Warehouse Details': 'Detalhes do Armazém',
'Warehouse added': 'Warehouse incluído',
'Warehouse deleted': 'Deposito apagado',
'Warehouse updated': 'Warehouse ATUALIZADO',
'Warehouse': 'Depósito',
'Warehouses': 'Armazéns',
'Water Sanitation Hygiene': 'Saneamento de água',
'Water collection': 'Coleta de água',
'Water gallon': 'Galão de água',
'Water storage containers in households': 'Recipientes de armazenamento de água nos domicílios',
'Water supply': 'Abastecimento de água',
'Web Map Service Browser Name': 'Nome do mapa da Web navegador de serviços',
'Web Map Service Browser URL': 'Web Mapa Do navegador de Serviços URL',
'Website': 'WebSite',
'Weight (kg)': 'peso (kg)',
'Weight': 'peso',
'Welcome to the Sahana Portal at': 'Bem-vindo ao Portal Sahana em',
'Well-Known Text': 'Texto bem conhecido',
'Wheat': 'Trigo',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': 'Quando o mapa é que exibido incide sobre um conjunto de pontos, o mapa é aproximado para mostrar apenas a região delimitadora dos pontos.',
'When reports were entered': 'Quando os relatórios foram Digitados',
'Whiskers': 'Bigodes',
'Who is doing what and where': 'Quem está a fazer o quê e onde',
'Who usually collects water for the family?': 'Quem habitualmente colecta água para a família ?',
'Width (m)': 'Largura (m)',
'Width': 'Largura',
'Wild Fire': 'Fogo Selvagem',
'Wind Chill': 'Vento Frio',
'Window frame': 'Esquadria de janela',
'Winter Storm': 'Tempestade de inverno',
'Women of Child Bearing Age': 'Mulheres em Idade Fértil',
'Women participating in coping activities': 'Mulheres que participam em lidar atividades',
'Women who are Pregnant or in Labour': 'Mulheres Grávidas ou em Trabalho de Parto',
'Womens Focus Groups': 'Grupos Focais de Mulheres',
'Wooden plank': 'Tábua de madeira',
'Wooden poles': 'Postes de madeira',
'Working hours end': 'Horas de trabalho final',
'Working hours start': 'Horas de trabalho iniciar',
'Working or other to provide money/food': 'Trabalhando para outros para prover dinheiro / alimentos',
'X-Ray': 'Raio-X',
'Year built': 'Ano de construção',
'Year of Manufacture': 'Ano de fabricação',
'Yellow': 'amarelo',
'Yes': 'Sim',
'You are a recovery team?': 'Você é uma equipe de recuperação?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Você está tentando excluir sua própria conta-Tem certeza de que deseja continuar?',
'You are currently reported missing!': 'Você está atualmente desaparecido!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Você pode alterar a configuração do Módulo de Sincronização na seção configurações. Essa configuração inclui o seu UUID (número de identificação exclusivo), Planejamentos de Sincronização, serviço Farol e assim por diante. Clique no link a seguir para ir para a página Configurações de Sincronização.',
'You can click on the map below to select the Lat/Lon fields': 'Você pode clicar no mapa abaixo para selecionar os campos Lat/Lon',
'You can select the Draw tool': 'Pode selecionar a ferramenta Desenho',
'You can set the modem settings for SMS here.': 'Pode definir a configuração do modem SMS aqui.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Você pode utilizar a ferramenta de conversão para converter coordenadas de GPS ou graus/minutos/segundos.',
'You do no have permission to cancel this received shipment.': 'Você não tem permissão para cancelar o recebimento deste carregamento.',
'You do no have permission to cancel this sent shipment.': 'Você não tem permissão para cancelar o envio desse carregamento.',
'You do no have permission to make this commitment.': 'Você não tem permissão de fazer este compromisso.',
'You do no have permission to receive this shipment.': 'Você não tem permissão para receber este carregamento.',
'You do no have permission to send this shipment.': 'Você não tem permissão para enviar este carregamento.',
'You do not have permission for any facility to make a commitment.': 'Você não tem permissão em qualquer instalação para estabelecer um compromisso.',
'You do not have permission for any facility to make a request.': 'Você não tem permissão em qualquer instalação para fazer um pedido.',
'You do not have permission for any site to add an inventory item.': 'Você não tem permissão em qualquer site para incluir um item de inventário.',
'You do not have permission for any site to make a commitment.': 'Você não tem permissão em qualquer site para assumir um compromisso.',
'You do not have permission for any site to make a request.': 'Você não tem permissão em qualquer site para fazer um pedido.',
'You do not have permission for any site to perform this action.': 'Você não tem permissão em qualquer site para executar esta ação.',
'You do not have permission for any site to receive a shipment.': 'Você não tem permissão para qualquer site para receber um carregamento.',
'You do not have permission for any site to send a shipment.': 'Você não tem permissão em qualquer site para enviar um carregamento.',
'You do not have permission to cancel this received shipment.': 'Você não tem permissão para cancelar este carregamento recebido.',
'You do not have permission to cancel this sent shipment.': 'Você não tem permissão para cancelar essa remessa enviada.',
'You do not have permission to make this commitment.': 'Você não tem permissão para assumir este compromisso.',
'You do not have permission to receive this shipment.': 'Você não tem permissão para receber esta remessa.',
'You do not have permission to send a shipment from this site.': 'Você não tem permissão para enviar um carregamento a partir deste site.',
'You do not have permission to send this shipment.': 'Você não tem permissão para enviar este carregamento.',
'You have a personal map configuration. To change your personal configuration, click': 'Você tem uma configuração de mapa pessoal. Para alterar a sua configuração pessoal, clique',
'You have found a dead body?': 'Descobriu um cadáver ?',
'You must be logged in to register volunteers.': 'Você deve estar com login efetuado para registrar voluntários.',
'You must be logged in to report persons missing or found.': 'Você deve estar registrado para informar pessoas desaparecidas ou localizadas.',
'You must provide a series id to proceed.': 'Você deve fornecer um número de série para continuar.',
'You should edit Twitter settings in models/000_config.py': 'Você deve editar as definições do Twitter em modelos/000_config.py',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Seu lista de itens de solução pedidos aparece abaixo. Você pode alterá-lo ao votar novamente.',
'Your post was added successfully.': 'O post foi incluído com êxito.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Uma identificação exclusiva (UUID) foi designada para o seu sistema e poderá ser usada por outros computadores ao seu redor para identificá-lo. Para visualizar o seu UUID, você pode ir para Sincronização -> configurações Sync. Você também pode ver outras configurações nesta página.',
'Zero Hour': 'Hora Zero',
'Zinc roof': 'Telhado de Zinco',
'Zoom Levels': 'Níveis de Zoom',
'active': 'ativo',
'added': 'incluído',
'all records': 'todos os registros',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Permite que um orçamento seja desenvolvido com base em despesas com o pessoal e equipamento, incluindo quaisquer despesas gerais administrativas.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'permite a criação e gerenciamento de pesquisas para avaliar os danos após um desastre natural.',
'an individual/team to do in 1-2 days': 'Uma pessoa/Equipe para fazer em 1 Dias-2',
'assigned': 'designado',
'average': 'Na média',
'black': 'Preto',
'blond': 'Loiro',
'blue': 'azul',
'brown': 'Marrom',
'by': 'por',
'c/o Name': 'c/o Nome',
'can be used to extract data from spreadsheets and put them into database tables.': 'Pode ser utilizado para extrair dados de planilhas e colocá-los em tabelas de dados.',
'cancelled': 'CANCELADO',
'caucasoid': 'Caucasoid',
'check all': 'Verificar Tudo',
'click for more details': 'Clique para mais detalhes',
'completed': 'Concluído',
'confirmed': 'Confirmado',
'consider': 'considerar',
'curly': 'Encaracolado',
'currently registered': 'Atualmente registrados',
'daily': 'Diariamente',
'dark': 'Escuro',
'data uploaded': 'dados carregados',
'database %s select': '% de dados s SELECIONE',
'database': 'DATABASE',
'db': 'dB',
'deceased': 'Falecido',
'delete all checked': 'excluir todos marcados',
'deleted': 'excluídos',
'design': 'projecto',
'diseased': 'Doentes',
'displaced': 'Deslocadas',
'divorced': 'Divorciado',
'done!': 'Pronto!',
'duplicate': 'duplicar',
'edit': 'Editar',
'eg. gas, electricity, water': 'Exemplo: Gás, eletricidade, água',
'embedded': 'integrado',
'enclosed area': 'Área anexada',
'export as csv file': 'Exportar como arquivo cvs.',
'fat': 'Gordura',
'feedback': 'Retorno',
'female': 'Sexo Feminino',
'flush latrine with septic tank': 'esvaziar latrina com tanque séptico',
'food_sources': 'fuentes de alimento',
'forehead': 'testa',
'found': 'Localizado',
'from Twitter': 'do Twitter',
'green': 'verde',
'grey': 'cinza',
'here': 'aqui',
'high': 'Alta',
'hourly': 'Por hora',
'households': 'Membros da família',
'identified': 'identificado',
'ignore': 'Ignore',
'in Deg Min Sec format': 'GRAUS Celsius no formato Mín. Segundo',
'in GPS format': 'GPS no formato',
'inactive': 'inativo',
'injured': 'Feridos',
'insert new %s': 'inserir novo %s',
'insert new': 'inserir novo',
'invalid request': 'PEDIDO INVÁLIDO',
'invalid': 'inválido',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'É um repositório central de informações em tempo real onde vítimas de desastres e seus familiares, especialmente casos isolados, refugiados e pessoas deslocadas podem ser abrigados. Informações como nome, idade, Contate o número de Bilhete de Identidade número, localização Deslocadas, e outros detalhes são capturados. Detalhes de impressão Imagem e Dedo de as pessoas possam ser transferidos por upload para o sistema. As pessoas podem também ser capturados pelo grupo por eficiência e conveniência.',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'tem como visão ser composto de vários sub-módulos que interagem juntos a fim de fornecer funcionalidade complexa para o gerenciamento de itens de ajuda e projeto de uma organização. Isso inclui um sistema de admissão, um sistema de gestão de depósitos, rastreamento de mercadorias, gestão da cadeia de fornecimentos, de gestão da frota, aquisições, recursos de rastreamento financeiro de ativos e outros e gerenciamento de recursos',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Mantém controle de todos os bilhetes de entrada permitindo que sejam classificados & direcionados ao local apropriado para atuação.',
'latrines': 'privadas',
'leave empty to detach account': 'deixar em branco para desconectar a conta',
'legend URL': 'Legenda URL',
'light': 'luz',
'long': 'Longo',
'long>12cm': 'comprimento>12cm',
'low': 'baixo',
'male': 'masculino',
'manual': 'Manual',
'married': 'casado',
'medium': 'médio.',
'medium<12cm': 'médio<12cm',
'meters': 'metros',
'missing': 'ausente',
'module allows the site administrator to configure various options.': 'Módulo permite que o administrador do site configure várias opções.',
'module helps monitoring the status of hospitals.': 'Módulo ajuda monitorando o status de hospitais.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Módulo fornece um mecanismo para colaboração fornecem uma visão geral do desastre de desenvolvimento, utilização de mapeamento online (SIG).',
'mongoloid': 'Mongolóide',
'more': 'Mais',
'n/a': 'n/d',
'negroid': 'Negróide',
'never': 'Nunca',
'new record inserted': 'Novo registro inserido',
'new': 'Novo(a)',
'next 100 rows': 'próximas 100 linhas',
'none': 'nenhum',
'not accessible - no cached version available!': 'Não acessível-nenhuma versão em cache disponível!',
'not accessible - using cached version from': 'Não acessível-Utilizando versão em Cache',
'not specified': 'não especificado',
'num Zoom Levels': 'Num níveis de Zoom',
'obsolete': 'Obsoleto',
'on': 'Ligar',
'once': 'uma vez',
'open defecation': 'Abrir evacuação',
'optional': 'Optional',
'or import from csv file': 'ou importar a partir do arquivo csv',
'other': 'outros',
'over one hour': 'Mais de uma hora',
'people': 'pessoas',
'piece': 'parte',
'pit latrine': 'cova de latrina',
'pit': 'cova',
'postponed': 'Adiado',
'preliminary template or draft, not actionable in its current form': 'Modelo ou rascunho preliminar, não acionável em sua forma atual',
'previous 100 rows': '100 linhas anteriores',
'record does not exist': 'Registro não existe',
'record id': 'ID do Registro',
'red': 'vermelho',
'reported': 'relatado',
'reports successfully imported.': 'relatórios importados com êxito.',
'representation of the Polygon/Line.': 'Representação do polígono /Linha.',
'retired': 'Aposentado',
'river': 'Rio',
'see comment': 'Veja o comentário',
'selected': 'Selecionado',
'separated from family': 'Separados da família',
'separated': 'Separado',
'shaved': 'raspado',
'short': 'pequeno',
'short<6cm': 'pequeno<6cm',
'sides': 'lados',
'sign-up now': 'Inscreva-se agora',
'single': 'único',
'slim': 'estreito',
'specify': 'Especifique.',
'staff members': 'Membros da equipe',
'staff': 'equipe',
'state location': 'Localização do Estado',
'state': 'Estado',
'straight': 'reto',
'suffered financial losses': 'Sofreram perdas financeiras',
'tall': 'Altura',
'this': 'isto',
'to access the system': 'Para acessar o sistema',
'tonsure': 'tonsura',
'total': 'Total',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Módulo tweepy não disponível com a execução Python-isto necessita da instalação para suporte a tropo Twitter!',
'unable to parse csv file': 'Não é possível analisar arquivo csv',
'uncheck all': 'Desmarcar Tudo',
'unidentified': 'IDENTIFICADO',
'unspecified': 'UNSPECIFIED',
'unverified': 'Não Verificado',
'updated': 'Atualizado',
'updates only': 'Apenas atualizações',
'verified': 'Verificado',
'volunteer': 'voluntário',
'volunteers': 'Voluntários',
'wavy': 'Serpentina',
'weekly': 'Semanalmente',
'white': 'branco',
'wider area, longer term, usually contain multiple Activities': 'maior área, maior prazo, contém usualmente múltiplas actividades',
'widowed': 'Viúvo',
'window': 'janela',
'within human habitat': 'Dentro do habitat humano',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'Módulo Xlwt não disponível no módulo Python sendo executado - isto necessita ser instalado para saída XLS!',
'yes': 'YES',
}
| flavour/Turkey | languages/pt-br.py | Python | mit | 271,439 | [
"VisIt"
] | 251e3f6191d905b018c89b8889349085daa6cef99b3ef539b789d3abfcea5d6e |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic magnetizability tensor for DFT
Refs:
[1] R. Cammi, J. Chem. Phys., 109, 3185 (1998)
[2] Todd A. Keith, Chem. Phys., 213, 123 (1996)
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import jk
from pyscf.dft import numint
from pyscf.prop.nmr import rhf as rhf_nmr
from pyscf.prop.nmr import rks as rks_nmr
from pyscf.prop.magnetizability import rhf as rhf_mag
def dia(magobj, gauge_orig=None):
    '''Diamagnetic part of the DFT magnetizability tensor.

    Builds the second-order energy terms from the ground-state density of
    magobj._scf and returns the negative 3x3 tensor.  With an explicit
    gauge origin only the one-electron contribution is computed; with GIAOs
    (gauge_orig is None) the second-order XC, Coulomb and (for hybrids /
    range-separated functionals) exchange GIAO integrals are added.
    '''
    mol = magobj.mol
    mf = magobj._scf
    mo_energy = mf.mo_energy
    mo_coeff = mf.mo_coeff
    mo_occ = mf.mo_occ
    # Occupied orbitals; factor 2 accounts for the closed-shell (RKS)
    # double occupancy in both density matrices below.
    orbo = mo_coeff[:,mo_occ > 0]
    dm0 = numpy.dot(orbo, orbo.T) * 2
    dm0 = lib.tag_array(dm0, mo_coeff=mo_coeff, mo_occ=mo_occ)
    # Energy-weighted density matrix.
    dme0 = numpy.dot(orbo * mo_energy[mo_occ > 0], orbo.T) * 2

    e2 = rhf_mag._get_dia_1e(magobj, gauge_orig, dm0, dme0)
    if gauge_orig is not None:
        # Common-gauge case: no GIAO two-electron/XC terms needed.
        return -e2

    # Computing the 2nd order Vxc integrals from GIAO
    grids = mf.grids
    ni = mf._numint
    xc_code = mf.xc
    xctype = ni._xc_type(xc_code)
    omega, alpha, hyb = ni.rsh_and_hybrid_coeff(xc_code, mol.spin)
    make_rho, nset, nao = ni._gen_rho_evaluator(mol, dm0, hermi=1)
    ngrids = len(grids.weights)
    mem_now = lib.current_memory()[0]
    max_memory = max(2000, mf.max_memory*.9-mem_now)
    # Grid block size chosen so the per-block work arrays fit in max_memory;
    # must be a multiple of numint.BLKSIZE.
    BLKSIZE = numint.BLKSIZE
    blksize = min(int(max_memory/12*1e6/8/nao/BLKSIZE)*BLKSIZE, ngrids)

    # vmat[x,y] accumulates the XC contribution for magnetic-field
    # components x and y in the AO basis.
    vmat = numpy.zeros((3,3,nao,nao))
    if xctype == 'LDA':
        ao_deriv = 0
        for ao, mask, weight, coords \
                in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
                                 blksize=blksize):
            rho = make_rho(0, ao, mask, 'LDA')
            vxc = ni.eval_xc(xc_code, rho, 0, deriv=1)[1]
            vrho = vxc[0]
            # r * AO on each grid point (p: grid, x: Cartesian, i: AO).
            r_ao = numpy.einsum('pi,px->pxi', ao, coords)
            aow = numpy.einsum('pxi,p,p->pxi', r_ao, weight, vrho)
            vmat += lib.einsum('pxi,pyj->xyij', r_ao, aow)
            rho = vxc = vrho = aow = None

    elif xctype == 'GGA':
        ao_deriv = 1
        for ao, mask, weight, coords \
                in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
                                 blksize=blksize):
            rho = make_rho(0, ao, mask, 'GGA')
            vxc = ni.eval_xc(xc_code, rho, 0, deriv=1)[1]
            wv = numint._rks_gga_wv0(rho, vxc, weight)

            # Computing \nabla (r * AO) = r * \nabla AO + [\nabla,r]_- * AO
            r_ao = numpy.einsum('npi,px->npxi', ao, coords)
            r_ao[1,:,0] += ao[0]
            r_ao[2,:,1] += ao[0]
            r_ao[3,:,2] += ao[0]

            aow = numpy.einsum('npxi,np->pxi', r_ao, wv)
            vmat += lib.einsum('pxi,pyj->xyij', r_ao[0], aow)
            rho = vxc = vrho = vsigma = wv = aow = None
        # Symmetrize over the bra/ket AO indices (GGA integrals are built
        # one-sided in the loop above).
        vmat = vmat + vmat.transpose(0,1,3,2)

    elif xctype == 'MGGA':
        raise NotImplementedError('meta-GGA')

    vmat = _add_giao_phase(mol, vmat)
    e2 += numpy.einsum('qp,xypq->xy', dm0, vmat)
    vmat = None

    e2 = e2.ravel()
    # Handle the hybrid functional and the range-separated functional
    if abs(hyb) > 1e-10:
        vs = jk.get_jk(mol, [dm0]*3, ['ijkl,ji->s2kl',
                                      'ijkl,jk->s1il',
                                      'ijkl,li->s1kj'],
                       'int2e_gg1', 's4', 9, hermi=1)
        # Coulomb plus scaled HF exchange (two exchange contractions for
        # the two GIAO derivative patterns).
        e2 += numpy.einsum('xpq,qp->x', vs[0], dm0)
        e2 -= numpy.einsum('xpq,qp->x', vs[1], dm0) * .25 * hyb
        e2 -= numpy.einsum('xpq,qp->x', vs[2], dm0) * .25 * hyb
        vk = jk.get_jk(mol, dm0, 'ijkl,jk->s1il',
                       'int2e_g1g2', 'aa4', 9, hermi=0)
        e2 -= numpy.einsum('xpq,qp->x', vk, dm0) * .5 * hyb

        if abs(omega) > 1e-10:
            # Long-range correction for range-separated hybrids, scaled by
            # (alpha - hyb).
            with mol.with_range_coulomb(omega):
                vs = jk.get_jk(mol, [dm0]*2, ['ijkl,jk->s1il',
                                              'ijkl,li->s1kj'],
                               'int2e_gg1', 's4', 9, hermi=1)
                e2 -= numpy.einsum('xpq,qp->x', vs[0], dm0) * .25 * (alpha-hyb)
                e2 -= numpy.einsum('xpq,qp->x', vs[1], dm0) * .25 * (alpha-hyb)
                vk = jk.get_jk(mol, dm0, 'ijkl,jk->s1il',
                               'int2e_g1g2', 'aa4', 9, hermi=0)
                e2 -= numpy.einsum('xpq,qp->x', vk, dm0) * .5 * (alpha-hyb)
    else:
        # Pure functional: Coulomb GIAO term only.
        vj = jk.get_jk(mol, dm0, 'ijkl,ji->s2kl',
                       'int2e_gg1', 's4', 9, hermi=1)
        e2 += numpy.einsum('xpq,qp->x', vj, dm0)

    return -e2.reshape(3, 3)
def _add_giao_phase(mol, vmat):
    '''Add the factor i/2*(Ri-Rj) of the GIAO phase e^{i/2 (Ri-Rj) times r}

    vmat has shape (3, 3, nao, nao); the phase factor is applied as a
    cross-product-style contraction over each of the two magnetic-field
    axes in turn.
    '''
    ao_coords = rhf_mag._get_ao_coords(mol)
    # Pairwise half-differences of AO center coordinates, broadcast to
    # (nao, nao) per Cartesian component.
    Rx = .5 * (ao_coords[:,0:1] - ao_coords[:,0])
    Ry = .5 * (ao_coords[:,1:2] - ao_coords[:,1])
    Rz = .5 * (ao_coords[:,2:3] - ao_coords[:,2])
    # First contraction over axis 0 of vmat.
    vxc20 = numpy.empty_like(vmat)
    vxc20[0] = Ry * vmat[2] - Rz * vmat[1]
    vxc20[1] = Rz * vmat[0] - Rx * vmat[2]
    vxc20[2] = Rx * vmat[1] - Ry * vmat[0]
    # Swap buffers so the second contraction (over axis 1) can reuse the
    # original array instead of allocating a third one.
    vxc20, vmat = vmat, vxc20
    vxc20[:,0] = Ry * vmat[:,2] - Rz * vmat[:,1]
    vxc20[:,1] = Rz * vmat[:,0] - Rx * vmat[:,2]
    vxc20[:,2] = Rx * vmat[:,1] - Ry * vmat[:,0]
    # Overall sign from (i/2)^2 applied twice -> factor -1.
    vxc20 *= -1
    return vxc20
class Magnetizability(rhf_mag.Magnetizability):
    '''Non-relativistic DFT (RKS) magnetizability tensor.

    Reuses the Hartree-Fock driver from rhf_mag.Magnetizability and swaps
    in the DFT-specific pieces: the diamagnetic term defined in this
    module plus the Fock response and CPHF solver from the RKS NMR code.
    '''
    dia = dia
    get_fock = rks_nmr.get_fock
    solve_mo1 = rks_nmr.solve_mo1
if __name__ == '__main__':
    # Smoke tests: each print should emit ~0 if the implementation matches
    # the reference finger values (Ne and HF with LDA/B3LYP, water B3LYP).
    from pyscf import gto
    from pyscf import dft
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None

    mol.atom = [
        ['Ne' , (0. , 0. , 0.)], ]
    mol.basis='631g'
    mol.build()

    mf = dft.RKS(mol).run()
    mag = Magnetizability(mf).kernel()
    print(lib.finger(mag) - -0.30375149255154221)

    mf.set(xc = 'b3lyp').run()
    mag = Magnetizability(mf).kernel()
    print(lib.finger(mag) - -0.3022331813238171)

    mol.atom = [
        [1   , (0. , 0. , .917)],
        ['F' , (0. , 0. , 0. )], ]
    mol.basis = '6-31g'
    mol.build()

    mf = dft.RKS(mol).set(xc='lda,vwn').run()
    mag = Magnetizability(mf).kernel()
    print(lib.finger(mag) - -0.4313210213418015)

    mf = dft.RKS(mol).set(xc='b3lyp').run()
    mag = Magnetizability(mf).kernel()
    print(lib.finger(mag) - -0.42828345739100998)

    mol = gto.M(atom='''O      0.   0.       0.
                        H      0.  -0.757    0.587
                        H      0.   0.757    0.587''',
                basis='ccpvdz')
    mf = dft.RKS(mol)
    mf.xc = 'b3lyp'
    mf.run()
    mag = Magnetizability(mf).kernel()
    print(lib.finger(mag) - -0.61042958313712403)
| gkc1000/pyscf | pyscf/prop/magnetizability/rks.py | Python | apache-2.0 | 7,203 | [
"PySCF"
] | b427cc1159b76d843ab60e3aa40032865348d3f7517365806a0e8ff72a7bca11 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 James Clark <james.clark@ligo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Generate a catalogue with 3 waveforms:
1) Gaussian pulse
2) Chirp
3) IMR
"""
from __future__ import division
import os
import sys
__author__ = "James Clark <james.clark@ligo.org>"
import numpy as np
import scipy.signal as signal
import scipy.stats as stats
import scipy.io as sio
import lal
import lalsimulation as lalsim
#import pycbc
#import pycbc.filter
def truncparms(low, upp, mu, sigma):
    """Return the standardized (a, b) clip bounds for scipy.stats.truncnorm.

    truncnorm expects its truncation limits in units of standard deviations
    about the mean, i.e. (clip - mu) / sigma for each of the lower and upper
    clip values.
    """
    return (low - mu) / sigma, (upp - mu) / sigma
#
# Common Variables
#
# Sample rate (Hz) and segment length (s); time axis is centered on zero.
Fs=1024
datalen=1.
time_axis = np.arange(-0.5*datalen, 0.5*datalen, 1./Fs)

# The catalogue holds Nwaveforms columns per waveform family; sine-Gaussians
# occupy columns [0, Nwaveforms), chirps occupy [Nwaveforms, 2*Nwaveforms).
Nwaveforms = 1000
Ntypes = 2
catalogue = np.zeros(shape=(len(time_axis), Ntypes*Nwaveforms))

#
# Sine-Gaussians ('Gaussian Pulse')
#

# center frequency: truncated normal draw in [fc_low, fc_upp] Hz
fc_mu=500
fc_sigma=100
fc_low=10
fc_upp=1000
a, b = truncparms(fc_low, fc_upp, fc_mu, fc_sigma)
fcs = stats.truncnorm.rvs(a, b, loc=fc_mu, scale=fc_sigma, size=Nwaveforms)

# bandwidth: fractional bandwidth passed to scipy.signal.gausspulse
bw_mu=0.005
bw_sigma=0.001
bw_low=0.0001
bw_upp=0.1
a, b = truncparms(bw_low, bw_upp, bw_mu, bw_sigma)
bws = stats.truncnorm.rvs(a, b, loc=bw_mu, scale=bw_sigma, size=Nwaveforms)

# populate catalogue
# Tukey window (10% taper) suppresses edge discontinuities; each column is
# then normalised to unit inner product.
win = lal.CreateTukeyREAL8Window(len(time_axis), 0.1)
for i in xrange(Nwaveforms):
    catalogue[:,i] = win.data.data*signal.gausspulse(time_axis, fcs[i], bws[i])
    catalogue[:,i] /= np.sqrt(np.dot(catalogue[:,i], catalogue[:,i]))
#
# Chirps
#
# Linear chirps from f0 to a randomly drawn f1 over [0, 0.5] s, with an
# exponential amplitude envelope, zeroed after t=0.5 s and Tukey-windowed
# over the non-zero half.  Stored in columns [Nwaveforms, 2*Nwaveforms).
chirp_times = np.arange(0, datalen, 1.0/Fs)
idx = chirp_times<=0.5
win = lal.CreateTukeyREAL8Window(int(sum(idx)), 0.1)

# Envelope e-folding time and chirp start/end parameters.
tau=0.5
f0=100
t1=0.5

# end frequency: truncated normal draw in [f1_low, f1_upp] Hz
f1_mu = 500
f1_sigma = 100
f1_low = 10
f1_upp = 1000
a, b = truncparms(f1_low, f1_upp, f1_mu, f1_sigma)
f1s = stats.truncnorm.rvs(a, b, loc=f1_mu, scale=f1_sigma, size=Nwaveforms)

# random initial phase in degrees
phis = 180*np.random.random(Nwaveforms)

for i in xrange(Nwaveforms):
    catalogue[:,i+Nwaveforms] = signal.chirp(chirp_times, f0 = f0, t1 = t1, f1 = f1s[i],
            phi=phis[i])
    catalogue[:,i+Nwaveforms] *= np.exp(chirp_times / tau)
    catalogue[:,i+Nwaveforms][chirp_times>0.5] = 0.0
    catalogue[:,i+Nwaveforms][idx]*=win.data.data
    # BUG FIX: previously this normalised column i (a sine-Gaussian that was
    # already unit-norm) instead of the chirp column i+Nwaveforms, leaving
    # every chirp unnormalised.
    catalogue[:,i+Nwaveforms] /= np.sqrt(np.dot(catalogue[:,i+Nwaveforms],
                                                catalogue[:,i+Nwaveforms]))
#catalogue += 0.1*np.random.randn(Fs*datalen, Ntypes*Nwaveforms)

if 0:
    #
    # PCAT magic: Lifting the following from GMM.py in PCAT
    #
    # Disabled alternative path using the external PCAT package.
    import PCA, GMM

    score_matrix, principal_components, means, stds, eigenvalues = \
            PCA.PCA(catalogue, components_number=10)

    principal_components_number=10
    reduced_score_matrix = score_matrix[:,:principal_components_number]

    mat, tmp, tmp1 = PCA.matrix_whiten(reduced_score_matrix, std=True)

    #labels = GMM.gaussian_mixture(mat,upper_bound=5)
    labels = GMM.gaussian_mixture(reduced_score_matrix,upper_bound=5)

    colored_clusters = GMM.color_clusters( score_matrix, labels )
    GMM.print_cluster_info(colored_clusters)
    #sys.exit()

#
# PCA
#
#H = np.matrix(waveform_catalogue)
H = np.matrix(catalogue)

# --- 1) Compute catalogue covariance
C = H.T * H / np.shape(H)[0]

# --- 2) Compute eigenvectors (V) and eigenvalues (S) of C
S, V = np.linalg.eigh(C)

# --- 3) Sort eigenvectors in descending order
idx = np.argsort(S)[::-1]
V = V[:,idx]
S = S[idx]

# --- 4) Compute the eigenvectors of the real covariance matrix U and normalise
U = H*V
# NOTE(review): this divides the WHOLE matrix U by the norm of column i on
# every iteration rather than normalising each column (U[:,i] /= ...) —
# looks like a bug; confirm intended behaviour before changing.
for i in xrange(Ntypes*Nwaveforms):
    U /= np.linalg.norm(U[:,i])

# Coeffs
coeffs = H.T*U

# 'scores'
Z = np.dot(catalogue, coeffs)

# whiten
# NOTE(review): whitening is applied per time sample (rows), not per
# principal-component score column — verify this is the intended axis.
for i in xrange(np.shape(catalogue)[0]):
    Z[i,:] -= np.mean(Z[i,:])
    Z[i,:] /= np.std(Z[i,:])

# reduce
Z = Z[:50,:]

#
# GMM
#
# Fit GMMs with 1..5 components, pick the model minimising BIC, and report
# cluster membership percentages; repeated niterations times.
from sklearn import mixture

niterations=10
print ''
for i in xrange(niterations):
    bic=np.zeros(5)
    min_bic=np.inf
    for c in xrange(5):
        #print c
        g = mixture.GMM(n_components=c+1)
        g.fit(Z)
        bic[c] = g.bic(Z)
        if bic[c] < min_bic:
            min_bic=bic[c]
            best_g = g
    best_g.fit(Z)
    labels=best_g.predict(Z)

    nclusters=np.argmin(bic)+1
    print 'favoured # of clusters:', nclusters
    print 'Cluster membership:'
    for label in xrange(nclusters):
        print 'cluster %d: %.2f percent'%(label,
                float(sum(labels==label))/len(labels) )
| astroclark/bhextractor | patrec/classify/adhoc_cat.py | Python | gpl-2.0 | 4,977 | [
"Gaussian"
] | c12f1b53ec24040a9a17f3833f41a4bb3d9f8e210e71d07320e1b17e47a34aa1 |
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script retrieves the history of all V8 branches and trunk revisions and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
# gclient sync --with_branch_heads
# gclient fetch
import argparse
import csv
import itertools
import json
import os
import re
import sys
from common_includes import *
# Config keys; BRANCHNAME, PERSISTFILE_BASENAME etc. come from
# common_includes (star import above).
DEPS_FILE = "DEPS_FILE"
CHROMIUM = "CHROMIUM"

CONFIG = {
  BRANCHNAME: "retrieve-v8-releases",
  PERSISTFILE_BASENAME: "/tmp/v8-releases-tempfile",
  DOT_GIT_LOCATION: ".git",
  VERSION_FILE: "src/version.cc",
  DEPS_FILE: "DEPS",
}

# Expression for retrieving the bleeding edge revision from a commit message.
PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")

# Expression for retrieving the merged patches from a merge commit message
# (old and new format).
MERGE_MESSAGE_RE = re.compile(r"^.*[M|m]erged (.+)(\)| into).*$", re.M)

# Expression for retrieving reverted patches from a commit message (old and
# new format).
ROLLBACK_MESSAGE_RE = re.compile(r"^.*[R|r]ollback of (.+)(\)| in).*$", re.M)

# Expression for retrieving the code review link.
REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M)

# Expression with three versions (historical) for extracting the v8 revision
# from the chromium DEPS file.
DEPS_RE = re.compile(r'^\s*(?:"v8_revision": "'
                     '|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@'
                     '|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)'
                     '([0-9]+)".*$', re.M)

# Expression to pick tag and revision for bleeding edge tags. To be used with
# output of 'svn log'.
BLEEDING_EDGE_TAGS_RE = re.compile(
    r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
def SortBranches(branches):
  """Return the branch names ordered newest-first by version number.

  Uses SortingKey (from common_includes) so that numeric version strings
  compare correctly (e.g. "3.10" after "3.9").
  """
  ordered = sorted(branches, key=SortingKey)
  ordered.reverse()
  return ordered
def FilterDuplicatesAndReverse(cr_releases):
  """Returns the chromium releases in reverse order filtered by v8 revision
  duplicates.

  cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
  Walking the reversed list, only the first entry of each consecutive run
  with the same v8 revision is kept.
  """
  result = []
  previous_v8_rev = ""
  for entry in cr_releases[::-1]:
    v8_rev = entry[1]
    if v8_rev != previous_v8_rev:
      result.append(entry)
      previous_v8_rev = v8_rev
  return result
def BuildRevisionRanges(cr_releases):
  """Returns a mapping of v8 revision -> chromium ranges.

  The ranges are comma-separated, each range has the form R1:R2. The newest
  entry is the only one of the form R1, as there is no end range.

  cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
  cr_rev either refers to a chromium svn revision or a chromium branch number.

  Note: uses itertools.izip and dict.iteritems (Python 2 only).
  """
  range_lists = {}
  cr_releases = FilterDuplicatesAndReverse(cr_releases)

  # Visit pairs of cr releases from oldest to newest.
  for cr_from, cr_to in itertools.izip(
      cr_releases, itertools.islice(cr_releases, 1, None)):

    # Assume the chromium revisions are all different.
    assert cr_from[0] != cr_to[0]

    # TODO(machenbach): Subtraction is not git friendly.
    ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)

    # Collect the ranges in lists per revision.
    range_lists.setdefault(cr_from[1], []).append(ran)

  # Add the newest revision.
  if cr_releases:
    range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])

  # Stringify and comma-separate the range lists.
  return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems())
def MatchSafe(match):
  """Return group 1 of a regex match object, or "" when there was no match.

  Lets callers pass the result of re.match/re.search directly without a
  None check.
  """
  return match.group(1) if match else ""
class Preparation(Step):
  """Initial step: sanity-check the checkout and create the work branch."""
  MESSAGE = "Preparation."

  def RunStep(self):
    # Both helpers come from common_includes; PrepareBranch creates the
    # temporary branch named by CONFIG[BRANCHNAME].
    self.CommonPrepare()
    self.PrepareBranch()
class RetrieveV8Releases(Step):
  """Walks V8 branch histories and collects one record per release.

  Populates self["releases"] with dicts (see GetReleaseDict) sorted by
  version, newest first.  Which branches are walked depends on
  self._options.branch ('recent', 'all' or a specific branch name).
  """
  MESSAGE = "Retrieve all V8 releases."

  def ExceedsMax(self, releases):
    # True once the collected releases pass the user-supplied limit
    # (max_releases == 0 means unlimited).
    return (self._options.max_releases > 0
            and len(releases) > self._options.max_releases)

  def GetBleedingEdgeFromPush(self, title):
    # Extract the bleeding_edge SVN revision from a trunk-push commit title.
    return MatchSafe(PUSH_MESSAGE_RE.match(title))

  def GetMergedPatches(self, body):
    # Merged patches in the form 'r1234, r2345'; a rollback is reported
    # with a leading "-".
    patches = MatchSafe(MERGE_MESSAGE_RE.search(body))
    if not patches:
      patches = MatchSafe(ROLLBACK_MESSAGE_RE.search(body))
      if patches:
        # Indicate reverted patches with a "-".
        patches = "-%s" % patches
    return patches

  def GetReleaseDict(
      self, git_hash, bleeding_edge_rev, branch, version, patches, cl_body):
    """Build the canonical per-release record."""
    revision = self.GitSVNFindSVNRev(git_hash)
    return {
      # The SVN revision on the branch.
      "revision": revision,
      # The SVN revision on bleeding edge (only for newer trunk pushes).
      "bleeding_edge": bleeding_edge_rev,
      # The branch name.
      "branch": branch,
      # The version for displaying in the form 3.26.3 or 3.26.3.12.
      "version": version,
      # The date of the commit.
      "date": self.GitLog(n=1, format="%ci", git_hash=git_hash),
      # Merged patches if available in the form 'r1234, r2345'.
      "patches_merged": patches,
      # Default for easier output formatting.
      "chromium_revision": "",
      # Default for easier output formatting.
      "chromium_branch": "",
      # Link to the CL on code review. Trunk pushes are not uploaded, so this
      # field will be populated below with the recent roll CL link.
      "review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
      # Link to the commit message on google code.
      "revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
                        % revision),
    }

  def GetRelease(self, git_hash, branch):
    """Return (release_dict, patch_level) for one version-file commit."""
    self.ReadAndPersistVersion()
    base_version = [self["major"], self["minor"], self["build"]]
    version = ".".join(base_version)
    body = self.GitLog(n=1, format="%B", git_hash=git_hash)

    patches = ""
    if self["patch"] != "0":
      version += ".%s" % self["patch"]
      patches = self.GetMergedPatches(body)

    title = self.GitLog(n=1, format="%s", git_hash=git_hash)
    return self.GetReleaseDict(
        git_hash, self.GetBleedingEdgeFromPush(title), branch, version,
        patches, body), self["patch"]

  def GetReleasesFromBleedingEdge(self):
    # Bleeding edge releases are discovered via svn tags rather than
    # version-file changes.
    tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
    releases = []
    for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
      git_hash = self.GitSVNFindGitHash(revision)

      # Add bleeding edge release. It does not contain patches or a code
      # review link, as tags are not uploaded.
      releases.append(self.GetReleaseDict(
          git_hash, revision, "bleeding_edge", tag, "", ""))
    return releases

  def GetReleasesFromBranch(self, branch):
    self.GitReset("svn/%s" % branch)
    if branch == 'bleeding_edge':
      return self.GetReleasesFromBleedingEdge()

    releases = []
    try:
      # Every commit that touched the version file is a release candidate.
      for git_hash in self.GitLog(format="%H").splitlines():
        if self._config[VERSION_FILE] not in self.GitChangedFiles(git_hash):
          continue
        if self.ExceedsMax(releases):
          break  # pragma: no cover
        if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash):
          break  # pragma: no cover

        release, patch_level = self.GetRelease(git_hash, branch)
        releases.append(release)

        # Follow branches only until their creation point.
        # TODO(machenbach): This omits patches if the version file wasn't
        # manipulated correctly. Find a better way to detect the point where
        # the parent of the branch head leads to the trunk branch.
        if branch != "trunk" and patch_level == "0":
          break

    # Allow Ctrl-C interrupt.
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
      pass

    # Clean up checked-out version file.
    self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD")

    return releases

  def RunStep(self):
    self.GitCreateBranch(self._config[BRANCHNAME])
    # Get relevant remote branches, e.g. "svn/3.25".
    branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s),
                      self.GitRemotes())
    # Remove 'svn/' prefix.
    branches = map(lambda s: s[4:], branches)
    releases = []
    if self._options.branch == 'recent':
      # Get only recent development on trunk, beta and stable.
      if self._options.max_releases == 0:  # pragma: no cover
        self._options.max_releases = 10
      beta, stable = SortBranches(branches)[0:2]
      releases += self.GetReleasesFromBranch(stable)
      releases += self.GetReleasesFromBranch(beta)
      releases += self.GetReleasesFromBranch("trunk")
      releases += self.GetReleasesFromBranch("bleeding_edge")
    elif self._options.branch == 'all':  # pragma: no cover
      # Retrieve the full release history.
      for branch in branches:
        releases += self.GetReleasesFromBranch(branch)
      releases += self.GetReleasesFromBranch("trunk")
      releases += self.GetReleasesFromBranch("bleeding_edge")
    else:  # pragma: no cover
      # Retrieve history for a specified branch.
      assert self._options.branch in branches + ["trunk", "bleeding_edge"]
      releases += self.GetReleasesFromBranch(self._options.branch)
    self["releases"] = sorted(releases,
                              key=lambda r: SortingKey(r["version"]),
                              reverse=True)
# TODO(machenbach): Parts of the Chromium setup are c/p from the chromium_roll
# script -> unify.
class CheckChromium(Step):
  """Stores the chromium checkout path passed on the command line."""
  MESSAGE = "Check the chromium checkout."

  # NOTE(review): overrides Run (not RunStep), bypassing the common Step
  # bookkeeping — presumably intentional since it only records an option;
  # confirm against common_includes.Step.
  def Run(self):
    self["chrome_path"] = self._options.chromium
class SwitchChromium(Step):
  """Changes the working directory to the chromium checkout."""
  MESSAGE = "Switch to Chromium checkout."
  REQUIRES = "chrome_path"

  def RunStep(self):
    # Remember the V8 checkout location so SwitchV8 can return to it.
    self["v8_path"] = os.getcwd()
    os.chdir(self["chrome_path"])
    # Check for a clean workdir.
    if not self.GitIsWorkdirClean():  # pragma: no cover
      self.Die("Workspace is not clean. Please commit or undo your changes.")
    # Assert that the DEPS file is there.
    if not os.path.exists(self.Config(DEPS_FILE)):  # pragma: no cover
      self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
  """Syncs chromium master and creates the temporary work branch."""
  MESSAGE = "Update the checkout and create a new branch."
  REQUIRES = "chrome_path"

  def RunStep(self):
    os.chdir(self["chrome_path"])
    self.GitCheckout("master")
    self.GitPull()
    # Work branch in the chromium checkout; deleted again by SwitchV8.
    self.GitCreateBranch(self.Config(BRANCHNAME))
class RetrieveChromiumV8Releases(Step):
  """Maps chromium svn revisions to v8 revisions via historical DEPS files.

  Annotates each trunk release in self["releases"] with the chromium
  revision range ("chromium_revision") that shipped it.
  """
  MESSAGE = "Retrieve V8 releases from Chromium DEPS."
  REQUIRES = "chrome_path"

  def RunStep(self):
    os.chdir(self["chrome_path"])

    trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"])
    if not trunk_releases:  # pragma: no cover
      print "No trunk releases detected. Skipping chromium history."
      return True

    # Oldest v8 revision we still care about (trunk_releases is sorted
    # newest-first, so the last entry is the oldest).
    oldest_v8_rev = int(trunk_releases[-1]["revision"])

    cr_releases = []
    try:
      # Only commits mentioning "V8" can be DEPS rolls of interest.
      for git_hash in self.GitLog(format="%H", grep="V8").splitlines():
        if self._config[DEPS_FILE] not in self.GitChangedFiles(git_hash):
          continue
        if not self.GitCheckoutFileSafe(self._config[DEPS_FILE], git_hash):
          break  # pragma: no cover
        deps = FileToText(self.Config(DEPS_FILE))
        match = DEPS_RE.search(deps)
        if match:
          svn_rev = self.GitSVNFindSVNRev(git_hash)
          v8_rev = match.group(1)
          cr_releases.append([svn_rev, v8_rev])

          # Stop after reaching beyond the last v8 revision we want to update.
          # We need a small buffer for possible revert/reland frenzies.
          # TODO(machenbach): Subtraction is not git friendly.
          if int(v8_rev) < oldest_v8_rev - 100:
            break  # pragma: no cover

    # Allow Ctrl-C interrupt.
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
      pass

    # Clean up.
    self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD")

    # Add the chromium ranges to the v8 trunk releases.
    all_ranges = BuildRevisionRanges(cr_releases)
    trunk_dict = dict((r["revision"], r) for r in trunk_releases)
    for revision, ranges in all_ranges.iteritems():
      trunk_dict.get(revision, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
class RietrieveChromiumBranches(Step):
  # NOTE(review): class name is misspelled ("Rietrieve"); kept as-is because
  # the _Steps() list at the bottom of this file refers to this exact name.
  MESSAGE = "Retrieve Chromium branch information."
  REQUIRES = "chrome_path"

  def RunStep(self):
    os.chdir(self["chrome_path"])
    # Only trunk releases carry revisions comparable with DEPS rolls.
    trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"])
    if not trunk_releases:  # pragma: no cover
      print "No trunk releases detected. Skipping chromium history."
      return True

    # The oldest tracked trunk release bounds how far back we scan.
    oldest_v8_rev = int(trunk_releases[-1]["revision"])

    # Filter out irrelevant branches.
    branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
                      self.GitRemotes())

    # Transform into pure branch numbers.
    branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)),
                   branches)

    branches = sorted(branches, reverse=True)

    cr_branches = []
    try:
      # Newest branch first; read the rolled V8 revision from each
      # branch's DEPS file.
      for branch in branches:
        if not self.GitCheckoutFileSafe(self._config[DEPS_FILE],
                                        "branch-heads/%d" % branch):
          break  # pragma: no cover
        deps = FileToText(self.Config(DEPS_FILE))
        match = DEPS_RE.search(deps)
        if match:
          v8_rev = match.group(1)
          cr_branches.append([str(branch), v8_rev])

        # Stop after reaching beyond the last v8 revision we want to update.
        # We need a small buffer for possible revert/reland frenzies.
        # TODO(machenbach): Subtraction is not git friendly.
        if int(v8_rev) < oldest_v8_rev - 100:
          break  # pragma: no cover

    # Allow Ctrl-C interrupt.
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
      pass

    # Clean up: restore the working-tree DEPS file.
    self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD")

    # Add the chromium branches to the v8 trunk releases.
    all_ranges = BuildRevisionRanges(cr_branches)
    trunk_dict = dict((r["revision"], r) for r in trunk_releases)
    for revision, ranges in all_ranges.iteritems():
      trunk_dict.get(revision, {})["chromium_branch"] = ranges
class SwitchV8(Step):
  MESSAGE = "Returning to V8 checkout."
  REQUIRES = "chrome_path"

  def RunStep(self):
    """Drop the temporary Chromium work branch and return to the V8 tree."""
    self.GitCheckout("master")
    temp_branch = self.Config(BRANCHNAME)
    self.GitDeleteBranch(temp_branch)
    os.chdir(self["v8_path"])
class CleanUp(Step):
  MESSAGE = "Clean up."

  def RunStep(self):
    """Delegate to the base class's shared cleanup logic."""
    self.CommonCleanup()
class WriteOutput(Step):
  MESSAGE = "Print output."

  # Overrides Run (not RunStep), so this step is not skippable.
  def Run(self):
    if self._options.csv:
      with open(self._options.csv, "w") as f:
        # Export only the listed columns; extra keys in a release dict are
        # dropped (extrasaction="ignore") and missing ones become "".
        writer = csv.DictWriter(f,
                                ["version", "branch", "revision",
                                 "chromium_revision", "patches_merged"],
                                restval="",
                                extrasaction="ignore")
        for release in self["releases"]:
          writer.writerow(release)
    if self._options.json:
      # JSON export keeps all keys of every release dict.
      with open(self._options.json, "w") as f:
        f.write(json.dumps(self["releases"]))
    if not self._options.csv and not self._options.json:
      # No export target given: dump raw release dicts to stdout.
      print self["releases"]  # pragma: no cover
class Releases(ScriptsBase):
  # Script driver: wires command-line options and the ordered step list.
  def _PrepareOptions(self, parser):
    # Register the options understood by this script.
    parser.add_argument("-b", "--branch", default="recent",
                        help=("The branch to analyze. If 'all' is specified, "
                              "analyze all branches. If 'recent' (default) "
                              "is specified, track beta, stable and trunk."))
    parser.add_argument("-c", "--chromium",
                        help=("The path to your Chromium src/ "
                              "directory to automate the V8 roll."))
    parser.add_argument("--csv", help="Path to a CSV file for export.")
    parser.add_argument("-m", "--max-releases", type=int, default=0,
                        help="The maximum number of releases to track.")
    parser.add_argument("--json", help="Path to a JSON file for export.")

  def _ProcessOptions(self, options):  # pragma: no cover
    # All options are optional; nothing to validate.
    return True

  def _Steps(self):
    # Steps are executed in this order by the ScriptsBase driver.
    return [
      Preparation,
      RetrieveV8Releases,
      CheckChromium,
      SwitchChromium,
      UpdateChromiumCheckout,
      RetrieveChromiumV8Releases,
      RietrieveChromiumBranches,
      SwitchV8,
      CleanUp,
      WriteOutput,
    ]
# Script entry point: run all steps against the module-level CONFIG and
# exit with the driver's return code.
if __name__ == "__main__":  # pragma: no cover
  sys.exit(Releases(CONFIG).Run())
| kingland/go-v8 | v8-3.28/tools/push-to-trunk/releases.py | Python | mit | 16,571 | [
"VisIt"
] | 39c82260b93b132b7458d49da3d6e70a1f275670716bdb3c7f9e6e1315e75691 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Wang Yong
#
# Author: Wang Yong <lazycat.manatee@gmail.com>
# Maintainer: Wang Yong <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cache_pixbuf import CachePixbuf
from constant import DEFAULT_FONT_SIZE
from draw import draw_vlinear, draw_pixbuf, draw_line, draw_text
from keymap import get_keyevent_name
from label import Label
from theme import ui_theme
from utils import is_in_rect
import gobject
import gtk
import pango
from deepin_utils.process import run_command
from utils import (get_content_size, color_hex_to_cairo, propagate_expose, set_clickable_cursor,
window_is_max, get_same_level_widgets, widget_fix_cycle_destroy_bug,
get_widget_root_coordinate, WIDGET_POS_BOTTOM_LEFT)
# Public widget classes exported by this module.  Some names (e.g.
# DisableButton, LinkButton, ComboButton, SwitchButton) are presumably
# defined further down the file, beyond this excerpt — verify against the
# full source.
__all__ = ["Button", "ImageButton", "ThemeButton",
           "MenuButton", "MinButton", "CloseButton",
           "MaxButton", "ToggleButton", "ActionButton",
           "CheckButton", "RadioButton", "DisableButton",
           "LinkButton", "ComboButton", "SwitchButton"]
class Button(gtk.Button):
    '''
    Button with Deepin UI style.

    @undocumented: key_press_button
    @undocumented: expose_button
    '''

    def __init__(self,
                 label="",
                 font_size=DEFAULT_FONT_SIZE):
        '''
        Initialize Button class.

        @param label: Button label.
        @param font_size: Button label font size.
        '''
        gtk.Button.__init__(self)
        self.font_size = font_size
        # Minimum geometry keeps short labels from producing tiny buttons.
        self.min_width = 69
        self.min_height = 22
        self.padding_x = 15
        self.padding_y = 3

        self.set_label(label)
        self.connect("expose-event", self.expose_button)
        self.connect("key-press-event", self.key_press_button)
        # Keyboard activation: Return behaves like a click.
        self.keymap = {
            "Return" : self.clicked}

    def set_label(self, label, font_size=DEFAULT_FONT_SIZE):
        '''
        Set label of Button.

        @param label: Button label.
        @param font_size: Button label font size.
            NOTE(review): this argument is currently ignored — the size from
            __init__ (self.font_size) is used for measuring.  Kept as-is to
            avoid resetting callers' custom font size; confirm intent.
        '''
        self.label = label
        (self.label_width, self.label_height) = get_content_size(label, self.font_size)
        # Request at least the minimum size, otherwise label + padding.
        self.set_size_request(max(self.label_width + self.padding_x * 2, self.min_width),
                              max(self.label_height + self.padding_y * 2, self.min_height))

        self.queue_draw()

    def key_press_button(self, widget, event):
        '''
        Callback for `key-press-event` signal.

        @param widget: Button widget.
        @param event: Key press event.
        '''
        key_name = get_keyevent_name(event)
        # `in` instead of dict.has_key() (deprecated in 2.7, removed in 3).
        if key_name in self.keymap:
            self.keymap[key_name]()

    def expose_button(self, widget, event):
        '''
        Callback for `expose-event` signal: draws background, border,
        rounded corners and the centered label.

        @param widget: Button widget.
        @param event: Expose event.
        @return: Always True to stop further handling.
        '''
        # Init.
        cr = widget.window.cairo_create()
        rect = widget.allocation
        x, y, w, h = rect.x, rect.y, rect.width, rect.height

        # Get color info for the current widget state.  Unrecognized states
        # (e.g. STATE_SELECTED) fall back to the normal colors; previously
        # they left these variables unbound and raised NameError.
        if widget.state == gtk.STATE_PRELIGHT:
            text_color = ui_theme.get_color("button_font").get_color()
            border_color = ui_theme.get_color("button_border_prelight").get_color()
            background_color = ui_theme.get_shadow_color("button_background_prelight").get_color_info()
        elif widget.state == gtk.STATE_ACTIVE:
            text_color = ui_theme.get_color("button_font").get_color()
            border_color = ui_theme.get_color("button_border_active").get_color()
            background_color = ui_theme.get_shadow_color("button_background_active").get_color_info()
        elif widget.state == gtk.STATE_INSENSITIVE:
            text_color = ui_theme.get_color("disable_text").get_color()
            border_color = ui_theme.get_color("disable_frame").get_color()
            # Flat (non-gradient) fill for the disabled look.
            disable_background_color = ui_theme.get_color("disable_background").get_color()
            background_color = [(0, (disable_background_color, 1.0)),
                                (1, (disable_background_color, 1.0))]
        else:
            # STATE_NORMAL and any state without dedicated colors.
            text_color = ui_theme.get_color("button_font").get_color()
            border_color = ui_theme.get_color("button_border_normal").get_color()
            background_color = ui_theme.get_shadow_color("button_background_normal").get_color_info()

        # Draw background (inset by 1px so the border stays visible).
        draw_vlinear(
            cr,
            x + 1, y + 1, w - 2, h - 2,
            background_color)

        # Draw border.
        cr.set_source_rgb(*color_hex_to_cairo(border_color))
        draw_line(cr, x + 2, y + 1, x + w - 2, y + 1)    # top
        draw_line(cr, x + 2, y + h, x + w - 2, y + h)    # bottom
        draw_line(cr, x + 1, y + 2, x + 1, y + h - 2)    # left
        draw_line(cr, x + w, y + 2, x + w, y + h - 2)    # right

        # Draw the four corner points: one pixbuf rotated for each corner.
        if widget.state == gtk.STATE_INSENSITIVE:
            top_left_point = ui_theme.get_pixbuf("button/disable_corner.png").get_pixbuf()
        else:
            top_left_point = ui_theme.get_pixbuf("button/corner.png").get_pixbuf()
        top_right_point = top_left_point.rotate_simple(270)
        bottom_right_point = top_left_point.rotate_simple(180)
        bottom_left_point = top_left_point.rotate_simple(90)

        draw_pixbuf(cr, top_left_point, x, y)
        draw_pixbuf(cr, top_right_point, x + w - top_left_point.get_width(), y)
        draw_pixbuf(cr, bottom_left_point, x, y + h - top_left_point.get_height())
        draw_pixbuf(cr, bottom_right_point, x + w - top_left_point.get_width(), y + h - top_left_point.get_height())

        # Draw the centered label.
        draw_text(cr, self.label, x, y, w, h, self.font_size, text_color,
                    alignment=pango.ALIGN_CENTER)

        return True

gobject.type_register(Button)
class ImageButton(gtk.Button):
    '''
    ImageButton class: a button rendered from per-state pixbufs.
    '''

    def __init__(self,
                 normal_dpixbuf,
                 hover_dpixbuf,
                 press_dpixbuf,
                 scale_x=False,
                 content=None,
                 insensitive_dpixbuf=None):
        '''
        Initialize ImageButton class.

        @param normal_dpixbuf: DynamicPixbuf for button normal status.
        @param hover_dpixbuf: DynamicPixbuf for button hover status.
        @param press_dpixbuf: DynamicPixbuf for button press status.
        @param scale_x: Whether to scale horizontally, default is False.
        @param content: Button label content.
        @param insensitive_dpixbuf: DynamicPixbuf for button insensitive status, default is None.
        '''
        gtk.Button.__init__(self)
        cache_pixbuf = CachePixbuf()
        # Delegate size negotiation and expose handling to draw_button.
        draw_button(self,
                    cache_pixbuf,
                    normal_dpixbuf,
                    hover_dpixbuf,
                    press_dpixbuf,
                    scale_x,
                    content,
                    insensitive_dpixbuf=insensitive_dpixbuf)

    def set_active(self, is_active):
        '''
        Set active status.

        @param is_active: Set as True to make ImageButton active.
        '''
        # Purely visual: PRELIGHT selects the hover pixbuf on next expose.
        if is_active:
            self.set_state(gtk.STATE_PRELIGHT)
        else:
            self.set_state(gtk.STATE_NORMAL)

gobject.type_register(ImageButton)
class ThemeButton(gtk.Button):
    '''
    Titlebar button drawn with the window_theme artwork set.
    '''

    def __init__(self):
        '''
        Initialize ThemeButton class.
        '''
        gtk.Button.__init__(self)
        self.cache_pixbuf = CachePixbuf()
        # One DynamicPixbuf per widget state, all from the same art family.
        dpixbufs = tuple(
            ui_theme.get_pixbuf("button/window_theme_%s.png" % state)
            for state in ("normal", "hover", "press"))
        draw_button(self, self.cache_pixbuf, *dpixbufs)

gobject.type_register(ThemeButton)
class MenuButton(gtk.Button):
    '''
    Titlebar button drawn with the window_menu artwork set.
    '''

    def __init__(self):
        '''
        Initialize MenuButton class.
        '''
        gtk.Button.__init__(self)
        self.cache_pixbuf = CachePixbuf()
        # Load normal/hover/press artwork from the shared theme.
        normal, hover, press = [
            ui_theme.get_pixbuf("button/window_menu_%s.png" % state)
            for state in ("normal", "hover", "press")]
        draw_button(self, self.cache_pixbuf, normal, hover, press)

gobject.type_register(MenuButton)
class MinButton(gtk.Button):
    '''
    Titlebar button drawn with the window_min (minimize) artwork set.
    '''

    def __init__(self):
        '''
        Initialize MinButton class.
        '''
        gtk.Button.__init__(self)
        self.cache_pixbuf = CachePixbuf()
        # Load normal/hover/press artwork from the shared theme.
        dpixbufs = tuple(
            ui_theme.get_pixbuf("button/window_min_%s.png" % state)
            for state in ("normal", "hover", "press"))
        draw_button(self, self.cache_pixbuf, *dpixbufs)

gobject.type_register(MinButton)
class CloseButton(gtk.Button):
    '''
    Titlebar button drawn with the window_close artwork set.
    '''

    def __init__(self):
        '''
        Initialize CloseButton class.
        '''
        gtk.Button.__init__(self)
        self.cache_pixbuf = CachePixbuf()
        # Load normal/hover/press artwork from the shared theme.
        normal, hover, press = [
            ui_theme.get_pixbuf("button/window_close_%s.png" % state)
            for state in ("normal", "hover", "press")]
        draw_button(self, self.cache_pixbuf, normal, hover, press)

gobject.type_register(CloseButton)
class MaxButton(gtk.Button):
    '''
    Titlebar button that switches between maximize and unmaximize artwork.
    '''

    def __init__(self,
                 sub_dir="button",
                 max_path_prefix="window_max",
                 unmax_path_prefix="window_unmax"):
        '''
        Initialize MaxButton class.

        @param sub_dir: Subdirectory of button images.
        @param max_path_prefix: Image path prefix for the maximize artwork.
        @param unmax_path_prefix: Image path prefix for the unmaximize artwork.
        '''
        gtk.Button.__init__(self)
        self.cache_pixbuf = CachePixbuf()
        # draw_max_button wires sizing and the state-dependent expose logic.
        draw_max_button(self, self.cache_pixbuf,
                        sub_dir, max_path_prefix, unmax_path_prefix)

gobject.type_register(MaxButton)
def draw_button(widget,
                cache_pixbuf,
                normal_dpixbuf,
                hover_dpixbuf,
                press_dpixbuf,
                scale_x=False,
                button_label=None,
                font_size=DEFAULT_FONT_SIZE,
                label_dcolor=ui_theme.get_color("button_default_font"),
                insensitive_dpixbuf=None,
                ):
    '''
    Configure a widget's requested size and expose handler so it renders
    as a pixbuf button.

    @param widget: Gtk.Widget instance.
    @param cache_pixbuf: CachePixbuf, reused to avoid rescaling every expose.
    @param normal_dpixbuf: DynamicPixbuf of normal status.
    @param hover_dpixbuf: DynamicPixbuf of hover status.
    @param press_dpixbuf: DynamicPixbuf of press status.
    @param scale_x: Whether button width follows the label instead of the pixbuf.
    @param button_label: Button label, default is None.
    @param font_size: Button label font size, default is DEFAULT_FONT_SIZE.
    @param label_dcolor: Button label color (DynamicColor).
    @param insensitive_dpixbuf: DynamicPixbuf of insensitive status, default is None.
    '''
    # Init request size.
    # NOTE(review): with scale_x=True the width comes from the label, so a
    # non-None button_label is assumed here — confirm at call sites.
    if scale_x:
        request_width = get_content_size(button_label, font_size)[0]
    else:
        request_width = normal_dpixbuf.get_pixbuf().get_width()
    request_height = normal_dpixbuf.get_pixbuf().get_height()
    widget.set_size_request(request_width, request_height)

    # Render on every expose; scale_y is always False for plain buttons.
    widget.connect("expose-event", lambda w, e: expose_button(
            w, e,
            cache_pixbuf,
            scale_x, False,
            normal_dpixbuf, hover_dpixbuf, press_dpixbuf,
            button_label, font_size, label_dcolor, insensitive_dpixbuf))
def expose_button(widget,
                  event,
                  cache_pixbuf,
                  scale_x,
                  scale_y,
                  normal_dpixbuf,
                  hover_dpixbuf,
                  press_dpixbuf,
                  button_label,
                  font_size,
                  label_dcolor,
                  insensitive_dpixbuf=None):
    '''
    Expose callback for L{ I{draw_button} <draw_button>}.

    @param widget: Gtk.Widget instance.
    @param event: Expose event.
    @param cache_pixbuf: CachePixbuf, reused to avoid rescaling every expose.
    @param scale_x: Whether button scales width with the allocation.
    @param scale_y: Whether button scales height with the allocation.
    @param normal_dpixbuf: DynamicPixbuf of normal status.
    @param hover_dpixbuf: DynamicPixbuf of hover status.
    @param press_dpixbuf: DynamicPixbuf of press status.
    @param button_label: Button label, default is None.
    @param font_size: Button label font size, default is DEFAULT_FONT_SIZE.
    @param label_dcolor: Button label color (DynamicColor).
    @param insensitive_dpixbuf: DynamicPixbuf of insensitive status.
    @return: Always True to stop further handling.
    '''
    rect = widget.allocation

    # Pick the pixbuf matching the widget state.  Any state without
    # dedicated artwork (e.g. STATE_SELECTED) falls back to the normal
    # pixbuf; previously such states left `image` as None and crashed on
    # image.get_width() below.
    if widget.state == gtk.STATE_PRELIGHT:
        image = hover_dpixbuf.get_pixbuf()
    elif widget.state == gtk.STATE_ACTIVE:
        image = press_dpixbuf.get_pixbuf()
    elif widget.state == gtk.STATE_INSENSITIVE:
        if insensitive_dpixbuf is None:
            # No dedicated disabled artwork: reuse the normal one.
            insensitive_dpixbuf = normal_dpixbuf
        image = insensitive_dpixbuf.get_pixbuf()
    else:
        image = normal_dpixbuf.get_pixbuf()

    # Target size: follow the allocation on any scaled axis, otherwise the
    # pixbuf's own size.
    if scale_x:
        image_width = widget.allocation.width
    else:
        image_width = image.get_width()

    if scale_y:
        image_height = widget.allocation.height
    else:
        image_height = image.get_height()

    # Scale only when needed; the scaled result is cached in cache_pixbuf.
    pixbuf = image
    if pixbuf.get_width() != image_width or pixbuf.get_height() != image_height:
        cache_pixbuf.scale(image, image_width, image_height)
        pixbuf = cache_pixbuf.get_cache()
    cr = widget.window.cairo_create()
    draw_pixbuf(cr, pixbuf, widget.allocation.x, widget.allocation.y)

    # Draw the centered label, if any.
    if button_label:
        draw_text(cr, button_label,
                    rect.x, rect.y, rect.width, rect.height,
                    font_size,
                    label_dcolor.get_color(),
                    alignment=pango.ALIGN_CENTER
                    )

    # Propagate expose to children.
    propagate_expose(widget, event)

    return True
def draw_max_button(widget, cache_pixbuf, sub_dir, max_path_prefix, unmax_path_prefix):
    '''
    Configure a widget's requested size and expose handler so it renders
    as a maximize/unmaximize titlebar button.

    @param widget: Gtk.Widget instance.
    @param cache_pixbuf: CachePixbuf to avoid unnecessary pixbuf new operation.
    @param sub_dir: Subdirectory of button images.
    @param max_path_prefix: Prefix of maximize image path.
    @param unmax_path_prefix: Prefix of unmaximize image path.
    '''
    # Init request size from the unmaximize/normal artwork; all variants are
    # assumed to share the same dimensions — confirm against the theme files.
    pixbuf = ui_theme.get_pixbuf("%s/%s_normal.png" % (sub_dir, unmax_path_prefix)).get_pixbuf()
    widget.set_size_request(pixbuf.get_width(), pixbuf.get_height())

    # Redraw on every expose; artwork choice happens there, based on
    # whether the toplevel window is currently maximized.
    widget.connect("expose-event", lambda w, e:
                   expose_max_button(w, e,
                                     cache_pixbuf,
                                     sub_dir, max_path_prefix, unmax_path_prefix))
def expose_max_button(widget, event, cache_pixbuf, sub_dir, max_path_prefix, unmax_path_prefix):
    '''
    Expose callback for L{ I{draw_max_button} <draw_max_button>}.

    @param widget: Gtk.Widget instance.
    @param event: Expose event.
    @param cache_pixbuf: CachePixbuf (kept for interface compatibility; the
        pixbuf is drawn at its native size, so no scaling occurs here).
    @param sub_dir: Subdirectory for image path.
    @param max_path_prefix: Prefix of maximize image path.
    @param unmax_path_prefix: Prefix of unmaximize image path.
    @return: Always True to stop further handling.
    '''
    # When the window is already maximized the button shows the
    # "unmaximize" artwork, and vice versa.
    if window_is_max(widget):
        prefix = unmax_path_prefix
    else:
        prefix = max_path_prefix
    normal_dpixbuf = ui_theme.get_pixbuf("%s/%s_normal.png" % (sub_dir, prefix))
    hover_dpixbuf = ui_theme.get_pixbuf("%s/%s_hover.png" % (sub_dir, prefix))
    press_dpixbuf = ui_theme.get_pixbuf("%s/%s_press.png" % (sub_dir, prefix))

    # Pick artwork by widget state.  States without dedicated art (e.g.
    # STATE_INSENSITIVE) fall back to the normal pixbuf; the original left
    # `image` unbound for them and raised NameError.
    if widget.state == gtk.STATE_PRELIGHT:
        image = hover_dpixbuf.get_pixbuf()
    elif widget.state == gtk.STATE_ACTIVE:
        image = press_dpixbuf.get_pixbuf()
    else:
        image = normal_dpixbuf.get_pixbuf()

    # Draw button at the pixbuf's native size.  (The original's
    # cache_pixbuf scaling branch was unreachable: the target size was
    # always the pixbuf's own size.)
    cr = widget.window.cairo_create()
    draw_pixbuf(cr, image, widget.allocation.x, widget.allocation.y)

    # Propagate expose to children.
    propagate_expose(widget, event)

    return True
class ToggleButton(gtk.ToggleButton):
    '''
    ToggleButton class: two families of per-state pixbufs (inactive/active)
    rendered according to the widget state, with an optional text label.

    @undocumented: press_toggle_button
    @undocumented: release_toggle_button
    @undocumented: expose_toggle_button
    @undocumented: set_inactive_pixbuf_group
    @undocumented: set_active_pixbuf_group
    '''

    def __init__(self,
                 inactive_normal_dpixbuf,
                 active_normal_dpixbuf,
                 inactive_hover_dpixbuf=None,
                 active_hover_dpixbuf=None,
                 inactive_press_dpixbuf=None,
                 active_press_dpixbuf=None,
                 inactive_disable_dpixbuf=None,
                 active_disable_dpixbuf=None,
                 button_label=None,
                 padding_x=0,
                 font_size=DEFAULT_FONT_SIZE):
        '''
        Initialize ToggleButton class.

        @param inactive_normal_dpixbuf: DynamicPixbuf for inactive normal status.
        @param active_normal_dpixbuf: DynamicPixbuf for active normal status.
        @param inactive_hover_dpixbuf: DynamicPixbuf for inactive hover status, default is None.
        @param active_hover_dpixbuf: DynamicPixbuf for active hover status, default is None.
        @param inactive_press_dpixbuf: DynamicPixbuf for inactive press status, default is None.
        @param active_press_dpixbuf: DynamicPixbuf for active press status, default is None.
        @param inactive_disable_dpixbuf: DynamicPixbuf for inactive disable status, default is None.
        @param active_disable_dpixbuf: DynamicPixbuf for active disable status, default is None.
        @param button_label: Button label, default is None.
        @param padding_x: Horizontal padding, default is 0.
        @param font_size: Font size, default is DEFAULT_FONT_SIZE.
        '''
        gtk.ToggleButton.__init__(self)
        self.font_size = font_size
        label_dcolor = ui_theme.get_color("button_default_font")
        # True while a physical button press is in progress; the expose
        # handler uses it to pick the press artwork.
        self.button_press_flag = False

        # Pixbuf groups are (normal, hover, press, disable) tuples.
        self.inactive_pixbuf_group = (inactive_normal_dpixbuf,
                                      inactive_hover_dpixbuf,
                                      inactive_press_dpixbuf,
                                      inactive_disable_dpixbuf)
        self.active_pixbuf_group = (active_normal_dpixbuf,
                                    active_hover_dpixbuf,
                                    active_press_dpixbuf,
                                    active_disable_dpixbuf)

        # Init request size: artwork width plus label width plus padding.
        label_width = 0
        button_width = inactive_normal_dpixbuf.get_pixbuf().get_width()
        button_height = inactive_normal_dpixbuf.get_pixbuf().get_height()
        if button_label:
            label_width = get_content_size(button_label, self.font_size)[0]
        self.set_size_request(button_width + label_width + padding_x * 2,
                              button_height)

        self.connect("button-press-event", self.press_toggle_button)
        self.connect("button-release-event", self.release_toggle_button)

        # Expose button.
        self.connect("expose-event", lambda w, e : self.expose_toggle_button(
                w, e,
                button_label, padding_x, self.font_size, label_dcolor))

    def press_toggle_button(self, widget, event):
        '''
        Callback for `button-press-event` signal.

        @param widget: ToggleButton widget.
        @param event: Button press event.
        '''
        self.button_press_flag = True
        self.queue_draw()

    def release_toggle_button(self, widget, event):
        '''
        Callback for `button-release-event` signal.

        @param widget: ToggleButton widget.
        @param event: Button release event.
        '''
        self.button_press_flag = False
        self.queue_draw()

    def expose_toggle_button(self, widget, event,
                             button_label, padding_x, font_size, label_dcolor):
        '''
        Callback for `expose-event` signal: selects a pixbuf from the
        active/inactive groups by widget state and toggle value, then draws
        it with the optional left-aligned label.

        @param widget: ToggleButton widget.
        @param event: Expose event.
        @param button_label: Button label string.
        @param padding_x: Horizontal padding value.
        @param font_size: Font size.
        @param label_dcolor: Label DynamicColor.
        '''
        # Init.
        inactive_normal_dpixbuf, inactive_hover_dpixbuf, inactive_press_dpixbuf, inactive_disable_dpixbuf = self.inactive_pixbuf_group
        active_normal_dpixbuf, active_hover_dpixbuf, active_press_dpixbuf, active_disable_dpixbuf = self.active_pixbuf_group
        rect = widget.allocation
        # Default artwork; also the fallback for unhandled states.
        image = inactive_normal_dpixbuf.get_pixbuf()

        # Get pixbuf along with the button's state.
        if widget.state == gtk.STATE_INSENSITIVE:
            # NOTE(review): assumes disable pixbufs were provided when the
            # widget can be made insensitive; with the default None this
            # would raise — confirm at call sites.
            if widget.get_active():
                image = active_disable_dpixbuf.get_pixbuf()
            else:
                image = inactive_disable_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_NORMAL:
            image = inactive_normal_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_PRELIGHT:
            # Hover: use hover artwork when available, otherwise fall back
            # to the normal artwork of the current toggle value.
            if not inactive_hover_dpixbuf and not active_hover_dpixbuf:
                if widget.get_active():
                    image = active_normal_dpixbuf.get_pixbuf()
                else:
                    image = inactive_normal_dpixbuf.get_pixbuf()
            else:
                if inactive_hover_dpixbuf and active_hover_dpixbuf:
                    if widget.get_active():
                        image = active_hover_dpixbuf.get_pixbuf()
                    else:
                        image = inactive_hover_dpixbuf.get_pixbuf()
                elif inactive_hover_dpixbuf:
                    image = inactive_hover_dpixbuf.get_pixbuf()
                elif active_hover_dpixbuf:
                    image = active_hover_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_ACTIVE:
            # Pressed/active: press artwork only while the pointer button
            # is physically held down (button_press_flag).
            if inactive_press_dpixbuf and active_press_dpixbuf:
                if self.button_press_flag:
                    if widget.get_active():
                        image = active_press_dpixbuf.get_pixbuf()
                    else:
                        image = inactive_press_dpixbuf.get_pixbuf()
                else:
                    image = active_normal_dpixbuf.get_pixbuf()
            else:
                image = active_normal_dpixbuf.get_pixbuf()

        # Draw button.
        cr = widget.window.cairo_create()
        draw_pixbuf(cr, image, rect.x + padding_x, rect.y)

        # Draw font.
        if widget.state == gtk.STATE_INSENSITIVE:
            label_color = ui_theme.get_color("disable_text").get_color()
        else:
            label_color = label_dcolor.get_color()
        if button_label:
            draw_text(cr, button_label,
                        rect.x + image.get_width() + padding_x * 2,
                        rect.y,
                        rect.width - image.get_width() - padding_x * 2,
                        rect.height,
                        font_size,
                        label_color,
                        alignment=pango.ALIGN_LEFT
                        )

        # Propagate expose to children.
        propagate_expose(widget, event)

        return True

    def set_inactive_pixbuf_group(self, new_group):
        '''
        Set inactive pixbuf group.

        @param new_group: Inactive pixbuf group, a
            (normal, hover, press, disable) tuple of DynamicPixbufs.
        '''
        self.inactive_pixbuf_group = new_group

    def set_active_pixbuf_group(self, new_group):
        '''
        Set active pixbuf group.

        @param new_group: Active pixbuf group, a
            (normal, hover, press, disable) tuple of DynamicPixbufs.
        '''
        self.active_pixbuf_group = new_group
class ActionButton(gtk.Button):
    '''
    ActionButton: cycles through a list of actions on each click.

    @undocumented: expose_action_button
    '''

    def __init__(self, actions, index=0):
        '''
        Initialize for ActionButton class.

        @param actions: Actions for the button; each item is a pair of
            ((normal_dpixbuf, hover_dpixbuf, press_dpixbuf), click_callback).
        @param index: Initial action index, default is 0.
        '''
        gtk.Button.__init__(self)
        self.actions = actions
        self.index = index

        # Size the button to the first action's normal artwork.
        pixbuf = self.actions[self.index][0][0].get_pixbuf()
        self.set_size_request(pixbuf.get_width(), pixbuf.get_height())

        self.connect("expose-event", self.expose_action_button)
        self.connect("clicked", lambda w: self.update_action_index(w))

    def update_action_index(self, widget):
        '''
        Update action index of ActionButton.

        @param widget: ActionButton widget.
        '''
        # Call the current action's click callback first.
        self.actions[self.index][1](widget)

        # Then advance the index, wrapping around past the last action.
        self.index += 1
        if self.index >= len(self.actions):
            self.index = 0

        # Redraw with the new action's artwork.
        self.queue_draw()

    def expose_action_button(self, widget, event):
        '''
        Callback for `expose-event` signal.

        @param widget: ActionButton widget.
        @param event: Expose event.
        @return: Always return True.
        '''
        cr = widget.window.cairo_create()
        rect = widget.allocation

        # Pick artwork by widget state.  States without dedicated art
        # (e.g. STATE_INSENSITIVE) fall back to the normal pixbuf; the
        # original left `pixbuf` unbound for them and raised NameError.
        if widget.state == gtk.STATE_PRELIGHT:
            pixbuf = self.actions[self.index][0][1].get_pixbuf()
        elif widget.state == gtk.STATE_ACTIVE:
            pixbuf = self.actions[self.index][0][2].get_pixbuf()
        else:
            pixbuf = self.actions[self.index][0][0].get_pixbuf()
        draw_pixbuf(cr, pixbuf, rect.x, rect.y)

        # Propagate expose to children.
        propagate_expose(widget, event)

        return True

gobject.type_register(ActionButton)
class CheckButton(ToggleButton):
    '''
    CheckButton class: a ToggleButton pre-wired with checkbox artwork.
    '''

    def __init__(self,
                 label_text=None,
                 padding_x=2,
                 font_size=DEFAULT_FONT_SIZE):
        '''
        Initialize CheckButton class.

        @param label_text: Label text.
        @param padding_x: Horizontal padding value, default is 2.
        @param font_size: Font size, default is DEFAULT_FONT_SIZE.
        '''
        # Supply the full (normal, hover, press, disable) artwork for both
        # the unchecked (inactive) and checked (active) families.
        ToggleButton.__init__(
            self,
            ui_theme.get_pixbuf("button/check_button_inactive_normal.png"),
            ui_theme.get_pixbuf("button/check_button_active_normal.png"),
            ui_theme.get_pixbuf("button/check_button_inactive_hover.png"),
            ui_theme.get_pixbuf("button/check_button_active_hover.png"),
            ui_theme.get_pixbuf("button/check_button_inactive_press.png"),
            ui_theme.get_pixbuf("button/check_button_active_press.png"),
            ui_theme.get_pixbuf("button/check_button_inactive_disable.png"),
            ui_theme.get_pixbuf("button/check_button_active_disable.png"),
            label_text, padding_x, font_size
            )

gobject.type_register(CheckButton)
class CheckAllButton(gtk.ToggleButton):
'''
CheckAllButton class.
@undocumented: handle_click_event
@undocumented: press_toggle_button
@undocumented: release_toggle_button
@undocumented: expose_toggle_button
'''
__gsignals__ = {
"active-changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
    def __init__(self,
                 inactive_normal_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_normal.png"),
                 active_normal_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_normal.png"),
                 inactive_hover_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_hover.png"),
                 active_hover_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_hover.png"),
                 inactive_press_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_press.png"),
                 active_press_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_press.png"),
                 inactive_disable_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_disable.png"),
                 active_disable_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_disable.png"),
                 middle_disable_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_disable.png"),
                 middle_hover_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_hover.png"),
                 middle_normal_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_normal.png"),
                 middle_press_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_press.png"),
                 button_label=None,
                 padding_x=8,
                 font_size=DEFAULT_FONT_SIZE,
                 ):
        '''
        Initialize for CheckAllButton class.

        Note: the default DynamicPixbuf arguments are evaluated once, at
        module import time, and shared by all instances.

        @param inactive_normal_dpixbuf: DynamicPixbuf for button inactive normal status.
        @param active_normal_dpixbuf: DynamicPixbuf for button active normal status.
        @param inactive_hover_dpixbuf: DynamicPixbuf for button inactive hover status.
        @param active_hover_dpixbuf: DynamicPixbuf for button active hover status.
        @param inactive_press_dpixbuf: DynamicPixbuf for button inactive press status.
        @param active_press_dpixbuf: DynamicPixbuf for button active press status.
        @param inactive_disable_dpixbuf: DynamicPixbuf for button inactive disable status.
        @param active_disable_dpixbuf: DynamicPixbuf for button active disable status.
        @param middle_disable_dpixbuf: DynamicPixbuf for button middle disable status.
        @param middle_hover_dpixbuf: DynamicPixbuf for button middle hover status.
        @param middle_normal_dpixbuf: DynamicPixbuf for button middle normal status.
        @param middle_press_dpixbuf: DynamicPixbuf for button middle press status.
        @param button_label: Button label, default is None.
        @param padding_x: Horizontal padding, default is 8.
        @param font_size: Button label font size, default is DEFAULT_FONT_SIZE.
        '''
        gtk.ToggleButton.__init__(self)
        self.font_size = font_size
        label_dcolor = ui_theme.get_color("button_default_font")
        # True while a physical button press is in progress.
        self.button_press_flag = False

        # Pixbuf groups are (normal, hover, press, disable) tuples.
        self.inactive_pixbuf_group = (inactive_normal_dpixbuf,
                                      inactive_hover_dpixbuf,
                                      inactive_press_dpixbuf,
                                      inactive_disable_dpixbuf)
        self.active_pixbuf_group = (active_normal_dpixbuf,
                                    active_hover_dpixbuf,
                                    active_press_dpixbuf,
                                    active_disable_dpixbuf)
        self.middle_pixbuf_group = (middle_normal_dpixbuf,
                                    middle_hover_dpixbuf,
                                    middle_press_dpixbuf,
                                    middle_disable_dpixbuf,
                                    )
        # True when some (but not all) tracked buttons are checked.
        self.in_half_status = False

        # Init request size: artwork width plus label width plus padding.
        label_width = 0
        button_width = inactive_normal_dpixbuf.get_pixbuf().get_width()
        button_height = inactive_normal_dpixbuf.get_pixbuf().get_height()
        if button_label:
            label_width = get_content_size(button_label, self.font_size)[0]
        self.set_size_request(button_width + label_width + padding_x * 2,
                              button_height)

        self.connect("button-press-event", self.press_toggle_button)
        self.connect("button-release-event", self.release_toggle_button)

        # Expose button.
        self.connect("expose-event", lambda w, e : self.expose_toggle_button(
                w, e,
                button_label, padding_x, self.font_size, label_dcolor))

        self.connect("clicked", self.handle_click_event)
def update_status(self, actives):
'''
Update status of button.
@param actives: This is boolean list that include all button's active status, CheckAllButton will change status in INACTIVE/ACTIVE/HALF-ACTIVE.
'''
if actives.count(True) == len(actives):
self.set_half_status(False)
self.set_active(True)
elif actives.count(False) == len(actives):
self.set_half_status(False)
self.set_active(False)
else:
self.set_active(True)
self.set_half_status(True)
self.queue_draw()
def set_half_status(self, half_status):
'''
Set half active status.
'''
self.in_half_status = half_status
    def handle_click_event(self, widget):
        '''
        Internal callback for the `clicked` signal.

        @param widget: The CheckAllButton widget.
        '''
        # Clicking while half-checked resolves to fully unchecked.
        if self.in_half_status:
            self.set_active(False)
            self.in_half_status = False

        # Notify listeners of the resulting active state.
        self.emit("active-changed", self.get_active())
    def press_toggle_button(self, widget, event):
        '''
        Callback for `button-press-event` signal.

        @param widget: ToggleButton widget.
        @param event: Button press event.
        '''
        # Remember the pressed state so expose can pick the press artwork.
        self.button_press_flag = True
        self.queue_draw()
    def release_toggle_button(self, widget, event):
        '''
        Callback for `button-release-event` signal.

        @param widget: ToggleButton widget.
        @param event: Button release event.
        '''
        # Clear the pressed state and restore the non-press artwork.
        self.button_press_flag = False
        self.queue_draw()
    def expose_toggle_button(self, widget, event,
                             button_label, padding_x, font_size, label_dcolor):
        '''
        Callback for `expose-event` signal: paints the image that matches the
        button's INACTIVE/ACTIVE/HALF-ACTIVE status and the widget state,
        then draws the optional label to its right.

        @param widget: ToggleButton widget.
        @param event: Expose event.
        @param button_label: Button label string.
        @param padding_x: Horizontal padding value.
        @param font_size: Font size.
        @param label_dcolor: Label DynamicColor.
        '''
        # Init.
        inactive_normal_dpixbuf, inactive_hover_dpixbuf, inactive_press_dpixbuf, inactive_disable_dpixbuf = self.inactive_pixbuf_group
        active_normal_dpixbuf, active_hover_dpixbuf, active_press_dpixbuf, active_disable_dpixbuf = self.active_pixbuf_group
        middle_normal_dpixbuf, middle_hover_dpixbuf, middle_press_dpixbuf, middle_disable_dpixbuf = self.middle_pixbuf_group
        rect = widget.allocation
        # Fallback image in case no state branch below applies.
        image = inactive_normal_dpixbuf.get_pixbuf()
        # Get pixbuf along with button's state; the half (middle) images
        # take precedence over the plain active/inactive ones.
        if widget.state == gtk.STATE_INSENSITIVE:
            if self.in_half_status:
                image = middle_disable_dpixbuf.get_pixbuf()
            else:
                if widget.get_active():
                    image = active_disable_dpixbuf.get_pixbuf()
                else:
                    image = inactive_disable_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_NORMAL:
            # NOTE(review): this branch ignores get_active()/in_half_status;
            # presumably a toggled-on GTK toggle button at rest reports
            # STATE_ACTIVE, so STATE_NORMAL implies "off" -- confirm.
            image = inactive_normal_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_PRELIGHT:
            # Hover: use dedicated hover images when the theme provides them,
            # otherwise fall back to the normal images.
            if not inactive_hover_dpixbuf and not active_hover_dpixbuf:
                if self.in_half_status:
                    image = middle_normal_dpixbuf.get_pixbuf()
                else:
                    if widget.get_active():
                        image = active_normal_dpixbuf.get_pixbuf()
                    else:
                        image = inactive_normal_dpixbuf.get_pixbuf()
            else:
                if inactive_hover_dpixbuf and active_hover_dpixbuf:
                    if self.in_half_status:
                        image = middle_normal_dpixbuf.get_pixbuf()
                    else:
                        if widget.get_active():
                            image = active_hover_dpixbuf.get_pixbuf()
                        else:
                            image = inactive_hover_dpixbuf.get_pixbuf()
                elif inactive_hover_dpixbuf:
                    image = inactive_hover_dpixbuf.get_pixbuf()
                elif active_hover_dpixbuf:
                    if self.in_half_status:
                        image = middle_hover_dpixbuf.get_pixbuf()
                    else:
                        image = active_hover_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_ACTIVE:
            if inactive_press_dpixbuf and active_press_dpixbuf:
                if self.button_press_flag:
                    # Pointer is currently held down: show the press image.
                    if self.in_half_status:
                        image = middle_normal_dpixbuf.get_pixbuf()
                    else:
                        if widget.get_active():
                            image = active_press_dpixbuf.get_pixbuf()
                        else:
                            image = inactive_press_dpixbuf.get_pixbuf()
                else:
                    if self.in_half_status:
                        image = middle_normal_dpixbuf.get_pixbuf()
                    else:
                        image = active_normal_dpixbuf.get_pixbuf()
            else:
                if self.in_half_status:
                    image = middle_normal_dpixbuf.get_pixbuf()
                else:
                    image = active_normal_dpixbuf.get_pixbuf()
        # Draw button.
        cr = widget.window.cairo_create()
        draw_pixbuf(cr, image, rect.x + padding_x, rect.y)
        # Draw font; disabled widgets use the theme's disabled text color.
        if widget.state == gtk.STATE_INSENSITIVE:
            label_color = ui_theme.get_color("disable_text").get_color()
        else:
            label_color = label_dcolor.get_color()
        if button_label:
            draw_text(cr, button_label,
                      rect.x + image.get_width() + padding_x * 2,
                      rect.y,
                      rect.width - image.get_width() - padding_x * 2,
                      rect.height,
                      font_size,
                      label_color,
                      alignment=pango.ALIGN_LEFT
                      )
        # Propagate expose to children.
        propagate_expose(widget, event)
        return True
# Register CheckAllButton with the GObject type system so its custom
# "active-changed" signal can be emitted.
gobject.type_register(CheckAllButton)
class CheckButtonBuffer(gobject.GObject):
    '''
    CheckButtonBuffer class.

    Use to render CheckButton in TreeView widget.  The buffer keeps a small
    state machine (NORMAL/PRELIGHT/ACTIVE plus a press flag) that the owning
    TreeItem drives from its event callbacks.

    @undocumented: render
    '''

    STATE_NORMAL = 1
    STATE_PRELIGHT = 2
    STATE_ACTIVE = 3

    def __init__(self,
                 active=False,
                 render_padding_x=0,
                 render_padding_y=0,
                 ):
        '''
        Initialize CheckButtonBuffer class.

        @param active: Set True to active buffer status, default is False.
        @param render_padding_x: Horizontal padding value, default is 0.
        @param render_padding_y: Vertical padding value, default is 0.
        '''
        gobject.GObject.__init__(self)
        self.inactive_normal_dpixbuf = ui_theme.get_pixbuf("button/check_button_inactive_normal.png")
        self.active_normal_dpixbuf = ui_theme.get_pixbuf("button/check_button_active_normal.png")
        self.inactive_hover_dpixbuf = ui_theme.get_pixbuf("button/check_button_inactive_hover.png")
        self.active_hover_dpixbuf = ui_theme.get_pixbuf("button/check_button_active_hover.png")
        self.inactive_press_dpixbuf = ui_theme.get_pixbuf("button/check_button_inactive_press.png")
        self.active_press_dpixbuf = ui_theme.get_pixbuf("button/check_button_active_press.png")
        self.render_padding_x = render_padding_x
        self.render_padding_y = render_padding_y
        # Render area is sized after the normal-status image.
        pixbuf = self.inactive_normal_dpixbuf.get_pixbuf()
        self.render_width = pixbuf.get_width()
        self.render_height = pixbuf.get_height()
        self.active = active
        self.button_state = self.STATE_NORMAL
        # Fix: initialize the press flag explicitly.  render() reads it
        # whenever button_state is STATE_ACTIVE, and callers may drive the
        # state machine without a preceding press_button() call, which would
        # otherwise raise AttributeError.
        self.button_press_flag = False

    def get_active(self):
        '''
        Get active status of check button buffer.

        @return: Return True if buffer is in active status.
        '''
        return self.active

    def is_in_button_area(self, x, y):
        '''
        Helper function to detect button event is in button area.

        You can add this function in callback function of TreeItem, such as:
        - hover/unhover
        - motion_notify
        - button_press/button_release
        - single_click/double_click

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        '''
        return is_in_rect((x, y), (self.render_padding_x, self.render_padding_y, self.render_width, self.render_height))

    def press_button(self, x, y):
        '''
        Helper function to handle button-press-event.

        Toggles the active status when the press lands inside the button.

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        @return: True if the press was handled (a redraw is needed).
        '''
        if self.is_in_button_area(x, y):
            self.button_state = self.STATE_ACTIVE
            self.button_press_flag = True
            self.active = not self.active
            return True
        else:
            return False

    def release_button(self, x, y):
        '''
        Helper function to handle button-release-event.

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        @return: True if the release was handled (a redraw is needed).
        '''
        if self.is_in_button_area(x, y):
            self.button_state = self.STATE_ACTIVE
            self.button_press_flag = False
            return True
        else:
            return False

    def motion_button(self, x, y):
        '''
        Helper function to handle motion-notify event.

        Moves between NORMAL and PRELIGHT depending on pointer position.

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        @return: True when the state changed (a redraw is needed).
        '''
        if self.is_in_button_area(x, y):
            if self.button_state != self.STATE_PRELIGHT:
                self.button_state = self.STATE_PRELIGHT
                return True
            else:
                return False
        else:
            if self.button_state != self.STATE_NORMAL:
                self.button_state = self.STATE_NORMAL
                return True
            else:
                return False

    def render(self, cr, rect):
        # Get pixbuf along with button's state.
        if self.button_state == self.STATE_NORMAL:
            if self.active:
                image = self.active_normal_dpixbuf.get_pixbuf()
            else:
                image = self.inactive_normal_dpixbuf.get_pixbuf()
        elif self.button_state == self.STATE_PRELIGHT:
            if self.active:
                image = self.active_hover_dpixbuf.get_pixbuf()
            else:
                image = self.inactive_hover_dpixbuf.get_pixbuf()
        elif self.button_state == self.STATE_ACTIVE:
            if self.button_press_flag:
                # While the press is held, press_button() has already flipped
                # self.active, so the press image of the *previous* status is
                # shown here.
                if self.active:
                    image = self.inactive_press_dpixbuf.get_pixbuf()
                else:
                    image = self.active_press_dpixbuf.get_pixbuf()
            else:
                if self.active:
                    image = self.active_normal_dpixbuf.get_pixbuf()
                else:
                    image = self.inactive_normal_dpixbuf.get_pixbuf()
        # Draw button.
        draw_pixbuf(
            cr,
            image,
            rect.x + self.render_padding_x,
            rect.y + self.render_padding_y)

gobject.type_register(CheckButtonBuffer)
class RadioButton(ToggleButton):
    '''
    RadioButton class.

    A ToggleButton skinned with the radio-button images; selecting one
    deselects its siblings.

    @undocumented: click_radio_button
    '''

    def __init__(self,
                 label_text=None,
                 padding_x=2,
                 font_size=DEFAULT_FONT_SIZE,
                 ):
        '''
        Initialize RadioButton class.

        @param label_text: Label text.
        @param padding_x: Horizontal padding value, default is 2.
        @param font_size: Font size, default is DEFAULT_FONT_SIZE.
        '''
        ToggleButton.__init__(
            self,
            ui_theme.get_pixbuf("button/radio_button_inactive_normal.png"),
            ui_theme.get_pixbuf("button/radio_button_active_normal.png"),
            ui_theme.get_pixbuf("button/radio_button_inactive_hover.png"),
            ui_theme.get_pixbuf("button/radio_button_active_hover.png"),
            ui_theme.get_pixbuf("button/radio_button_inactive_press.png"),
            ui_theme.get_pixbuf("button/radio_button_active_press.png"),
            ui_theme.get_pixbuf("button/radio_button_inactive_disable.png"),
            ui_theme.get_pixbuf("button/radio_button_active_disable.png"),
            label_text,
            padding_x,
            font_size
            )
        # Re-entrancy guard: set_active() on siblings re-triggers `clicked`,
        # which must not cascade.
        self.switch_lock = False
        self.connect("clicked", self.click_radio_button)

    def click_radio_button(self, widget):
        '''
        Callback for `clicked` signal.

        Activates this button and deactivates every sibling.
        NOTE(review): grouping is presumably by shared parent container via
        get_same_level_widgets -- confirm against its implementation.

        @param widget: RadioButton widget.
        '''
        if not self.switch_lock:
            for w in get_same_level_widgets(self):
                w.switch_lock = True
                w.set_active(w == self)
                w.switch_lock = False

gobject.type_register(RadioButton)
class RadioButtonBuffer(gobject.GObject):
    '''
    RadioButtonBuffer class.

    Use to render RadioButton in TreeView widget.  The buffer keeps a small
    state machine (NORMAL/PRELIGHT/ACTIVE plus a press flag) that the owning
    TreeItem drives from its event callbacks.

    @undocumented: render
    '''

    STATE_NORMAL = 1
    STATE_PRELIGHT = 2
    STATE_ACTIVE = 3

    def __init__(self,
                 active=False,
                 render_padding_x=0,
                 render_padding_y=0,
                 ):
        '''
        Initialize RadioButtonBuffer class.

        @param active: Set True to active buffer status, default is False.
        @param render_padding_x: Horizontal padding value, default is 0.
        @param render_padding_y: Vertical padding value, default is 0.
        '''
        gobject.GObject.__init__(self)
        self.inactive_normal_dpixbuf = ui_theme.get_pixbuf("button/radio_button_inactive_normal.png")
        self.active_normal_dpixbuf = ui_theme.get_pixbuf("button/radio_button_active_normal.png")
        self.inactive_hover_dpixbuf = ui_theme.get_pixbuf("button/radio_button_inactive_hover.png")
        self.active_hover_dpixbuf = ui_theme.get_pixbuf("button/radio_button_active_hover.png")
        self.inactive_press_dpixbuf = ui_theme.get_pixbuf("button/radio_button_inactive_press.png")
        self.active_press_dpixbuf = ui_theme.get_pixbuf("button/radio_button_active_press.png")
        self.render_padding_x = render_padding_x
        self.render_padding_y = render_padding_y
        # Render area is sized after the normal-status image.
        pixbuf = self.inactive_normal_dpixbuf.get_pixbuf()
        self.render_width = pixbuf.get_width()
        self.render_height = pixbuf.get_height()
        self.active = active
        self.button_state = self.STATE_NORMAL
        # Fix: initialize the press flag explicitly.  render() reads it
        # whenever button_state is STATE_ACTIVE, and callers may drive the
        # state machine without a preceding press_button() call, which would
        # otherwise raise AttributeError.
        self.button_press_flag = False

    def get_active(self):
        '''
        Get active status of radio button buffer.

        @return: Return True if buffer is in active status.
        '''
        return self.active

    def set_active(self):
        # Force the buffer into the selected (active) state, e.g. when a
        # sibling group is updated externally.
        self.button_state = self.STATE_ACTIVE
        self.button_press_flag = False
        self.active = True

    def is_in_button_area(self, x, y):
        '''
        Helper function to detect button event is in button area.

        You can add this function in callback function of TreeItem, such as:
        - hover/unhover
        - motion_notify
        - button_press/button_release
        - single_click/double_click

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        '''
        return is_in_rect((x, y), (self.render_padding_x, self.render_padding_y, self.render_width, self.render_height))

    def press_button(self, x, y):
        '''
        Helper function to handle button-press-event.

        Unlike CheckButtonBuffer, a press only ever turns the radio ON;
        deselecting happens through the sibling group.

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        @return: True if the press was handled (a redraw is needed).
        '''
        if self.is_in_button_area(x, y) and not self.active:
            self.button_state = self.STATE_ACTIVE
            self.button_press_flag = True
            self.active = True
            return True
        else:
            return False

    def release_button(self, x, y):
        '''
        Helper function to handle button-release-event.

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        @return: True if the release was handled (a redraw is needed).
        '''
        if self.is_in_button_area(x, y):
            self.button_state = self.STATE_ACTIVE
            self.button_press_flag = False
            return True
        else:
            return False

    def motion_button(self, x, y):
        '''
        Helper function to handle motion-notify event.

        Moves between NORMAL and PRELIGHT depending on pointer position.

        @param x: X coordinate of button event.
        @param y: Y coordinate of button event.
        @return: True when the state changed (a redraw is needed).
        '''
        if self.is_in_button_area(x, y):
            if self.button_state != self.STATE_PRELIGHT:
                self.button_state = self.STATE_PRELIGHT
                return True
            else:
                return False
        else:
            if self.button_state != self.STATE_NORMAL:
                self.button_state = self.STATE_NORMAL
                return True
            else:
                return False

    def render(self, cr, rect):
        # Get pixbuf along with button's state.
        if self.button_state == self.STATE_NORMAL:
            if self.active:
                image = self.active_normal_dpixbuf.get_pixbuf()
            else:
                image = self.inactive_normal_dpixbuf.get_pixbuf()
        elif self.button_state == self.STATE_PRELIGHT:
            if self.active:
                image = self.active_hover_dpixbuf.get_pixbuf()
            else:
                image = self.inactive_hover_dpixbuf.get_pixbuf()
        elif self.button_state == self.STATE_ACTIVE:
            if self.button_press_flag:
                # While the press is held, press_button() has already set
                # self.active, so the press image of the *previous* status is
                # shown here.
                if self.active:
                    image = self.inactive_press_dpixbuf.get_pixbuf()
                else:
                    image = self.active_press_dpixbuf.get_pixbuf()
            else:
                if self.active:
                    image = self.active_normal_dpixbuf.get_pixbuf()
                else:
                    image = self.inactive_normal_dpixbuf.get_pixbuf()
        # Draw button.
        draw_pixbuf(
            cr,
            image,
            rect.x + self.render_padding_x,
            rect.y + self.render_padding_y)

gobject.type_register(RadioButtonBuffer)
class DisableButton(gtk.Button):
    '''
    DisableButton class.

    A plain image button with distinct normal/hover/press/disable images.

    @undocumented: expose_disable_button
    '''

    def __init__(self, dpixbufs):
        '''
        Initialize DisableButton class.

        @param dpixbufs: Tuple of DynamicPixbuf:
        (normal_dpixbuf, hover_dpixbuf, press_dpixbuf, disable_dpixbuf).
        '''
        gtk.Button.__init__(self)
        # Size the button after the normal-status image.
        pixbuf = dpixbufs[0].get_pixbuf()
        self.set_size_request(pixbuf.get_width(), pixbuf.get_height())
        widget_fix_cycle_destroy_bug(self)
        self.connect("expose-event", lambda w, e: self.expose_disable_button(w, e, dpixbufs))

    def expose_disable_button(self, widget, event, dpixbufs):
        '''
        Callback for `expose-event` signal.

        @param widget: DisableButton widget.
        @param event: Expose event.
        @param dpixbufs: DynamicPixbufs.
        '''
        # Init.
        cr = widget.window.cairo_create()
        rect = widget.allocation
        (normal_dpixbuf, hover_dpixbuf, press_dpixbuf, disable_dpixbuf) = dpixbufs
        # Pick the image matching the widget state.  Fix: fall back to the
        # normal image for STATE_NORMAL *and* any state not handled
        # explicitly (e.g. gtk.STATE_SELECTED), so `pixbuf` can never be
        # unbound when draw_pixbuf is reached.
        if widget.state == gtk.STATE_INSENSITIVE:
            pixbuf = disable_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_PRELIGHT:
            pixbuf = hover_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_ACTIVE:
            pixbuf = press_dpixbuf.get_pixbuf()
        else:
            pixbuf = normal_dpixbuf.get_pixbuf()
        draw_pixbuf(cr, pixbuf, rect.x, rect.y)
        # Propagate expose to children.
        propagate_expose(widget, event)
        return True

gobject.type_register(DisableButton)
class LinkButton(Label):
    '''
    LinkButton click to open browser.
    '''

    def __init__(self,
                 text,
                 link,
                 enable_gaussian=True,
                 text_color=ui_theme.get_color("link_text"),
                 ):
        '''
        Initialize LinkButton class.

        @param text: Link content.
        @param link: Link address.
        @param enable_gaussian: To enable gaussian effect on link, default is True.
        @param text_color: Link color, just use when option enable_gaussian is False.
        '''
        Label.__init__(self, text, text_color, enable_gaussian=enable_gaussian, text_size=9,
                       gaussian_radious=1, border_radious=0)

        # NOTE(review): `link` is interpolated into a command string passed
        # to run_command; a link containing shell metacharacters could alter
        # the command.  Safe for trusted, hard-coded URLs -- confirm callers
        # never pass untrusted input here.
        self.connect("button-press-event", lambda w, e: run_command("xdg-open %s" % link))

        set_clickable_cursor(self)

gobject.type_register(LinkButton)
class ComboButton(gtk.Button):
    '''
    ComboButton class: a push button with an attached drop-down arrow that
    emits separate signals for clicks on the button part and the arrow part.

    @undocumented: expose_combo_button
    @undocumented: button_press_combo_button
    @undocumented: click_combo_button
    '''

    __gsignals__ = {
        "button-clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "arrow-clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (int, int, int, int)),
    }

    def __init__(self,
                 button_normal_dpixbuf,
                 button_hover_dpixbuf,
                 button_press_dpixbuf,
                 button_disable_dpixbuf,
                 arrow_normal_dpixbuf,
                 arrow_hover_dpixbuf,
                 arrow_press_dpixbuf,
                 arrow_disable_dpixbuf,
                 ):
        '''
        Initialize ComboButton class.

        @param button_normal_dpixbuf: DynamicPixbuf of button normal status.
        @param button_hover_dpixbuf: DynamicPixbuf of button hover status.
        @param button_press_dpixbuf: DynamicPixbuf of button press status.
        @param button_disable_dpixbuf: DynamicPixbuf of button disable status.
        @param arrow_normal_dpixbuf: DynamicPixbuf of arrow normal status.
        @param arrow_hover_dpixbuf: DynamicPixbuf of arrow hover status.
        @param arrow_press_dpixbuf: DynamicPixbuf of arrow press status.
        @param arrow_disable_dpixbuf: DynamicPixbuf of arrow disable status.
        '''
        # Init.
        gtk.Button.__init__(self)
        self.button_normal_dpixbuf = button_normal_dpixbuf
        self.button_hover_dpixbuf = button_hover_dpixbuf
        self.button_press_dpixbuf = button_press_dpixbuf
        self.button_disable_dpixbuf = button_disable_dpixbuf
        self.arrow_normal_dpixbuf = arrow_normal_dpixbuf
        self.arrow_hover_dpixbuf = arrow_hover_dpixbuf
        self.arrow_press_dpixbuf = arrow_press_dpixbuf
        self.arrow_disable_dpixbuf = arrow_disable_dpixbuf
        # The widget is the button image and the arrow image side by side.
        button_pixbuf = button_normal_dpixbuf.get_pixbuf()
        arrow_pixbuf = arrow_normal_dpixbuf.get_pixbuf()
        self.button_width = button_pixbuf.get_width()
        self.arrow_width = arrow_pixbuf.get_width()
        self.height = button_pixbuf.get_height()
        self.set_size_request(self.button_width + self.arrow_width, self.height)
        # True while the last press landed on the button half (as opposed to
        # the arrow half); decides which signal click_combo_button emits.
        self.in_button = True

        self.connect("expose-event", self.expose_combo_button)
        self.connect("button-press-event", self.button_press_combo_button)
        self.connect("clicked", self.click_combo_button)

    def expose_combo_button(self, widget, event):
        # Init.
        cr = widget.window.cairo_create()
        rect = widget.allocation
        # Get pixbuf info.  Fix: default to the normal images for
        # STATE_NORMAL and any state not handled explicitly (e.g.
        # gtk.STATE_SELECTED), so the locals are always bound.
        if widget.state == gtk.STATE_PRELIGHT:
            button_pixbuf = self.button_hover_dpixbuf.get_pixbuf()
            arrow_pixbuf = self.arrow_hover_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_ACTIVE:
            # Only the pressed half shows the press image.
            if self.in_button:
                button_pixbuf = self.button_press_dpixbuf.get_pixbuf()
                arrow_pixbuf = self.arrow_hover_dpixbuf.get_pixbuf()
            else:
                button_pixbuf = self.button_hover_dpixbuf.get_pixbuf()
                arrow_pixbuf = self.arrow_press_dpixbuf.get_pixbuf()
        elif widget.state == gtk.STATE_INSENSITIVE:
            button_pixbuf = self.button_disable_dpixbuf.get_pixbuf()
            arrow_pixbuf = self.arrow_disable_dpixbuf.get_pixbuf()
        else:
            button_pixbuf = self.button_normal_dpixbuf.get_pixbuf()
            arrow_pixbuf = self.arrow_normal_dpixbuf.get_pixbuf()
        # Draw.
        draw_pixbuf(cr, button_pixbuf, rect.x, rect.y)
        draw_pixbuf(cr, arrow_pixbuf, rect.x + self.button_width, rect.y)
        return True

    def button_press_combo_button(self, widget, event):
        # Record which half of the widget the press hit.
        self.in_button = event.x < self.button_width

    def click_combo_button(self, widget):
        if self.in_button:
            self.emit("button-clicked")
        else:
            # Report the popup anchor (root coordinates of the arrow's
            # bottom-left corner) together with the arrow size.
            (button_x, button_y) = get_widget_root_coordinate(self, WIDGET_POS_BOTTOM_LEFT)
            self.emit("arrow-clicked",
                      button_x + self.button_width,
                      button_y,
                      self.arrow_width,
                      self.height)

gobject.type_register(ComboButton)
class SwitchButton(ToggleButton):
    '''
    SwitchButton class.

    An on/off toggle rendered with the theme's switchbutton images.
    '''

    def __init__(self, active=False, inactive_disable_dpixbuf=None, active_disable_dpixbuf=None):
        '''
        Initialize SwitchButton class.

        @param active: Button active status, default is False.
        @param inactive_disable_dpixbuf: DynamicPixbuf for the disabled "off" look, default is None.
        @param active_disable_dpixbuf: DynamicPixbuf for the disabled "on" look, default is None.
        '''
        off_dpixbuf = ui_theme.get_pixbuf("switchbutton/off.png")
        on_dpixbuf = ui_theme.get_pixbuf("switchbutton/on.png")
        # Pass the disable images through only when both are supplied.
        extra_kwargs = {}
        if inactive_disable_dpixbuf and active_disable_dpixbuf:
            extra_kwargs = dict(inactive_disable_dpixbuf=inactive_disable_dpixbuf,
                                active_disable_dpixbuf=active_disable_dpixbuf)
        ToggleButton.__init__(self, off_dpixbuf, on_dpixbuf, **extra_kwargs)
        self.set_active(active)

gobject.type_register(SwitchButton)
| linuxdeepin/deepin-ui | dtk/ui/button.py | Python | gpl-3.0 | 60,880 | [
"Gaussian"
] | fe47e1e6069fa160a20359be91456feb31a83e31a41013e1902ba361540c4a1b |
""" Top-level module to build or re-build the JSON files for
FRB host galaxies"""
from pkg_resources import resource_filename
import os
import sys
from IPython import embed
import numpy as np
import pandas
from astropy.coordinates import SkyCoord
from astropy import units
from astropath.priors import load_std_priors
from frb.frb import FRB
from frb.associate import frbassociate
from frb.associate import frbs
from frb.galaxies import hosts
from frb import utils
import pandas
# The FRB galaxy database (GDB) must be available locally; everything below
# reads from it.
db_path = os.getenv('FRB_GDB')
if db_path is None:
    raise IOError('You need to have GDB!!')
def run(frb_list:list, host_coords:list, prior:dict,
        override:bool=False):
    """Run PATH on every FRB that has an association configuration.

    Args:
        frb_list (list): FRB names from the database.
        host_coords (list): Host galaxy coordinates from the database.
        prior (dict): Prior for PATH.
        override (bool, optional): Attempt to over-ride errors.
            Mainly for time-outs of public data. Defaults to False.

    Returns:
        pandas.DataFrame: One row per analyzed FRB with coordinates,
        angular size and PATH probabilities.
    """
    records = []
    for frb, host_coord in zip(frb_list, host_coords):
        frb_name = utils.parse_frb_name(frb, prefix='frb')

        # Skip FRBs without a PATH configuration in frb.associate.frbs.
        if not hasattr(frbs, frb_name.lower()):
            print(f"PATH analysis not possible for {frb_name}")
            continue
        print(f"Performing PATH on {frb_name}")

        config = getattr(frbs, frb_name.lower())
        frbA = frbassociate.run_individual(config, prior=prior)
        if frbA is None:
            print(f"PATH analysis not possible for {frb_name}")
            continue

        # First row of the candidate table (presumably the top PATH
        # candidate -- confirm ordering in frbassociate).
        top = frbA.candidates
        records.append({'FRB': frb_name.upper(),
                        'RA': host_coord.ra.deg,
                        'Dec': host_coord.dec.deg,
                        'ang_size': top.ang_size.values[0],
                        'P_O': top.P_O.values[0],
                        'P_Ox': top.P_Ox.values[0]})

    # Explicit column list keeps the original column order, also for an
    # empty result.
    return pandas.DataFrame(records, columns=['FRB', 'RA', 'Dec',
                                              'ang_size', 'P_O', 'P_Ox'])
def main(options:str=None):
    """ Driver of the analysis

    Args:
        options (str, optional): Option string; only the substring
            'new_prior' is recognized here. Defaults to None.
    """
    # Read public host table
    host_tbl = hosts.load_host_tbl()
    host_coords = [SkyCoord(host_coord, frame='icrs') for host_coord in host_tbl.Coord.values]

    # Generate FRBs for PATH analysis
    frb_list = host_tbl.FRB.values.tolist()

    # Load prior
    priors = load_std_priors()
    prior = priors['adopted'] # Default

    # Parse options
    if options is not None:
        if 'new_prior' in options:
            # Swap in an exponential theta prior (same max, smaller scale).
            theta_new = dict(method='exp',
                             max=priors['adopted']['theta']['max'],
                             scale=0.5)
            prior['theta'] = theta_new

    results = run(frb_list, host_coords, prior)

    # Write
    outfile = os.path.join(resource_filename('frb', 'data'), 'Galaxies',
                           'PATH', 'tmp.csv')
    results.to_csv(outfile)
    print(f"PATH analysis written to {outfile}")
    print("Rename it, push to Repo, and edit the PATH/README file accordingly")

    return results
"Galaxy"
] | 3ce59b8d4b24882d638a3682fbf6adfe776850dcb3e26383b9970cf0afee6881 |
''' This is a comment
'''
from xml.sax.handler import ContentHandler
from DIRAC.Core.Workflow.Parameter import Parameter
from DIRAC.Core.Workflow.Module import ModuleInstance, ModuleDefinition
from DIRAC.Core.Workflow.Step import StepDefinition, StepInstance
from DIRAC.Core.Workflow.Workflow import Workflow
class WorkflowXMLHandler( ContentHandler ):
def __init__( self, new_wf = None ):
''' If new_wf defined, it will be used as root of document '''
# this is an attribute for the object to be created from the XML document
self.root = new_wf # the reference on the all document
self.stack = None # to keep last object
self.strings = None # to accumulate string object (list of strings) used to split long string
def startDocument( self ):
# reset the process
# self.root=None
self.stack = []
self.strings = []
def endDocument( self ):
pass
def startElement( self, name, attrs ):
# print name ,"startElement", "attr=", attrs.getLength(), attrs.getNames()
self.clearCharacters() # clear to remove empty or nonprintable characters
if name == "Workflow":
if self.root == None: # if root not defined by constractor
self.root = Workflow()
self.stack.append( self.root )
elif name == "StepDefinition":
obj = StepDefinition( "TemporaryXMLObject_StepDefinition" )
if self.root == None: # in case we are saving Step only
self.root = obj
self.stack.append( obj )
elif name == "StepInstance":
obj = StepInstance( "TemporaryXMLObject_StepInstance" )
self.stack.append( obj )
elif name == "ModuleDefinition":
obj = ModuleDefinition( "TemporaryXMLObject_ModuleDefinition" )
if self.root == None: # in case we are saving Module only
self.root = obj
self.stack.append( obj )
elif name == "ModuleInstance":
obj = ModuleInstance( "TemporaryXMLObject_ModuleInstance" )
self.stack.append( obj )
elif name == "Parameter":
obj = Parameter( str( attrs['name'] ), None, str( attrs['type'] ), str( attrs['linked_module'] ), str( attrs['linked_parameter'] ), str( attrs['in'] ), str( attrs['out'] ), str( attrs['description'] ) )
self.stack.append( obj )
# TEMPORARY CODE
elif name == "origin" or name == "version" or name == "name" or name == "type" or name == "value" or\
name == "required" or name == "descr_short" or name == "name" or name == "type" or name == "description" or name == "body":
pass
else:
print "UNTREATED! startElement name=", name, "attr=", attrs.getLength(), attrs.getNames()
pass
def endElement( self, name ):
# print name, "endElement"
# attributes
if name == "origin":
self.stack[len( self.stack ) - 1].setOrigin( self.getCharacters() )
elif name == "version":
self.stack[len( self.stack ) - 1].setVersion( self.getCharacters() )
elif name == "name":
self.stack[len( self.stack ) - 1].setName( self.getCharacters() )
elif name == "type":
self.stack[len( self.stack ) - 1].setType( self.getCharacters() )
elif name == "required":
self.stack[len( self.stack ) - 1].setRequired( self.getCharacters() )
elif name == "descr_short":
self.stack[len( self.stack ) - 1].setDescrShort( self.getCharacters() )
elif name == "name":
self.stack[len( self.stack ) - 1].setName( self.getCharacters() )
elif name == "type":
self.stack[len( self.stack ) - 1].setType( self.getCharacters() )
elif name == "description":
self.stack[len( self.stack ) - 1].setDescription( self.getCharacters() )
elif name == "body":
self.stack[len( self.stack ) - 1].setBody( self.getCharacters() )
elif name == "value":
ch = self.getCharacters()
# to keep compatibility with the old version
# were """ was not used for the string
if self.stack[len( self.stack ) - 1].isTypeString():
self.stack[len( self.stack ) - 1].setValue( ch )
else:
self.stack[len( self.stack ) - 1].setValue( eval( ch ) )
# objects
elif name == "Workflow":
self.stack.pop()
elif name == "StepDefinition":
self.root.step_definitions.append( self.stack.pop() )
elif name == "StepInstance":
self.root.step_instances.append( self.stack.pop() )
elif name == "ModuleDefinition":
self.root.addModule( self.stack.pop() )
elif name == "ModuleInstance":
obj = self.stack.pop()
self.stack[len( self.stack ) - 1].module_instances.append( obj )
elif name == "Parameter":
obj = self.stack.pop();
self.stack[len( self.stack ) - 1].addParameter( obj )
else:
print "UNTREATED! endElement", name
def getCharacters( self ):
# combine all strings and clear the list
ret = ''.join( self.strings )
self.clearCharacters()
return str( ret )
def clearCharacters( self ):
del self.strings
self.strings = []
def characters( self, content ):
self.strings.append( content )
| avedaee/DIRAC | Core/Workflow/WorkflowReader.py | Python | gpl-3.0 | 5,180 | [
"DIRAC"
] | a5a9f3412e5459af3f1f8319eae19ea4c1ba529d055dd253477bab8cc3e6e88a |
#!/usr/bin/python
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
import pinproc
import procgame.game, sys, os
import procgame.config
import pygame
import time
import thread
import procgame.sound
sys.path.insert(0,os.path.pardir)
from bingo_emulator.graphics import methods as graphics
from bingo_emulator.common import units
#pygame.init()
# Module-level pygame surface shared by the emulator graphics code, cleared
# to black.  NOTE(review): size (0,0) presumably lets pygame pick the current
# display resolution -- confirm against the pygame.display.set_mode docs.
pygame.display.set_caption("Multi Bingo")
screen = pygame.display.set_mode((0,0))
screen.fill([0,0,0])
class Menu(procgame.game.BasicGame):
    """ This menu lets you select games to play """

    def __init__(self, machine_type):
        super(Menu, self).__init__(machine_type)

    def reset(self, selection, select):
        # NOTE(review): widens BasicGame.reset() with menu-specific
        # arguments, so it can only be called with this extended signature.
        super(Menu, self).reset()
        self.load_config('bingo.yaml')
        mainmenu = MainMenu(self, selection, select)
        self.modes.add(mainmenu)
        self.logger = logging.getLogger('game')
class MainMenu(procgame.game.Mode):
    """Mode that drives the game-selection menu.

    ``__init__`` builds every relay/stepper any of the supported game
    simulations expects to find on ``self.game`` (the union of all
    machines), then renders the preview for the currently selected game.
    The original code spelled out one assignment per unit; the tables
    below are data-driven and behaviorally equivalent (the attribute
    name always equals the unit name).
    """

    # Two-state latches (units.Relay).
    _RELAY_NAMES = (
        "anti_cheat", "green_three_as_five", "yellow_three_as_four",
        "red_three_as_four", "super_card_trip", "super_card1", "super_card2",
        "super_line1", "super_line2", "before_fourth", "searched",
        "before_fifth", "lock", "tilt", "liteaname", "eight_balls",
        "three_as_four", "four_as_five", "eb_play", "eb", "double",
        "double1", "double2", "double3", "b_return", "ball_return_played",
        "diagonal_score", "all_advantages", "special_feature", "special_1",
        "special_2", "special_3", "special_4", "special_5", "special_6",
        "c1_double", "c2_double", "c3_double", "c4_double", "c5_double",
        "c6_double", "c1_triple", "c2_triple", "c3_triple", "c4_triple",
        "c5_triple", "c6_triple", "all_double", "all_triple", "corners",
        "super_corners", "golden", "gate", "two_gold", "odds_only",
        "features", "orange_section", "red_super_section",
        "yellow_super_section", "ok", "four_stars", "hole_feature",
        "spot_16", "super_line_feature", "extra_ok", "super_ok", "delay",
        "roto_feature", "super_horizontal", "m_lines", "super_score",
        "magic_pockets", "spot_10", "spot_25", "two_suns", "double_double",
        "double_up", "three_suns", "top_odds", "missed", "red_star",
        "red_rollover", "special", "triple", "yellow_star",
        "yellow_rollover", "corners1", "corners2", "corners3", "corners4",
        "corners5", "corners6", "select_spots", "fss", "fnt", "sixteen",
        "fourteen_eighteen", "fifteen_seventeen", "fifteen", "seventeen",
        "twenty", "twentyone", "twentytwo", "good", "excellent", "superior",
        "dd1", "dd2", "dd3", "dd4", "dd5", "dd6", "before_third",
        "top_score", "cornersone_three", "cornerstwo_three",
        "cornersone_four", "cornerstwo_four", "e_card", "average", "expert",
        "special_pocket", "super_diagonal", "diagonal", "diagonals_relay",
        "row1", "row2", "row3", "two_red_letter", "three_red_letter",
        "magic_spot", "three_stars", "gate_open", "six_stars", "double_red",
        "double_yellow", "double_green", "double_blue", "mystery_red",
        "mystery_yellow", "corners384", "corners300", "corners192", "start",
        "search_index", "nothing", "onetwothree", "fourfivesix",
        "selection_feature_relay", "left_special_card", "right_special_card",
        "letter_r", "letter_i", "letter_o", "letter_ha", "letter_va",
        "letter_na", "letter_me", "letter_xi", "letter_co",
        "one_seven_feature", "one_seven", "seven_one", "lite_a_name",
        "rollovers", "diamond_diagonal", "super1", "super2", "super3",
        "super4", "super5", "super6", "card1_double", "card2_double",
        "card3_double", "card4_double", "card5_double", "card6_double",
        "red_double", "yellow_double", "green_double", "white_double",
        "red_missed", "yellow_missed", "green_missed", "white_missed",
        "red_regular", "yellow_regular", "green_regular", "white_regular",
        "magic_line_f", "ballyhole", "top_line", "bottom_line", "spot_12",
        "spot_13", "game1", "game2", "shop_three", "shop_four",
        "magic_line", "select_a_score", "kod", "koh", "koc", "kos", "qod",
        "qoh", "qoc", "qos", "tod", "toh", "toc", "tos", "nod", "noh",
        "noc", "nos", "aos", "aod", "aoh", "aoc", "joh", "jod", "jos",
        "joc", "joker", "player2", "tilt2", "card1_missed", "card2_missed",
        "card3_missed", "card4_missed", "card5_missed", "card6_missed",
        "ss", "skill_shot_missed",
    )

    # Multi-position units: (unit name, number of positions).
    _STEPPER_SPECS = (
        ("super_card_replay_counter", 400), ("feature", 6), ("curtains", 6),
        ("lines", 6), ("selector", 6), ("name", 6), ("select_card", 6),
        ("card", 6), ("spotting", 49), ("spotted_numbers", 8),
        ("ball_count", 8), ("ball_return", 15), ("extra_ball", 3),
        ("odds", 10), ("green_odds", 10), ("red_odds", 10),
        ("yellow_odds", 10), ("white_odds", 10), ("gold_odds", 10),
        ("futurity", 12), ("super_card", 8), ("selection_feature", 8),
        ("magic_screen_feature", 8), ("magic_numbers_feature", 8),
        ("roto_feature_step", 8), ("horizontal", 8), ("special_odds", 8),
        ("special_game", 8), ("special_replay_counter", 8),
        ("red_replay_counter", 8), ("yellow_replay_counter", 8),
        ("green_replay_counter", 8), ("twin_number", 8),
        ("color_selector", 8), ("score_select", 8), ("x_feature", 8),
        ("magic_lines", 8), ("hold_feature", 8), ("bonus", 8), ("ring", 8),
        ("double_colors", 8), ("wheel", 8), ("roto", 6), ("roto2", 6),
        ("numbera", 6), ("numberb", 6), ("numberc", 6), ("numberd", 6),
        ("each_card", 6), ("bump_feature", 6), ("m_pockets", 4),
        ("wild_pockets", 9), ("diagonal_separate", 9), ("pocket", 5),
        ("diagonals", 5), ("diagonal_scoring", 9), ("super_super_card", 9),
        ("magic_screen", 15), ("line", 3), ("line_feature", 3),
        ("line1", 3), ("line2", 3), ("line3", 3), ("line4", 3),
        ("line5", 3), ("magic_lines_feature", 5), ("magic_card", 15),
        ("spot", 15), ("coin", 50), ("red_line", 8), ("yellow_line", 8),
        ("green_line", 8), ("magic_squares_feature", 10),
        ("score_feature", 8), ("odds1", 5), ("odds2", 5),
        ("blue_odds", 12), ("orange_odds", 12), ("mystic_lines", 6),
        ("skill_shot_scores", 20), ("skill_shot_replay_counter", 120),
        ("card1_replay_counter", 300), ("card2_replay_counter", 300),
        ("card3_replay_counter", 300), ("card4_replay_counter", 300),
        ("card5_replay_counter", 300), ("card6_replay_counter", 300),
    )

    # 3-position "continuous" steppers used by the menu screens themselves.
    _CONTINUOUS_STEPPERS = ("cam4", "square_a", "square_b", "square_c",
                            "square_d", "square_e", "line_f")

    def __init__(self, game, selection, select):
        super(MainMenu, self).__init__(game=game, priority=5)
        self.game.replays = 0
        self.game.select = select
        self.game.selection = selection
        # Instantiate every unit the individual game modules may touch.
        for unit_name in self._RELAY_NAMES:
            setattr(self.game, unit_name, units.Relay(unit_name))
        for unit_name, positions in self._STEPPER_SPECS:
            setattr(self.game, unit_name, units.Stepper(unit_name, positions))
        for unit_name in self._CONTINUOUS_STEPPERS:
            setattr(self.game, unit_name,
                    units.Stepper(unit_name, 3, "menu", "continuous"))
        self.game.skill_shot_reflex = units.Reflex("skill_shot_reflex", 200)
        # Mutable scratch state used by the skill-shot / magic features.
        self.game.magic = []
        self.game.skill_shot_selection = []
        self.holes = []
        self.holes2 = []
        # Render the preview for the initially selected game.  Dynamic
        # dispatch via __import__/eval mirrors the style used throughout
        # this file ('graphics' must resolve the imported submodule).
        __import__("bingo_emulator.graphics.%s" % (selection[select]))
        eval("graphics.%s.display(self,0,True)" % (selection[select]))
def display_error(self, selection, select, playfield):
errortext1 = "To play, please insert %s playfield." % (playfield)
errortext2 = "Be careful with the Jones Plugs!"
errortext3 = ""
errortext4 = ""
if playfield == "28 hole":
errortext3 += "When inserting this playfield, please change the ball"
errortext4 += "return board or the game will not function properly."
if self.game.switches.bally28.is_active():
errortext3 += "When inserting this playfield, please change the ball"
errortext4 += "return board or the game will not function properly."
errortext5 = "To choose a different game,"
errortext6 = "press Left or Right buttons."
pygame.font.init() # you have to call this at the start,
# if you want to use this module.
font = pygame.font.SysFont('Liberation Sans', 30)
textsurface1=font.render(errortext1, True, (0, 0, 0))
textsurface2=font.render(errortext2, True, (0, 0, 0))
textsurface3=font.render(errortext3, True, (0, 0, 0))
textsurface4=font.render(errortext4, True, (0, 0, 0))
textsurface5=font.render(errortext5, True, (0, 0, 0))
textsurface6=font.render(errortext6, True, (0, 0, 0))
surface=pygame.Surface((720, 1280))
surface.fill((255, 255, 255))
text_rect1 = textsurface1.get_rect(center=(360, 500))
text_rect2 = textsurface2.get_rect(center=(360, 550))
text_rect3 = textsurface3.get_rect(center=(360, 620))
text_rect4 = textsurface4.get_rect(center=(360, 670))
text_rect5 = textsurface5.get_rect(center=(360, 750))
text_rect6 = textsurface6.get_rect(center=(360, 800))
surface.blit(textsurface1, text_rect1)
surface.blit(textsurface2, text_rect2)
surface.blit(textsurface3, text_rect3)
surface.blit(textsurface4, text_rect4)
surface.blit(textsurface5, text_rect5)
surface.blit(textsurface6, text_rect6)
#surface.blit(textsurface, [360,640])
surface.set_alpha(175)
screen.blit(surface,(0,0))
pygame.display.update()
return
#ck = (127, 33, 33)
#size = 25
#s = pygame.Surface((50, 50))
# first, "erase" the surface by filling it with a color and
# setting this color as colorkey, so the surface is empty
#s.fill(ck)
#s.set_colorkey(ck)
#pygame.draw.circle(s, (255, 0, 0), (size, size), size, 2)
# after drawing the circle, we can set the
# alpha value (transparency) of the surface
#s.set_alpha(75)
#x, y = pygame.mouse.get_pos()
#screen.blit(s, (x-size, y-size))
#pygame.event.poll()
#pygame.display.flip()
def sw_enter_active(self, sw):
try:
s = self.game.select
if s in [2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,30,32,34,35,36,37,38,39,40,41,43,45,46,47,48,49,50,51,53,55,56,57,58,59,60,61,62,63,64,65,67,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,93,94,95,97,98,99,100,101,102,117,118,120,121,124,125,126,129,130,131,132,133,134,137,138,140,141,142]:
if self.game.switches.bally25ro.is_inactive():
self.display_error(self.game.selection, self.game.select, "25 hole with rollovers")
return
if s in [1,3]:
if self.game.switches.unitedRoulette.is_inactive():
self.display_error(self.game.selection, self.game.select, "roulette")
return
if s in [29,31,33]:
if self.game.switches.bally25hold.is_inactive():
self.display_error(self.game.selection, self.game.select, "25 hole with hold")
return
if s in [42,44]:
if self.game.switches.bally25pockets.is_inactive():
self.display_error(self.game.selection, self.game.select, "25 hole with pockets")
return
if s in [52,54,128,135]:
if self.game.switches.bally18.is_inactive():
self.display_error(self.game.selection, self.game.select, "18 hole")
return
if s in [92,96]:
if self.game.switches.bally28.is_inactive():
self.display_error(self.game.selection, self.game.select, "28 hole")
return
if s in [103,104,105,106,107,108,109,110,111,112,113,115,116,119,123]:
if self.game.switches.bally20ro.is_inactive():
self.display_error(self.game.selection, self.game.select, "20 hole with rollovers")
return
if s in [127]:
if self.game.switches.bally20gate.is_inactive():
self.display_error(self.game.selection, self.game.select, "20 hole with gate")
return
if s in [136]:
if self.game.switches.bally24.is_inactive():
self.display_error(self.game.selection, self.game.select, "24 hole")
return
if s in [139]:
if self.game.switches.bally20hold.is_inactive():
self.display_error(self.game.selection, self.game.select, "20 hole with hold")
return
if s in [66,68,114,122]:
if self.game.switches.bally25hole.is_inactive():
self.display_error(self.game.selection, self.game.select, "Card 25 hole")
return
t = thread.start_new(__import__("bingo_emulator.%s.game" % (self.game.selection[self.game.select])))
if t.isAlive():
t.join()
except:
if s in [2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,30,32,34,35,36,37,38,39,40,41,43,45,46,47,48,49,50,51,53,55,56,57,58,59,60,61,62,63,64,65,67,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,93,94,95,97,98,99,100,101,102,117,118,120,121,124,125,126,129,130,131,132,133,134,137,138,140,141,142]:
if self.game.switches.bally25ro.is_inactive():
self.display_error(self.game.selection, self.game.select, "25 hole with rollovers")
return
if s in [1,3]:
if self.game.switches.unitedRoulette.is_inactive():
self.display_error(self.game.selection, self.game.select, "roulette")
return
if s in [29,31,33]:
if self.game.switches.bally25hold.is_inactive():
self.display_error(self.game.selection, self.game.select, "25 hole with hold")
return
if s in [42,44]:
if self.game.switches.bally25pockets.is_inactive():
self.display_error(self.game.selection, self.game.select, "25 hole with pockets")
return
if s in [52,54,128,135]:
if self.game.switches.bally18.is_inactive():
self.display_error(self.game.selection, self.game.select, "18 hole")
return
if s in [92,96]:
if self.game.switches.bally28.is_inactive():
self.display_error(self.game.selection, self.game.select, "28 hole")
return
if s in [103,104,105,106,107,108,109,110,111,112,113,115,116,119,123]:
if self.game.switches.bally20ro.is_inactive():
self.display_error(self.game.selection, self.game.select, "20 hole with rollovers")
return
if s in [127]:
if self.game.switches.bally20gate.is_inactive():
self.display_error(self.game.selection, self.game.select, "20 hole with gate")
return
if s in [136]:
if self.game.switches.bally24.is_inactive():
self.display_error(self.game.selection, self.game.select, "24 hole")
return
if s in [139]:
if self.game.switches.bally20hold.is_inactive():
self.display_error(self.game.selection, self.game.select, "20 hole with hold")
return
if s in [66,68,114,122]:
if self.game.switches.bally25hole.is_inactive():
self.display_error(self.game.selection, self.game.select, "Card 25 hole")
return
g = (__import__("bingo_emulator.%s.game" % (self.game.selection[self.game.select])))
t = thread.start_new(eval(g))
self.game.end_run_loop()
if t.isAlive():
t.join()
self.game.reset(self.game.selection, self.game.select)
def sw_left_active(self, sw):
if self.game.select != 1:
self.game.select -= 1
self.game.selection[self.game.select]
if self.game.selection[self.game.select] == "bway":
self.game.selection[self.game.select] = "broadway"
__import__("bingo_emulator.graphics.%s" % (self.game.selection[self.game.select]))
g = "graphics.%s.display(self,0,True)" % (self.game.selection[self.game.select])
eval(g)
os.system("ssh pi@10.0.0.51 /home/pi/ic.sh %s &" % (self.game.selection[self.game.select]))
os.system("ssh pi@10.0.0.52 /home/pi/sd.sh %s &" % (self.game.selection[self.game.select]))
def sw_right_active(self, sw):
if self.game.select != len(self.game.selection):
self.game.select += 1
self.game.selection[self.game.select]
if self.game.selection[self.game.select] == "bway":
self.game.selection[self.game.select] = "broadway"
__import__("bingo_emulator.graphics.%s" % (self.game.selection[self.game.select]))
g = "graphics.%s.display(self,0,True)" % (self.game.selection[self.game.select])
eval(g)
os.system("ssh pi@10.0.0.51 /home/pi/ic.sh %s &" % (self.game.selection[self.game.select]))
os.system("ssh pi@10.0.0.52 /home/pi/sd.sh %s &" % (self.game.selection[self.game.select]))
# Supported games in menu order; index 1 maps to the first entry.
GAME_NAMES = (
    "abc", "bright_lights", "u345", "broadway_51", "bolero", "coney_island",
    "zingo", "leader", "spot_lite", "holiday", "atlantic_city",
    "miss_california", "palm_beach", "stars", "long_beach", "circus",
    "frolics", "showboat", "bright_spot", "bally_beauty", "beach_club",
    "rodeo_3", "yacht_club", "cabana", "dude_ranch", "tropics", "rodeo_1",
    "tahiti_1", "palm_springs", "rio", "ice_frolics", "havana", "surf_club",
    "mexico", "hi_fi", "hawaii_1", "variety", "nevada", "singapore",
    "big_time", "tropicana", "gayety", "manhattan", "gay_time", "serenade",
    "miami_beach", "triple_play", "beach_beauty", "pixies", "bway",
    "starlet", "crosswords", "caravan", "spelling_bee", "night_club",
    "stardust", "parade", "south_seas", "double_header", "big_show",
    "monaco", "key_west", "brazil", "showtime", "sun_valley", "yukon",
    "playtime", "hi_hand", "fun_way", "miss_america", "cypress_gardens",
    "beach_time", "carnival_queen", "sea_island", "ballerina", "lotta_fun",
    "county_fair", "laguna_beach", "single_coin_pittsburgh", "roller_derby",
    "fun_spot", "barrel_o_fun", "touchdown", "circus_queen", "lite_a_line",
    "acapulco", "bikini", "barrel_o_fun_61", "fun_spot_61", "can_can",
    "lido", "shoot_a_line", "barrel_o_fun_62", "fun_spot_62", "fun_spot_63",
    "shoot_a_line_63", "golden_gate", "rainbow", "the_twist",
    "silver_sails", "bounty", "venus", "border_beauty", "beauty_beach",
    "folies_bergeres", "bahama_beach", "zodiac", "orient", "big_wheel",
    "venice", "magic_ring", "london", "safari", "joker_wild", "super_7",
    "bonus_7", "hole_in_one", "stock_market", "double_up", "wall_street",
    "ticker_tape", "twin_joker", "hawaii_2", "bali", "super_wall_street",
    "miss_america_75", "mystic_gate", "miss_universe", "blue_chip",
    "bull_market", "bonanza", "miss_america_supreme", "high_flyer",
    "miss_america_deluxe", "continental_18", "galaxy", "nashville",
    "dixieland", "tahiti_2", "malibu_beach", "continental",
    "mississippi_showboat",
)

def build_selection():
    """Return the 1-based {index: game module name} menu table."""
    return dict(enumerate(GAME_NAMES, 1))

def find_game_index(selection, sel):
    """Return the first index whose game name contains *sel*.

    Substring matching preserves the historical CLI behavior (e.g. 'gala'
    selects galaxy).  Raises ValueError on no match.  Note: the original
    loop was ``range(1, 142)`` and therefore never tested the last entry
    (mississippi_showboat) and died with a NameError on no match.
    """
    for i in range(1, len(selection) + 1):
        if sel in selection[i]:
            return i
    raise ValueError("unknown game selection: %r" % (sel,))

def main(sel):
    """Entry point: resolve *sel* to a game and run the selection menu."""
    selection = build_selection()
    select = find_game_index(selection, sel)
    if selection[select] == "bway":
        selection[select] = "broadway"
    # Kick off the instruction-card / score-display helpers on peer Pis.
    os.system("ssh pi@10.0.0.51 /home/pi/ic.sh %s &" % (selection[select]))
    os.system("ssh pi@10.0.0.52 /home/pi/sd.sh %s &" % (selection[select]))
    game = Menu(machine_type='pdb')
    game.reset(selection, select)
    game.run_loop()

if __name__ == "__main__":
    main(sys.argv[1])
| bingopodcast/bingos | bingo_emulator/menu.py | Python | gpl-3.0 | 36,494 | [
"Galaxy"
] | cdb51c15c814d73d304712ffdd2c3e208c1a29187981308d04b396f4a1ef8657 |
from ply import *
# Reserved words of the monkey-test DSL; each becomes its own token type
# (see t_ID below).  'True'/'False' are additionally folded into BOOL.
keywords = (
    'Prefer', 'Patient', 'Visit', 'Blind', 'Click', 'Input', 'Choose',
    'Back', 'Forward', 'Accept', 'Auth', 'Dismiss', 'Press', 'Switch',
    'Repeat', 'Task', 'End', 'Judge', 'Empty', 'Not', 'True', 'False', 'Page', 'Enter','LoadSmartApp', 'DoGenomicAuth'
)
# Full token inventory handed to ply.
tokens = keywords + (
    'ID', 'NUMBER', 'STRING', 'BOOL', 'EQUAL', 'NEWLINE'
)
# Characters skipped between tokens.  NOTE(review): only tabs are ignored,
# not spaces -- confirm spaces are really meant to be illegal characters.
t_ignore = '\t'
def t_ID(t):
    r'[a-zA-Z][a-zA-Z0-9]*'
    # Promote bare identifiers that collide with keywords: boolean
    # literals become BOOL tokens carrying a real Python bool, other
    # keywords use their own name as the token type.
    if t.value in keywords:
        if t.value in ('True', 'False'):
            t.type = 'BOOL'
            t.value = (t.value == 'True')
        else:
            t.type = t.value
    return t
def t_NUMBER(t):
    r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
    # Integer or decimal literal with optional exponent.  ply compiles
    # rules with re.VERBOSE, so the stray space inside the exponent group
    # is ignored rather than matched literally.
    t.value = float(t.value)  # all numbers are carried as floats
    return t
def t_NEWLINE(t):
    r'\n'
    # Newlines are significant tokens in this DSL; also keep the line
    # counter accurate for error reporting.
    t.lexer.lineno = t.lexer.lineno + 1
    return t
def t_STRING(t):
    r'\".*?\"'
    # The lazy match guarantees the quotes appear only at both ends of
    # the lexeme, so stripping the first and last character is enough.
    t.value = t.value[1:-1]
    return t
# t_STRING = r'\".*?\"'
def t_BOOL(t):
    r'(True)|(False)'
    # NOTE(review): t_ID is defined first and its pattern also matches
    # these literals, so this rule is likely shadowed -- confirm.
    t.value = (t.value == 'True')
    return t
t_EQUAL = r'='  # simple-token rule: the assignment sign in the DSL
# Lexer error hook: report the offending character and resynchronize by
# skipping a single character.  (Comment, not a docstring, so ply's rule
# handling is untouched.)
def t_error(t):
    print("Illegal character %s detected" % t.value[0])
    t.lexer.skip(1)
# Build the lexer at import time; debug=1 enables ply's diagnostic output.
lex.lex(debug=1)
"VisIt"
] | 30887d33c8c9cc1937b658064becf415c14170a99428975b1dd417db6b0e3129 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import pickle
import espressomd
import espressomd.observables
import espressomd.accumulators
class CorrelatorTest(ut.TestCase):

    """
    Test class for the Correlator accumulator.
    """

    # Handle for espresso system, shared by all tests in this class.
    system = espressomd.System(box_l=[10, 10, 10])
    system.cell_system.skin = 0.4
    system.time_step = 0.01
def tearDown(self):
self.system.part.clear()
self.system.auto_update_accumulators.clear()
def calc_tau(self, time_step, tau_lin, length, delta_N=1):
tau = []
for i in range(tau_lin):
tau.append(i)
factor = 1
while len(tau) < length:
p = tau[-1] + factor * 1
for i in range(0, tau_lin, 2):
tau.append(p + factor * i)
factor *= 2
return time_step * np.array(tau[:length]) * delta_N
def check_sizes(self, acc, steps, linear=False):
sizes = acc.sample_sizes()
tau_lin = acc.tau_lin
max_lin = np.arange(steps, steps - tau_lin - 1, -1)
if linear:
np.testing.assert_equal(sizes, max_lin)
else:
np.testing.assert_equal(sizes[:tau_lin + 1], max_lin)
block_size = tau_lin // 2
i = block_size + 1
for _ in range(2):
j = i + block_size
k = j + block_size
np.testing.assert_allclose(sizes[i:j] / sizes[j:k], 2, atol=.5)
i = j
def check_pickling(self, acc):
corr = acc.result()
lags = acc.lag_times()
sizes = acc.sample_sizes()
acc_unpickled = pickle.loads(pickle.dumps(acc))
np.testing.assert_array_equal(acc_unpickled.result(), corr)
np.testing.assert_array_equal(acc_unpickled.lag_times(), lags)
np.testing.assert_array_equal(acc_unpickled.sample_sizes(), sizes)
def test_square_distance_componentwise(self):
s = self.system
v = np.array([1, 2, 3])
p = s.part.add(pos=(0, 0, 0), v=v)
obs = espressomd.observables.ParticlePositions(ids=(p.id,))
acc = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=10, tau_max=2, delta_N=1,
corr_operation="square_distance_componentwise")
s.integrator.run(1000)
s.auto_update_accumulators.add(acc)
s.integrator.run(1000)
# here don't call acc.finalize()
corr = acc.result()
self.check_pickling(acc)
tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
np.testing.assert_array_almost_equal(acc.lag_times(), tau)
self.check_sizes(acc, 1000)
for i in range(corr.shape[0]):
np.testing.assert_array_almost_equal(corr[i], [v**2 * tau[i]**2])
def test_tensor_product(self):
s = self.system
v = np.array([1, 2, 3])
p = s.part.add(pos=(0, 0, 0), v=v)
obs = espressomd.observables.ParticleVelocities(ids=(p.id,))
acc = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=12, tau_max=2, delta_N=1,
corr_operation="tensor_product")
s.auto_update_accumulators.add(acc)
s.integrator.run(1000)
acc.finalize()
corr = acc.result()
self.check_pickling(acc)
tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
np.testing.assert_array_almost_equal(acc.lag_times(), tau)
corr_ref = np.kron(v, v).reshape((3, 3))
self.check_sizes(acc, 1000)
for i in range(corr.shape[0]):
np.testing.assert_array_almost_equal(corr[i], corr_ref)
def test_componentwise_product(self):
s = self.system
v = np.array([1, 2, 3])
p = s.part.add(pos=(0, 0, 0), v=v)
obs = espressomd.observables.ParticleVelocities(ids=(p.id,))
acc = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=10, tau_max=2, delta_N=1,
corr_operation="componentwise_product")
s.auto_update_accumulators.add(acc)
s.integrator.run(1000)
acc.finalize()
corr = acc.result()
self.check_pickling(acc)
tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
np.testing.assert_array_almost_equal(acc.lag_times(), tau)
self.check_sizes(acc, 1000)
for i in range(corr.shape[0]):
np.testing.assert_array_almost_equal(corr[i], [v**2])
def test_scalar_product(self):
s = self.system
v = np.array([1, 2, 3])
p = s.part.add(pos=(0, 0, 0), v=v)
obs = espressomd.observables.ParticleVelocities(ids=(p.id,))
acc = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=10, tau_max=2, delta_N=1,
corr_operation="scalar_product")
s.auto_update_accumulators.add(acc)
s.integrator.run(1000)
acc.finalize()
corr = acc.result()
self.check_pickling(acc)
tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
np.testing.assert_array_almost_equal(acc.lag_times(), tau)
self.check_sizes(acc, 1000)
for i in range(corr.shape[0]):
np.testing.assert_array_almost_equal(corr[i], [np.sum(v**2)])
def test_fcs(self):
s = self.system
v = np.array([1, 2, 3])
p = s.part.add(pos=(0, 0, 0), v=v)
w = np.array([3, 2, 1])
obs = espressomd.observables.ParticlePositions(ids=(p.id,))
acc = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=10, tau_max=9.9 * self.system.time_step,
delta_N=1, corr_operation="fcs_acf", args=w)
s.auto_update_accumulators.add(acc)
s.integrator.run(1000)
acc.finalize()
corr = acc.result()
self.check_pickling(acc)
tau = self.calc_tau(s.time_step, acc.tau_lin, corr.shape[0])
np.testing.assert_array_almost_equal(acc.lag_times(), tau)
self.check_sizes(acc, 1000, linear=True)
for i in range(corr.shape[0]):
np.testing.assert_array_almost_equal(
corr[i],
[np.exp(-np.linalg.norm(v / w * tau[i])**2)], decimal=10)
# check setter and getter
np.testing.assert_array_almost_equal(np.copy(acc.args), w**2)
w_squared = np.array([4, 5, 6])**2
acc.args = w_squared
np.testing.assert_array_almost_equal(np.copy(acc.args), w_squared)
def test_correlator_interface(self):
# test setters and getters
obs = espressomd.observables.ParticleVelocities(ids=(123,))
acc = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=10, tau_max=12.0, delta_N=1,
corr_operation="scalar_product")
# check tau_lin
self.assertEqual(acc.tau_lin, 10)
# check tau_max
self.assertEqual(acc.tau_max, 12.)
# check delta_N
self.assertEqual(acc.delta_N, 1)
acc.delta_N = 2
self.assertEqual(acc.delta_N, 2)
# check corr_operation
self.assertEqual(acc.corr_operation, "scalar_product")
# check linear tau correlator and multiple tau correlator
dt = self.system.time_step
for tau_lin in (10, 20):
for delta_N in (1, 2, 10):
tau_max = dt * delta_N * tau_lin
# linear, multiple and default (=multiple) tau correlator
acc_lin = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=tau_lin, tau_max=0.99 * tau_max,
delta_N=delta_N, corr_operation="scalar_product")
acc_mul = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=tau_lin, tau_max=1.0 * tau_max,
delta_N=delta_N, corr_operation="scalar_product")
acc_def = espressomd.accumulators.Correlator(
obs1=obs, tau_lin=1, tau_max=tau_max,
delta_N=delta_N, corr_operation="scalar_product")
lin_tau = acc_lin.lag_times()
mul_tau = acc_mul.lag_times()
def_tau = acc_mul.lag_times()
# check tau
time_lin = dt * delta_N * np.arange(len(lin_tau))
time_mul = self.calc_tau(dt, tau_lin, len(mul_tau), delta_N)
np.testing.assert_array_almost_equal(lin_tau, time_lin)
np.testing.assert_array_almost_equal(mul_tau, time_mul)
np.testing.assert_array_almost_equal(def_tau, time_mul)
self.assertEqual(acc_def.tau_lin, tau_lin)
# check pickling
self.check_pickling(acc_lin)
self.check_pickling(acc_lin)
self.check_pickling(acc_def)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    ut.main()
| fweik/espresso | testsuite/python/accumulator_correlator.py | Python | gpl-3.0 | 9,481 | [
"ESPResSo"
] | 98372c265bf0b327a49635555e4c02dce1231f89851e34f6315b46bb33365fbb |
from behave import given, when, then, step
from search.management.commands.create_session import create_pre_authenticated_session
from django.conf import settings
@given('I am a logged-in user')
def given_i_am_logged_in(context):
    """Pre-authenticate the browser by injecting a valid session cookie."""
    session_key = create_pre_authenticated_session(email='a@b.c')
    ## to set a cookie we need to first visit the domain.
    ## 404 pages load the quickest!
    url = context.server_url + "/admin/login/?next=/admin/"
    # Bug fix: a leftover debug line re-assigned url to 'http://google.com',
    # so the session cookie below was set on the wrong domain and the login
    # never took effect.  The stray debug print was removed as well.
    context.browser.get(url)
    context.browser.add_cookie(dict(
        name=settings.SESSION_COOKIE_NAME,
        value=session_key,
        path='/',
    ))
@when('I create a dork {dork_name} with "{dork_content}"')
def create_a_dork(context, dork_name, dork_content):
    """Fill in and submit the dork-creation form on the home page."""
    context.browser.get(context.server_url)
    context.browser.find_element_by_id('id_dork_name').send_keys(dork_name)
    # Bug fix: dork_content was never typed into the form -- only a bare
    # newline was sent -- so the created dork always had empty content.
    context.browser.find_element_by_id('id_dork_content').send_keys(dork_content)
    context.browser.find_element_by_id('id_dork_content').send_keys('\n')
    context.browser.find_element_by_id('id_dork_submit').click()
@when('I click the link to "{link_text}"')
def click_a_link(context, link_text):
    # TODO: unimplemented step -- fails deliberately (outside-in TDD stub).
    assert False
@then('I will see the dork content "{dork_content}"')
def see_a_dork(context, dork_content):
    # TODO: unimplemented step -- fails deliberately.
    assert False
@then('I will see a link to "{link_text}"')
def see_a_link(context, link_text):
    # TODO: unimplemented step -- fails deliberately.
    assert False
@then('I will see the dork name "{dork_name}"')
def see_a_dork_name(context, dork_name):
    # TODO: unimplemented step -- fails deliberately.
    assert False
| golgoth42/golgoth | search/features/steps/dork.py | Python | bsd-2-clause | 1,474 | [
"VisIt"
] | f672a7966e9ec5a6b6355a47a4587986d169d8b8e6507ae3252fad1d4ed802d9 |
# ALL the Scikit-Learn functions
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model as lm
from sklearn.svm import SVC
from sklearn import metrics
# Workhorse libraries for vector ops and loading
import numpy as np
import pandas as pd
# Visualization
from matplotlib import pyplot as plt
# Miscellaneous
import string
import os
from scipy.sparse import hstack, csr_matrix
# Used for counting punctuation present in text
# (set of all ASCII punctuation characters; used for membership tests below)
exclude = set(string.punctuation)
'''
An example of custom feature engineering -
Given a string of text, it returns a set of features that hopefully
describe some aspect of the text meaningfully for the classifier to use.
Since we're trying to figure out if the text is an insult we've picked
features like the caps ratio which would make sense.
Features are:
Number of words in the text
Ratio of capitalized characters to non-capitalized
Average word length
Whether or not there is capitalization
The ratio of punctuation to number of words
'''
def text_features(text, punctuation=None):
    """Extract simple stylistic features from a piece of text.

    Generalized: the punctuation set may now be supplied; the default
    reproduces the module-level ``exclude`` (all ASCII punctuation).

    Returns [n_words, cap_ratio, punc_ratio, avg_word_len, caps_present].
    Note: characters *not* in the punctuation set (including spaces) count
    as "alphanumeric", mirroring the original behaviour.
    """
    if punctuation is None:
        punctuation = set(string.punctuation)
    n_words = float(len(text.split(' ')))
    n_alphanumeric = float(len([ch for ch in text if ch not in punctuation]))
    n_caps = float(len([ch for ch in text if ch.isupper()]))
    word_len = n_alphanumeric / n_words
    cap_ratio = n_caps / n_alphanumeric
    caps_present = cap_ratio != 0
    punc_ratio = len([ch for ch in text if ch in punctuation]) / n_words
    return [n_words, cap_ratio, punc_ratio, word_len, caps_present]
'''
Uses pandas to load the csv.
Generates text_features for each example and does some distribution scaling to
make them more Gaussian like via sqrts and log transforms.
Shows more complex numpy slicing as well.
(Assigning and slicing multiple columns using an index list)
'''
def load(f):
    """Load a CSV of comments, returning (text, engineered features, labels)."""
    # Load data with pandas
    data = pd.read_csv(f)
    # Nice pandas data selecting using the csv header column names
    text = data['Comment']
    labels = data['Insult']
    # Get text features for every text example using a list comprehension
    # which is like a condensed for-loop.
    text_feats = np.array([text_features(t) for t in text])
    # Feature scaling via log and sqrt transforms to force their distributions
    # into more Gaussian (normal bell curve) shapes.
    # Columns 0, 2, 3 = n_words, punc_ratio, word_len; column 1 = cap_ratio.
    text_feats[:,[0,2,3]] = np.log(text_feats[:,[0,2,3]])
    text_feats[:,1] = np.sqrt(text_feats[:,1])
    return text,text_feats,labels
# Going to have to change to wherever your data is stored
data_dir = './'
# Loading in raw training and test data
train_text,train_text_feats,train_labels = load(os.path.join(data_dir,'train.csv'))
test_text,test_text_feats,test_labels = load(os.path.join(data_dir,'impermium_verification_labels.csv'))
# Example of using a TfidfVectorizer to convert text to ML friendly format
# Uses bigrams and a minimum document frequency to improve quality of text vector
vect = TfidfVectorizer(min_df=0.001,ngram_range=(1, 2), norm="l1")
# Fit the vocabulary on train AND test text so both transform consistently.
vect.fit(np.hstack((train_text,test_text)))
train_text = vect.transform(train_text)
test_text = vect.transform(test_text)
# Scale the generated text features to be friendly to linear models
scaler = StandardScaler()
scaler.fit(np.vstack((train_text_feats,test_text_feats)))
train_text_feats = scaler.transform(train_text_feats)
test_text_feats = scaler.transform(test_text_feats)
# Adding the extracted text features into the word vectors to have a single
# feature matrix to train the model on. hstack stacks columns horizontally
# which is what we want here (adding more features to the rows of examples)
train_text = hstack((train_text,csr_matrix(train_text_feats)))
test_text = hstack((test_text,csr_matrix(test_text_feats)))
# Standard sklearn interface example
# Using Ridge Regression which is a form of Ordinary Least Squares Line fitting
# Kind of un-orthodox for a classification problem but since the scoring metric
# AUC is rank based it doesn't matter.
model = lm.Ridge()
model.fit(train_text,train_labels)
preds = model.predict(test_text)
# NOTE: Python 2 print statement -- this module targets Python 2.
print 'AUC score:',round(metrics.roc_auc_score(test_labels,preds),5)
| kn-olin/insults | explore.py | Python | gpl-2.0 | 4,130 | [
"Gaussian"
] | c8acf60a6f529ce43b793b2708be1c0d037610806af28937cedaaf2678377ace |
# proxy module
from __future__ import absolute_import
from mayavi.components.implicit_plane import *
| enthought/etsproxy | enthought/mayavi/components/implicit_plane.py | Python | bsd-3-clause | 101 | [
"Mayavi"
] | 13cc7be43331db21ca8662bdd53c9c7e4bc5be1641faa1d2c6189e67a8bba6fd |
from __future__ import division, absolute_import
from struct import unpack as _unpack, pack as _pack
import os.path
from sys import byteorder as _BYTEORDER
import warnings
import numpy as np
from scipy.ndimage import zoom, gaussian_filter
from ._powerfit import blur_points, dilate_points
class Volume(object):
    """A 3D density map: a numpy array plus voxel spacing and an origin.

    The data array is indexed (z, y, x); ``origin`` and ``dimensions`` are
    expressed in Cartesian (x, y, z) order.
    """

    @classmethod
    def fromfile(cls, fid, fmt=None):
        # Parse any supported map format (CCP4/MRC/XPLOR) into a Volume.
        array, voxelspacing, origin = parse_volume(fid, fmt)
        return cls(array, voxelspacing, origin)

    def __init__(self, array, voxelspacing=1.0, origin=(0, 0, 0)):
        self.array = array
        self.voxelspacing = voxelspacing
        self.origin = origin

    @property
    def shape(self):
        return self.array.shape

    @property
    def dimensions(self):
        # Physical extent along each axis, reversed to (x, y, z) order.
        return np.asarray([x * self.voxelspacing for x in self.array.shape][::-1])

    @property
    def start(self):
        # Origin expressed in voxel units.
        return np.asarray([x/self.voxelspacing for x in self.origin])

    @start.setter
    def start(self, start):
        # Bug fix: this setter used to write to self._origin, which nothing
        # else reads, so assigning .start silently had no effect on .origin.
        self.origin = np.asarray([x * self.voxelspacing for x in start])

    def duplicate(self):
        """Return a deep copy of this Volume (the array is copied)."""
        return Volume(self.array.copy(), voxelspacing=self.voxelspacing,
                      origin=self.origin)

    def tofile(self, fid, fmt=None):
        """Write the volume; format inferred from extension when fmt is None."""
        if fmt is None:
            fmt = os.path.splitext(fid)[-1][1:]
        if fmt in ('ccp4', 'map', 'mrc'):
            to_mrc(fid, self)
        elif fmt in ('xplor', 'cns'):
            to_xplor(fid, self)
        else:
            raise RuntimeError("Format is not supported.")
# builders
def zeros(shape, voxelspacing, origin):
    """Create an all-zero Volume with the given shape, spacing and origin."""
    empty = np.zeros(shape)
    return Volume(empty, voxelspacing, origin)
def zeros_like(volume):
    """Create an all-zero Volume matching *volume*'s shape, spacing and origin."""
    blank = np.zeros_like(volume.array)
    return Volume(blank, volume.voxelspacing, volume.origin)
def resample(volume, factor, order=1):
    """Return *volume* resampled by *factor* using a spline of given order."""
    with warnings.catch_warnings():
        # scipy's zoom() can emit a UserWarning for some factors; keep the
        # original behaviour of silencing it.
        warnings.simplefilter('ignore')
        rescaled = zoom(volume.array, factor, order=order)
    return Volume(rescaled, volume.voxelspacing / factor, volume.origin)
def trim(volume, cutoff, margin=2):
    """Crop *volume* to the region where density exceeds *cutoff*.

    A margin of *margin* voxels is kept on every side.  Raises ValueError
    when no voxel exceeds the cutoff.
    """
    if volume.array.max() <= cutoff:
        raise ValueError('Cutoff value should be lower than density max.')
    extent = []
    # NOTE: xrange -- this module targets Python 2.
    for axis in xrange(volume.array.ndim):
        # Bring the current axis to the front so slabs can be scanned.
        tmp = np.swapaxes(volume.array, 0, axis)
        # First slab (from the low end) above the cutoff, minus the margin.
        for n, s in enumerate(tmp):
            if s.max() > cutoff:
                low = max(0, n - margin)
                break
        # First slab from the high end above the cutoff, plus the margin.
        for n, s in enumerate(tmp[::-1]):
            if s.max() > cutoff:
                high = min(tmp.shape[0], tmp.shape[0] - n + margin)
                break
        extent.append(slice(low, high))
    # NOTE(review): indexing with a *list* of slices is deprecated in newer
    # NumPy releases -- consider tuple(extent); left unchanged here.
    sub_array = volume.array[extent]
    # extent is in (z, y, x) order; reverse it to match the (x, y, z) origin.
    origin = [coor_origin + volume.voxelspacing * ext.start
              for coor_origin, ext in zip(volume.origin, extent[::-1])]
    return Volume(sub_array, volume.voxelspacing, origin)
def extend(volume, shape):
    """Return a new Volume of the given (larger) shape with *volume*'s data
    placed at the low corner; voxel spacing and origin are preserved.
    """
    new_volume = zeros(shape, volume.voxelspacing, volume.origin)
    # Bug fix: index with a tuple of slices -- indexing an ndarray with a
    # *list* of slices is deprecated and removed in newer NumPy releases.
    ind = tuple(slice(x) for x in volume.shape)
    new_volume.array[ind] = volume.array
    return new_volume
def nearest_multiple2357(num):
    """Return the smallest number >= *num* whose prime factors are all
    in {2, 3, 5, 7} (useful for fast FFT sizes).
    """
    candidate = num
    while not is_multiple2357(candidate):
        candidate += 1
    return candidate
def is_multiple2357(num):
    """Return True if *num* factors completely into powers of 2, 3, 5 and 7.

    Bug fixes: the previous docstring described nearest_multiple2357, not
    this predicate; division used '/' which is *float* division under
    ``from __future__ import division`` (a precision hazard for large
    inputs); and num <= 0 looped forever.
    """
    if num < 1:
        return False
    MULTIPLES = (2, 3, 5, 7)
    for multiple in MULTIPLES:
        while num % multiple == 0:
            num //= multiple
    return num == 1
def res_to_sigma(resolution):
    """Convert a map resolution into the sigma of the equivalent Gaussian."""
    return resolution / (np.pi * np.sqrt(2.0))
def sigma_to_res(sigma):
    """Convert a Gaussian sigma back into the corresponding map resolution."""
    return np.pi * np.sqrt(2.0) * sigma
def lower_resolution(vol, res_high, res_low):
    """Return a copy of *vol* blurred from resolution *res_high* down to
    *res_low* by Gaussian convolution.
    """
    # The convolution of two Gaussians is again Gaussian with
    # sigma_new^2 = sigma_current^2 + sigma_kernel^2, hence the kernel
    # width below.  See http://mathworld.wolfram.com/Convolution.html
    sigma_current = res_to_sigma(res_high)
    sigma_target = res_to_sigma(res_low)
    kernel_sigma = np.sqrt(sigma_target ** 2 - sigma_current ** 2) / vol.voxelspacing
    smoothed = gaussian_filter(vol.array, kernel_sigma, mode='constant')
    return Volume(smoothed, vol.voxelspacing, vol.origin)
def structure_to_shape(
        xyz, resolution, out=None, voxelspacing=None, radii=None, weights=None, shape='vol'
):
    """Rasterize atom coordinates *xyz* (3, natoms) into a density volume
    ('vol', Gaussian-blurred weights) or a binary mask ('mask', dilated
    spheres).  Writes into *out* when given, else allocates a new grid.
    """
    if shape not in ('vol', 'mask'):
        raise ValueError("shape should either be 'vol' or 'mask'")
    # Default grid sampling: a quarter of the target resolution.
    if out is None and voxelspacing is None:
        voxelspacing = resolution / 4.0
    else:
        voxelspacing = out.voxelspacing
    if shape == 'vol':
        if weights is None:
            weights = np.ones(xyz.shape[1])
        elif weights.size != xyz.shape[1]:
            raise ValueError("weights array is of incorrect size")
    if shape == 'mask':
        if radii is None:
            radii = np.empty(xyz.shape[1], dtype=np.float64)
            radii.fill(5)
        elif radii.size != xyz.shape[1]:
            raise ValueError("weights array is of incorrect size")
        # NOTE(review): this mutates the caller's radii array in place --
        # confirm no caller relies on the original values.
        radii /= voxelspacing
    sigma = res_to_sigma(resolution)
    if out is None:
        # Fit a new grid around the structure with a 4-sigma padding.
        # NOTE: local 'extend' shadows the module-level extend() function.
        extend = 4 * sigma
        imin = xyz.min(axis=1) - extend
        imax = xyz.max(axis=1) + extend
        center = (imin + imax) / 2.0
        ni = (np.ceil((imax - imin) / voxelspacing)).astype(np.int32)
        origin = center - (ni * voxelspacing) / 2.0
        grid = np.zeros(ni[::-1])
        xyz_grid = xyz + (ni * voxelspacing / 2.0 - center).reshape(-1, 1)
        out = Volume(grid, voxelspacing, origin)
    else:
        xyz_grid = xyz - out.origin.reshape(-1, 1)
    # Convert Cartesian coordinates to voxel indices.
    xyz_grid /= voxelspacing
    if shape == 'vol':
        blur_points(xyz_grid, weights, sigma / voxelspacing, out.array, True)
    elif shape == 'mask':
        dilate_points(xyz_grid, radii, out.array, True)
    return out
def structure_to_shape_like(vol, xyz, resolution=None, weights=None,
                            radii=None, shape='vol'):
    """Rasterize atom coordinates *xyz* (3, natoms) onto the grid of *vol*,
    as a density ('vol') or a binary mask ('mask').

    NOTE(review): when resolution is None this reads ``vol.resolution``,
    an attribute the Volume class does not define -- confirm callers
    always pass an explicit resolution.
    """
    if resolution is None:
        resolution = vol.resolution
    # Bug fix: the comparison used ' vol' (leading space), which can never
    # match, so the weights default/validation branch was dead code.
    if shape == 'vol':
        if weights is None:
            weights = np.ones(xyz.shape[1])
        elif weights.size != xyz.shape[1]:
            raise ValueError("weights array is of incorrect size")
    if shape == 'mask':
        if radii is None:
            radii = np.empty(xyz.shape[1], dtype=np.float64)
            #radii.fill(0.5 * resolution)
            radii.fill(5)
        elif radii.size != xyz.shape[1]:
            raise ValueError("weights array is of incorrect size")
        # Fix: avoid mutating the caller's radii array in place.
        radii = radii / vol.voxelspacing
    sigma = (resolution / (np.sqrt(2.0) * np.pi)) / vol.voxelspacing
    # move the coordinates to the origin of the grid
    xyz_grid = xyz - np.asarray(vol.origin, dtype=np.float64).reshape(3, 1)
    xyz_grid /= vol.voxelspacing
    out = zeros_like(vol)
    if shape == 'vol':
        blur_points(xyz_grid, weights, sigma, out.array, True)
    elif shape == 'mask':
        dilate_points(xyz_grid, radii, out.array, True)
    return out
# Volume parsers
def parse_volume(fid, fmt=None):
    """Parse a density file and return (array, voxelspacing, origin).

    *fid* may be a file object or a filename; when *fmt* is None the
    format is inferred from the file extension.
    """
    # A file object exposes .name; a plain string is already the name.
    fname = getattr(fid, 'name', fid)
    if fmt is None:
        fmt = os.path.splitext(fname)[-1][1:]
    if fmt in ('ccp4', 'map'):
        parser = CCP4Parser(fname)
    elif fmt == 'mrc':
        parser = MRCParser(fname)
    elif fmt in ('xplor', 'cns'):
        parser = XPLORParser(fname)
    else:
        raise ValueError('Extension of file is not supported.')
    return parser.density, parser.voxelspacing, parser.origin
class CCP4Parser(object):
    """Parser for CCP4-format density maps (also the base class for
    MRCParser).  Exposes .density, .voxelspacing and .origin.
    """

    HEADER_SIZE = 1024
    HEADER_TYPE = ('i' * 10 + 'f' * 6 + 'i' * 3 + 'f' * 3 + 'i' * 3 +
                   'f' * 27 + 'c' * 8 + 'f' * 1 + 'i' * 1 + 'c' * 800)
    HEADER_FIELDS = (
        'nc nr ns mode ncstart nrstart nsstart nx ny nz xlength ylength '
        'zlength alpha beta gamma mapc mapr maps amin amax amean ispg '
        'nsymbt lskflg skwmat skwtrn extra xstart ystart zstart map '
        'machst rms nlabel label'
    ).split()
    HEADER_CHUNKS = [1] * 25 + [9, 3, 12] + [1] * 3 + [4, 4, 1, 1, 800]

    def __init__(self, fid):
        if isinstance(fid, str):
            # Bug fix: the map is binary data -- open it in binary mode so
            # the parser also works on platforms that translate line endings.
            fhandle = open(fid, 'rb')
        elif isinstance(fid, file):
            fhandle = fid
        else:
            raise ValueError("Input should either be a file or filename.")
        self.fhandle = fhandle
        self.fname = fhandle.name
        # first determine the endiannes of the file
        self._get_endiannes()
        # get the header
        self._get_header()
        # Symmetry and non-rectangular boxes are not supported.
        is_orthogonal = True
        for angle_name in ['alpha', 'beta', 'gamma']:
            angle = self.header[angle_name]
            if abs(angle - 90) > 1e-3:
                is_orthogonal = False
                break
        if not is_orthogonal:
            msg = "Only densities in rectangular boxes are supported."
            raise RuntimeError(msg)
        # check the order of axis in the file
        self._get_order()
        # determine the voxelspacing and origin
        spacings = []
        for axis_name in 'xyz':
            length = self.header[axis_name + 'length']
            nvoxels = self.header['n' + axis_name]
            spacing = length / float(nvoxels)
            spacings.append(spacing)
        equal_spacing = True
        average = sum(spacings) / float(len(spacings))
        for spacing in spacings:
            if abs(spacing - average) > 1e-4:
                equal_spacing = False
        if not equal_spacing:
            msg = "Voxel spacing is not equal in all directions."
            raise RuntimeError(msg)
        self.voxelspacing = spacings[0]
        self.origin = self._get_origin()
        # generate the density
        shape_fields = 'nz ny nx'.split()
        self.shape = [self.header[field] for field in shape_fields]
        self._get_density()

    def _get_endiannes(self):
        # Byte 212 holds the machine stamp: 0x44 = little, 0x11 = big endian.
        self.fhandle.seek(212)
        m_stamp = hex(ord(self.fhandle.read(1)))
        if m_stamp == '0x44':
            endian = '<'
        elif m_stamp == '0x11':
            endian = '>'
        else:
            raise RuntimeError('Endiannes is not properly set in file. Check the file format.')
        self._endian = endian
        self.fhandle.seek(0)

    def _get_header(self):
        # Unpack the fixed 1024-byte header into a field -> value dict.
        header = _unpack(self._endian + self.HEADER_TYPE,
                         self.fhandle.read(self.HEADER_SIZE))
        self.header = {}
        index = 0
        for field, nchunks in zip(self.HEADER_FIELDS, self.HEADER_CHUNKS):
            end = index + nchunks
            if nchunks > 1:
                self.header[field] = header[index: end]
            else:
                self.header[field] = header[index]
            index = end
        self.header['label'] = ''.join(self.header['label'])

    def _get_origin(self):
        start_fields = 'nsstart nrstart ncstart'.split()
        start = [self.header[field] for field in start_fields]
        # Take care of axis order
        start = [start[x - 1] for x in self.order]
        return np.asarray([x * self.voxelspacing for x in start])

    def _get_density(self):
        # Determine the dtype of the file based on the mode
        mode = self.header['mode']
        if mode == 0:
            dtype = 'i1'
        elif mode == 1:
            dtype = 'i2'
        elif mode == 2:
            dtype = 'f4'
        else:
            # Bug fix: an unknown mode previously left 'dtype' unbound and
            # crashed with a NameError below; fail with a clear message.
            raise ValueError("Mode {:} is not supported.".format(mode))
        density = np.fromfile(self.fhandle,
                              dtype=self._endian + dtype).reshape(self.shape)
        # Bug fix: these axis swaps used to operate on self.density *before*
        # that attribute existed (raising AttributeError for any
        # non-standard axis order), and their result was then discarded.
        # Operate on the local array instead.
        if self.order == (1, 3, 2):
            density = np.swapaxes(density, 0, 1)
        elif self.order == (2, 1, 3):
            density = np.swapaxes(density, 1, 2)
        elif self.order == (2, 3, 1):
            density = np.swapaxes(density, 2, 0)
            density = np.swapaxes(density, 0, 1)
        elif self.order == (3, 1, 2):
            density = np.swapaxes(density, 2, 1)
            density = np.swapaxes(density, 0, 2)
        elif self.order == (3, 2, 1):
            density = np.swapaxes(density, 0, 2)
        # Upgrade precision to double if float, and to int32 if int16
        if mode == 1:
            density = density.astype(np.int32)
        elif mode == 2:
            density = density.astype(np.float64)
        self.density = density

    def _get_order(self):
        # Axis order of columns/rows/sections as stored in the file.
        self.order = tuple(self.header[axis] for axis in ('mapc', 'mapr',
                                                          'maps'))
class MRCParser(CCP4Parser):
    """CCP4Parser variant for MRC maps: the origin is stored directly in
    the xstart/ystart/zstart header floats rather than derived from the
    start indices.
    """
    def _get_origin(self):
        return [self.header[field]
                for field in ('xstart', 'ystart', 'zstart')]
def to_mrc(fid, volume, labels=[], fmt=None):
    """Write *volume* to *fid* in CCP4/MRC map format.

    The format is inferred from the file extension when *fmt* is None.
    NOTE: the labels parameter is currently ignored (see TODO below).
    """
    if fmt is None:
        fmt = os.path.splitext(fid)[-1][1:]
    if fmt not in ('ccp4', 'mrc', 'map'):
        raise ValueError('Format is not recognized. Use ccp4, mrc, or map.')
    voxelspacing = volume.voxelspacing
    nz, ny, nx = volume.shape
    dtype = volume.array.dtype.name
    if dtype == 'int8':
        mode = 0
    elif dtype in ('int16', 'int32'):
        mode = 1
    elif dtype in ('float32', 'float64'):
        mode = 2
    else:
        raise TypeError("Data type ({:})is not supported.".format(dtype))
    # CCP4 stores the origin as start indices; MRC stores it as floats.
    if fmt in ('ccp4', 'map'):
        nxstart, nystart, nzstart = [int(round(x)) for x in volume.start]
    else:
        nxstart, nystart, nzstart = [0, 0, 0]
    xl, yl, zl = volume.dimensions
    alpha = beta = gamma = 90.0
    mapc, mapr, maps = [1, 2, 3]
    ispg = 1
    nsymbt = 0
    lskflg = 0
    skwmat = [0.0] * 9
    skwtrn = [0.0] * 3
    fut_use = [0.0] * 12
    if fmt == 'mrc':
        origin = volume.origin
    else:
        origin = [0, 0, 0]
    str_map = list('MAP ')
    if _BYTEORDER == 'little':
        machst = list('\x44\x41\x00\x00')
    elif _BYTEORDER == 'big':
        # Bug fix: the big-endian machine stamp was identical to the
        # little-endian one; per the CCP4 spec (and _get_endiannes above)
        # it must start with 0x11.
        machst = list('\x11\x11\x00\x00')
    else:
        # Bug fix: the message referenced the undefined name 'byteorder'
        # (NameError); use the imported _BYTEORDER instead.
        raise ValueError("Byteorder {:} is not recognized".format(_BYTEORDER))
    labels = [' '] * 800
    nlabels = 0
    min_density = volume.array.min()
    max_density = volume.array.max()
    mean_density = volume.array.mean()
    std_density = volume.array.std()
    with open(fid, 'wb') as out:
        out.write(_pack('i', nx))
        out.write(_pack('i', ny))
        out.write(_pack('i', nz))
        out.write(_pack('i', mode))
        out.write(_pack('i', nxstart))
        out.write(_pack('i', nystart))
        out.write(_pack('i', nzstart))
        out.write(_pack('i', nx))
        out.write(_pack('i', ny))
        out.write(_pack('i', nz))
        out.write(_pack('f', xl))
        out.write(_pack('f', yl))
        out.write(_pack('f', zl))
        out.write(_pack('f', alpha))
        out.write(_pack('f', beta))
        out.write(_pack('f', gamma))
        out.write(_pack('i', mapc))
        out.write(_pack('i', mapr))
        out.write(_pack('i', maps))
        out.write(_pack('f', min_density))
        out.write(_pack('f', max_density))
        out.write(_pack('f', mean_density))
        out.write(_pack('i', ispg))
        out.write(_pack('i', nsymbt))
        out.write(_pack('i', lskflg))
        for f in skwmat:
            out.write(_pack('f', f))
        for f in skwtrn:
            out.write(_pack('f', f))
        for f in fut_use:
            out.write(_pack('f', f))
        for f in origin:
            out.write(_pack('f', f))
        for c in str_map:
            out.write(_pack('c', c))
        for c in machst:
            out.write(_pack('c', c))
        out.write(_pack('f', std_density))
        # max 10 labels
        # nlabels = min(len(labels), 10)
        # TODO labels not handled correctly
        #for label in labels:
        #    list_label = [c for c in label]
        #    llabel = len(list_label)
        #    if llabel < 80:
        #
        #    # max 80 characters
        #    label = min(len(label), 80)
        out.write(_pack('i', nlabels))
        for c in labels:
            out.write(_pack('c', c))
        # write density
        modes = [np.int8, np.int16, np.float32]
        volume.array.astype(modes[mode]).tofile(out)
class XPLORParser(object):
    """
    Class for reading XPLOR volume files created by NIH-XPLOR or CNS.
    """
    def __init__(self, fid):
        # NOTE: 'file' is the Python 2 builtin -- this module targets Py2.
        if isinstance(fid, file):
            fname = fid.name
        elif isinstance(fid, str):
            fname = fid
            fid = open(fid)
        else:
            raise TypeError('Input should either be a file or filename')
        self.source = fname
        self._get_header()
    def _get_header(self):
        """Parse the fixed-column ASCII header into self.header."""
        header = {}
        with open(self.source) as volume:
            # first line is blank
            volume.readline()
            line = volume.readline()
            nlabels = int(line.split()[0])
            label = [volume.readline() for n in range(nlabels)]
            header['label'] = label
            # Grid sampling and extents: nine 8-character integer fields.
            line = volume.readline()
            header['nx'] = int(line[0:8])
            header['nxstart'] = int(line[8:16])
            header['nxend'] = int(line[16:24])
            header['ny'] = int(line[24:32])
            header['nystart'] = int(line[32:40])
            header['nyend'] = int(line[40:48])
            header['nz'] = int(line[48:56])
            header['nzstart'] = int(line[56:64])
            header['nzend'] = int(line[64:72])
            # Cell dimensions and angles: six 12-character float fields.
            line = volume.readline()
            header['xlength'] = float(line[0:12])
            header['ylength'] = float(line[12:24])
            header['zlength'] = float(line[24:36])
            header['alpha'] = float(line[36:48])
            header['beta'] = float(line[48:60])
            header['gamma'] = float(line[60:72])
            header['order'] = volume.readline()[0:3]
            self.header = header
    @property
    def voxelspacing(self):
        # Assumes cubic voxels: derived from the x axis only.
        return self.header['xlength']/float(self.header['nx'])
    @property
    def origin(self):
        return [self.voxelspacing * x for x in
                [self.header['nxstart'], self.header['nystart'], self.header['nzstart']]]
    @property
    def density(self):
        """Parse and return the density as a (nz, ny, nx) float64 array.

        Note: re-reads the file on every access.
        """
        with open(self.source) as volumefile:
            # Skip the blank line, the label block and the 3 header lines.
            for n in range(2 + len(self.header['label']) + 3):
                volumefile.readline()
            nx = self.header['nx']
            ny = self.header['ny']
            nz = self.header['nz']
            array = np.zeros((nz, ny, nx), dtype=np.float64)
            xextend = self.header['nxend'] - self.header['nxstart'] + 1
            yextend = self.header['nyend'] - self.header['nystart'] + 1
            zextend = self.header['nzend'] - self.header['nzstart'] + 1
            # Values are written 6 per line, 12 characters each.
            nslicelines = int(np.ceil(xextend*yextend/6.0))
            for i in range(zextend):
                values = []
                # Each z-section starts with its slice index.
                nslice = int(volumefile.readline()[0:8])
                for m in range(nslicelines):
                    line = volumefile.readline()
                    for n in range(len(line)//12):
                        value = float(line[n*12: (n+1)*12])
                        values.append(value)
                array[i, :yextend, :xextend] = np.float64(values).reshape(yextend, xextend)
        return array
def to_xplor(outfile, volume, label=[]):
    """Write *volume* to *outfile* in XPLOR/CNS ASCII map format.

    *label* is a read-only sequence of extra REMARK lines (the mutable
    default is safe because it is never modified).
    """
    nz, ny, nx = volume.shape
    xstart, ystart, zstart = [int(round(x)) for x in volume.start]
    xlength, ylength, zlength = volume.dimensions
    alpha = beta = gamma = 90.0
    nlabel = len(label)
    with open(outfile, 'w') as out:
        out.write('\n')
        out.write('{:>8d} !NTITLE\n'.format(nlabel + 1))
        # CNS requires at least one REMARK line
        out.write('REMARK\n')
        for n in range(nlabel):
            out.write(''.join(['REMARK ', label[n], '\n']))
        out.write(('{:>8d}' * 9 + '\n').format(nx, xstart, xstart + nx - 1,
                                               ny, ystart, ystart + ny - 1,
                                               nz, zstart, zstart + nz - 1))
        out.write(('{:12.5E}' * 6 + '\n').format(xlength, ylength, zlength,
                                                 alpha, beta, gamma))
        out.write('ZYX\n')
        # FIXME very inefficient way of writing out the volume ...
        for z in range(nz):
            out.write('{:>8d}\n'.format(z))
            n = 0
            for y in range(ny):
                for x in range(nx):
                    out.write('%12.5E' % volume.array[z, y, x])
                    n += 1
                    # Bug fix: 'n % 6 is 0' compared object identity, not
                    # value -- use == (identity on ints is a CPython accident).
                    if n % 6 == 0:
                        out.write('\n')
            if (nx * ny) % 6 > 0:
                out.write('\n')
        out.write('{:>8d}\n'.format(-9999))
        out.write('{:12.4E} {:12.4E} '.format(volume.array.mean(),
                                              volume.array.std()))
| haddocking/powerfit | powerfit/volume.py | Python | apache-2.0 | 20,760 | [
"Gaussian"
] | 16d5a6d49e013f63ff49d06d233d1a0a0e1727916e85908741356473e6c17b35 |
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on May 1, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 1, 2012"
import unittest
import os
import json
from io import open
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.plotter import DosPlotter, BSPlotter, _qvertex_target
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
try:
import scipy
except ImportError:
scipy = None
@unittest.skipIf(scipy is None, "scipy not present.")
class DosPlotterTest(unittest.TestCase):
    """Tests for DosPlotter using a serialized CompleteDos JSON fixture."""
    def setUp(self):
        # Load a complete density of states from the test fixture.
        with open(os.path.join(test_dir, "complete_dos.json"), "r",
                  encoding='utf-8') as f:
            self.dos = CompleteDos.from_dict(json.load(f))
        self.plotter = DosPlotter(sigma=0.2, stack=True)
    def test_add_dos_dict(self):
        # The plotter starts empty; adding the element-projected DOS of
        # LiFePO4 should register exactly four entries.
        d = self.plotter.get_dos_dict()
        self.assertEqual(len(d), 0)
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        d = self.plotter.get_dos_dict()
        self.assertEqual(len(d), 4)
    def test_get_dos_dict(self):
        # Keys of the returned dict are the element symbols.
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        d = self.plotter.get_dos_dict()
        for el in ["Li", "Fe", "P", "O"]:
            self.assertIn(el, d)
class BSPlotterTest(unittest.TestCase):
    """Tests for BSPlotter plot data and the _qvertex_target helper."""
    def setUp(self):
        # Load a CaO band structure along high-symmetry lines.
        with open(os.path.join(test_dir, "CaO_2605_bandstructure.json"),
                  "r", encoding='utf-8') as f:
            d = json.loads(f.read())
            self.bs = BandStructureSymmLine.from_dict(d)
            self.plotter = BSPlotter(self.bs)
    def test_bs_plot_data(self):
        # Branch/distance/tick counts for the CaO fixture.
        self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 16,
                         "wrong number of distances in the first branch")
        self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 10,
                         "wrong number of branches")
        self.assertEqual(sum([len(e) for e in self.plotter.bs_plot_data()['distances']]), 160,
                         "wrong number of distances")
        self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][5], "K",
                         "wrong tick label")
        self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
                         19, "wrong number of tick labels")
    def test_qvertex_target(self):
        # Voronoi vertices of a unit cube around its center point.
        results = _qvertex_target([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0],
                                   [0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0], [0.5, 0.5, 0.5]], 8)
        self.assertEqual(len(results), 6)
        self.assertEqual(results[3][1], 0.5)
if __name__ == "__main__":
    # Run every test in this module with the default unittest runner.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| yanikou19/pymatgen | pymatgen/electronic_structure/tests/test_plotter.py | Python | mit | 3,173 | [
"pymatgen"
] | 43221c1e798f54b9e0741abe3b0da12e64f4b2abafdb88df99b07918fdd72c9c |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Backup Config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Fix: the option below was documented as `rcserverip`, but the module's
# argument_spec (and every example) uses `serverip`; the docs now match
# the actual interface.  Also fixed a missing space in its description.
DOCUMENTATION = '''
---
module: cnos_backup
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Backup the current running or startup configuration to a remote server on devices running Lenovo CNOS
description:
    - This module allows you to work with switch configurations. It provides a
      way to back up the running or startup configurations of a switch to a
      remote server. This is achieved by periodically saving a copy of the
      startup or running configuration of the network device to a remote server
      using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from
      where the remote server can be reached. The next step is to provide the
      full file path of the location where the configuration will be backed up.
      Authentication details required by the remote server must be provided as
      well. This module uses SSH to manage network device configuration.
      The results of the operation will be placed in a directory named 'results'
      that must be created by the user in their local directory to where the playbook is run.
      For more information about this module from Lenovo and customizing it usage for your
      use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_backup.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
    configType:
        description:
            - This specifies what type of configuration will be backed up. The
              choices are the running or startup configurations. There is no
              default value, so it will result in an error if the input is
              incorrect.
        required: Yes
        default: Null
        choices: [running-config, startup-config]
    protocol:
        description:
            - This refers to the protocol used by the network device to
              interact with the remote server to where to upload the backup
              configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other
              protocols will result in error. If this parameter is not specified,
              there is no default value to be used.
        required: Yes
        default: Null
        choices: [SFTP, SCP, FTP, TFTP]
    serverip:
        description:
            - This specifies the IP Address of the remote server to where the
              configuration will be backed up.
        required: Yes
        default: Null
    rcpath:
        description:
            - This specifies the full file path where the configuration file
              will be copied on the remote server. In case the relative path is
              used as the variable value, the root folder for the user of the
              server needs to be specified.
        required: Yes
        default: Null
    serverusername:
        description:
            - Specify the username for the server relating to the protocol
              used.
        required: Yes
        default: Null
    serverpassword:
        description:
            - Specify the password for the server relating to the protocol
              used.
        required: Yes
        default: Null
'''

EXAMPLES = '''
Tasks : The following are examples of using the module cnos_backup. These are written in the main.yml file of the tasks directory.
---
- name: Test Running Config Backup
  cnos_backup:
      host: "{{ inventory_hostname }}"
      username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
      password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
      deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
      enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
      outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
      configType: running-config
      protocol: "sftp"
      serverip: "10.241.106.118"
      rcpath: "/root/cnos/G8272-running-config.txt"
      serverusername: "root"
      serverpassword: "root123"

- name: Test Startup Config Backup
  cnos_backup:
      host: "{{ inventory_hostname }}"
      username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
      password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
      deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
      enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
      outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
      configType: startup-config
      protocol: "sftp"
      serverip: "10.241.106.118"
      rcpath: "/root/cnos/G8272-startup-config.txt"
      serverusername: "root"
      serverpassword: "root123"

- name: Test Running Config Backup -TFTP
  cnos_backup:
      host: "{{ inventory_hostname }}"
      username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
      password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
      deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
      enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
      outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
      configType: running-config
      protocol: "tftp"
      serverip: "10.241.106.118"
      rcpath: "/anil/G8272-running-config.txt"
      serverusername: "root"
      serverpassword: "root123"

- name: Test Startup Config Backup - TFTP
  cnos_backup:
      host: "{{ inventory_hostname }}"
      username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
      password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
      deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
      enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
      outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
      configType: startup-config
      protocol: "tftp"
      serverip: "10.241.106.118"
      rcpath: "/anil/G8272-startup-config.txt"
      serverusername: "root"
      serverpassword: "root123"
'''

RETURN = '''
msg:
  description: Success or failure message
  returned: always
  type: string
  sample: "Config file tranferred to server"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Ansible entry point: back up a CNOS switch's running or startup
    configuration to a remote server over TFTP/FTP/SFTP/SCP, appending the
    CLI transcript to *outputfile*.

    Exits via ``module.exit_json`` on success and ``module.fail_json``
    when the transcript contains an error marker.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            configType=dict(required=True),
            protocol=dict(required=True),
            serverip=dict(required=True),
            rcpath=dict(required=True),
            serverusername=dict(required=False),
            serverpassword=dict(required=False, no_log=True),),
        supports_check_mode=False)

    # Fail cleanly if the optional module-level imports were missing.
    if not HAS_PARAMIKO:
        module.fail_json(msg='paramiko is required for this module')
    if not HAS_LIB:
        # Fix: a failed import of module_utils.network.cnos was previously
        # swallowed and surfaced later as a NameError on `cnos`.
        module.fail_json(msg='ansible.module_utils.network.cnos is required '
                             'for this module')

    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    outputfile = module.params['outputfile']
    host = module.params['host']
    # Note: deviceType is validated by argument_spec but not used below.
    configType = module.params['configType']
    protocol = module.params['protocol'].lower()
    rcserverip = module.params['serverip']
    rcpath = module.params['rcpath']
    serveruser = module.params['serverusername']
    serverpwd = module.params['serverpassword']
    output = ""
    timeout = 90        # seconds allowed for secure (sftp/scp) transfers
    tftptimeout = 450   # tftp/ftp is slower, so allow considerably longer

    # Create the SSH client and automatically add untrusted hosts (make
    # sure this is okay for the security policy in your environment).
    remote_conn_pre = paramiko.SSHClient()
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    remote_conn_pre.connect(host, username=username, password=password)
    time.sleep(2)
    try:
        # Use invoke_shell to establish an 'interactive session'.
        remote_conn = remote_conn_pre.invoke_shell()
        time.sleep(2)

        # Enter enable mode and disable paging before issuing commands.
        output += cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
        output += cnos.enterEnableModeForDevice(enablePassword, 3,
                                                remote_conn)
        output += cnos.waitForDeviceResponse("terminal length 0\n", "#", 2,
                                             remote_conn)

        # Dispatch to the cnos helper matching the requested configuration
        # source and transfer protocol.
        if configType == 'running-config':
            if protocol in ('tftp', 'ftp'):
                transfer_status = cnos.doRunningConfigBackUp(
                    protocol, tftptimeout, rcserverip, rcpath, serveruser,
                    serverpwd, remote_conn)
            elif protocol in ('sftp', 'scp'):
                transfer_status = cnos.doSecureRunningConfigBackUp(
                    protocol, timeout, rcserverip, rcpath, serveruser,
                    serverpwd, remote_conn)
            else:
                transfer_status = "Invalid Protocol option"
        elif configType == 'startup-config':
            if protocol in ('tftp', 'ftp'):
                transfer_status = cnos.doStartupConfigBackUp(
                    protocol, tftptimeout, rcserverip, rcpath, serveruser,
                    serverpwd, remote_conn)
            elif protocol in ('sftp', 'scp'):
                transfer_status = cnos.doSecureStartupConfigBackUp(
                    protocol, timeout, rcserverip, rcpath, serveruser,
                    serverpwd, remote_conn)
            else:
                transfer_status = "Invalid Protocol option"
        else:
            transfer_status = "Invalid configType Option"
    finally:
        # Fix: the SSH session was previously never closed (leaked).
        remote_conn_pre.close()

    output = output + "\n Config Back Up status \n" + transfer_status

    # Fix: use a context manager so the transcript file is closed even if
    # the write raises; also avoids shadowing the `file` builtin.
    with open(outputfile, "a") as transcript:
        transcript.write(output)

    # Report success/failure based on error markers in the transcript.
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        # NOTE(review): "tranferred" is misspelled, but it is kept to stay
        # consistent with the RETURN docs sample; fix both together.
        module.exit_json(changed=True, msg="Config file tranferred to server")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| hryamzik/ansible | lib/ansible/modules/network/cnos/cnos_backup.py | Python | gpl-3.0 | 11,453 | [
"VisIt"
] | 6e1f151ceb036b312c22efd49509558a435f83696182abaee0fde2444badddd5 |
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2013 Kurt McKee <contactme@kurtmckee.org>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
# Module credits.
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
                    "John Beimler <http://john.beimler.org/>",
                    "Fazal Majid <http://www.majid.info/mylos/weblog/>",
                    "Aaron Swartz <http://aaronsw.com/>",
                    "Kevin Marks <http://epeus.blogspot.com/>",
                    "Sam Ruby <http://intertwingly.net/>",
                    "Ade Oshineye <http://blog.oshineye.com/>",
                    "Martin Pool <http://sourcefrog.net/>",
                    "Kurt McKee <http://kurtmckee.org/>",
                    "Bernd Schlapsi <https://github.com/brot>",]

# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__

# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"

# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]

# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.  (Both flags below are truthy ints rather than booleans for
# historical reasons; any truthy value enables the behavior.)
RESOLVE_RELATIVE_URIS = 1

# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
#
# Version detection trick: on Python 2 `bytes is str`, so we raise and
# catch NameError to select the no-op Python 2 implementations; on
# Python 3 the `else` branch defines real byte conversions.
try:
    if bytes is str:
        # In Python 2.5 and below, bytes doesn't exist (NameError)
        # In Python 2.6 and above, bytes and str are the same type
        raise NameError
except NameError:
    # Python 2
    def _s2bytes(s):
        # str is already a byte string on Python 2; return unchanged.
        return s
    def _l2bytes(l):
        # Build a byte string from a list of integer byte values.
        return ''.join(map(chr, l))
else:
    # Python 3
    def _s2bytes(s):
        return bytes(s, 'utf8')
    def _l2bytes(l):
        return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
#   http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
#   https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
    'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
    'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
    'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
# Uncomment the next line (and remove the tuple above) to disable URI
# scheme filtering entirely:
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
    import xml.sax
    from xml.sax.saxutils import escape as _xmlescape
except ImportError:
    _XML_AVAILABLE = 0
    def _xmlescape(data, entities={}):
        # Minimal stand-in for xml.sax.saxutils.escape: replace the three
        # XML-special characters with entity references, then apply any
        # caller-supplied (char, entity) replacement pairs.
        #
        # Fix: the replacement strings had been garbled into no-ops
        # ('&' -> '&', etc.); restored the standard entity references,
        # matching xml.sax.saxutils.escape ('&' first so later
        # replacements are not double-escaped).
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        for char, entity in entities:
            data = data.replace(char, entity)
        return data
else:
    try:
        xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
    except xml.sax.SAXReaderNotAvailable:
        _XML_AVAILABLE = 0
    else:
        _XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content santizing
try:
    import sgmllib
except ImportError:
    # This is probably Python 3, which doesn't include sgmllib anymore
    _SGML_AVAILABLE = 0

    # Mock sgmllib enough to allow subclassing later on
    class sgmllib(object):
        class SGMLParser(object):
            def goahead(self, i):
                pass
            def parse_starttag(self, i):
                pass
else:
    _SGML_AVAILABLE = 1

    # sgmllib defines a number of module-level regular expressions that are
    # insufficient for the XML parsing feedparser needs. Rather than modify
    # the variables directly in sgmllib, they're defined here using the same
    # names, and the compiled code objects of several sgmllib.SGMLParser
    # methods are copied into _BaseHTMLProcessor so that they execute in
    # feedparser's scope instead of sgmllib's scope.
    charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
    tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
    attrfind = re.compile(
        r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
        r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
    )

    # Unfortunately, these must be copied over to prevent NameError exceptions
    entityref = sgmllib.entityref
    incomplete = sgmllib.incomplete
    interesting = sgmllib.interesting
    shorttag = sgmllib.shorttag
    shorttagopen = sgmllib.shorttagopen
    starttagopen = sgmllib.starttagopen

    class _EndBracketRegEx:
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, target, index=0):
            match = self.endbracket.match(target, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
                return EndBracketMatch(match)
            return None

    # Thin adapter so callers can use .start(n) where the underlying
    # match object's .end(n) is wanted.
    class EndBracketMatch:
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)

    endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception):
    """Root of the benign, informational parser exception hierarchy."""


class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe):
    """The declared character encoding was overridden during parsing."""


class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe):
    """The character encoding could not be determined."""


class NonXMLContentType(ThingsNobodyCaresAboutButMe):
    """The document was served with a non-XML Content-Type."""


class UndeclaredNamespace(Exception):
    """An element referenced a namespace prefix that was never declared."""
# Maps internal feed-version identifiers to human-readable format names;
# the empty-string key means the format could not be determined.
SUPPORTED_VERSIONS = {'': u'unknown',
                      'rss090': u'RSS 0.90',
                      'rss091n': u'RSS 0.91 (Netscape)',
                      'rss091u': u'RSS 0.91 (Userland)',
                      'rss092': u'RSS 0.92',
                      'rss093': u'RSS 0.93',
                      'rss094': u'RSS 0.94',
                      'rss20': u'RSS 2.0',
                      'rss10': u'RSS 1.0',
                      'rss': u'RSS (unknown version)',
                      'atom01': u'Atom 0.1',
                      'atom02': u'Atom 0.2',
                      'atom03': u'Atom 0.3',
                      'atom10': u'Atom 1.0',
                      'atom': u'Atom (unknown version)',
                      'cdf': u'CDF',
                      }
class FeedParserDict(dict):
    """Dictionary with attribute-style access and a legacy-name
    compatibility layer: old feed element names ('channel', 'items',
    'guid', ...) are transparently mapped onto their modern equivalents
    via ``keymap`` for both reads and writes."""

    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'description_detail': ['summary_detail', 'subtitle_detail'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        # 'category', 'enclosures' and 'license' are synthesized from
        # other stored entries rather than stored directly.
        if key == 'category':
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                raise KeyError, "object doesn't have key 'category'"
        elif key == 'enclosures':
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
        elif key == 'license':
            for link in dict.__getitem__(self, 'links'):
                if link['rel']==u'license' and 'href' in link:
                    return link['href']
        elif key == 'updated':
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            if not dict.__contains__(self, 'updated') and \
                dict.__contains__(self, 'published'):
                warnings.warn("To avoid breaking existing software while "
                    "fixing issue 310, a temporary mapping has been created "
                    "from `updated` to `published` if `updated` doesn't "
                    "exist. This fallback will be removed in a future version "
                    "of feedparser.", DeprecationWarning)
                return dict.__getitem__(self, 'published')
            return dict.__getitem__(self, 'updated')
        elif key == 'updated_parsed':
            if not dict.__contains__(self, 'updated_parsed') and \
                dict.__contains__(self, 'published_parsed'):
                warnings.warn("To avoid breaking existing software while "
                    "fixing issue 310, a temporary mapping has been created "
                    "from `updated_parsed` to `published_parsed` if "
                    "`updated_parsed` doesn't exist. This fallback will be "
                    "removed in a future version of feedparser.",
                    DeprecationWarning)
                return dict.__getitem__(self, 'published_parsed')
            return dict.__getitem__(self, 'updated_parsed')
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                # Some legacy names map to several candidates; return the
                # first candidate that is actually present.
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if key in ('updated', 'updated_parsed'):
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            return dict.__contains__(self, key)
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True

    # Python 2 compatibility alias.
    has_key = __contains__

    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        # Writes go through keymap too, so assigning a legacy name stores
        # the value under the modern key.
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError, "object has no attribute '%s'" % key

    def __hash__(self):
        # Identity-based hash: instances are mutable dicts, so hashing by
        # id() keeps the hash stable across mutation.
        return id(self)
# Windows-1252 (cp1252) assigns printable characters to the 0x80-0x9F
# range that ISO-8859-1/Unicode reserve for C1 control codes; this table
# maps those code points to the characters cp1252 actually means.  The
# gaps (129, 141, 143, 144, 157) are undefined in cp1252.
_cp1252 = {
    128: unichr(8364), # euro sign
    130: unichr(8218), # single low-9 quotation mark
    131: unichr( 402), # latin small letter f with hook
    132: unichr(8222), # double low-9 quotation mark
    133: unichr(8230), # horizontal ellipsis
    134: unichr(8224), # dagger
    135: unichr(8225), # double dagger
    136: unichr( 710), # modifier letter circumflex accent
    137: unichr(8240), # per mille sign
    138: unichr( 352), # latin capital letter s with caron
    139: unichr(8249), # single left-pointing angle quotation mark
    140: unichr( 338), # latin capital ligature oe
    142: unichr( 381), # latin capital letter z with caron
    145: unichr(8216), # left single quotation mark
    146: unichr(8217), # right single quotation mark
    147: unichr(8220), # left double quotation mark
    148: unichr(8221), # right double quotation mark
    149: unichr(8226), # bullet
    150: unichr(8211), # en dash
    151: unichr(8212), # em dash
    152: unichr( 732), # small tilde
    153: unichr(8482), # trade mark sign
    154: unichr( 353), # latin small letter s with caron
    155: unichr(8250), # single right-pointing angle quotation mark
    156: unichr( 339), # latin small ligature oe
    158: unichr( 382), # latin small letter z with caron
    159: unichr( 376), # latin capital letter y with diaeresis
}
# Collapses stray slashes that immediately follow a URI scheme
# ("http:////host" -> "http://host"-style input repair).
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* to *base*, tolerating malformed input.

    Extra slashes after the scheme are first stripped via ``_urifixer``;
    a ValueError from ``urlparse.urljoin`` yields u'' instead of
    propagating.  The result is always returned as unicode.
    """
    uri = _urifixer.sub(r'\1\3', uri)
    if not isinstance(uri, unicode):
        uri = uri.decode('utf-8', 'ignore')
    try:
        uri = urlparse.urljoin(base, uri)
    except ValueError:
        uri = u''
    if not isinstance(uri, unicode):
        return uri.decode('utf-8', 'ignore')
    return uri
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
        """Initialize parser state.

        *baseuri* and *baselang* seed xml:base / xml:lang resolution;
        *encoding* is the document character encoding."""
        # Lazily build the lowercase namespace lookup shared by all
        # instances (class-level dict, populated once).
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed

        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0

        # georss
        self.ingeometry = 0

        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or u''
        self.lang = baselang or None
        self.svgOK = 0
        self.title_depth = -1
        self.depth = 0
        # psc_chapters_flag prevents multiple psc_chapters from being
        # captured in a single entry or item. The transition states are
        # None -> True -> False. psc_chapter elements will only be
        # captured while it is True.
        self.psc_chapters_flag = None
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')

        # A map of the following form:
        #     {
        #         object_that_value_is_set_on: {
        #             property_name: depth_of_node_property_was_extracted_from,
        #             other_property: depth_of_node_property_was_extracted_from,
        #         },
        #     }
        self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
    def unknown_starttag(self, tag, attrs):
        """Generic start-tag handler: tracks element depth, xml:base,
        xml:lang and namespace declarations, passes inline XHTML/SVG/MathML
        content through as escaped markup, then dispatches to a
        ``_start_<prefix><tag>`` method if one exists (otherwise the
        element is stored generically)."""
        # increment depth counter
        self.depth += 1

        # normalize attrs
        attrs = map(self._normalize_attributes, attrs)

        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        if not isinstance(baseuri, unicode):
            baseuri = baseuri.decode(self.encoding, 'ignore')
        # ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javscript:`)
        if self.baseuri:
            self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
        else:
            self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)

        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            if tag == 'svg':
                self.svgOK += 1
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)

        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError as e:
            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
            unknown_tag = prefix + suffix
            if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
                return self.push(unknown_tag, 1)
            else:
                # Has attributes so create it in its own dictionary
                context = self._getContext()
                context[unknown_tag] = attrsD
    def unknown_endtag(self, tag):
        """Default close-tag handler: dispatch to ``_end_<prefix><tag>`` if
        defined (else pop generically), emit literal markup for inline XHTML,
        and unwind xml:base / xml:lang scope."""
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK:
            self.svgOK -= 1
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            # while inside inline SVG, suppress special handlers entirely
            if self.svgOK:
                raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
        self.depth -= 1
    def handle_charref(self, ref):
        """Append a numeric character reference to the open element's text."""
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack:
            return
        ref = ref.lower()
        # XML-significant characters stay as references; everything else is
        # resolved to its UTF-8 encoded character
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        """Append a named entity reference to the open element's text."""
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack:
            return
        # XML-predefined entities are kept as references
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities:
            text = self.entities[ref]
            # document-declared entity expanded to a numeric reference
            if text.startswith('&#') and text.endswith(';'):
                return self.handle_entityref(text)
        else:
            try:
                name2codepoint[ref]
            except KeyError:
                # unknown entity: preserve it verbatim
                text = '&%s;' % ref
            else:
                text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        """Ignore SGML comments."""
        # called for each comment, e.g. <!-- insert message here -->
        pass
    def handle_pi(self, text):
        """Ignore processing instructions."""
        # called for each processing instruction, e.g. <?instruction>
        pass
    def handle_decl(self, text):
        """Ignore SGML declarations (e.g. <!DOCTYPE ...>)."""
        pass
    def parse_declaration(self, i):
        """Handle CDATA blocks (and skip other declarations).

        Returns the index in self.rawdata at which parsing should resume.
        """
        # override internal declaration handler to handle CDATA blocks
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            # emit the CDATA payload as escaped character data
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # We have an incomplete CDATA block.
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a declared XML namespace and sniff the feed version from it."""
        loweruri = uri.lower()
        # well-known namespaces identify the feed format before any elements do
        if not self.version:
            if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
                self.version = u'rss090'
            elif loweruri == 'http://purl.org/rss/1.0/':
                self.version = u'rss10'
            elif loweruri == 'http://www.w3.org/2005/atom':
                self.version = u'atom10'
        if loweruri.find(u'backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = u'http://backend.userland.com/rss'
            loweruri = uri
        if loweruri in self._matchnamespaces:
            # known namespace: remap the document prefix to our canonical one
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        """Resolve a possibly-relative URI against the current xml:base."""
        return _urljoin(self.baseuri or u'', uri)
    def decodeEntities(self, element, data):
        """Hook for subclasses to decode entities in embedded markup; the
        base implementation returns the data unchanged."""
        return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close *element*: join its accumulated text, post-process it
        (base64, relative URIs, entities, sanitization, encoding fixes) and
        store it in the feed/entry/source context. Returns the final value."""
        if not self.elementstack:
            return
        if self.elementstack[-1][0] != element:
            # mismatched close tag: ignore rather than corrupt the stack
            return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            # <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0:
                            break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # loop ran to completion: the leading <div> wraps everything
                    pieces = pieces[1:-1]
        # Ensure each piece is a str for Python 3
        for (i, v) in enumerate(pieces):
            if not isinstance(v, unicode):
                pieces[i] = v.decode('utf-8')
        output = u''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText:
            # element was opened only for structure; skip post-processing
            return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = _base64decode(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
            except TypeError:
                # In Python 3, base64 takes and outputs bytes, not str
                # This may not be the most correct way to accomplish this
                output = _base64decode(output.encode('utf-8')).decode('utf-8')
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            # do not resolve guid elements with isPermalink="false"
            if not element == 'id' or self.guidislink:
                output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # some feed formats require consumers to guess
        # whether the content is html or plain text
        if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
            if self.lookslikehtml(output):
                self.contentparams['type'] = u'text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and RESOLVE_RELATIVE_URIS:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
        # sanitize embedded markup
        if is_htmlish and SANITIZE_HTML:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
        if self.encoding and not isinstance(output, unicode):
            output = output.decode(self.encoding, 'ignore')
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
            try:
                output = output.encode('iso-8859-1').decode('utf-8')
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass
        # map win-1252 extensions to the proper code points
        if isinstance(output, unicode):
            output = output.translate(_cp1252)
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # keep only the shallowest title encountered so far
        if element == 'title' and -1 < self.title_depth <= self.depth:
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                # only overwrite an existing value if this one is at the
                # same or a shallower nesting depth
                old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
                if old_value_depth is None or self.depth <= old_value_depth:
                    self.property_depth_map[self.entries[-1]][element] = self.depth
                    self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Enter content mode for *tag*: record type/language/base for the
        block and open the element on the stack."""
        self.incontent += 1
        if self.lang:
            self.lang=self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        # decide up front whether this block's payload is base64-encoded
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored. This is an attempt to detect
    # the most common cases. As false positives often result in silent
    # data loss, this function errs on the conservative side.
    @staticmethod
    def lookslikehtml(s):
        """Return 1 if *s* appears to contain genuine HTML markup, else None.

        NOTE(review): relies on Python 2 filter() returning a list; under
        Python 3 a filter object is always truthy and this would misreport.
        """
        # must have a close tag or an entity reference to qualify
        if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
            return
        # all tags must be in a restricted subset of valid HTML tags
        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
            re.findall(r'</?(\w+)',s)):
            return
        # all entities must have been defined as valid HTML entities
        if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
            return
        return 1
    def _mapToStandardPrefix(self, name):
        """Rewrite a 'prefix:suffix' name to use our canonical namespace prefix."""
        colonpos = name.find(':')
        if colonpos <> -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name
    def _getAttribute(self, attrsD, name):
        """Look up an attribute by its namespace-normalized name."""
        return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
    def _start_channel(self, attrsD):
        """Enter the feed-level <channel> element."""
        self.infeed = 1
        self._cdf_common(attrsD)
    def _cdf_common(self, attrsD):
        """Handle CDF-style 'lastmod' and 'href' attributes by simulating the
        corresponding start/data/end element events."""
        if 'lastmod' in attrsD:
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if 'href' in attrsD:
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
    def _end_channel(self):
        """Leave feed scope."""
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        """Enter an <image> element (feed-level only gets an 'image' dict)."""
        context = self._getContext()
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        # reset so a title inside the image is tracked independently
        self.title_depth = -1
        self.push('image', 0)
    def _end_image(self):
        """Leave an <image> element."""
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        """Enter a <textinput> element."""
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.title_depth = -1
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        """Leave a <textinput> element."""
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        """Enter an author-like element; start a fresh entry in 'authors'."""
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        """Leave an author-like element and reconcile name/email details."""
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        """Enter an <itunes:owner> element (treated as a publisher)."""
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        """Leave <itunes:owner> and reconcile publisher details."""
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        """Enter a <contributor> element; start a fresh 'contributors' entry."""
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        """Leave a <contributor> element."""
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        """Enter <dc:contributor>; its text is the contributor's name."""
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        """Leave <dc:contributor> via the name handler."""
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        """Enter a <name> element."""
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        """Leave a <name> element, routing the value to whichever person-like
        construct (publisher/author/contributor/textinput) is open."""
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        """Enter a <width> element."""
        self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
    def _start_height(self, attrsD):
        """Enter a <height> element."""
        self.push('height', 0)
    def _end_height(self):
        """Close <height>: coerce its text to int and store on the image."""
        value = self.pop('height')
        try:
            value = int(value)
        except ValueError:
            # non-numeric heights collapse to 0 rather than crashing
            value = 0
        if self.inimage:
            context = self._getContext()
            context['height'] = value
    def _start_url(self, attrsD):
        """Enter a <url>/<homepage>/<uri> element (stored as 'href')."""
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        """Close a URL-like element, routing 'href' to the open person."""
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        """Enter an <email> element."""
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        """Close <email>, routing the address to the open person construct."""
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        """Store one author field both in '<prefix>_detail' and in the last
        entry of the 'authors' list, then re-sync the flat string form."""
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
    def _save_contributor(self, key, value):
        """Store one field on the most recently opened contributor."""
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        """Enter a subtitle-like element (plain text by default)."""
        self.pushContent('subtitle', attrsD, u'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        """Leave a subtitle-like element."""
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        """Enter a rights/copyright element (plain text by default)."""
        self.pushContent('rights', attrsD, u'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        """Leave a rights/copyright element."""
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        """Enter an <item>/<entry>: start a new entry dict and reset state."""
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        self.title_depth = -1
        self.psc_chapters_flag = None
        # RDF feeds carry the entry id as an attribute
        id = self._getAttribute(attrsD, 'rdf:about') or self._getAttribute(attrsD, 'id')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    def _end_item(self):
        """Leave an <item>/<entry>."""
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        """Enter a <language>/<dc:language> element."""
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        """Close the language element and adopt its value as current lang."""
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        """Enter a publisher/webmaster element."""
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        """Leave a publisher/webmaster element and reconcile details."""
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        """Enter a publication-date element."""
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    _start_pubdate = _start_published
    def _end_published(self):
        """Close a publication-date element; store the parsed date too."""
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    _end_pubdate = _end_published
    def _start_updated(self, attrsD):
        """Enter an updated/modified date element."""
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        """Close an updated/modified date element; store the parsed date too."""
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        """Enter a creation-date element."""
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        """Close a creation-date element; store the parsed date too."""
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        """Enter an <expirationDate> element."""
        self.push('expired', 1)
    def _end_expirationdate(self):
        """Close <expirationDate>; store the parsed date."""
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    # geospatial location, or "where", from georss.org
    def _start_georssgeom(self, attrsD):
        """Enter a georss geometry element; prepare the 'where' dict."""
        self.push('geometry', 0)
        context = self._getContext()
        context['where'] = FeedParserDict()
    _start_georss_point = _start_georssgeom
    _start_georss_line = _start_georssgeom
    _start_georss_polygon = _start_georssgeom
    _start_georss_box = _start_georssgeom
    def _save_where(self, geometry):
        """Merge a parsed geometry into the context's 'where' dict."""
        context = self._getContext()
        context['where'].update(geometry)
    def _end_georss_point(self):
        """Close <georss:point>; parse and store the geometry."""
        geometry = _parse_georss_point(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _end_georss_line(self):
        """Close <georss:line>; parse and store the geometry."""
        geometry = _parse_georss_line(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _end_georss_polygon(self):
        """Close <georss:polygon>; parse and store the geometry."""
        this = self.pop('geometry')
        geometry = _parse_georss_polygon(this)
        if geometry:
            self._save_where(geometry)
    def _end_georss_box(self):
        """Close <georss:box>; parse and store the geometry."""
        geometry = _parse_georss_box(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _start_where(self, attrsD):
        """Enter a <where>/<georss:where> container; reset the 'where' dict."""
        self.push('where', 0)
        context = self._getContext()
        context['where'] = FeedParserDict()
    _start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
    def _start_gml_point(self, attrsD):
        """Enter a <gml:Point>."""
        self._parse_srs_attrs(attrsD)
        self.ingeometry = 1
        self.push('geometry', 0)
    def _start_gml_linestring(self, attrsD):
        """Enter a <gml:LineString>."""
        self._parse_srs_attrs(attrsD)
        self.ingeometry = 'linestring'
        self.push('geometry', 0)
    def _start_gml_polygon(self, attrsD):
        """Enter a <gml:Polygon>."""
        self._parse_srs_attrs(attrsD)
        self.push('geometry', 0)
    def _start_gml_exterior(self, attrsD):
        """Enter a <gml:exterior> ring."""
        self.push('geometry', 0)
    def _start_gml_linearring(self, attrsD):
        """Enter a <gml:LinearRing> (polygon boundary)."""
        self.ingeometry = 'polygon'
        self.push('geometry', 0)
    def _start_gml_pos(self, attrsD):
        """Enter a <gml:pos> coordinate element."""
        self.push('pos', 0)
    def _end_gml_pos(self):
        """Close <gml:pos>: parse one coordinate, honoring the declared SRS.

        Axis order is swapped (lat/lon vs lon/lat) only for geographic
        coordinate systems known to _geogCS.
        """
        this = self.pop('pos')
        context = self._getContext()
        srsName = context['where'].get('srsName')
        srsDimension = context['where'].get('srsDimension', 2)
        swap = True
        if srsName and "EPSG" in srsName:
            epsg = int(srsName.split(":")[-1])
            swap = bool(epsg in _geogCS)
        geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
        if geometry:
            self._save_where(geometry)
    def _start_gml_poslist(self, attrsD):
        """Enter a <gml:posList> coordinate-list element."""
        self.push('pos', 0)
    def _end_gml_poslist(self):
        """Close <gml:posList>: parse the coordinate list per the current
        geometry kind and SRS (axis swap only for known geographic CRS)."""
        this = self.pop('pos')
        context = self._getContext()
        srsName = context['where'].get('srsName')
        srsDimension = context['where'].get('srsDimension', 2)
        swap = True
        if srsName and "EPSG" in srsName:
            epsg = int(srsName.split(":")[-1])
            swap = bool(epsg in _geogCS)
        geometry = _parse_poslist(
            this, self.ingeometry, swap=swap, dims=srsDimension)
        if geometry:
            self._save_where(geometry)
    def _end_geom(self):
        """Leave any GML geometry element."""
        self.ingeometry = 0
        self.pop('geometry')
    _end_gml_point = _end_geom
    _end_gml_linestring = _end_geom
    _end_gml_linearring = _end_geom
    _end_gml_exterior = _end_geom
    _end_gml_polygon = _end_geom
    def _end_where(self):
        """Leave a <where> container."""
        self.pop('where')
    _end_georss_where = _end_where
    # end geospatial
    def _start_cc_license(self, attrsD):
        """Record a <cc:license rdf:resource="..."> as a rel='license' link."""
        context = self._getContext()
        value = self._getAttribute(attrsD, 'rdf:resource')
        attrsD = FeedParserDict()
        attrsD['rel'] = u'license'
        if value:
            attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        """Enter a <creativeCommons:license> element."""
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
    def _end_creativecommons_license(self):
        """Close <creativeCommons:license>: convert the text URL into a
        rel='license' link and drop the temporary 'license' key."""
        value = self.pop('license')
        context = self._getContext()
        attrsD = FeedParserDict()
        attrsD['rel'] = u'license'
        if value:
            attrsD['href'] = value
        context.setdefault('links', []).append(attrsD)
        del context['license']
    _end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
    def _start_category(self, attrsD):
        """Enter a category-like element: record {id, name} pairs (iTunes
        directory style) and {term, scheme, label} tags."""
        context = self._getContext()
        categories = context.setdefault('categories', [])
        id = attrsD.get('id')
        name = attrsD.get('name')
        if id and name:
            value = FeedParserDict({'id': id, 'name': name})
            if value not in categories:
                categories.append(value)
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _start_subcat(self, attrsD):
        """Enter a nested category: record it under the parent's 'subcats'."""
        context = self._getContext()
        subcats = context['categories'][-1].setdefault('subcats', [])
        id = attrsD.get('id')
        name = attrsD.get('name')
        if id and name:
            value = FeedParserDict({'id': id, 'name': name})
            if value not in subcats:
                subcats.append(value)
        self.push('category', 1)
    def _start_media_category(self, attrsD):
        """Enter <media:category>; default scheme to the MRSS category schema."""
        attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
        self._start_category(attrsD)
    def _end_itunes_keywords(self):
        """Close <itunes:keywords>: split the comma list into iTunes tags."""
        for term in self.pop('itunes_keywords').split(','):
            if term.strip():
                self._addTag(term.strip(), u'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        """Enter <itunes:category>: record its text attribute as a tag."""
        self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        """Close a category-like element: attach the text either as the term
        of the last (term-less) tag or as a brand-new tag."""
        value = self.pop('category')
        if not value:
            return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    _end_media_category = _end_category
    _end_subcat = _end_category
    def _start_cloud(self, attrsD):
        """Record the <cloud> element's attributes verbatim."""
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Enter a <link> element: default rel/type, resolve href, append to
        'links', and promote an alternate-HTML href to context['link']."""
        attrsD.setdefault('rel', u'alternate')
        if attrsD['rel'] == u'self':
            attrsD.setdefault('type', u'application/atom+xml')
        else:
            attrsD.setdefault('type', u'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if 'href' in attrsD:
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if 'href' in attrsD:
            # href attribute present: the element body carries no link text
            expectingText = 0
            if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    def _end_link(self):
        """Close a <link> element (value storage handled inside pop)."""
        value = self.pop('link')
    def _start_guid(self, attrsD):
        """Enter a <guid>/<id> element; note whether it doubles as a link."""
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    _start_id = _start_guid
    def _end_guid(self):
        """Close <guid>/<id>; use the guid as the link when appropriate."""
        value = self.pop('id')
        self._save('guidislink', self.guidislink and 'link' not in self._getContext())
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    _end_id = _end_guid
    def _start_title(self, attrsD):
        """Enter a <title> element (passed through as markup inside SVG)."""
        if self.svgOK:
            return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        """Close a <title> element; remember its depth for de-duplication."""
        if self.svgOK:
            return
        value = self.popContent('title')
        if not value:
            return
        self.title_depth = self.depth
    _end_dc_title = _end_title
    def _end_media_title(self):
        """Close <media:title> without letting it claim the title depth."""
        title_depth = self.title_depth
        self._end_title()
        self.title_depth = title_depth
    def _start_description(self, attrsD):
        """Enter a <description>; if a summary already exists, treat this as
        full content instead."""
        context = self._getContext()
        if 'summary' in context:
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    _start_media_description = _start_description
    def _start_abstract(self, attrsD):
        """Enter an <abstract> element (plain-text description)."""
        self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        """Close a description-like element (or its content fallback)."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    _end_media_description = _end_description
    def _start_info(self, attrsD):
        """Enter an <info> element (plain text)."""
        self.pushContent('info', attrsD, u'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        """Leave an <info> element."""
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        """Enter a <generator> element; record its attributes as detail."""
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if 'href' in attrsD:
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        """Close <generator>; its text becomes the detail's name."""
        value = self.pop('generator')
        context = self._getContext()
        if 'generator_detail' in context:
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        """Handle <admin:generatorAgent rdf:resource="...">: record the URI
        as both the generator value and the generator detail href."""
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        """Handle <admin:errorReportsTo rdf:resource="...">."""
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        # A second summary-like element is reinterpreted as content so the
        # first summary is not overwritten.
        context = self._getContext()
        if 'summary' in context:
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        # Mirror _start_summary: close whichever element it actually opened.
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        # Begin collecting <source> metadata into self.sourcedata.
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        self.title_depth = -1
    def _end_source(self):
        self.insource = 0
        value = self.pop('source')
        if value:
            self.sourcedata['title'] = value
        # deep-copy so clearing sourcedata does not mutate the stored dict
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        # Atom <content>: may carry its body out-of-line via src=.
        self.pushContent('content', attrsD, u'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_body(self, attrsD):
        # <body>/<xhtml:body> is inline XHTML content.
        self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        # content:encoded (and <fullitem>) is escaped HTML content.
        self.pushContent('content', attrsD, u'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Text and HTML content is mirrored into 'summary' for consumers
        # that only look there.
        copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToSummary:
            self._save('summary', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        # Index trick: 'yes' -> 2, 'clean' -> True (== 1), otherwise 0.
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
    def _start_media_credit(self, attrsD):
        # Each media:credit appends its attributes; the element text is
        # attached by _end_media_credit.
        context = self._getContext()
        context.setdefault('media_credit', [])
        context['media_credit'].append(attrsD)
        self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
if credit != None and len(credit.strip()) != 0:
context = self._getContext()
context['media_credit'][-1]['content'] = credit
    def _start_media_restriction(self, attrsD):
        # Only the first media:restriction's attributes are kept (setdefault).
        context = self._getContext()
        context.setdefault('media_restriction', attrsD)
        self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
if restriction != None and len(restriction.strip()) != 0:
context = self._getContext()
context['media_restriction']['content'] = restriction
    def _start_media_license(self, attrsD):
        # Only the first media:license's attributes are kept (setdefault).
        context = self._getContext()
        context.setdefault('media_license', attrsD)
        self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
if license != None and len(license.strip()) != 0:
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        # Push 'url' so a text-content URL (non-standard) can be picked up
        # by _end_media_thumbnail when no url= attribute was given.
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        # media:player attributes become the media_player mapping; element
        # text is attached by _end_media_player.
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
    def _end_media_player(self):
        value = self.pop('media_player')
        context = self._getContext()
        context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        # RSS <newLocation>: collect the element text for _end_newlocation.
        self.push('newlocation', 1)
    def _end_newlocation(self):
        url = self.pop('newlocation')
        context = self._getContext()
        # don't set newlocation if the context isn't right
        if context is not self.feeddata:
            return
        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
    def _start_psc_chapters(self, attrsD):
        # Only the first psc:chapters element per document is honored:
        # the flag goes None -> True here, and True -> False permanently
        # in _end_psc_chapters.
        if self.psc_chapters_flag is None:
            # Transition from None -> True
            self.psc_chapters_flag = True
            attrsD['chapters'] = []
            self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
    def _end_psc_chapters(self):
        # Transition from True -> False
        self.psc_chapters_flag = False
    def _start_psc_chapter(self, attrsD):
        # Chapters are only collected while inside the first psc:chapters.
        if self.psc_chapters_flag:
            start = self._getAttribute(attrsD, 'start')
            attrsD['start_parsed'] = _parse_psc_chapter_start(start)
            context = self._getContext()['psc_chapters']
            context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX ContentHandler that drives _FeedParserMixin for well-formed
        feeds.  Any parse error sets self.bozo and records the exception."""
        def __init__(self, baseuri, baselang, encoding):
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
            # xlink namespace declarations pending re-emission on the next
            # start tag
            self.decls = {}
        def startPrefixMapping(self, prefix, uri):
            if not uri:
                return
            # Jython uses '' instead of None; standardize on None
            prefix = prefix or None
            self.trackNamespace(prefix, uri)
            if prefix and uri == 'http://www.w3.org/1999/xlink':
                self.decls['xmlns:' + prefix] = uri
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find(u'backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = u'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD, self.decls = self.decls, {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            for (namespace, attrlocalname), attrvalue in attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            localname = str(localname).lower()
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # record ill-formedness but keep whatever was parsed so far
            self.bozo = 1
            self.exc = exc
        # drv_libxml2 calls warning() in some cases
        warning = error
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGMLParser subclass that regenerates the HTML it parses.

    Subclasses override the handle_*/unknown_* callbacks to filter or
    rewrite markup; regenerated fragments accumulate in self.pieces and
    are joined by output().

    NOTE(review): several escape literals in this class had been corrupted
    by HTML-entity unescaping (turning e.g. replace('&#39;', "'") into a
    broken/no-op call); they are restored here to match upstream feedparser.
    """
    special = re.compile('''[<>'"]''')
    # an '&' that does not already begin a character or entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # void elements: never emit a closing tag for these
    elements_no_end_tag = set([
      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
      'source', 'track', 'wbr'
    ])

    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # output buffer of regenerated markup fragments
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # expand <tag/> to <tag></tag>, except for void elements
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    # By declaring these methods and overriding their compiled code
    # with the code from sgmllib, the original code will execute in
    # feedparser's scope instead of sgmllib's. This means that the
    # `tagfind` and `charref` regular expressions will be found as
    # they're declared above, not as they're declared in sgmllib.
    def goahead(self, i):
        pass
    goahead.func_code = sgmllib.SGMLParser.goahead.func_code

    def __parse_starttag(self, i):
        pass
    __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code

    def parse_starttag(self,i):
        j = self.__parse_starttag(i)
        if self._type == 'application/xhtml+xml':
            if j>2 and self.rawdata[j-2:j]=='/>':
                # XHTML-style empty element: synthesize the end-tag event
                self.unknown_endtag(self.lasttag)
        return j

    def feed(self, data):
        # escape '<!' sequences that do not begin a declaration, comment
        # or CDATA section, so sgmllib does not choke on them
        # (restored: the replacement had been corrupted to a no-op r'<!\1')
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        # decode numeric quote references that confuse sgmllib's
        # attribute parsing (restored from corrupted literals)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            # Python 3: poison the encoding so later decodes fail loudly
            self.encoding = self.encoding + u'_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and isinstance(data, unicode):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)

    def normalize_attrs(self, attrs):
        if not attrs:
            return attrs
        # utility method to be called by descendants:
        # lowercase keys (deduping), lowercase rel/type values, sort
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # re-escape markup-significant characters in attribute
                # values (restored from corrupted self-replacing literals)
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if not isinstance(value, unicode):
                    value = value.decode(self.encoding, 'ignore')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs = strattrs.encode(self.encoding)
                except (UnicodeEncodeError, LookupError):
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%s%s />' % (tag, strattrs))
        else:
            self.pieces.append('<%s%s>' % (tag, strattrs))

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%s>" % tag)

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        ref = ref.lower()
        if ref.startswith('x'):
            value = int(ref[1:], 16)
        else:
            value = int(ref)
        if value in _cp1252:
            # remap cp1252 "smart" characters to their Unicode codepoints
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%s;' % ref)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if ref in name2codepoint or ref == 'apos':
            self.pieces.append('&%s;' % ref)
        else:
            # unknown entity: escape the ampersand itself
            # (restored: had been corrupted to the no-op '&%s')
            self.pieces.append('&amp;%s' % ref)

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%s-->' % text)

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%s>' % text)

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%s>' % text)

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1

    def convert_charref(self, name):
        return '&#%s;' % name

    def convert_entityref(self, name):
        return '&%s;' % name

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])

    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
            return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """sgmllib-based fallback parser for feeds that are not well-formed XML.

    NOTE(review): the literals in decodeEntities had been corrupted by
    HTML-entity unescaping into self-replacing no-ops; they are restored
    here to the upstream feedparser behavior of normalizing numeric
    character references to named entities (and fully decoding them for
    non-XML content types).
    """
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities

    def decodeEntities(self, element, data):
        # normalize numeric references for <, >, &, " and ' to named entities
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            # non-XML content: decode the named entities to literal characters
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

    def strattrs(self, attrs):
        # serialize (name, value) pairs, escaping embedded double quotes
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML reprocessor that resolves known relative-URI attributes
    against a fixed base URI."""
    # (tag, attribute) pairs whose values may contain relative URIs
    relative_uris = set([('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src'),
                     ('video', 'poster')])

    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # join against the base and reject unacceptable schemes
        return _makeSafeAbsoluteURI(self.baseuri, uri.strip())

    def unknown_starttag(self, tag, attrs):
        # resolve only the attributes listed in relative_uris
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Rewrite relative URIs in htmlSource against baseURI and return the result.

    Returns the input unchanged when no SGML parser is available."""
    if not _SGML_AVAILABLE:
        return htmlSource
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join base and rel into an absolute URI, returning u'' unless the
    result uses a scheme listed in ACCEPTABLE_URI_SCHEMES.

    With an empty ACCEPTABLE_URI_SCHEMES set, any scheme is accepted."""
    # bail if ACCEPTABLE_URI_SCHEMES is empty
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if not rel:
        # no relative part: just vet the base URI's own scheme
        try:
            scheme = urlparse.urlparse(base)[0]
        except ValueError:
            return u''
        if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
            return base
        return u''
    absolute = _urljoin(base, rel)
    if absolute.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
        return u''
    return absolute
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Whitelist-based HTML sanitizer: drops unacceptable elements,
    attributes, URI schemes and CSS properties while regenerating the
    remaining markup.  Inline MathML and SVG are allowed only while the
    corresponding mathmlOK/svgOK counters are nonzero."""
    acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])

    acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
        'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
        'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
        'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
        'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
        'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
        'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
        'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
        'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
        'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
        'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
        'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
        'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
        'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang'])

    # elements whose entire content (not just the tags) must be dropped
    unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])

    acceptable_css_properties = set(['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width'])

    # survey of common keywords found in feeds
    acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow'])

    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
        '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')

    mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
        'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
        'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
        'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none', 'semantics'])

    mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
        'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
        'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
        'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
        'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
        'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])

    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
        'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
        'svg', 'switch', 'text', 'title', 'tspan', 'use'])

    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
        'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
        'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
        'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
        'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
        'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
        'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
        'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
        'min', 'name', 'offset', 'opacity', 'orient', 'origin',
        'overline-position', 'overline-thickness', 'panose-1', 'path',
        'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
        'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
        'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
        'stop-color', 'stop-opacity', 'strikethrough-position',
        'strikethrough-thickness', 'stroke', 'stroke-dasharray',
        'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
        'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
        'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
        'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
        'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
        'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
        'y2', 'zoomAndPan'])

    # lazily-built maps from lowercased SVG names back to camelCase originals
    svg_attr_map = None
    svg_elem_map = None

    acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity'])

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # depth counters for suppressed content and for MathML/SVG islands
        self.unacceptablestack = 0
        self.mathmlOK = 0
        self.svgOK = 0

    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1

            # add implicit namespaces to html5 inline svg/mathml
            if self._type.endswith('html'):
                if not dict(attrs).get('xmlns'):
                    if tag=='svg':
                        attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
                    if tag=='math':
                        attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )

            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK += 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK += 1

            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])

                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            elif not tag in self.acceptable_elements:
                return

        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))

        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                # make sure the uri uses an acceptable uri scheme
                if key == u'href':
                    value = _makeSafeAbsoluteURI(value)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value:
                    clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math' and self.mathmlOK:
                    self.mathmlOK -= 1
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg' and self.svgOK:
                    self.svgOK -= 1
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # drop processing instructions entirely
        pass

    def handle_decl(self, text):
        # drop declarations entirely
        pass

    def handle_data(self, text):
        # text inside a suppressed element (script/style/applet) is dropped
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

    def sanitize_style(self, style):
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
            return ''

        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value:
                continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)

    def parse_comment(self, i, report=1):
        ret = _BaseHTMLProcessor.parse_comment(self, i, report)
        if ret >= 0:
            return ret
        # if ret == -1, this may be a malicious attempt to circumvent
        # sanitization, or a page-destroying unclosed comment
        match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
        if match:
            return match.end()
        # unclosed comment; deliberately fail to handle_data()
        return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Run htmlSource through _HTMLSanitizer and return the cleaned markup.

    Returns the input unchanged when no SGML parser is available."""
    if not _SGML_AVAILABLE:
        return htmlSource
    p = _HTMLSanitizer(encoding, _type)
    # escape CDATA openers so their contents are sanitized like ordinary
    # text (restored: the replacement had been corrupted into a no-op
    # self-replacement)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that records HTTP status instead of raising,
    annotates redirects with status/newurl, and retries basic-auth
    failures with digest auth."""
    def http_error_default(self, req, fp, code, msg, headers):
        # The default implementation just raises HTTPError.
        # Forget that.
        fp.status = code
        return fp

    def http_error_301(self, req, fp, code, msg, hdrs):
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
                                                            code, msg, hdrs)
        # expose the redirect status and final URL to the caller
        result.status = code
        result.newurl = result.geturl()
        return result

    # The default implementations in urllib2.HTTPRedirectHandler
    # are identical, so hardcoding a http_error_301 call above
    # won't affect anything
    http_error_300 = http_error_301
    http_error_302 = http_error_301
    http_error_303 = http_error_301
    http_error_307 = http_error_301

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        if base64 is None or 'Authorization' not in req.headers \
                          or 'WWW-Authenticate' not in headers:
            return self.http_error_default(req, fp, code, msg, headers)
        auth = _base64decode(req.headers['Authorization'].split(' ')[1])
        user, passw = auth.split(':')
        realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
        self.add_password(realm, host, user, passw)
        retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
        self.reset_retry_count()
        return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.

    If request_headers is supplied, it is a dictionary of HTTP request
    headers that will override the values generated by FeedParser.
    """
    # Already a file-like object: hand it back untouched.
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if isinstance(url_file_stream_or_string, basestring) \
       and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme: 'feed:http://x' -> 'http://x',
        # 'feed://x' -> 'http://x'
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # Test for inline user:password credentials for HTTP basic auth;
        # if present, strip them from the URL and carry them as a
        # base64-encoded Authorization value instead.
        auth = None
        if base64 and not url_file_stream_or_string.startswith('ftp:'):
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()
        # iri support: IDNA-encode a non-ASCII hostname
        if isinstance(url_file_stream_or_string, unicode):
            url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            # NOTE(review): this closes the opener object, not the response
            # returned above -- confirm the response stays readable.
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except (IOError, UnicodeEncodeError, TypeError):
        # if url_file_stream_or_string is a unicode object that
        # cannot be converted to the encoding returned by
        # sys.getfilesystemencoding(), a UnicodeEncodeError
        # will be thrown
        # If url_file_stream_or_string is a string that contains NULL
        # (such as an XML document encoded in UTF-32), TypeError will
        # be thrown.
        pass
    # treat url_file_stream_or_string as string
    if isinstance(url_file_stream_or_string, unicode):
        return _StringIO(url_file_stream_or_string.encode('utf-8'))
    return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
    """Return *url* with a non-ASCII hostname rewritten in IDN notation.

    Must only be called with a unicode string.  If the host portion is
    already pure ASCII, the URL is returned unchanged; otherwise each
    label of the hostname is IDNA-encoded and the URL reassembled.
    """
    pieces = list(urlparse.urlsplit(url))
    try:
        pieces[1].encode('ascii')
    except UnicodeEncodeError:
        # Split off an optional port, IDNA-encode each dot-separated
        # label of the hostname, then stitch everything back together.
        hostparts = pieces[1].rsplit(':', 1)
        port = hostparts.pop() if len(hostparts) == 2 else u''
        encoded_labels = [label.encode('idna').decode('utf-8')
                          for label in hostparts[0].split('.')]
        pieces[1] = '.'.join(encoded_labels)
        if port:
            pieces[1] = pieces[1] + ':' + port
        return urlparse.urlunsplit(pieces)
    else:
        # Host is plain ASCII; nothing to convert.
        return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Build a urllib2.Request carrying feedparser's standard headers.

    Adds User-Agent, If-None-Match (etag), If-Modified-Since (modified),
    Referer, Accept-encoding, basic-auth Authorization, Accept, and A-IM
    headers, plus any caller-supplied request_headers (applied last, so
    they override the generated values).
    """
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    # `modified` may be a date string (parsed here), a datetime, or an
    # already-built 9-tuple in GMT.
    if isinstance(modified, basestring):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        # modified[6] is tm_wday (Monday == 0), modified[1] is tm_mon (1-12).
        request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
    if referrer:
        request.add_header('Referer', referrer)
    # Advertise only the compression schemes we can actually decode.
    if gzip and zlib:
        request.add_header('Accept-encoding', 'gzip, deflate')
    elif gzip:
        request.add_header('Accept-encoding', 'gzip')
    elif zlib:
        request.add_header('Accept-encoding', 'deflate')
    else:
        request.add_header('Accept-encoding', '')
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
def _parse_psc_chapter_start(start):
    """Parse a Podlove Simple Chapters start time (``[HH:]MM:SS[.mmm]``).

    Returns a ``datetime.timedelta`` offset from the start of the
    episode, or None if *start* does not match the expected format.
    """
    # Fix: the original bound the match object to `m` and then immediately
    # shadowed it with the minutes group -- error-prone; use distinct names.
    match = re.match(r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$', start)
    if match is None:
        return None
    # Groups: ('HH:' wrapper, HH, MM, SS, '.mmm' wrapper, mmm); the hour
    # and millisecond components are optional.
    _, h, m, s, _, ms = match.groups()
    hours = int(h or 0)
    minutes = int(m)
    seconds = int(s)
    millis = int(ms or 0)
    # timedelta(days, seconds, microseconds)
    return datetime.timedelta(0, hours * 3600 + minutes * 60 + seconds,
                              millis * 1000)
# Registered date-parsing callables, most recently registered first.
_date_handlers = []
def registerDateHandler(func):
    """Register a date handler function.

    *func* takes a date string and returns a 9-tuple date in GMT.
    Handlers registered later take precedence over earlier ones.
    """
    # Newest handlers are consulted first, so prepend.
    _date_handlers[0:0] = [func]
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
# Template tokens: YYYY/YY = year, MM = month, DD = day, OOO = day-of-year
# ordinal, CC = century; '-?' marks an optional separator.
# NOTE(review): 'YYYY-0MM?-?DD' (literal '0' before MM) looks suspicious --
# confirm against upstream feedparser before changing.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
# Expand each template into a full regex with named groups, and accept an
# optional time-of-day plus timezone suffix after the date part.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# List-comprehension variables leak to module scope in Python 2 (but not
# Python 3); the try/except cleanup handles both cases.
try:
    del tmpl
except NameError:
    pass
# Pre-bind the .match methods so the parser below just calls them in order.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105

    Tries each precompiled template regex in order (first match wins) and
    returns a 9-tuple in the local timezone, or None on failure.
    '''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    # The empty template matches anything with a zero-width span; treat
    # that as "no match".
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # No year in the string: default to the current (GMT) year.
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    # Fractional seconds are truncated, not rounded.
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    # NOTE(review): position 7 of this tuple is tm_yday but carries the raw
    # ordinal (0 when absent); mktime recomputes wday/yday so this appears
    # harmless -- confirm.
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the hour/minute fields so the
    # result is normalized to UTC before mktime runs.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Korean date tokens (year/month/day markers and AM/PM markers) used by
# the OnBlog and Nate date parsers below.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# OnBlog format: 'YYYY<year> MM<month> DD<day> HH:MM:SS'
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
        (_korean_year, _korean_month, _korean_day))
# Nate format: 'YYYY-MM-DD <AM|PM> H:M:S' with a 12-hour clock.
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
        (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    """Parse a string according to the OnBlog 8-bit date format.

    A matching date is rewritten as W3DTF with a fixed +09:00 (KST)
    offset and delegated to _parse_date_w3dtf; non-matching input
    yields None.
    """
    match = _korean_onblog_date_re.match(dateString)
    if not match:
        return
    fields = {
        'year': match.group(1),
        'month': match.group(2),
        'day': match.group(3),
        'hour': match.group(4),
        'minute': match.group(5),
        'second': match.group(6),
        'zonediff': '+09:00',
    }
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    """Parse a string according to the Nate 8-bit date format.

    Converts the Korean 12-hour AM/PM clock to 24-hour time, rewrites
    the date as W3DTF with a fixed +09:00 (KST) offset, and delegates
    to _parse_date_w3dtf.  Non-matching input yields None.
    """
    match = _korean_nate_date_re.match(dateString)
    if not match:
        return
    # Shift PM hours into 24-hour time and left-pad to two digits.
    hour24 = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour24 += 12
    hour = str(hour24).zfill(2)
    fields = {
        'year': match.group(1),
        'month': match.group(2),
        'day': match.group(3),
        'hour': hour,
        'minute': match.group(6),
        'second': match.group(7),
        'zonediff': '+09:00',
    }
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
# Maps Greek month / weekday abbreviations to their English RFC 822
# equivalents; used by _parse_date_greek below.
_greek_months = \
  { \
    u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
    u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
    u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
    u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
    u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
    u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
    u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
    u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
    u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
    u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
    u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
    u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
    u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
    u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
    u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
    u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
    u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
    u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
    u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }
_greek_wdays = \
  { \
    u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
    u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
    u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
    u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
    u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
    u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
    u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# RFC-822-like layout: '<wday>, DD <month> YYYY HH:MM:SS <zone>'
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    """Parse a string according to a Greek 8-bit date format.

    The Greek weekday and month names are translated to English and the
    result is re-parsed as an RFC 822 date.  Unknown names raise
    KeyError, which the _parse_date driver treats as a failed parse.
    """
    match = _greek_date_format_re.match(dateString)
    if not match:
        return
    fields = {
        'wday': _greek_wdays[match.group(1)],
        'day': match.group(2),
        'month': _greek_months[match.group(3)],
        'year': match.group(4),
        'hour': match.group(5),
        'minute': match.group(6),
        'second': match.group(7),
        'zonediff': match.group(8),
    }
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % fields
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# Maps lowercase Hungarian month names to two-digit month numbers;
# used by _parse_date_hungarian below.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    # NOTE(review): 'februári' -- expected 'február'; confirm against
    # upstream feedparser before changing.
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    # NOTE(review): 'máujus' looks misspelled ('május' expected) -- confirm.
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }
# Layout: 'YYYY-<monthname>-D[D]TH[H]:MM(+|-)HH:MM'
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    """Parse a string according to a Hungarian 8-bit date format.

    The Hungarian month name is mapped to its two-digit number, the day
    and hour are zero-padded, and the date is rewritten as W3DTF before
    delegating to _parse_date_w3dtf.  Returns None on no match or an
    unknown month name.
    """
    match = _hungarian_date_format_re.match(dateString)
    if not match or match.group(2) not in _hungarian_months:
        return None
    fields = {
        'year': match.group(1),
        'month': _hungarian_months[match.group(2)],
        'day': match.group(3).zfill(2),
        'hour': match.group(4).zfill(2),
        'minute': match.group(5),
        'zonediff': match.group(6),
    }
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % fields
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# Timezone abbreviations (RFC 822 zones plus single-letter military
# zones) mapped to their offset from UTC in hours.
timezonenames = {
    'ut': 0, 'gmt': 0, 'z': 0,
    'adt': -3, 'ast': -4, 'at': -4,
    'edt': -4, 'est': -5, 'et': -5,
    'cdt': -5, 'cst': -6, 'ct': -6,
    'mdt': -6, 'mst': -7, 'mt': -7,
    'pdt': -7, 'pst': -8, 'pt': -8,
    'a': -1, 'n': 1,
    'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
    """Parse a W3DTF (ISO 8601 profile) or MSSQL-style datetime string.

    Returns a 9-tuple in UTC (via utctimetuple), or None if the string
    cannot be parsed.
    """
    if not datestr.strip():
        return None
    # Split date from time on 'T' (W3DTF) or on whitespace (MSSQL style).
    parts = datestr.lower().split('t')
    if len(parts) == 1:
        # This may be a date only, or may be an MSSQL-style date
        parts = parts[0].split()
        if len(parts) == 1:
            # Treat this as a date only
            parts.append('00:00:00z')
    elif len(parts) > 2:
        return None
    date = parts[0].split('-', 2)
    if not date or len(date[0]) != 4:
        return None
    # Ensure that `date` has 3 elements. Using '1' sets the default
    # month to January and the default day to the 1st of the month.
    date.extend(['1'] * (3 - len(date)))
    try:
        year, month, day = [int(i) for i in date]
    except ValueError:
        # `date` may have more than 3 elements or may contain
        # non-integer strings.
        return None
    # A trailing 'z' means UTC; move it into its own timezone slot.
    if parts[1].endswith('z'):
        parts[1] = parts[1][:-1]
        parts.append('z')
    # Append the numeric timezone offset, if any, to parts.
    # If this is an MSSQL-style date then parts[2] already contains
    # the timezone information, so `append()` will not affect it.
    # Add 1 to each value so that if `find()` returns -1 it will be
    # treated as False.
    loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
    loc = loc - 1
    parts.append(parts[1][loc:])
    parts[1] = parts[1][:loc]
    time = parts[1].split(':', 2)
    # Ensure that time has 3 elements. Using '0' means that the
    # minutes and seconds, if missing, will default to 0.
    time.extend(['0'] * (3 - len(time)))
    tzhour = 0
    tzmin = 0
    if parts[2][:1] in ('-', '+'):
        try:
            tzhour = int(parts[2][1:3])
            tzmin = int(parts[2][4:])
        except ValueError:
            return None
        if parts[2].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        # Named zone (or 'z'); unknown names fall back to UTC.
        tzhour = timezonenames.get(parts[2], 0)
    try:
        hour, minute, second = [int(float(i)) for i in time]
    except ValueError:
        return None
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(date):
    """Parse RFC 822 dates and times
    http://tools.ietf.org/html/rfc822#section-5

    There are some formatting differences that are accounted for:
    1. Years may be two or four digits.
    2. The month and day can be swapped.
    3. Additional timezone names are supported.
    4. A default time and timezone are assumed if only a date is present.

    Returns a 9-tuple in UTC, or None if the string cannot be parsed.
    """
    daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
    months = {
        'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
        'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
    }
    parts = date.lower().split()
    if len(parts) < 5:
        # Assume that the time and timezone are missing
        parts.extend(('00:00:00', '0000'))
    # Remove the day name
    if parts[0][:3] in daynames:
        parts = parts[1:]
    if len(parts) < 5:
        # If there are still fewer than five parts, there's not enough
        # information to interpret this
        return None
    try:
        day = int(parts[0])
    except ValueError:
        # Check if the day and month are swapped
        if months.get(parts[0][:3]):
            try:
                day = int(parts[1])
            except ValueError:
                return None
            else:
                # Swap succeeded; move the month name into its usual slot.
                parts[1] = parts[0]
        else:
            return None
    month = months.get(parts[1][:3])
    if not month:
        return None
    try:
        year = int(parts[2])
    except ValueError:
        return None
    # Normalize two-digit years:
    # Anything in the 90's is interpreted as 1990 and on
    # Anything 89 or less is interpreted as 2089 or before
    if len(parts[2]) <= 2:
        year += (1900, 2000)[year < 90]
    # Pad missing minutes/seconds with 0 so exactly three fields unpack.
    timeparts = parts[3].split(':')
    timeparts = timeparts + ([0] * (3 - len(timeparts)))
    try:
        (hour, minute, second) = map(int, timeparts)
    except ValueError:
        return None
    tzhour = 0
    tzmin = 0
    # Strip 'Etc/' from the timezone
    if parts[4].startswith('etc/'):
        parts[4] = parts[4][4:]
    # Normalize timezones that start with 'gmt':
    # GMT-05:00 => -0500
    # GMT => GMT
    if parts[4].startswith('gmt'):
        parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
    # Handle timezones like '-0500', '+0500', and 'EST'
    if parts[4] and parts[4][0] in ('-', '+'):
        try:
            tzhour = int(parts[4][1:3])
            tzmin = int(parts[4][3:])
        except ValueError:
            return None
        if parts[4].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        # Named zone; unknown names fall back to UTC.
        tzhour = timezonenames.get(parts[4], 0)
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_rfc822)
# Lowercase English month abbreviations, index + 1 == month number.
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
           'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
    """Parse asctime-style dates (e.g. ``Sun Jan  4 16:29:06 2004``).

    The day name is discarded; the month name and day are rewritten as
    zero-padded numbers so a single strptime format handles the rest.
    Returns a 9-tuple with the DST flag forced to 0.
    """
    _, monthname, daynum, rest = dt.split(None, 3)
    # Rebuild as 'MM DD HH:MM:SS YYYY' for strptime.
    prefix = '%02i %02i ' % (_months.index(monthname.lower()) + 1, int(daynum))
    parsed = time.strptime(prefix + rest, '%m %d %H:%M:%S %Y')
    return parsed[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
    """Parse a Perforce-style date: ``yyyy/mm/dd hh:mm:ss TTT``.

    Example: ``Fri, 2006/09/15 08:19:53 EDT``.  The date is rewritten in
    RFC 822 form and parsed with the rfc822 module.  Returns a 9-tuple
    in GMT, or None when the string does not match.
    """
    # Fri, 2006/09/15 08:19:53 EDT
    pattern = re.compile(
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    match = pattern.search(aDateString)
    if match is None:
        return None
    dow, year, month, day, hour, minute, second, tz = match.groups()
    monthnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    dateString = "%s, %s %s %s %s:%s:%s %s" % (
        dow, day, monthnames[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
    """Parse a variety of date formats into a 9-tuple in GMT.

    Each registered handler is tried in turn; the first one returning a
    well-formed 9-tuple wins.  Handler errors (KeyError, OverflowError,
    ValueError) mean "this handler cannot parse it" and are skipped.
    Returns None when no handler succeeds or the input is empty.
    """
    if not dateString:
        return None
    for handler in _date_handlers:
        try:
            candidate = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            continue
        if candidate and len(candidate) == 9:
            return candidate
    return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
# Two NUL bytes; used to distinguish UTF-16 BOMs from UTF-32 BOMs.
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
# This is a text pattern (no _s2bytes): convert_to_utf8 applies it to
# already-decoded data.
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
# This one is a bytes pattern: it is matched against raw document bytes.
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
    '''Detect and convert the character encoding to UTF-8.

    http_headers is a dictionary
    data is a raw string (not Unicode)

    Returns a tuple (data, rfc3023_encoding, error): `data` re-encoded
    as UTF-8 with a rewritten XML declaration (unchanged if no candidate
    encoding worked), the encoding actually used (empty string when
    unknown), and either None or an exception instance describing what
    went wrong (NonXMLContentType, CharacterEncodingOverride, or
    CharacterEncodingUnknown).
    '''
    # This is so much trickier than it sounds, it's not even funny.
    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    # is application/xml, application/*+xml,
    # application/xml-external-parsed-entity, or application/xml-dtd,
    # the encoding given in the charset parameter of the HTTP Content-Type
    # takes precedence over the encoding given in the XML prefix within the
    # document, and defaults to 'utf-8' if neither are specified. But, if
    # the HTTP Content-Type is text/xml, text/*+xml, or
    # text/xml-external-parsed-entity, the encoding given in the XML prefix
    # within the document is ALWAYS IGNORED and only the encoding given in
    # the charset parameter of the HTTP Content-Type header should be
    # respected, and it defaults to 'us-ascii' if not specified.
    # Furthermore, discussion on the atom-syntax mailing list with the
    # author of RFC 3023 leads me to the conclusion that any document
    # served with a Content-Type of text/* and no charset parameter
    # must be treated as us-ascii. (We now do this.) And also that it
    # must always be flagged as non-well-formed. (We now do this too.)
    # If Content-Type is unspecified (input was local file or non-HTTP source)
    # or unrecognized (server just got it totally wrong), then go by the
    # encoding given in the XML prefix of the document and default to
    # 'iso-8859-1' as per the HTTP specification (RFC 2616).
    # Then, assuming we didn't find a character encoding in the HTTP headers
    # (and the HTTP Content-type allowed us to look in the body), we need
    # to sniff the first few bytes of the XML data and try to determine
    # whether the encoding is ASCII-compatible. Section F of the XML
    # specification shows the way here:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # If the sniffed encoding is not ASCII-compatible, we need to make it
    # ASCII compatible so that we can sniff further into the XML declaration
    # to find the encoding attribute, which will tell us the true encoding.
    # Of course, none of this guarantees that we will be able to parse the
    # feed in the declared character encoding (assuming it was declared
    # correctly, which many are not). iconv_codec can help a lot;
    # you should definitely install it if you can.
    # http://cjkpython.i18n.org/
    bom_encoding = u''
    xml_encoding = u''
    rfc3023_encoding = u''
    # Look at the first few bytes of the document to guess what
    # its encoding may be. We only need to decode enough of the
    # document that we can use an ASCII-compatible regular
    # expression to search for an XML encoding declaration.
    # The heuristic follows the XML specification, section F:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Check for BOMs first.
    if data[:4] == codecs.BOM_UTF32_BE:
        bom_encoding = u'utf-32be'
        data = data[4:]
    elif data[:4] == codecs.BOM_UTF32_LE:
        bom_encoding = u'utf-32le'
        data = data[4:]
    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16be'
        data = data[2:]
    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16le'
        data = data[2:]
    elif data[:3] == codecs.BOM_UTF8:
        bom_encoding = u'utf-8'
        data = data[3:]
    # Check for the characters '<?xm' in several encodings.
    elif data[:4] == EBCDIC_MARKER:
        bom_encoding = u'cp037'
    elif data[:4] == UTF16BE_MARKER:
        bom_encoding = u'utf-16be'
    elif data[:4] == UTF16LE_MARKER:
        bom_encoding = u'utf-16le'
    elif data[:4] == UTF32BE_MARKER:
        bom_encoding = u'utf-32be'
    elif data[:4] == UTF32LE_MARKER:
        bom_encoding = u'utf-32le'
    # Transcode just enough of the document to UTF-8 so the bytes-level
    # regex below can find the encoding attribute.
    tempdata = data
    try:
        if bom_encoding:
            tempdata = data.decode(bom_encoding).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # feedparser recognizes UTF-32 encodings that aren't
        # available in Python 2.4 and 2.5, so it's possible to
        # encounter a LookupError during decoding.
        xml_encoding_match = None
    else:
        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # Normalize the xml_encoding if necessary.
        if bom_encoding and (xml_encoding in (
            u'u16', u'utf-16', u'utf16', u'utf_16',
            u'u32', u'utf-32', u'utf32', u'utf_32',
            u'iso-10646-ucs-2', u'iso-10646-ucs-4',
            u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
        )):
            xml_encoding = bom_encoding
    # Find the HTTP Content-Type and, hopefully, a character
    # encoding provided by the server. The Content-Type is used
    # to choose the "correct" encoding among the BOM encoding,
    # XML declaration encoding, and HTTP encoding, following the
    # heuristic defined in RFC 3023.
    http_content_type = http_headers.get('content-type') or ''
    http_content_type, params = cgi.parse_header(http_content_type)
    http_encoding = params.get('charset', '').replace("'", "")
    if not isinstance(http_encoding, unicode):
        http_encoding = http_encoding.decode('utf-8', 'ignore')
    acceptable_content_type = 0
    application_content_types = (u'application/xml', u'application/xml-dtd',
                                 u'application/xml-external-parsed-entity')
    text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith(u'application/') and
        http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith(u'text/') and
          http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_content_type.startswith(u'text/'):
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_headers and 'content-type' not in http_headers:
        rfc3023_encoding = xml_encoding or u'iso-8859-1'
    else:
        rfc3023_encoding = xml_encoding or u'utf-8'
    # gb18030 is a superset of gb2312, so always replace gb2312
    # with gb18030 for greater compatibility.
    if rfc3023_encoding.lower() == u'gb2312':
        rfc3023_encoding = u'gb18030'
    if xml_encoding.lower() == u'gb2312':
        xml_encoding = u'gb18030'
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    error = None
    if http_headers and (not acceptable_content_type):
        if 'content-type' in http_headers:
            msg = '%s is not an XML media type' % http_headers['content-type']
        else:
            msg = 'no Content-type specified'
        error = NonXMLContentType(msg)
    # determine character encoding
    known_encoding = 0
    chardet_encoding = None
    tried_encodings = []
    if chardet:
        # chardet is optional; when present, its guess is tried after the
        # spec-derived candidates but before the hard-coded fallbacks.
        chardet_encoding = chardet.detect(data)['encoding']
        if not chardet_encoding:
            chardet_encoding = ''
        if not isinstance(chardet_encoding, unicode):
            chardet_encoding = unicode(chardet_encoding, 'ascii', 'ignore')
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
                              chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = data.decode(proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = 1
            # Update the encoding in the opening XML processing instruction.
            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
            if RE_XML_DECLARATION.search(data):
                data = RE_XML_DECLARATION.sub(new_declaration, data)
            else:
                data = new_declaration + u'\n' + data
            data = data.encode('utf-8')
            break
    # if still no luck, give up
    if not known_encoding:
        error = CharacterEncodingUnknown(
            'document encoding unknown, I tried ' +
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
            (rfc3023_encoding, xml_encoding))
        rfc3023_encoding = u''
    elif proposed_encoding != rfc3023_encoding:
        error = CharacterEncodingOverride(
            'document declared as %s, but parsed as %s' %
            (rfc3023_encoding, proposed_encoding))
        rfc3023_encoding = proposed_encoding
    return data, rfc3023_encoding, error
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "³"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
    '''Strips and replaces the DOCTYPE, returns (rss_version, data, safe_entities)

    rss_version may be 'rss091n' (Netscape DOCTYPE found) or None
    data is the same XML document with a replaced DOCTYPE
    safe_entities is a dict mapping the names of whitelisted ENTITY
    declarations to their replacement text (for the loose parser)
    '''
    # Divide the document into two groups by finding the location
    # of the first element that doesn't begin with '<?' or '<!'.
    start = re.search(_s2bytes('<\w'), data)
    # If no element is found, start becomes -1, so head ends up empty
    # (data[:0]) and the whole input stays in `data`.
    start = start and start.start() or -1
    head, data = data[:start+1], data[start+1:]
    # Save and then remove all of the ENTITY declarations.
    entity_results = RE_ENTITY_PATTERN.findall(head)
    head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
    # Find the DOCTYPE declaration and check the feed type.
    doctype_results = RE_DOCTYPE_PATTERN.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if _s2bytes('netscape') in doctype.lower():
        version = u'rss091n'
    else:
        version = None
    # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
    replacement = _s2bytes('')
    if len(doctype_results) == 1 and entity_results:
        match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
        # NOTE: Python 2 semantics — filter() returns a (possibly empty) list
        # here, so the truthiness test below is meaningful.
        safe_entities = filter(match_safe_entities, entity_results)
        if safe_entities:
            replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
                + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
                + _s2bytes('>\n]>')
    data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
    # Precompute the safe entities for the loose parser.
    safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
                         for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
    return version, data, safe_entities
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
    # Dispatch a GML posList string to the right GeoRSS geometry parser.
    # Returns a geometry dict, or None for unsupported geometry types.
    if geom_type == 'linestring':
        return _parse_georss_line(value, swap, dims)
    if geom_type == 'polygon':
        outer_ring = _parse_georss_line(value, swap, dims)
        return {'type': u'Polygon', 'coordinates': (outer_ring['coordinates'],)}
    return None
def _gen_georss_coords(value, swap=True, dims=2):
    # A generator of (lon, lat) pairs from a string of encoded GeoRSS
    # coordinates. Converts to floats and swaps order.
    # Python 2 only: itertools.imap and the .next bound method do not exist
    # on Python 3.
    latlons = itertools.imap(float, value.strip().replace(',', ' ').split())
    nxt = latlons.next
    while True:
        # [::-1] reverses the pair when swap is truthy, [::1] keeps it.
        # The loop terminates when nxt() raises StopIteration, which ends
        # the generator under Python 2 semantics.
        # NOTE(review): under PEP 479 (Python 3.7+) this StopIteration would
        # become a RuntimeError — would need an explicit `return` if ported.
        t = [nxt(), nxt()][::swap and -1 or 1]
        if dims == 3:
            t.append(nxt())
        yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
    # A point is a single latitude-longitude pair separated by whitespace
    # (comma separators are tolerated as well). Returns a Point dict, or
    # None when the value cannot be parsed.
    try:
        pairs = list(_gen_georss_coords(value, swap, dims))
        point = pairs[0]
    except (IndexError, ValueError):
        return None
    return {u'type': u'Point', u'coordinates': point}
def _parse_georss_line(value, swap=True, dims=2):
    # A line is a whitespace separated series of latitude-longitude pairs in
    # the WGS84 coordinate reference system. Returns a LineString dict, or
    # None when the value cannot be parsed.
    try:
        points = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    return {u'type': u'LineString', u'coordinates': points}
def _parse_georss_polygon(value, swap=True, dims=2):
    # A polygon is a whitespace separated ring of latitude-longitude pairs.
    # A valid ring has at least four pairs, the last identical to the first
    # (i.e. at least three distinct points). Returns a Polygon dict, or None
    # on a parse error or a too-short ring.
    try:
        boundary = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    if len(boundary) < 4:
        return None
    return {u'type': u'Polygon', u'coordinates': (boundary,)}
def _parse_georss_box(value, swap=True, dims=2):
    # A bounding box is two whitespace separated latitude-longitude pairs:
    # the lower corner first, then the upper corner. Returns a Box dict, or
    # None when the value cannot be parsed.
    try:
        corners = tuple(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    return {u'type': u'Box', u'coordinates': corners}
# end geospatial parsers
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    Returns a FeedParserDict with (at least) the keys 'feed', 'entries' and
    'bozo'; 'bozo' is set to 1 whenever any error was swallowed along the way,
    with the exception stored in 'bozo_exception'.

    NOTE: this function uses Python 2 syntax ("except Exception, e") and APIs.
    '''
    if handlers is None:
        handlers = []
    if request_headers is None:
        request_headers = {}
    if response_headers is None:
        response_headers = {}
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        # Any failure to open/read the resource is reported via bozo rather
        # than raised; parsing continues with data=None and bails out below.
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None
    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)
    # lowercase all of the HTTP headers for comparisons per RFC 2616
    if 'headers' in result:
        http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
    else:
        http_headers = {}
    # if feed is gzip-compressed, decompress it
    if f and data and http_headers:
        if gzip and 'gzip' in http_headers.get('content-encoding', ''):
            try:
                attempts = 0
                # Some servers double-compress; unwrap while the gzip magic
                # bytes (1f 8b) are still present, at most 3 times.
                while(len(data) > 1 and data[0] == '\x1f' and data[1] == '\x8b' and attempts < 3):
                    attempts += 1
                    data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except (IOError, struct.error), e:
                # IOError can occur if the gzip header is bad.
                # struct.error can occur if the data is damaged.
                result['bozo'] = 1
                result['bozo_exception'] = e
                if isinstance(e, struct.error):
                    # A gzip header was found but the data is corrupt.
                    # Ideally, we should re-request the feed without the
                    # 'Accept-encoding: gzip' header, but we don't.
                    data = None
        elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
            try:
                data = zlib.decompress(data)
            except zlib.error, e:
                try:
                    # The data may have no headers and no checksum.
                    data = zlib.decompress(data, -15)
                except zlib.error, e:
                    result['bozo'] = 1
                    result['bozo_exception'] = e
    # save HTTP headers
    if http_headers:
        if 'etag' in http_headers:
            etag = http_headers.get('etag', u'')
            if not isinstance(etag, unicode):
                etag = etag.decode('utf-8', 'ignore')
            if etag:
                result['etag'] = etag
        if 'last-modified' in http_headers:
            modified = http_headers.get('last-modified', u'')
            if modified:
                result['modified'] = modified
                result['modified_parsed'] = _parse_date(modified)
    if hasattr(f, 'url'):
        if not isinstance(f.url, unicode):
            result['href'] = f.url.decode('utf-8', 'ignore')
        else:
            result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()
    if data is None:
        return result
    # Stop processing if the server sent HTTP 304 Not Modified.
    if getattr(f, 'code', 0) == 304:
        result['version'] = u''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    data, result['encoding'], error = convert_to_utf8(http_headers, data)
    # Only attempt the strict SAX parse when the encoding was resolved.
    use_strict_parser = result['encoding'] and True or False
    if error is not None:
        result['bozo'] = 1
        result['bozo_exception'] = error
    result['version'], data, entities = replace_doctype(data)
    # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
    contentloc = http_headers.get('content-location', u'')
    href = result.get('href', u'')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
    baselang = http_headers.get('content-language', None)
    if not isinstance(baselang, unicode) and baselang is not None:
        baselang = baselang.decode('utf-8', 'ignore')
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        try:
            # disable downloading external doctype references, if possible
            saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
        except xml.sax.SAXNotSupportedException:
            pass
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        try:
            saxparser.parse(source)
        except xml.sax.SAXException, e:
            # Strict parse failed; record the error and fall back to the
            # loose SGML-based parser below.
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser and _SGML_AVAILABLE:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
# (Includes EPSG:4326, WGS 84.)
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
| Elandril/SickRage | lib/feedparser/feedparser.py | Python | gpl-3.0 | 156,312 | [
"NetCDF",
"VisIt"
] | c4c5acc93059df9dc71eef6be2200218ab5a5ffc5955f1d0801ad81e0bd245ce |
# -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from pypln.web.core.models import Corpus, Document
# Expose the core PyPLN models in the Django admin site using the default
# ModelAdmin options.
admin.site.register(Corpus)
admin.site.register(Document)
| flavioamieiro/pypln.web | pypln/web/core/admin.py | Python | gpl-3.0 | 899 | [
"NAMD"
] | bc69385f939d8cc62fda7b95078aaf8569dd34eea39381c3e1f7eb29cdd3edef |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import hashlib
import numpy as np
from functools import reduce
from py4j.protocol import Py4JError
import pyspark.sql.functions as F
from pyspark.sql import Row, Window
from pyspark.sql.types import IntegerType, ShortType, LongType, FloatType, DecimalType, \
DoubleType, ArrayType, DataType, StructType, StringType, StructField
from pyspark.sql.functions import col as pyspark_col, concat, udf, array, broadcast, \
lit, rank, monotonically_increasing_id, row_number, desc
from pyspark.ml import Pipeline
from pyspark.ml.feature import MinMaxScaler, VectorAssembler, Bucketizer
from zoo.orca import OrcaContext
from zoo.friesian.feature.utils import *
# Bounds of a signed 32-bit (Java/Spark IntegerType) integer. Used by
# Table.fillna to decide whether a Python int fits an int column.
JAVA_INT_MIN = -2147483648
JAVA_INT_MAX = 2147483647
class Table:
def __init__(self, df):
self.df = df
self.__column_names = self.df.schema.names
@staticmethod
def _read_parquet(paths):
if not isinstance(paths, list):
paths = [paths]
spark = OrcaContext.get_spark_session()
df = spark.read.parquet(*paths)
return df
@staticmethod
def _read_json(paths, cols):
if not isinstance(paths, list):
paths = [paths]
spark = OrcaContext.get_spark_session()
df = spark.read.json(paths)
if cols:
if isinstance(cols, list):
df = df.select(*cols)
elif isinstance(cols, str):
df = df.select(cols)
else:
raise Exception("cols should be a column name or list of column names")
return df
@staticmethod
def _read_csv(paths, delimiter=",", header=False, names=None, dtype=None):
if not isinstance(paths, list):
paths = [paths]
spark = OrcaContext.get_spark_session()
df = spark.read.options(header=header, inferSchema=True, delimiter=delimiter).csv(paths)
columns = df.columns
if names:
if not isinstance(names, list):
names = [names]
assert len(names) == len(columns), \
"names should have the same length as the number of columns"
for i in range(len(names)):
df = df.withColumnRenamed(columns[i], names[i])
tbl = Table(df)
if dtype:
if isinstance(dtype, dict):
for col, type in dtype.items():
tbl = tbl.cast(col, type)
elif isinstance(dtype, str):
tbl = tbl.cast(columns=None, dtype=dtype)
elif isinstance(dtype, list):
columns = df.columns
assert len(dtype) == len(columns), \
"dtype should have the same length as the number of columns"
for i in range(len(columns)):
tbl = tbl.cast(columns=columns[i], dtype=dtype[i])
else:
raise ValueError("dtype should be str or a list of str or dict")
return tbl.df
def _clone(self, df):
return Table(df)
def compute(self):
"""
Trigger computation of the Table.
"""
compute(self.df)
return self
def to_spark_df(self):
"""
Convert the current Table to a Spark DataFrame.
:return: The converted Spark DataFrame.
"""
return self.df
def size(self):
"""
Returns the number of rows in this Table.
:return: The number of rows in the current Table.
"""
cnt = self.df.count()
return cnt
    def broadcast(self):
        """
        Marks the Table as small enough for use in broadcast join.

        Mutates this Table in place (rebinds self.df); returns None.
        """
        self.df = broadcast(self.df)
def select(self, *cols):
"""
Select specific columns.
:param cols: str or a list of str that specifies column names. If it is '*',
select all the columns.
:return: A new Table that contains the specified columns.
"""
# If cols is None, it makes more sense to raise error
# instead of returning an empty Table.
if not cols:
raise ValueError("cols should be str or a list of str, but got None.")
return self._clone(self.df.select(*cols))
def drop(self, *cols):
"""
Returns a new Table that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: str or a list of str that specifies the name of the columns
to drop.
:return: A new Table that drops the specified column.
"""
return self._clone(self.df.drop(*cols))
    def fillna(self, value, columns):
        """
        Replace null values.

        :param value: int, long, float, string, or boolean.
               Value to replace null values with.
        :param columns: list of str, the target columns to be filled. If columns=None and value
               is int, all columns of integer type will be filled. If columns=None and value is
               long, float, str or boolean, all columns will be filled.

        :return: A new Table that replaced the null values with specified value
        """
        if columns and not isinstance(columns, list):
            columns = [columns]
        if columns:
            check_col_exists(self.df, columns)
        # Fast path: a value within the 32-bit range can use the int-typed
        # fill, but only when every target column is an int column.
        if isinstance(value, int) and JAVA_INT_MIN <= value <= JAVA_INT_MAX:
            if columns:
                col_not_int_list = list(filter(lambda x: x[0] in columns and x[1] != "int",
                                               self.df.dtypes))
                if len(col_not_int_list) == 0:
                    return self._clone(fill_na_int(self.df, value, columns))
            else:
                return self._clone(fill_na_int(self.df, value, columns))
        # Fall through: generic fill for non-int values, or when some target
        # columns are not int-typed.
        return self._clone(fill_na(self.df, value, columns))
def dropna(self, columns, how='any', thresh=None):
"""
Drops the rows containing null values in the specified columns.
:param columns: str or a list of str that specifies column names. If it is None,
it will operate on all columns.
:param how: If `how` is "any", then drop rows containing any null values in `columns`.
If `how` is "all", then drop rows only if every column in `columns` is null for
that row.
:param thresh: int, if specified, drop rows that have less than thresh non-null values.
Default is None.
:return: A new Table that drops the rows containing null values in the specified columns.
"""
return self._clone(self.df.dropna(how, thresh, subset=columns))
def distinct(self):
"""
Select the distinct rows of the Table.
:return: A new Table that only contains distinct rows.
"""
return self._clone(self.df.distinct())
def filter(self, condition):
"""
Filters the rows that satisfy `condition`. For instance, filter("col_1 == 1") will filter
the rows that has value 1 at column col_1.
:param condition: str that gives the condition for filtering.
:return: A new Table with filtered rows.
"""
return self._clone(self.df.filter(condition))
def clip(self, columns, min=None, max=None):
"""
Clips continuous values so that they are within the range [min, max]. For instance, by
setting the min value to 0, all negative values in columns will be replaced with 0.
:param columns: str or a list of str, the target columns to be clipped.
:param min: numeric, the minimum value to clip values to. Values less than this will be
replaced with this value.
:param max: numeric, the maximum value to clip values to. Values greater than this will be
replaced with this value.
:return: A new Table that replaced the value less than `min` with specified `min` and the
value greater than `max` with specified `max`.
"""
assert min is not None or max is not None, "at least one of min and max should be not None"
if columns is None:
raise ValueError("columns should be str or a list of str, but got None.")
if not isinstance(columns, list):
columns = [columns]
check_col_exists(self.df, columns)
return self._clone(clip(self.df, columns, min, max))
def log(self, columns, clipping=True):
"""
Calculates the log of continuous columns.
:param columns: str or a list of str, the target columns to calculate log.
:param clipping: boolean. Default is True, and in this case the negative values in columns
will be clipped to 0 and `log(x+1)` will be calculated. If False, `log(x)` will be
calculated.
:return: A new Table that replaced value in columns with logged value.
"""
if columns is None:
raise ValueError("columns should be str or a list of str, but got None.")
if not isinstance(columns, list):
columns = [columns]
check_col_exists(self.df, columns)
return self._clone(log_with_clip(self.df, columns, clipping))
def fill_median(self, columns):
"""
Replaces null values with the median in the specified numeric columns. Any column to be
filled should not contain only null values.
:param columns: str or a list of str that specifies column names. If it is None,
it will operate on all numeric columns.
:return: A new Table that replaces null values with the median in the specified numeric
columns.
"""
if columns and not isinstance(columns, list):
columns = [columns]
if columns:
check_col_exists(self.df, columns)
return self._clone(fill_median(self.df, columns))
def median(self, columns):
"""
Returns a new Table that has two columns, `column` and `median`, containing the column
names and the medians of the specified numeric columns.
:param columns: str or a list of str that specifies column names. If it is None,
it will operate on all numeric columns.
:return: A new Table that contains the medians of the specified columns.
"""
if columns and not isinstance(columns, list):
columns = [columns]
if columns:
check_col_exists(self.df, columns)
return self._clone(median(self.df, columns))
def merge_cols(self, columns, target):
"""
Merge the target column values as a list to a new column.
The original columns will be dropped.
:param columns: a list of str, the target columns to be merged.
:param target: str, the new column name of the merged column.
:return: A new Table that replaces columns with a new target column of merged list values.
"""
assert isinstance(columns, list), "columns must be a list of column names"
return self._clone(self.df.withColumn(target, array(columns)).drop(*columns))
def rename(self, columns):
"""
Rename columns with new column names
:param columns: dict. Name pairs. For instance, {'old_name1': 'new_name1', 'old_name2':
'new_name2'}".
:return: A new Table with new column names.
"""
assert isinstance(columns, dict), "columns should be a dictionary of {'old_name1': " \
"'new_name1', 'old_name2': 'new_name2'}"
new_df = self.df
for old_name, new_name in columns.items():
new_df = new_df.withColumnRenamed(old_name, new_name)
return self._clone(new_df)
def show(self, n=20, truncate=True):
"""
Prints the first `n` rows to the console.
:param n: int, the number of rows to show.
:param truncate: If set to True, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length `truncate` and
align cells right.
"""
self.df.show(n, truncate)
def get_stats(self, columns, aggr):
"""
Calculate the statistics of the values over the target column(s).
:param columns: str or a list of str that specifies the name(s) of the target column(s).
If columns is None, then the function will return statistics for all numeric columns.
:param aggr: str or a list of str or dict to specify aggregate functions,
min/max/avg/sum/count are supported.
If aggr is a str or a list of str, it contains the name(s) of aggregate function(s).
If aggr is a dict, the key is the column name, and the value is the aggregate
function(s).
:return: dict, the key is the column name, and the value is aggregate result(s).
"""
if columns is None:
columns = [column for column in self.columns if check_column_numeric(self.df, column)]
if not isinstance(columns, list):
columns = [columns]
check_col_exists(self.df, columns)
stats = {}
for column in columns:
if isinstance(aggr, str) or isinstance(aggr, list):
aggr_strs = aggr
elif isinstance(aggr, dict):
if column not in aggr:
raise ValueError("aggregate funtion not defined for the column {}.".
format(column))
aggr_strs = aggr[column]
else:
raise ValueError("aggr must have type str or a list or dict.")
if isinstance(aggr_strs, str):
aggr_strs = [aggr_strs]
values = []
for aggr_str in aggr_strs:
if aggr_str not in ["min", "max", "avg", "sum", "count"]:
raise ValueError("aggregate function must be one of min/max/avg/sum/count, \
but got {}.".format(aggr_str))
values.append(self.df.agg({column: aggr_str}).collect()[0][0])
stats[column] = values[0] if len(values) == 1 else values
return stats
def min(self, columns):
"""
Returns a new Table that has two columns, `column` and `min`, containing the column
names and the minimum values of the specified numeric columns.
:param columns: str or a list of str that specifies column names. If it is None,
it will operate on all numeric columns.
:return: A new Table that contains the minimum values of the specified columns.
"""
data = self.get_stats(columns, "min")
data = [(column, float(data[column])) for column in data]
schema = StructType([StructField("column", StringType(), True),
StructField("min", FloatType(), True)])
spark = OrcaContext.get_spark_session()
return self._clone(spark.createDataFrame(data, schema))
def max(self, columns):
"""
Returns a new Table that has two columns, `column` and `max`, containing the column
names and the maximum values of the specified numeric columns.
:param columns: str or a list of str that specifies column names. If it is None,
it will operate on all numeric columns.
:return: A new Table that contains the maximum values of the specified columns.
"""
data = self.get_stats(columns, "max")
data = [(column, float(data[column])) for column in data]
schema = StructType([StructField("column", StringType(), True),
StructField("max", FloatType(), True)])
spark = OrcaContext.get_spark_session()
return self._clone(spark.createDataFrame(data, schema))
def to_list(self, column):
"""
Convert all values of the target column to a list.
Only call this if the Table is small enough.
:param column: str, specifies the name of target column.
:return: list, contains all values of the target column.
"""
if not isinstance(column, str):
raise ValueError("Column must have type str.")
check_col_exists(self.df, [column])
return self.df.select(column).rdd.flatMap(lambda x: x).collect()
def to_dict(self):
"""
Convert the Table to a dictionary.
Only call this if the Table is small enough.
:return: dict, the key is the column name, and the value is the list containing
all values in the corresponding column.
"""
rows = [list(row) for row in self.df.collect()]
result = {}
for i, column in enumerate(self.columns):
result[column] = [row[i] for row in rows]
return result
def add(self, columns, value=1):
"""
Increase all of values of the target numeric column(s) by a constant value.
:param columns: str or a list of str, the target columns to be increased.
:param value: numeric (int/float/double/short/long), the constant value to be added.
:return: A new Table with updated numeric values on specified columns.
"""
if columns is None:
raise ValueError("Columns should be str or a list of str, but got None")
if not isinstance(columns, list):
columns = [columns]
check_col_exists(self.df, columns)
new_df = self.df
for column in columns:
if new_df.schema[column].dataType not in [IntegerType(), ShortType(),
LongType(), FloatType(),
DecimalType(), DoubleType()]:
raise ValueError("Column type should be numeric, but have type {} \
for column {}".format(new_df.schema[column].dataType, column))
new_df = new_df.withColumn(column, pyspark_col(column) + lit(value))
return self._clone(new_df)
    @property
    def columns(self):
        """
        Get column names of the Table.

        Returns the list captured by __init__ from df.schema.names, so it
        does not reflect later changes made directly to self.df.

        :return: A list of strings that specify column names.
        """
        return self.__column_names
def sample(self, fraction, replace=False, seed=None):
"""
Return a sampled subset of Table.
:param fraction: float, fraction of rows to generate, should be within the
range [0, 1].
:param replace: allow or disallow sampling of the same row more than once.
:param seed: seed for sampling.
:return: A new Table with sampled rows.
"""
return self._clone(self.df.sample(withReplacement=replace, fraction=fraction, seed=seed))
def ordinal_shuffle_partition(self):
"""
Shuffle each partition of the Table by adding a random ordinal column for each row and sort
by this ordinal column within each partition.
:return: A new Table with shuffled partitions.
"""
return self._clone(ordinal_shuffle_partition(self.df))
    def write_parquet(self, path, mode="overwrite"):
        """
        Write the Table to Parquet file.

        Delegates to the module-level write_parquet helper.

        :param path: str. The path to the Parquet file.
        :param mode: str. One of "append", "overwrite", "error" or "ignore".
               append: Append contents to the existing data.
               overwrite: Overwrite the existing data.
               error: Throw an exception if the data already exists.
               ignore: Silently ignore this operation if data already exists.
        """
        write_parquet(self.df, path, mode)
def cast(self, columns, dtype):
"""
Cast columns to the specified type.
:param columns: str or a list of str that specifies column names.
If it is None, then cast all of the columns.
:param dtype: str ("string", "boolean", "int", "long", "short", "float", "double")
that specifies the data type.
:return: A new Table that casts all of the specified columns to the specified type.
"""
if columns is None:
columns = self.df.columns
elif not isinstance(columns, list):
columns = [columns]
check_col_exists(self.df, columns)
valid_types = ["str", "string", "bool", "boolean", "int",
"integer", "long", "short", "float", "double"]
if not (isinstance(dtype, str) and (dtype in valid_types)) \
and not isinstance(dtype, DataType):
raise ValueError(
"dtype should be string, boolean, int, long, short, float, double.")
transform_dict = {"str": "string", "bool": "boolean", "integer": "int"}
dtype = transform_dict[dtype] if dtype in transform_dict else dtype
df_cast = self._clone(self.df)
for i in columns:
df_cast.df = df_cast.df.withColumn(i, pyspark_col(i).cast(dtype))
return df_cast
    def write_csv(self, path, mode="overwrite", header=True, num_partitions=None):
        """
        Write the Table to csv file.

        :param path: str. The path to the csv file.
        :param mode: str. One of "append", "overwrite", "error" or "ignore".
               append: Append the contents of this Table to the existing data.
               overwrite: Overwrite the existing data.
               error: Throw an exception if the data already exists.
               ignore: Silently ignore this operation if the data already exists.
        :param header: boolean, whether to include the schema at the first line of the csv file.
               Default is True.
        :param num_partitions: positive int. The number of files to write.
        """
        if num_partitions:
            self.df.repartition(num_partitions).write.csv(path=path, mode=mode, header=header)
        else:
            self.df.write.csv(path=path, mode=mode, header=header)
def _concat(self, join="outer"):
def concat_inner(self, df2):
col_names_1 = set(self.schema.names)
col_names_2 = set(df2.schema.names)
for col in list(col_names_1.difference(col_names_2)):
self = self.drop(col)
for col in list(col_names_2.difference(col_names_1)):
df2 = df2.drop(col)
return self.unionByName(df2)
def concat_outer(self, df2):
col_names_1 = set(self.schema.names)
col_names_2 = set(df2.schema.names)
for col in col_names_1.difference(col_names_2):
df2 = df2.withColumn(col, lit(None).cast(self.schema[col].dataType))
for col in col_names_2.difference(col_names_1):
self = self.withColumn(col, lit(None).cast(df2.schema[col].dataType))
return self.unionByName(df2)
if join == "outer":
return concat_outer
else:
return concat_inner
def concat(self, tables, mode="inner", distinct=False):
    """
    Concatenate a list of Tables into one Table in the dimension of row.

    :param tables: a Table or a list of Tables to concatenate with this Table.
    :param mode: str, either inner or outer. Inner mode keeps only the columns
           shared by all Tables; outer mode keeps every column that appears in
           any Table (missing values become null).
    :param distinct: boolean. If True, only distinct rows are kept.
           Default is False.
    :return: A single concatenated Table.
    """
    if mode not in ["outer", "inner"]:
        raise ValueError("concat mode should be either outer or inner,\
but got {}.".format(mode))
    table_list = tables if isinstance(tables, list) else [tables]
    frames = [tbl.df for tbl in table_list] + [self.df]
    # Fold the pairwise union function over all DataFrames.
    merged = reduce(self._concat(mode), frames)
    if distinct:
        merged = merged.distinct()
    return self._clone(merged)
def drop_duplicates(self, subset=None, sort_cols=None, keep="min"):
    """
    Return a new Table with duplicate rows removed.
    :param subset: str or a list of str, specifies which column(s) to be considered when
           referring to duplication. If subset is None, all the columns will be considered.
    :param sort_cols: str or a list of str, specifies the column(s) to determine which
           item to keep when duplicated. If sort_cols is None, duplicate rows will be
           dropped randomly.
    :param keep: str, the strategy to keep the duplicate, either min and max. Default is min.
           It will only take effect when sort_cols is not None.
           If keep is min, rows with the smallest values in sort_cols will be kept.
           If keep is max, rows with the largest values in sort_cols will be kept.
    :return: A new Table with duplicate rows removed.
    """
    if subset is not None:
        if not isinstance(subset, list):
            subset = [subset]
        check_col_exists(self.df, subset)
    else:
        # No subset given: consider all columns for duplication.
        subset = self.columns
    if sort_cols is None:
        # No ordering requested: delegate directly to Spark's dropDuplicates.
        return self._clone(self.df.dropDuplicates(subset=subset))
    if not isinstance(sort_cols, list):
        sort_cols = [sort_cols]
    check_col_exists(self.df, sort_cols)
    # 'id' (the monotonically increasing id added below) is the final ordering
    # key, acting as a tie-breaker for rows with equal sort_cols values.
    if keep == "min":
        window = Window.partitionBy(subset).orderBy(*sort_cols, 'id')
    elif keep == "max":
        window = Window.partitionBy(subset).orderBy(*[self.df[sort_col].desc()
                                                      for sort_col in sort_cols], 'id')
    else:
        raise ValueError("keep should be either min or max, but got {}.".format(keep))
    # Rank rows within each duplicate group, then keep only the top-ranked row
    # and drop the helper columns.
    df = self.df.withColumn('id', monotonically_increasing_id()) \
        .withColumn('rank', rank().over(window))
    df = df.filter(pyspark_col('rank') == 1).drop('rank', 'id')
    return self._clone(df)
def append_column(self, name, value):
    """
    Append a column with a constant value to the Table.

    :param name: str, the name of the new column.
    :param value: The constant column value for the new column.
    :return: A new Table with the appended column.
    """
    appended_df = self.df.withColumn(name, lit(value))
    return self._clone(appended_df)
def __getattr__(self, name):
    """
    Get the target column of the Table.
    """
    # Delegates unknown attribute access to the underlying Spark DataFrame's
    # __getattr__, which resolves `name` as a column reference.
    # NOTE(review): if `self.df` were ever missing, this would recurse
    # infinitely -- confirm construction always sets `df` first.
    return self.df.__getattr__(name)
def col(self, name):
    """
    Get the target column of the Table.
    """
    target_column = pyspark_col(name)
    return target_column
def sort(self, *cols, **kwargs):
    """
    Sort table by the specified col(s).

    :param cols: list of :class:`Column` or column names to sort by.
    :param ascending: boolean or list of boolean (default ``True``).
        Sort ascending vs. descending. Specify list for multiple sort orders.
        If a list is specified, length of the list must equal length of the `cols`.
    """
    # Sorting with no key columns is almost certainly a caller mistake.
    if not cols:
        raise ValueError("cols should be str or a list of str, but got None.")
    sorted_df = self.df.sort(*cols, **kwargs)
    return self._clone(sorted_df)

order_by = sort
def to_pandas(self):
    """Return the contents of this Table as a :class:`pandas.DataFrame`."""
    return self.df.toPandas()
@staticmethod
def from_pandas(pandas_df):
    """
    Create a Table from a :class:`pandas.DataFrame`.

    :param pandas_df: the pandas DataFrame to convert.
    :return: a Table wrapping the converted Spark DataFrame.
    """
    session = OrcaContext.get_spark_session()
    spark_df = session.createDataFrame(pandas_df)
    return Table(spark_df)
def cache(self):
    """
    Persist the underlying DataFrame in memory.

    :return: this Table
    """
    self.df.cache()
    return self
def uncache(self):
    """
    Mark this table as non-persistent and remove its blocks from memory.

    :return: this Table
    """
    # Guard clause: nothing to do if the table was never cached.
    if not self.df.is_cached:
        return self
    try:
        self.df.unpersist()
    except Py4JError:
        print("Try to unpersist an uncached table")
    return self
class FeatureTable(Table):
@classmethod
def read_parquet(cls, paths):
    """
    Loads Parquet files as a FeatureTable.

    :param paths: str or a list of str. The path(s) to Parquet file(s).
    :return: A FeatureTable for recommendation data.
    """
    parquet_df = Table._read_parquet(paths)
    return cls(parquet_df)
@classmethod
def read_json(cls, paths, cols=None):
    """
    Loads json files as a FeatureTable.

    :param paths: str or a list of str. The path(s) to the json file(s).
    :param cols: str or a list of str, columns to select from the json file(s).
           Default is None; presumably all columns are read in that case --
           confirm against Table._read_json.
    :return: A FeatureTable for recommendation data.
    """
    return cls(Table._read_json(paths, cols))
@classmethod
def read_csv(cls, paths, delimiter=",", header=False, names=None, dtype=None):
    """
    Loads csv files as a FeatureTable.

    :param paths: str or a list of str. The path(s) to csv file(s).
    :param delimiter: str, delimiter used to parse the csv file(s). Default is ",".
    :param header: boolean, whether the first line of the csv file(s) is treated as the
           header for column names. Default is False.
    :param names: str or a list of str, the column names for the csv file(s). Provide
           this when the header cannot be inferred; if specified, names should have the
           same length as the number of columns.
    :param dtype: str, list of str or dict, the column data type(s) for the csv file(s).
           Provide this to override the default inferred types of specified columns.
           A single str casts all columns to that dtype; a list of str (one per column)
           casts each column to the corresponding dtype; a dict maps column name to the
           dtype to cast that column to.
    :return: A FeatureTable for recommendation data.
    """
    csv_df = Table._read_csv(paths, delimiter, header, names, dtype)
    return cls(csv_df)
def encode_string(self, columns, indices, broadcast=True,
                  do_split=False, sep=',', sort_for_array=False, keep_most_frequent=False
                  ):
    """
    Encode columns with provided list of StringIndex.
    :param columns: str or a list of str, the target columns to be encoded.
    :param indices: StringIndex or a list of StringIndex, StringIndexes of target columns.
           The StringIndex should at least have two columns: id and the corresponding
           categorical column.
           Or it can be a dict or a list of dicts. In this case,
           the keys of the dict should be within the categorical column
           and the values are the target ids to be encoded.
    :param broadcast: bool, whether need to broadcast index when encode string.
           Default is True.
    :param do_split: bool, whether need to split column value to array to encode string.
           Default is False.
    :param sep: str, a string representing a regular expression to split a column value.
           Default is ','.
    :param sort_for_array: bool, whether need to sort array columns. Default is False.
    :param keep_most_frequent: bool, whether need to keep most frequent value as the
           column value. Default is False.
    :return: A new FeatureTable which transforms categorical features into unique integer
             values with provided StringIndexes.
    """
    if not isinstance(columns, list):
        columns = [columns]
    if not isinstance(indices, list):
        indices = [indices]
    assert len(columns) == len(indices)
    # Plain dicts are first converted to StringIndex objects, pairing each dict
    # with its corresponding column name by position.
    if isinstance(indices[0], dict):
        indices = list(map(lambda x: StringIndex.from_dict(x[1], columns[x[0]]),
                           enumerate(indices)))
    data_df = self.df
    for i in range(len(columns)):
        index_tbl = indices[i]
        col_name = columns[i]
        if broadcast:
            index_tbl.broadcast()
        if not do_split:
            # Simple case: left-join against the index table and replace the
            # categorical column with the joined "id" column.
            data_df = data_df.join(index_tbl.df, col_name, how="left") \
                .drop(col_name).withColumnRenamed("id", col_name)
        else:
            # Split case: explode the delimited column into one row per token,
            # encode each token, then aggregate the ids back per original row.
            # A synthetic row_id ties exploded rows back to their source rows.
            data_df = data_df.withColumn('row_id', F.monotonically_increasing_id())
            tmp_df = data_df.select('row_id', col_name) \
                .withColumn(col_name, F.explode(F.split(F.col(col_name), sep)))
            # Tokens with no match in the index (null id) are dropped.
            tmp_df = tmp_df.join(index_tbl.df, col_name, how="left") \
                .filter(F.col("id").isNotNull())
            tmp_df = tmp_df.select('row_id', F.col("id"))
            if keep_most_frequent:
                # Keeps the smallest id after sorting the collected ids.
                # NOTE(review): this assumes smaller ids correspond to more
                # frequent values -- confirm the StringIndex was built with
                # frequency ordering (order_by_freq).
                tmp_df = tmp_df.groupby('row_id') \
                    .agg(F.array_sort(F.collect_list(F.col("id")))
                         .getItem(0).alias("id"))
            elif sort_for_array:
                tmp_df = tmp_df.groupby('row_id') \
                    .agg(F.array_sort(F.collect_list(F.col("id"))).alias("id"))
            else:
                tmp_df = tmp_df.groupby('row_id') \
                    .agg(F.collect_list(F.col("id")).alias("id"))
            data_df = data_df.join(tmp_df, 'row_id', 'left') \
                .drop('row_id').drop(col_name).withColumnRenamed("id", col_name)
    return FeatureTable(data_df)
def filter_by_frequency(self, columns, min_freq=2):
    """
    Filter the FeatureTable by the given minimum frequency on the target columns.

    :param columns: str or a list of str, column names which are considered for filtering.
    :param min_freq: int, the minimum frequency. Column value combinations occurring
           fewer than min_freq times are filtered out.
    :return: A new FeatureTable with filtered records.
    """
    if not isinstance(columns, list):
        columns = [columns]
    # Name of the occurrence-count column, e.g. "col1_col2_count".
    count_col = "".join(c + '_' for c in columns) + 'count'
    counted = self.df.groupby(columns).count().withColumnRenamed('count', count_col)
    kept = counted.filter(counted[count_col] >= min_freq).drop(count_col)
    return FeatureTable(kept)
def hash_encode(self, columns, bins, method='md5'):
    """
    Hash encode for categorical column(s).

    :param columns: str or a list of str, the target columns to be encoded.
           For dense features, you need to cut them into discrete intervals beforehand.
    :param bins: int, defines the number of equal-width bins in the range of column(s) values.
    :param method: hashlib supported method, like md5, sha256 etc.
    :return: A new FeatureTable with hash encoded columns.
    """
    if not isinstance(columns, list):
        columns = [columns]
    # The hash function only depends on `method` and `bins`, so resolve the
    # hashlib constructor and build the udf once instead of re-creating them
    # for every column (the original rebuilt both lambdas per iteration).
    hasher = getattr(hashlib, method)

    def hash_to_bin(x):
        # Hash the string form of the value and fold the digest into [0, bins).
        return int(hasher(str(x).encode('utf_8')).hexdigest(), 16) % bins

    hash_int = udf(hash_to_bin)
    hash_df = self.df
    for col_name in columns:
        hash_df = hash_df.withColumn(col_name, hash_int(pyspark_col(col_name)))
    return FeatureTable(hash_df)
def cross_hash_encode(self, columns, bins, cross_col_name=None, method='md5'):
    """
    Hash encode for cross column(s).

    :param columns: a list of str, the categorical columns to be encoded as cross features.
           For dense features, you need to cut them into discrete intervals beforehand.
    :param bins: int, the number of equal-width bins in the range of column(s) values.
    :param cross_col_name: str, the column name for the output cross column. Default is
           None, in which case the name is 'crossed_col1_col2' for ['col1', 'col2'].
    :param method: hashlib supported method, like md5, sha256 etc.
    :return: A new FeatureTable with the target cross column.
    """
    assert isinstance(columns, list), "columns should be a list of column names"
    assert len(columns) >= 2, "cross_hash_encode should have >= 2 columns"
    if cross_col_name is None:
        # Default name: 'crossed' followed by '_<col>' for each input column.
        cross_col_name = 'crossed' + "".join('_' + c for c in columns)
    crossed_df = self.df.withColumn(cross_col_name, concat(*columns))
    return FeatureTable(crossed_df).hash_encode([cross_col_name], bins, method)
def category_encode(self, columns, freq_limit=None, order_by_freq=False,
                    do_split=False, sep=',', sort_for_array=False, keep_most_frequent=False,
                    broadcast=True):
    """
    Category encode the given columns: build a StringIndex for each column, then use
    those indices to replace the categorical values with unique integers.

    :param columns: str or a list of str, target columns to encode from string to index.
    :param freq_limit: int, dict or None. Categories with a count/frequency below
           freq_limit are omitted from the encoding, e.g. 15 or
           {'col_4': 10, 'col_5': 2}. Default is None, encoding every category seen.
    :param order_by_freq: boolean, whether smaller indices are assigned to values with
           higher frequencies. Default is False, in which case frequency order may not
           be preserved when assigning indices.
    :param do_split: bool, whether to split the column value into an array before
           encoding. Default is False.
    :param sep: str, a regular expression used to split a column value. Default is ','.
    :param sort_for_array: bool, whether to sort array columns. Default is False.
    :param keep_most_frequent: bool, whether to keep only the most frequent value as
           the column value. Default is False.
    :param broadcast: bool, whether to broadcast the index tables when encoding.
           Default is True.
    :return: A tuple of (the encoded FeatureTable, the list of StringIndex mappings).
    """
    string_indices = self.gen_string_idx(columns, freq_limit=freq_limit,
                                         order_by_freq=order_by_freq,
                                         do_split=do_split, sep=sep)
    encoded_tbl = self.encode_string(columns, string_indices, do_split=do_split, sep=sep,
                                     sort_for_array=sort_for_array,
                                     keep_most_frequent=keep_most_frequent,
                                     broadcast=broadcast)
    return encoded_tbl, string_indices
def one_hot_encode(self, columns, sizes=None, prefix=None, keep_original_columns=False):
    """
    Convert categorical features into one hot encodings.
    If the features are string, you should first call category_encode to encode them into
    indices before one hot encoding.
    For each input column, a one hot vector will be created expanding multiple output columns,
    with the value of each one hot column either 0 or 1.
    Note that you may only use one hot encoding on the columns with small dimensions
    for memory concerns.

    For example, for column 'x' with size 5:
    Input:
    |x|
    |1|
    |3|
    |0|
    Output will contain 5 one hot columns:
    |prefix_0|prefix_1|prefix_2|prefix_3|prefix_4|
    |   0    |   1    |   0    |   0    |   0    |
    |   0    |   0    |   0    |   1    |   0    |
    |   1    |   0    |   0    |   0    |   0    |

    :param columns: str or a list of str, the target columns to be encoded.
    :param sizes: int or a list of int, the size(s) of the one hot vectors of the column(s).
           Default is None, and in this case, the sizes will be calculated by the maximum
           value(s) of the columns(s) + 1, namely the one hot vector will cover 0 to the
           maximum value.
           You are recommended to provided the sizes if they are known beforehand. If specified,
           sizes should have the same length as columns.
    :param prefix: str or a list of str, the prefix of the one hot columns for the input
           column(s). Default is None, and in this case, the prefix will be the input
           column names. If specified, prefix should have the same length as columns.
           The one hot columns for each input column will have column names:
           prefix_0, prefix_1, ... , prefix_maximum
    :param keep_original_columns: boolean, whether to keep the original index column(s) before
           the one hot encoding. Default is False, and in this case the original column(s)
           will be replaced by the one hot columns. If True, the one hot columns will be
           appended to each original column.
    :return: A new FeatureTable which transforms categorical indices into one hot encodings.
    """
    if not isinstance(columns, list):
        columns = [columns]
    if sizes:
        if not isinstance(sizes, list):
            sizes = [sizes]
    else:
        # Take the max of the column to make sure all values are within the range.
        # The vector size is 1 + max (i.e. from 0 to max).
        sizes = [self.select(col_name).group_by(agg="max").df.collect()[0][0] + 1
                 for col_name in columns]
    assert len(columns) == len(sizes), "columns and sizes should have the same length"
    if prefix:
        if not isinstance(prefix, list):
            prefix = [prefix]
        assert len(columns) == len(prefix), "columns and prefix should have the same length"
    data_df = self.df

    # For a single row, build the list of one hot vectors (one per encoded column).
    def one_hot(columns, sizes):
        one_hot_vectors = []
        for i in range(len(sizes)):
            one_hot_vector = [0] * sizes[i]
            one_hot_vector[columns[i]] = 1
            one_hot_vectors.append(one_hot_vector)
        return one_hot_vectors

    # Compute every column's one hot vector with a single udf call per row; the
    # temporary "friesian_onehot" column holds the nested result.
    one_hot_udf = udf(lambda columns: one_hot(columns, sizes),
                      ArrayType(ArrayType(IntegerType())))
    data_df = data_df.withColumn("friesian_onehot", one_hot_udf(array(columns)))
    all_columns = data_df.columns
    for i in range(len(columns)):
        col_name = columns[i]
        # Record where the source column sits so the generated one hot columns
        # can be spliced into the schema at the same position.
        col_idx = all_columns.index(col_name)
        cols_before = all_columns[:col_idx]
        cols_after = all_columns[col_idx + 1:]
        one_hot_prefix = prefix[i] if prefix else col_name
        one_hot_cols = []
        for j in range(sizes[i]):
            one_hot_col = one_hot_prefix + "_{}".format(j)
            one_hot_cols.append(one_hot_col)
            data_df = data_df.withColumn(one_hot_col,
                                         data_df.friesian_onehot[i][j])
        if keep_original_columns:
            all_columns = cols_before + [col_name] + one_hot_cols + cols_after
        else:
            all_columns = cols_before + one_hot_cols + cols_after
        data_df = data_df.select(*all_columns)
    data_df = data_df.drop("friesian_onehot")
    return FeatureTable(data_df)
def gen_string_idx(self, columns, freq_limit=None, order_by_freq=False,
                   do_split=False, sep=','):
    """
    Generate unique index value of categorical features. The resulting index would
    start from 1 with 0 reserved for unknown features.
    :param columns: str, dict or a list of str, dict, target column(s) to generate StringIndex.
           dict is a mapping of source column names -> target column name if needs to combine
           multiple source columns to generate index.
           For example: {'src_cols':['a_user', 'b_user'], 'col_name':'user'}.
    :param freq_limit: int, dict or None. Categories with a count/frequency below freq_limit
           will be omitted from the encoding. Can be represented as either an integer,
           dict. For instance, 15, {'col_4': 10, 'col_5': 2} etc. Default is None,
           and in this case all the categories that appear will be encoded.
    :param order_by_freq: boolean, whether the result StringIndex will assign smaller indices
           to values with more frequencies. Default is False and in this case frequency order
           may not be preserved when assigning indices.
    :param do_split: bool, whether need to split column value to array to generate index.
           Default is False.
    :param sep: str, a string representing a regular expression to split a column value.
           Default is ','.
    :return: A StringIndex or a list of StringIndex.
    """
    if columns is None:
        raise ValueError("columns should be str or a list of str, but got None.")
    is_single_column = False
    if not isinstance(columns, list):
        is_single_column = True
        columns = [columns]
    # Gather every underlying source column so their existence can be verified.
    src_columns = []
    for c in columns:
        if isinstance(c, dict):
            if 'src_cols' in c:
                src_columns.extend(c['src_cols'])
        else:
            src_columns.append(c)
    check_col_exists(self.df, src_columns)
    # Normalize freq_limit to the string format expected by generate_string_idx,
    # e.g. "15" or "col_4:10,col_5:2".
    if freq_limit:
        if isinstance(freq_limit, int):
            freq_limit = str(freq_limit)
        elif isinstance(freq_limit, dict):
            freq_limit = ",".join(str(k) + ":" + str(v) for k, v in freq_limit.items())
        else:
            raise ValueError("freq_limit only supports int, dict or None, but get " +
                             freq_limit.__class__.__name__)
    out_columns = []
    # Plain (non-dict, non-split) columns are batched in simple_columns and
    # indexed together; union and split columns need dedicated DataFrames.
    simple_columns = []
    df_id_list = []
    for c in columns:
        # union column
        if isinstance(c, dict):
            if 'src_cols' in c:
                src_cols = c['src_cols']
            else:
                raise ValueError("Union columns must has argument 'src_cols'")
            if 'col_name' in c:
                col_name = c['col_name']
            else:
                col_name = src_cols[0] + '_union'
            # process simple columns
            # Flush the pending batch first so df_id_list stays positionally
            # aligned with out_columns.
            if simple_columns:
                simple_df_id_list = generate_string_idx(self.df, simple_columns,
                                                        freq_limit, order_by_freq)
                df_id_list.extend(simple_df_id_list)
                simple_columns = []
            # process union columns
            # Stack all source columns into one column so they share one index.
            for i, src_c in enumerate(src_cols):
                if i == 0:
                    dict_df = self.df.select(F.col(src_c).alias(col_name))
                else:
                    dict_df = dict_df.union(self.df.select(F.col(src_c).alias(col_name)))
            union_id_list = generate_string_idx(dict_df, [col_name],
                                                freq_limit, order_by_freq)
            df_id_list.extend(union_id_list)
            out_columns.append(col_name)
        # single column
        else:
            if do_split:
                # Explode the delimited values so each token is indexed separately.
                dict_df = self.df.select(F.col(c))
                dict_df = dict_df.withColumn(c, F.explode(F.split(c, sep)))
                split_id_list = generate_string_idx(dict_df, [c], freq_limit, order_by_freq)
                df_id_list.extend(split_id_list)
                out_columns.append(c)
            else:
                simple_columns.append(c)
                out_columns.append(c)
    # Flush any batched simple columns remaining at the end.
    if simple_columns:
        simple_df_id_list = generate_string_idx(self.df, simple_columns,
                                                freq_limit, order_by_freq)
        df_id_list.extend(simple_df_id_list)
    string_idx_list = list(map(lambda x: StringIndex(x[0], x[1]),
                               zip(df_id_list, out_columns)))
    # If input is a single column (not a list), then the output would be a single StringIndex.
    if len(string_idx_list) == 1 and is_single_column:
        return string_idx_list[0]
    else:
        return string_idx_list
def _clone(self, df):
    """Wrap the given Spark DataFrame in a new FeatureTable."""
    return FeatureTable(df)
def cross_columns(self, crossed_columns, bucket_sizes):
    """
    Cross columns and hash to the specified bucket sizes.

    :param crossed_columns: list of column name pairs to be crossed.
           i.e. [['a', 'b'], ['c', 'd']]
    :param bucket_sizes: hash bucket size for crossed pairs. i.e. [1000, 300]
    :return: A new FeatureTable with crossed columns.
    """
    # Delegates to the module-level cross_columns helper.
    crossed_df = cross_columns(self.df, crossed_columns, bucket_sizes)
    return FeatureTable(crossed_df)
def min_max_scale(self, columns, min=0.0, max=1.0):
    """
    Rescale each column individually to a common range [min, max] linearly using
    column summary statistics, which is also known as min-max normalization or rescaling.

    :param columns: str or a list of str, the column(s) to be rescaled.
    :param min: the lower bound after transformation, shared by all columns.
           Default is 0.0.
    :param max: the upper bound after transformation, shared by all columns.
           Default is 1.0.
    :return: A tuple of a new FeatureTable with rescaled column(s), and a dict of the
             original min and max values of the input column(s).
    """
    columns = str_to_list(columns, "columns")
    df = self.df
    # Partition the requested columns by Spark SQL type: plain numeric columns,
    # array-of-numeric columns and ML vector columns take different paths below.
    types = [x[1] for x in self.df.select(*columns).dtypes]
    scalar_cols = [columns[i] for i in range(len(columns))
                   if types[i] == "int" or types[i] == "bigint"
                   or types[i] == "float" or types[i] == "double"]
    array_cols = [columns[i] for i in range(len(columns))
                  if types[i] == "array<int>" or types[i] == "array<bigint>"
                  or types[i] == "array<float>" or types[i] == "array<double>"]
    vector_cols = [columns[i] for i in range(len(columns)) if types[i] == "vector"]
    min_max_dict = {}
    # Converts an ML vector back into a plain Python list of doubles.
    tolist = udf(lambda x: x.toArray().tolist(), ArrayType(DoubleType()))
    if scalar_cols:
        # Scale all scalar columns in one pass: assemble them into a vector,
        # run MinMaxScaler, then unpack the scaled vector back into columns.
        assembler = VectorAssembler(inputCols=scalar_cols, outputCol="vect")
        # MinMaxScaler Transformation
        scaler = MinMaxScaler(min=min, max=max, inputCol="vect", outputCol="scaled")
        # Pipeline of VectorAssembler and MinMaxScaler
        pipeline = Pipeline(stages=[assembler, scaler])
        # Fitting pipeline on DataFrame
        model = pipeline.fit(df)
        df = model.transform(df) \
            .withColumn("scaled_list", tolist(pyspark_col("scaled"))) \
            .drop("vect").drop("scaled")
        for i in range(len(scalar_cols)):
            df = df.withColumn(scalar_cols[i], pyspark_col("scaled_list")[i])
        df = df.drop("scaled_list")
        # cast to float (the scaler outputs doubles)
        for c in scalar_cols:
            df = df.withColumn(c, pyspark_col(c).cast("float"))
        min_list = model.stages[1].originalMin.toArray().tolist()
        max_list = model.stages[1].originalMax.toArray().tolist()
        for i, min_max in enumerate(zip(min_list, max_list)):
            min_max_dict[scalar_cols[i]] = min_max
    from pyspark.ml.linalg import Vectors, VectorUDT
    for c in array_cols:
        # MinMaxScaler only accepts vector input: convert array -> vector,
        # scale, then convert the result back into an array.
        list_to_vector_udf = udf(lambda l: Vectors.dense(l), VectorUDT())
        df = df.withColumn(c, list_to_vector_udf(pyspark_col(c)))
        scaler = MinMaxScaler(min=min, max=max, inputCol=c, outputCol="scaled")
        model = scaler.fit(df)
        df = model.transform(df).drop(c).withColumn(c, tolist("scaled")).drop("scaled")
        min_max_dict[c] = (model.originalMin.toArray().tolist(),
                           model.originalMax.toArray().tolist())
    for c in vector_cols:
        scaler = MinMaxScaler(min=min, max=max, inputCol=c, outputCol="scaled")
        model = scaler.fit(df)
        df = model.transform(df).withColumnRenamed("scaled", c)
        # Bug fix: the original code rebound the `min`/`max` parameters to the
        # fitted model's originalMin/originalMax vectors here, which corrupted
        # the target scaling range for every subsequent vector column.
        min_max_dict[c] = (model.originalMin, model.originalMax)
    return FeatureTable(df), min_max_dict
def transform_min_max_scale(self, columns, min_max_dict):
    """
    Rescale each column individually with the given [min, max] range of each column.
    :param columns: str or a list of str, the column(s) to be rescaled.
    :param min_max_dict: dict, the key is the column name, and the value is the
           tuple of min and max values of this column.
    :return: A new FeatureTable with rescaled column(s).
    """
    columns = str_to_list(columns, "columns")
    # Partition columns by Spark SQL type, mirroring min_max_scale: scalars,
    # arrays of numerics, and ML vectors are normalized via different paths.
    types = [x[1] for x in self.df.select(*columns).dtypes]
    scalar_cols = [columns[i] for i in range(len(columns))
                   if types[i] == "int" or types[i] == "bigint"
                   or types[i] == "float" or types[i] == "double"]
    array_cols = [columns[i] for i in range(len(columns))
                  if types[i] == "array<int>" or types[i] == "array<bigint>"
                  or types[i] == "array<float>" or types[i] == "array<double>"]
    vector_cols = [columns[i] for i in range(len(columns)) if types[i] == "vector"]
    tbl = self

    # Element-wise normalization for array columns: (x - min) / (max - min).
    def normalize_array(c_min, c_max):
        def normalize(x):
            np_x = np.array(x)
            np_min = np.array(c_min)
            np_max = np.array(c_max)
            normalized = (np_x - np_min) / (np_max - np_min)
            return normalized.tolist()
        return normalize

    # Normalization for a single scalar (or vector) using the stored range.
    def normalize_scalar_vector(c_min, c_max):
        def normalize(x):
            return (x - c_min) / (c_max - c_min)
        return normalize

    # Columns without an entry in min_max_dict are silently left unchanged.
    for column in scalar_cols:
        if column in min_max_dict:
            col_min, col_max = min_max_dict[column]
            tbl = tbl.apply(column, column, normalize_scalar_vector(col_min, col_max), "float")
    for column in array_cols:
        if column in min_max_dict:
            col_min, col_max = min_max_dict[column]
            tbl = tbl.apply(column, column, normalize_array(col_min, col_max), "array<float>")
    for column in vector_cols:
        if column in min_max_dict:
            col_min, col_max = min_max_dict[column]
            # NOTE(review): applying the scalar normalize function with a
            # "vector" return type relies on arithmetic between ML vectors --
            # confirm this path is exercised and behaves as intended.
            tbl = tbl.apply(column, column,
                            normalize_scalar_vector(col_min, col_max), "vector")
    return tbl
def add_negative_samples(self, item_size, item_col="item", label_col="label", neg_num=1):
    """
    Generate negative item visits for each positive item visit.

    :param item_size: int, max of item.
    :param item_col: str, name of the item column.
    :param label_col: str, name of the label column.
    :param neg_num: int, the number of negative samples added per positive record.
    :return: A new FeatureTable with negative samples.
    """
    augmented_df = add_negative_samples(self.df, item_size, item_col, label_col, neg_num)
    return FeatureTable(augmented_df)
def add_hist_seq(self, cols, user_col, sort_col='time',
                 min_len=1, max_len=100, num_seqs=2147483647):
    """
    Add a column of history visits into the table.

    :param cols: a list of str, the columns to be aggregated into history sequences.
    :param user_col: str, the user column.
    :param sort_col: str, the column to sort by. Default is 'time'.
    :param min_len: int, minimal length of a history sequence.
    :param max_len: int, maximal length of a history sequence.
    :param num_seqs: int, the maximum number of sequences to keep. Default is
           2147483647 (the max 32-bit integer), which keeps all sequences;
           num_seqs=1 keeps only the last one.
    :return: FeatureTable
    """
    result_df = add_hist_seq(self.df, cols, user_col, sort_col, min_len, max_len, num_seqs)
    return FeatureTable(result_df)
def add_neg_hist_seq(self, item_size, item_history_col, neg_num):
    """
    Generate a list of negative samples for each item in item_history_col.

    :param item_size: int, max of item.
    :param item_history_col: str, this column should be a list of visits in history.
    :param neg_num: int, for each positive record, add neg_num of negative samples.
    :return: FeatureTable
    """
    # Delegates to the module-level add_neg_hist_seq helper.
    df = add_neg_hist_seq(self.df, item_size, item_history_col, neg_num)
    return FeatureTable(df)
def mask(self, mask_cols, seq_len=100):
    """
    Add 1/0 masks for the given columns.

    :param mask_cols: a list of str, columns need to be masked with 1s and 0s.
    :param seq_len: int, the length of the masked column. Default is 100.
    :return: FeatureTable
    """
    masked_df = mask(self.df, mask_cols, seq_len)
    return FeatureTable(masked_df)
def pad(self, cols, seq_len=100, mask_cols=None):
    """
    Pad and mask columns of the FeatureTable.

    :param cols: a list of str, the columns to be padded with 0s.
    :param seq_len: int, the target length of the padded columns. Default is 100.
    :param mask_cols: a list of str, the columns to be masked with 1s and 0s.
    :return: A new FeatureTable with padded columns.
    """
    padded_df = pad(self.df, cols, seq_len, mask_cols)
    return FeatureTable(padded_df)
def apply(self, in_col, out_col, func, dtype="string"):
    """
    Transform a FeatureTable using a user-defined Python function.

    :param in_col: str or a list of str, the column(s) to be transformed.
    :param out_col: str, the name of output column.
    :param func: The Python function with in_col as input and out_col as output.
           When in_col is a list of str, func should take a list as input,
           and in this case you are generating out_col given multiple
           input columns.
    :param dtype: str, the data type of out_col. Default is string type.
    :return: A new FeatureTable after column transformation.
    """
    udf_func = udf(func, dtype)
    assert isinstance(out_col, str), "out_col must be a single column"
    if isinstance(in_col, str):
        df = self.df.withColumn(out_col, udf_func(pyspark_col(in_col)))
    else:
        # Fixed the assertion message typo: previously read
        # "a single column of a list of columns".
        assert isinstance(in_col, list), \
            "in_col must be a single column or a list of columns"
        # Multiple input columns are packed into one array for the udf.
        df = self.df.withColumn(out_col, udf_func(array(in_col)))
    return FeatureTable(df)
def join(self, table, on=None, how=None, lsuffix=None, rsuffix=None):
    """
    Join a FeatureTable with another FeatureTable.

    :param table: A FeatureTable.
    :param on: str or a list of str, the column(s) to join.
    :param how: str, default is inner. Must be one of: inner, cross, outer, full,
           fullouter, full_outer, left, leftouter, left_outer, right, rightouter,
           right_outer, semi, leftsemi, left_semi, anti, leftanti and left_anti.
    :param lsuffix: The suffix to use for the original Table's overlapping columns.
    :param rsuffix: The suffix to use for the input Table's overlapping columns.
    :return: A joined FeatureTable.
    """
    assert isinstance(table, Table), "the joined table should be a Table"
    join_keys = on if isinstance(on, list) else [on]
    # Columns present on both sides but not used as join keys; these get the
    # requested suffixes to avoid ambiguous names in the result.
    shared = set(self.df.schema.names).intersection(set(table.df.schema.names))
    overlap_columns = list(shared.difference(join_keys))
    left = self
    right = table
    if lsuffix is not None:
        left = left.rename({column: column + lsuffix for column in overlap_columns})
    if rsuffix is not None:
        right = right.rename({column: column + rsuffix for column in overlap_columns})
    joined_df = left.df.join(right.df, on=join_keys, how=how)
    return FeatureTable(joined_df)
def add_value_features(self, columns, dict_tbl, key, value):
    """
    Add features based on columns and a key-value mapping table. For each col in
    columns, a value column is added using the key-value pairs from dict_tbl.

    :param columns: str or a list of str, the columns to look up.
    :param dict_tbl: the key-value mapping table.
    :param key: str, name of the key column in dict_tbl.
    :param value: str, name of the value column in dict_tbl.
    :return: FeatureTable
    """
    if isinstance(columns, str):
        columns = [columns]
    # Bug fix: use type(...).__name__ -- concatenating a type object to a str
    # raises TypeError and would mask the intended assertion message.
    assert isinstance(columns, list), \
        "columns should be str or a list of str, but get a " + type(columns).__name__
    df = add_value_features(self.df, columns, dict_tbl.df, key, value)
    return FeatureTable(df)
def reindex(self, columns=[], index_tbls=[]):
    """
    Replace the value using index_tbls for each col in columns, with 0 as the default.

    :param columns: str or a list of str.
    :param index_tbls: a Table or a list of Tables, each holding a mapping from an old
           index to a new one.
    :return: FeatureTable
    """
    columns = str_to_list(columns, "columns")
    if isinstance(index_tbls, dict):
        index_tbls = [index_tbls]
    # Bug fix: use type(...).__name__ -- concatenating a type object to a str
    # raises TypeError and would mask the intended assertion message.
    assert isinstance(index_tbls, list), \
        "index_dicts should be table or a list of table, but get a " + type(index_tbls).__name__
    assert len(columns) == len(index_tbls), \
        "each column of columns should have one corresponding index_dict"
    tbl = FeatureTable(self.df)
    for i, c in enumerate(columns):
        tbl = tbl.add_value_features(c, index_tbls[i], key=c, value=c)
    return tbl
def gen_reindex_mapping(self, columns=[], freq_limit=10):
    """
    Generate a mapping from old index to new one based on popularity count on
    descending order.

    :param columns: str or a list of str.
    :param freq_limit: int, dict or None. Indices with a count below freq_limit
           will be omitted. Can be represented as either an integer or dict.
           For instance, 15, {'col_4': 10, 'col_5': 2} etc. Default is 10.
    :return: a FeatureTable (when columns is a single str) or a list of FeatureTables,
             each with a mapping from the old index to the new one; new indices start
             from 1, with 0 reserved as the default.
    """
    # Remember whether a single column name (str) was passed so a single table
    # can be returned at the end.
    single_column = isinstance(columns, str)
    # Bug fix: the normalized list returned by str_to_list was previously
    # discarded, so a str argument was iterated character by character below.
    columns = str_to_list(columns, "columns")
    if isinstance(freq_limit, int):
        freq_limit = {col: freq_limit for col in columns}
    # Bug fix: use type(...).__name__ -- str + type raises TypeError.
    assert isinstance(freq_limit, dict), \
        "freq_limit should be int or dict, but get a " + type(freq_limit).__name__
    index_tbls = []
    for c in columns:
        # Count occurrences of each value, drop rare ones, then assign new
        # indices by descending popularity (row_number starts at 1).
        c_count = self.select(c).group_by(c, agg={c: "count"}).rename(
            {"count(" + c + ")": "count"})
        c_count = c_count.filter(pyspark_col("count") >= freq_limit[c])
        w = Window.orderBy(desc("count"))
        index_df = c_count.df.withColumn(c + "_new", row_number().over(w))
        index_tbl = FeatureTable(index_df).select([c, c + "_new"])
        index_tbls.append(index_tbl)
    if single_column:
        return index_tbls[0]
    return index_tbls
def group_by(self, columns=None, agg="count", join=False):
    """
    Group the Table with specified columns and then run aggregation. Optionally join the result
    with the original Table.

    :param columns: str or a list of str. Columns to group the Table. If it is None or an
           empty list, aggregation is run directly without grouping. Default is None.
    :param agg: str, list or dict. Aggregate functions to be applied to grouped Table.
           Default is "count".
           Supported aggregate functions are: "max", "min", "count", "sum", "avg", "mean",
           "sumDistinct", "stddev", "stddev_pop", "variance", "var_pop", "skewness", "kurtosis",
           "collect_list", "collect_set", "approx_count_distinct", "first", "last".
           If agg is a str, then agg is the aggregate function and the aggregation is performed
           on all columns that are not in `columns`.
           If agg is a list of str, then agg is a list of aggregate function and the aggregation
           is performed on all columns that are not in `columns`.
           If agg is a single dict mapping from str to str, then the key is the column
           to perform aggregation on, and the value is the aggregate function.
           If agg is a single dict mapping from str to list, then the key is the
           column to perform aggregation on, and the value is list of aggregate functions.

           Examples:
           agg="sum"
           agg=["last", "stddev"]
           agg={"*":"count"}
           agg={"col_1":"sum", "col_2":["count", "mean"]}
    :param join: boolean. If True, join the aggregation result with original Table.

    :return: A new Table with aggregated column fields.
    """
    # None default avoids the shared-mutable-default pitfall of `columns=[]`.
    if columns is None:
        columns = []
    elif isinstance(columns, str):
        columns = [columns]
    assert isinstance(columns, list), "columns should be str or a list of str"
    grouped_data = self.df.groupBy(columns)
    if isinstance(agg, str):
        # One aggregate function applied to every non-grouping column.
        agg_exprs_dict = {agg_column: agg for agg_column in self.df.columns
                          if agg_column not in columns}
        agg_df = grouped_data.agg(agg_exprs_dict)
    elif isinstance(agg, list):
        # Several aggregate functions, each applied to every non-grouping column.
        agg_exprs_list = []
        for stat in agg:
            stat_func = getattr(F, stat)
            agg_exprs_list += [stat_func(agg_column) for agg_column in self.df.columns
                               if agg_column not in columns]
        agg_df = grouped_data.agg(*agg_exprs_list)
    elif isinstance(agg, dict):
        if all(isinstance(stats, str) for stats in agg.values()):
            # {column: function} maps directly to DataFrame.agg(dict).
            agg_df = grouped_data.agg(agg)
        else:
            # {column: [functions]} — expand into explicit column expressions.
            agg_exprs_list = []
            for agg_column, stats in agg.items():
                if isinstance(stats, str):
                    stats = [stats]
                assert isinstance(stats, list), "value in agg should be str or a list of str"
                for stat in stats:
                    agg_exprs_list.append(getattr(F, stat)(agg_column))
            agg_df = grouped_data.agg(*agg_exprs_list)
    else:
        raise TypeError("agg should be str, list of str, or dict")
    if join:
        assert columns, "columns can not be empty if join is True"
        # Attach the per-group statistics back onto every original row.
        return FeatureTable(self.df.join(agg_df, on=columns, how="left"))
    return FeatureTable(agg_df)
def split(self, ratio, seed=None):
    """
    Randomly split this FeatureTable into several FeatureTables, e.g. for train,
    validation and test.

    :param ratio: a list of portions as weights with which to split the FeatureTable.
           Weights will be normalized if they don't sum up to 1.0.
    :param seed: The seed for sampling.

    :return: A tuple of FeatureTables split by the given ratio.
    """
    return tuple(FeatureTable(split_df)
                 for split_df in self.df.randomSplit(ratio, seed))
def target_encode(self, cat_cols, target_cols, target_mean=None, smooth=20, kfold=2,
                  fold_seed=None, fold_col="__fold__", drop_cat=False, drop_fold=True,
                  out_cols=None):
    """
    For each categorical column or column group in cat_cols, calculate the mean of target
    columns in target_cols and encode the FeatureTable with the target mean(s) to generate
    new features.

    :param cat_cols: str, a list of str or a nested list of str. Categorical column(s) or column
           group(s) to target encode. To encode categorical column(s), cat_cols should be a str
           or a list of str. To encode categorical column group(s), cat_cols should be a nested
           list of str.
    :param target_cols: str or a list of str. Numeric target column(s) to calculate the mean.
           If target_cols is a list, then each target_col would be used separately to encode the
           cat_cols.
    :param target_mean: dict of {target column : mean} to provides global mean of target
           column(s) if known beforehand to save calculation. Default is None and in this case
           the global mean(s) would be calculated on demand.
    :param smooth: int. The mean of each category is smoothed by the overall mean. Default is
           20.
    :param kfold: int. Specifies number of folds for cross validation. The mean values within
           the i-th fold are calculated with data from all other folds. If kfold is 1,
           global-mean statistics are applied; otherwise, cross validation is applied. Default
           is 2.
    :param fold_seed: int. Random seed used for generating folds. Default is None and in this
           case folds will be generated with row number in each partition.
    :param fold_col: str. Name of integer column used for splitting folds. If fold_col exists
           in the FeatureTable, then this column is used; otherwise, it is randomly generated
           within the range [0, kfold). Default is "__fold__".
    :param drop_cat: boolean, whether to drop the original categorical columns.
           Default is False.
    :param drop_fold: boolean, whether to drop the fold column. Default is True.
    :param out_cols: str, a list of str or a nested list of str. When both cat_cols and
           target_cols has only one element, out_cols can be a single str. When cat_cols or
           target_cols has only one element, out_cols can be a list of str, and each element
           in out_cols corresponds to an element in target_cols or cat_cols. When it is a
           nested list of str, each inner list corresponds to the categorical column in the
           same position of cat_cols. Each element in the inner list corresponds to the target
           column in the same position of target_cols. Default to be None and in this case the
           output column will be cat_col + "_te_" + target_col.

    :return: A tuple of a new FeatureTable with target encoded columns and a list of TargetCodes
             which contains the target encode values of the whole FeatureTable.
    """
    # --- Validate and normalize arguments. ---
    assert isinstance(kfold, int) and kfold > 0, "kfold should be an integer larger than 0"
    if isinstance(cat_cols, str):
        cat_cols = [cat_cols]
    assert isinstance(cat_cols, list), "cat_cols should be str or list"
    for cat_col in cat_cols:
        check_col_str_list_exists(self.df, cat_col, "cat_cols")
    if isinstance(target_cols, str):
        target_cols = [target_cols]
    assert isinstance(target_cols, list), "target_cols should be str or list"
    check_col_exists(self.df, target_cols)
    # Every target column must be numeric so its mean is well-defined.
    nonnumeric_target_col_type = get_nonnumeric_col_type(self.df, target_cols)
    assert not nonnumeric_target_col_type, "target_cols should be numeric but get " + ", ".join(
        list(map(lambda x: x[0] + " of type " + x[1], nonnumeric_target_col_type)))
    # Normalize out_cols into a nested list where out_cols[i][j] names the output
    # for cat_cols[i] encoded against target_cols[j].
    if out_cols is None:
        out_cols = [[gen_cols_name(cat_col, "_") + "_te_" + target_col
                     for target_col in target_cols] for cat_col in cat_cols]
    else:
        if isinstance(out_cols, str):
            assert len(cat_cols) == 1 and len(target_cols) == 1, \
                "out_cols can be string only if both cat_cols and target_cols has only one" + \
                " element"
            out_cols = [[out_cols]]
        elif isinstance(out_cols, list):
            if all(isinstance(out_col, str) for out_col in out_cols):
                # A flat list of str maps onto whichever of cat_cols/target_cols
                # has several elements.
                if len(cat_cols) == 1:
                    out_cols = [out_cols]
                elif len(target_cols) == 1:
                    out_cols = [[out_col] for out_col in out_cols]
                else:
                    raise TypeError("out_cols should be a nested list of str when both " +
                                    "cat_cols and target_cols have more than one elements")
            else:
                for outs in out_cols:
                    assert isinstance(outs, list), "out_cols should be str, a list of str, " \
                                                   "or a nested list of str"
        else:
            raise TypeError("out_cols should be str, a list of str, or a nested list of str")
    # After normalization the nested list must align with cat_cols x target_cols.
    assert len(out_cols) == len(cat_cols), "length of out_cols should be equal to " \
                                           "length of cat_cols"
    for outs in out_cols:
        assert len(outs) == len(target_cols), "length of element in out_cols should be " \
                                              "equal to length of target_cols"
    # calculate global mean for each target column
    target_mean_dict = {}
    if target_mean is not None:
        assert isinstance(target_mean, dict), "target_mean should be a dict"
        for target_col in target_cols:
            assert target_col in target_mean, \
                "target column " + target_col + " should be in target_mean " + str(target_mean)
            target_mean_dict[target_col] = target_mean[target_col]
    else:
        # One pass over the data computes the mean of every target column at once.
        global_mean_list = [F.mean(pyspark_col(target_col)).alias(target_col)
                            for target_col in target_cols]
        target_mean_dict = self.df.select(*global_mean_list).collect()[0].asDict()
    for target_col in target_mean_dict:
        assert target_mean_dict[target_col] is not None, "mean of target column {} should " \
                                                         "not be None".format(target_col)
    # generate fold_col
    result_df = self.df
    if kfold > 1:
        if fold_col not in self.df.columns:
            # Assign each row to a fold: deterministically from the row id when no
            # seed is given, randomly (uniform over [0, kfold)) otherwise.
            if fold_seed is None:
                result_df = result_df.withColumn(
                    fold_col,
                    monotonically_increasing_id() % lit(kfold)
                )
            else:
                result_df = result_df.withColumn(
                    fold_col, (F.rand(seed=fold_seed) * kfold).cast(IntegerType()))
        else:
            # Reuse a pre-existing fold column, but only if it is integer-typed.
            assert list(filter(lambda x: x[0] == fold_col and x[1] == "int",
                               self.df.dtypes)), \
                "fold_col should be integer type but get " + fold_col
    else:
        # kfold == 1: global-mean statistics only, no fold column needed.
        fold_col = None

    def gen_target_code(cat_out):
        # For one categorical column (or column group) build two TargetCodes:
        # out-of-fold statistics used to encode this table, and whole-table
        # statistics for encoding future tables.
        cat_col = cat_out[0]
        out_col_list = cat_out[1]
        cat_col_name = gen_cols_name(cat_col, "_")
        # Per-category sum of each target column plus the category count,
        # computed over the whole table.
        sum_list = [F.sum(target_col).alias(cat_col_name + "_all_sum_" + target_col)
                    for target_col in target_cols]
        if isinstance(cat_col, str):
            org_all_df = result_df.groupBy(cat_col)
        else:
            org_all_df = result_df.groupBy(*cat_col)
        org_all_df = org_all_df.agg(*sum_list, F.count("*").alias(cat_col_name + "_all_count"))
        all_df = org_all_df
        for target_col, out_col in zip(target_cols, out_col_list):
            global_target_mean = target_mean_dict[target_col]
            # Smoothed category mean: (sum + global_mean * smooth) / (count + smooth).
            # NOTE(review): the lambda captures global_target_mean by closure and is
            # re-bound on each loop iteration; this relies on PySpark serializing the
            # udf when the column expression is built in this same iteration --
            # confirm, otherwise bind the mean as a default argument.
            all_func = udf(
                lambda cat_sum, cat_count:
                (cat_sum + global_target_mean * smooth) / (cat_count + smooth),
                DoubleType())
            all_df = all_df.withColumn(out_col,
                                       all_func(cat_col_name + "_all_sum_" + target_col,
                                                cat_col_name + "_all_count")) \
                .drop(cat_col_name + "_all_sum_" + target_col)
        all_df = all_df.drop(cat_col_name + "_all_count")
        if kfold == 1:
            fold_df = all_df
        else:
            # Per-category, per-fold sums and counts; the out-of-fold statistics
            # are then (all - fold) for both sum and count.
            fold_sum_list = [F.sum(target_col).alias(cat_col_name + "_sum_" + target_col)
                             for target_col in target_cols]
            if isinstance(cat_col, str):
                fold_df = result_df.groupBy(cat_col, fold_col)
            else:
                fold_df = result_df.groupBy(*cat_col, fold_col)
            fold_df = fold_df.agg(*fold_sum_list, F.count("*").alias(cat_col_name + "_count"))
            fold_df = fold_df.join(org_all_df, cat_col, how="left")
            for target_col, out_col in zip(target_cols, out_col_list):
                global_target_mean = target_mean_dict[target_col]
                # Out-of-fold smoothed mean; None when the category occurs only in
                # this fold (c_all == c), since no out-of-fold data exists.
                target_func = udf(
                    lambda s_all, s, c_all, c:
                    None if c_all == c else
                    ((s_all - s) + global_target_mean * smooth) / ((c_all - c) + smooth),
                    DoubleType())
                fold_df = fold_df.withColumn(
                    out_col,
                    target_func(cat_col_name + "_all_sum_" + target_col,
                                cat_col_name + "_sum_" + target_col,
                                cat_col_name + "_all_count",
                                cat_col_name + "_count")
                )
                fold_df = fold_df.drop(cat_col_name + "_sum_" + target_col,
                                       cat_col_name + "_all_sum_" + target_col)
            fold_df = fold_df.drop(cat_col_name + "_count", cat_col_name + "_all_count")
            # NOTE(review): renaming a column to itself is a no-op; looks like a
            # leftover from a refactor -- confirm the intent.
            fold_df = fold_df.withColumnRenamed(fold_col, fold_col)
        # Map each output column back to its target column and global mean so the
        # TargetCode can later encode new tables.
        out_target_mean_dict = {
            out_col: (target_col, target_mean_dict[target_col])
            for target_col, out_col in zip(target_cols, out_col_list)
        }
        return TargetCode(fold_df, cat_col, out_target_mean_dict), \
            TargetCode(all_df, cat_col, out_target_mean_dict)

    targets = list(map(gen_target_code, zip(cat_cols, out_cols)))
    fold_targets = [t[0] for t in targets]
    all_targets = [t[1] for t in targets]
    # Encode this table with the out-of-fold statistics; return the whole-table
    # statistics so callers can encode future tables consistently.
    result_tbl = FeatureTable(result_df)
    result_tbl = encode_target_(result_tbl, fold_targets, drop_cat=drop_cat,
                                drop_fold=drop_fold, fold_col=fold_col)
    return result_tbl, all_targets
def encode_target(self, targets, target_cols=None, drop_cat=True):
    """
    Encode columns with the provided TargetCode(s).

    :param targets: TargetCode or a list of TargetCode.
    :param target_cols: str or a list of str. Selects part of target columns of which target
           encoding will be applied. Default is None and in this case all target columns
           contained in targets will be encoded.
    :param drop_cat: boolean, whether to drop the categorical column(s). Default is True.

    :return: A new FeatureTable which encodes each categorical column into group-specific
             mean of target columns with provided TargetCodes.
    """
    # Normalize targets to a list of TargetCode, rejecting anything else.
    if isinstance(targets, TargetCode):
        targets = [targets]
    elif isinstance(targets, list):
        for code in targets:
            assert isinstance(code, TargetCode), \
                "element in targets should be TargetCode but get {}".format(type(code))
    else:
        raise TypeError("targets should be TargetCode or list of TargetCode")
    # Each TargetCode must refer to categorical column(s) present in this table.
    for code in targets:
        check_col_str_list_exists(self.df, code.cat_col, "TargetCode.cat_col in targets")
    if target_cols is not None:
        if isinstance(target_cols, str):
            target_cols = [target_cols]
        assert isinstance(target_cols, list), "target_cols should be str or list"
    return encode_target_(FeatureTable(self.df), targets,
                          target_cols=target_cols, drop_cat=drop_cat)
def difference_lag(self, columns, sort_cols, shifts=1, partition_cols=None, out_cols=None):
    """
    Calculates the difference between two consecutive rows, or two rows with certain interval
    of the specified continuous columns. The table is first partitioned by partition_cols if it
    is not None, and then sorted by sort_cols before the calculation.

    :param columns: str or a list of str. Continuous columns to calculate the difference.
    :param sort_cols: str or a list of str. Columns by which the table is sorted.
    :param shifts: int or a list of int. Intervals between two rows.
    :param partition_cols: Columns by which the table is partitioned.
    :param out_cols: str, a list of str, or a nested list of str. When both columns and shifts
           has only one element, out_cols can be a single str. When columns or shifts has only
           one element, out_cols can be a list of str, and each element in out_cols corresponds
           to an element in shifts or columns. When it is a list of list of str, each inner list
           corresponds to a column in columns. Each element in the inner list corresponds to a
           shift in shifts. If it is None, the output column will be sort_cols + "_diff_lag_"
           + column + "_" + shift. Default is None.

    :return: A new FeatureTable with difference columns.
    """
    columns = str_to_list(columns, "columns")
    sort_cols = str_to_list(sort_cols, "sort_cols")
    # Differences are only defined for numeric columns.
    nonnumeric_col_type = get_nonnumeric_col_type(self.df, columns)
    assert not nonnumeric_col_type, \
        "columns should be numeric but get " + \
        ", ".join(list(map(lambda x: x[0] + " of type " + x[1], nonnumeric_col_type)))
    if isinstance(shifts, int):
        shifts = [shifts]
    elif isinstance(shifts, list):
        for s in shifts:
            assert isinstance(s, int), "elements in shift should be integer but get " + str(s)
    else:
        raise TypeError("shift should be either int or a list of int")
    if partition_cols is not None:
        partition_cols = str_to_list(partition_cols, "partition_cols")
    # Normalize out_cols into a nested list where out_cols[i][j] names the output
    # for columns[i] lagged by shifts[j].
    if out_cols is None:
        sort_name = gen_cols_name(sort_cols)
        out_cols = [[sort_name + "_diff_lag_" + column + "_" + str(shift)
                     for shift in shifts] for column in columns]
    else:
        if isinstance(out_cols, str):
            assert len(columns) == 1 and len(shifts) == 1, \
                "out_cols can be string only if both columns and shifts has only one element"
            out_cols = [[out_cols]]
        elif isinstance(out_cols, list):
            if all(isinstance(out_col, str) for out_col in out_cols):
                # A flat list of str maps onto whichever of columns/shifts has
                # several elements.
                if len(columns) == 1:
                    out_cols = [out_cols]
                elif len(shifts) == 1:
                    out_cols = [[out_col] for out_col in out_cols]
                else:
                    raise TypeError("out_cols should be a list of list of str when both " +
                                    "columns shifts have more than one elements")
            else:
                for outs in out_cols:
                    assert isinstance(outs, list), "out_cols should be str, a list of str, " \
                                                   "or a list of lists of str"
        else:
            raise TypeError("out_cols should be str, a list of str, or a list of lists of str")
    # After normalization the nested list must align with columns x shifts.
    assert len(out_cols) == len(columns), "length of out_cols should be equal to length " \
                                          "of columns"
    for outs in out_cols:
        assert len(outs) == len(shifts), "length of element in out_cols should be " \
                                         "equal to length of shifts"
    result_df = self.df
    # Rows are compared within each partition (when given), ordered by sort_cols.
    if partition_cols is None:
        partition_window = Window.orderBy(*sort_cols)
    else:
        partition_window = Window.partitionBy(*partition_cols).orderBy(*sort_cols)
    for column, outs in zip(columns, out_cols):
        # The difference keeps the input column's data type; None propagates when
        # either operand is missing (e.g. the first rows of a partition have no lag).
        diff_func = udf(lambda a, b: a - b if a is not None and b is not None else None,
                        self.df.schema[column].dataType)
        for shift, out in zip(shifts, outs):
            # First materialize the lagged value in the output column, then
            # overwrite it with (current - lagged); the statement order matters.
            result_df = result_df.withColumn(out, F.lag(column, shift).over(partition_window))
            result_df = result_df.withColumn(out, diff_func(column, out))
    return FeatureTable(result_df)
def cut_bins(self, columns, bins, labels=None, out_cols=None, drop=True):
    """
    Segment values of the target column(s) into bins, which is also known as bucketization.

    :param columns: str or a list of str, the numeric column(s) to segment into intervals.
    :param bins: int, a list of int or dict.
           If bins is a list, it defines the bins to be used. **NOTE** that for bins of
           length n, there will be **n+1** buckets.
           For example, if bins is [0, 6, 18, 60], the resulting buckets are
           (-inf, 0), [0, 6), [6, 18), [18, 60), [60, inf).
           If bins is an int, it defines the number of equal-width bins in the range of all
           the column values, i.e. from column min to max. **NOTE** that there will be
           **bins+2** resulting buckets in total to take the values below min and beyond max
           into consideration.
           For examples, if bins is 2, the resulting buckets are
           (-inf, col_min), [col_min, (col_min+col_max)/2), [(col_min+col_max)/2, col_max),
           [col_max, inf).
           If bins is a dict, the key should be the input column(s) and the value should be
           int or a list of int to specify the bins as described above.
    :param labels: a list of str or dict, the labels for the returned bins.
           Default is None, and in this case the new bin column would use the integer index to
           encode the interval. Index would start from 0.
           If labels is a list of str, then the corresponding label would be used to replace
           the integer index at the same position. The number of elements in labels should be
           the same as the number of bins.
           If labels is a dict, the key should be the input column(s) and the value should be a
           list of str as described above.
    :param out_cols: str or a list of str, the name of output bucketized column(s).
           Default is None, and in this case the name of each output column will be "column_bin"
           for each input column.
    :param drop: boolean, whether to drop the original column(s). Default is True.

    :return: A new FeatureTable with feature bucket column(s).
    """
    columns = str_to_list(columns, "columns")
    if out_cols:
        out_cols = str_to_list(out_cols, "out_cols")
        assert len(columns) == len(out_cols), "columns and out_cols should have the same length"
    check_col_exists(self.df, columns)
    df_buck = self.df
    for i, column in enumerate(columns):
        temp_out_col = column + "_bin"
        # Renamed from `bin`/`label`, which shadowed the builtin `bin`.
        bin_edges = bins[column] if isinstance(bins, dict) else bins
        col_labels = labels[column] if isinstance(labels, dict) else labels
        if not check_column_numeric(self.df, column):
            raise ValueError("{} should be a numeric column".format(column))
        if isinstance(bin_edges, int):
            # Equal-width edges between the column min and max.
            col_max = self.get_stats(column, "max")[column]
            col_min = self.get_stats(column, "min")[column]
            bin_edges = np.linspace(col_min, col_max, bin_edges + 1, endpoint=True).tolist()
        elif not isinstance(bin_edges, list):
            # Error message fixed: previously read "bins should int, ...".
            raise ValueError("bins should be int, a list of int or dict with column name "
                             "as the key and int or a list of int as the value")
        # Extend with +/-inf so values below min or beyond max get their own buckets.
        bin_edges = [float("-inf")] + bin_edges + [float("inf")]
        # For Bucketizer, inputCol and outputCol must be different.
        bucketizer = Bucketizer(splits=bin_edges, inputCol=column, outputCol=temp_out_col)
        df_buck = bucketizer.setHandleInvalid("keep").transform(df_buck)
        # The output of Bucketizer is float, cast to int.
        df_buck = df_buck.withColumn(temp_out_col, pyspark_col(temp_out_col).cast("int"))
        if col_labels is not None:
            assert isinstance(col_labels, list), \
                "labels should be a list of str or a dict with column name as the " \
                "key and a list of str as the value"
            assert len(col_labels) == len(bin_edges) - 1, \
                "labels should be of length {} to match bins".format(len(bin_edges) - 1)
            to_label = dict(enumerate(col_labels))
            # Bind the mapping as a default argument so the udf does not depend on
            # the loop variable's closure cell (late-binding pitfall).
            udf_label = udf(lambda idx, mapping=to_label: mapping[idx], StringType())
            df_buck = df_buck.withColumn(temp_out_col, udf_label(temp_out_col))
        if out_cols:
            out_col = out_cols[i]
            if out_col == column or drop:  # Replace the input column with the output column
                df_buck = df_buck.drop(column).withColumnRenamed(temp_out_col, out_col)
            else:
                df_buck = df_buck.withColumnRenamed(temp_out_col, out_col)
        elif drop:
            df_buck = df_buck.drop(column)
    return self._clone(df_buck)
class StringIndex(Table):
    """A Table that maps string categorical values (in column col_name) to integer
    indices (in column "id")."""

    def __init__(self, df, col_name):
        super().__init__(df)
        columns = df.columns
        # The frame must carry at least the categorical column and the id column.
        assert len(columns) >= 2, "StringIndex should have >= 2 columns: col_name, id and " \
                                  "other columns"
        assert "id" in columns, "id should be a column of the DataFrame"
        assert col_name in columns, col_name + " should be a column of the DataFrame"
        self.col_name = col_name

    @classmethod
    def read_parquet(cls, paths, col_name=None):
        """
        Loads Parquet files as a StringIndex.

        :param paths: str or a list of str. The path/paths to Parquet file(s).
        :param col_name: str. The column name of the corresponding categorical column. If
               col_name is None, the file name will be used as col_name.

        :return: A StringIndex.
        """
        path_list = paths if isinstance(paths, list) else [paths]
        if col_name is None and len(path_list) >= 1:
            # Fall back to the first file's basename (without extension).
            col_name = os.path.basename(path_list[0]).split(".")[0]
        return cls(Table._read_parquet(path_list), col_name)

    @classmethod
    def from_dict(cls, indices, col_name):
        """
        Creates a StringIndex from a dict mapping each categorical value (str) to its
        index (int).

        :param indices: dict of {str: int}.
        :param col_name: str. The column name of the categorical column.

        :return: A StringIndex.
        """
        spark = OrcaContext.get_spark_session()
        if not isinstance(indices, dict):
            raise ValueError('indices should be dict, but get ' + indices.__class__.__name__)
        if not col_name:
            raise ValueError('col_name should be str, but get None')
        if not isinstance(col_name, str):
            raise ValueError('col_name should be str, but get ' + col_name.__class__.__name__)
        rows = ({col_name: word, 'id': idx} for word, idx in indices.items())
        schema = StructType([StructField(col_name, StringType(), False),
                             StructField("id", IntegerType(), False)])
        df = spark.createDataFrame((Row(**row) for row in rows), schema=schema)
        return cls(df, col_name)

    def to_dict(self):
        """
        Converts the StringIndex to a dict with categorical values as keys and indices
        as values. Only call this when the StringIndex is small.

        :return: A dict for the mapping from string to index.
        """
        columns = self.df.columns
        id_pos = columns.index("id")
        name_pos = columns.index(self.col_name)
        return {row[name_pos]: row[id_pos] for row in self.df.collect()}

    def _clone(self, df):
        return StringIndex(df, self.col_name)

    def write_parquet(self, path, mode="overwrite"):
        """
        Writes the StringIndex to a Parquet file named after col_name under path.

        :param path: str. The directory for the Parquet file; the col_name is used as
               the file's basename.
        :param mode: str. One of "append", "overwrite", "error" or "ignore".
               append: Append the contents of this StringIndex to the existing data.
               overwrite: Overwrite the existing data.
               error: Throw an exception if the data already exists.
               ignore: Silently ignore this operation if the data already exists.
        """
        target = path + "/" + self.col_name + ".parquet"
        write_parquet(self.df, target, mode)
class TargetCode(Table):
    def __init__(self, df, cat_col, out_target_mean):
        """
        Target Encoding output used for encoding new FeatureTables, which consists of the
        encoded categorical column or column group and the target encoded columns (mean
        statistics of the categorical column or column group).

        :param df: Target encoded data.
        :param cat_col: str or list of str. The categorical column or column group encoded in
               the original FeatureTable.
        :param out_target_mean: dict, the key is the target encoded output column in this
               TargetCode, and the value is a tuple of the target column in the original
               FeatureTable together with the target column's global mean in the original
               FeatureTable.
               For example: {"col3_te_target1": ("target1", 3.0)}, and in this case cat_col
               for this TargetCode should be "col3".
        """
        super().__init__(df)
        self.cat_col = cat_col
        self.out_target_mean = out_target_mean
        check_col_str_list_exists(df, cat_col, "cat_col")
        assert isinstance(out_target_mean, dict), "out_target_mean should be dict"

    def _clone(self, df):
        return TargetCode(df, self.cat_col, self.out_target_mean)

    def rename(self, columns):
        """
        Rename columns of this TargetCode, keeping cat_col and out_target_mean in sync.

        :param columns: dict mapping old column names to new ones.

        :return: A new TargetCode; this TargetCode is left unmodified.
        """
        assert isinstance(columns, dict), "columns should be a dictionary of {'old_name1': " \
                                          "'new_name1', 'old_name2': 'new_name2'}"
        new_df = self.df
        # Copy mutable state so renaming never mutates this TargetCode in place
        # (previously the cat_col list and out_target_mean dict were aliased and
        # modified, corrupting self).
        new_cat_col = list(self.cat_col) if isinstance(self.cat_col, list) else self.cat_col
        new_out_target_mean = dict(self.out_target_mean)
        for old_name, new_name in columns.items():
            new_df = new_df.withColumnRenamed(old_name, new_name)
            if isinstance(new_cat_col, str):
                if old_name == new_cat_col:
                    new_cat_col = new_name
            else:
                for i in range(len(new_cat_col)):
                    if new_cat_col[i] == old_name:
                        new_cat_col[i] = new_name
            # This used to be an elif chained after the cat_col branches, so output
            # columns were never renamed in the dict whenever cat_col was a list.
            if old_name in new_out_target_mean:
                new_out_target_mean[new_name] = new_out_target_mean.pop(old_name)
        return TargetCode(new_df, new_cat_col, new_out_target_mean)
| intel-analytics/analytics-zoo | pyzoo/zoo/friesian/feature/table.py | Python | apache-2.0 | 97,400 | [
"ORCA",
"VisIt"
] | 43fba7f793507af0981a4421f5767914fcee15edc471bbf3b4a4c9e3f9bfd651 |
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
import os
import sys
import time
import atexit
import pickle
import numpy as np
from gpaw import debug
from gpaw import dry_run as dry_run_size
from gpaw.utilities import is_contiguous
from gpaw.utilities import gcd
from gpaw.utilities.tools import md5_array
import _gpaw
MASTER = 0
class _Communicator:
def __init__(self, comm, parent=None):
    """Wrap a C-object MPI-communicator.

    Parameters:

    comm: MPI-communicator
        Communicator.

    Attributes:

    ============  ======================================================
    ``size``      Number of ranks in the MPI group.
    ``rank``      Number of this CPU in the MPI group.
    ``parent``    Parent MPI-communicator.
    ============  ======================================================
    """
    self.comm = comm
    self.rank = comm.rank
    self.size = comm.size
    self.parent = parent  # XXX check C-object against comm.parent?
def new_communicator(self, ranks):
    """Create a new MPI communicator for a subset of ranks in a group.

    Must be called with identical arguments by all relevant processes.
    A valid communicator is only returned on the processes included in
    the new group; every other rank gets None.

    Parameters:

    ranks: ndarray (type int)
        Ranks (relative to this communicator) to include in the new
        group. The rank attribute of the new communicator refers to the
        position within that subset.
    """
    subcomm = self.comm.new_communicator(ranks)
    # The C layer hands back None on ranks outside the new group.
    return None if subcomm is None else _Communicator(subcomm, parent=self)
def sum(self, a, root=-1):
    """Sum numerical data over all ranks with an MPI reduce operation.

    Scalars (int, float or complex) are immutable, so their global sum
    is returned. Arrays are reduced element-wise in-place and nothing
    is returned.

    root: int (default -1)
        Rank on which the outcome is valid; -1 broadcasts the result
        back to every process.
    """
    if isinstance(a, (int, float, complex)):
        return self.comm.sum(a, root)
    dtype = a.dtype
    assert dtype in (int, float, complex)
    assert is_contiguous(a, dtype)
    assert root == -1 or 0 <= root < self.size
    self.comm.sum(a, root)
def product(self, a, root=-1):
    """Multiply numerical data across all ranks with an MPI reduce operation.

    Forms the global product of the local values. Scalars (int or
    float; complex is not supported) are immutable, so the product is
    returned. Arrays are reduced element-wise in-place and nothing is
    returned.

    root: int (default -1)
        Rank on which the outcome is valid; -1 broadcasts the result
        back to every process.
    """
    if isinstance(a, (int, float)):
        return self.comm.product(a, root)
    dtype = a.dtype
    assert dtype in (int, float)
    assert is_contiguous(a, dtype)
    assert root == -1 or 0 <= root < self.size
    self.comm.product(a, root)
def max(self, a, root=-1):
    """Find the global maximum with an MPI reduce operation.

    The maximum over all ranks' local values is found. Scalars (int or
    float; complex is not supported) are immutable, so the maximum is
    returned. Arrays are reduced element-wise in-place and nothing is
    returned.

    root: int (default -1)
        Rank on which the outcome is valid; -1 broadcasts the result
        back to every process.
    """
    if isinstance(a, (int, float)):
        return self.comm.max(a, root)
    dtype = a.dtype
    assert dtype in (int, float)
    assert is_contiguous(a, dtype)
    assert root == -1 or 0 <= root < self.size
    self.comm.max(a, root)
def min(self, a, root=-1):
    """Find the global minimum with an MPI reduce operation.

    The minimum over all ranks' local values is found. Scalars (int or
    float; complex is not supported) are immutable, so the minimum is
    returned. Arrays are reduced element-wise in-place and nothing is
    returned.

    root: int (default -1)
        Rank on which the outcome is valid; -1 broadcasts the result
        back to every process.
    """
    if isinstance(a, (int, float)):
        return self.comm.min(a, root)
    dtype = a.dtype
    assert dtype in (int, float)
    assert is_contiguous(a, dtype)
    assert root == -1 or 0 <= root < self.size
    self.comm.min(a, root)
def scatter(self, a, b, root):
    """Distribute data from the root rank to all processes in the group.

    The reverse operation is ``gather``.

    Parameters:

    a: ndarray (only used on root; pass None on the other ranks)
        Send buffer on the root rank.
    b: ndarray
        Local receive buffer. Its size multiplied by the number of
        processes in the group must equal the size of the source array
        on root.
    root: int
        Rank of the process holding the source data.

    Example::

        # The master has all the interesting data. Distribute it.
        if comm.rank == 0:
            data = np.random.normal(size=N*comm.size)
        else:
            data = None
        mydata = np.empty(N, dtype=float)
        comm.scatter(data, mydata, 0)

        # .. which is equivalent to ..
        if comm.rank == 0:
            # Extract my part directly
            mydata[:] = data[0:N]
            # Distribute parts to the slaves
            for rank in range(1, comm.size):
                buf = data[rank*N:(rank+1)*N]
                comm.send(buf, rank, tag=123)
        else:
            # Receive from the master
            comm.receive(mydata, 0, tag=123)
    """
    if self.rank == root:
        # The send buffer exists only on root; validate it there.
        assert a.dtype == b.dtype
        assert a.size == self.size * b.size
        assert a.flags.contiguous
    assert b.flags.contiguous
    assert 0 <= root < self.size
    self.comm.scatter(a, b, root)
def all_gather(self, a, b):
"""Gather data from all ranks onto all processes in a group.
Parameters:
a: ndarray
Source of the data to gather, i.e. send buffer of this rank.
b: ndarray
Destination of the distributed data, i.e. receive buffer.
The size of this array must match the size of the distributed
source arrays multiplied by the number of process in the group.
Example::
# All ranks have parts of interesting data. Gather on all ranks.
mydata = np.random.normal(size=N)
data = np.empty(N*comm.size, dtype=float)
comm.all_gather(mydata, data)
# .. which is equivalent to ..
if comm.rank == 0:
# Insert my part directly
data[0:N] = mydata
# Gather parts from the slaves
buf = np.empty(N, dtype=float)
for rank in range(1, comm.size):
comm.receive(buf, rank, tag=123)
data[rank*N:(rank+1)*N] = buf
else:
# Send to the master
comm.send(mydata, 0, tag=123)
# Broadcast from master to all slaves
comm.broadcast(data, 0)
"""
tc = a.dtype
assert a.flags.contiguous
assert b.flags.contiguous
assert b.dtype == a.dtype
assert (b.shape[0] == self.size and a.shape == b.shape[1:] or
a.size * self.size == b.size)
self.comm.all_gather(a, b)
def gather(self, a, root, b=None):
"""Gather data from all ranks onto a single process in a group.
Parameters:
a: ndarray
Source of the data to gather, i.e. send buffer of this rank.
root: int
Rank of the root process, on which the data is to be gathered.
b: ndarray (ignored on all ranks different from root; default None)
Destination of the distributed data, i.e. root's receive buffer.
The size of this array must match the size of the distributed
source arrays multiplied by the number of process in the group.
The reverse operation is ``scatter``.
Example::
# All ranks have parts of interesting data. Gather it on master.
mydata = np.random.normal(size=N)
if comm.rank == 0:
data = np.empty(N*comm.size, dtype=float)
else:
data = None
comm.gather(mydata, 0, data)
# .. which is equivalent to ..
if comm.rank == 0:
# Extract my part directly
data[0:N] = mydata
# Gather parts from the slaves
buf = np.empty(N, dtype=float)
for rank in range(1, comm.size):
comm.receive(buf, rank, tag=123)
data[rank*N:(rank+1)*N] = buf
else:
# Send to the master
comm.send(mydata, 0, tag=123)
"""
assert a.flags.contiguous
assert 0 <= root < self.size
if root == self.rank:
assert b.flags.contiguous and b.dtype == a.dtype
assert (b.shape[0] == self.size and a.shape == b.shape[1:] or
a.size * self.size == b.size)
self.comm.gather(a, root, b)
else:
assert b is None
self.comm.gather(a, root)
def broadcast(self, a, root):
"""Share data from a single process to all ranks in a group.
Parameters:
a: ndarray
Data, i.e. send buffer on root rank, receive buffer elsewhere.
Note that after the broadcast, all ranks have the same data.
root: int
Rank of the root process, from which the data is to be shared.
Example::
# All ranks have parts of interesting data. Take a given index.
mydata[:] = np.random.normal(size=N)
# Who has the element at global index 13? Everybody needs it!
index = 13
root, myindex = divmod(index, N)
element = np.empty(1, dtype=float)
if comm.rank == root:
# This process has the requested element so extract it
element[:] = mydata[myindex]
# Broadcast from owner to everyone else
comm.broadcast(element, root)
# .. which is equivalent to ..
if comm.rank == root:
# We are root so send it to the other ranks
for rank in range(comm.size):
if rank != root:
comm.send(element, rank, tag=123)
else:
# We don't have it so receive from root
comm.receive(element, root, tag=123)
"""
assert 0 <= root < self.size
assert is_contiguous(a)
self.comm.broadcast(a, root)
    def sendreceive(self, a, dest, b, src, sendtag=123, recvtag=123):
        """Send array *a* to rank *dest* while receiving *b* from rank *src*."""
        assert 0 <= dest < self.size
        assert dest != self.rank  # self-messaging would deadlock
        assert is_contiguous(a)
        assert 0 <= src < self.size
        assert src != self.rank
        assert is_contiguous(b)
        return self.comm.sendreceive(a, dest, b, src, sendtag, recvtag)

    def send(self, a, dest, tag=123, block=True):
        """Send array *a* to rank *dest*; returns a request when block=False."""
        assert 0 <= dest < self.size
        assert dest != self.rank
        assert is_contiguous(a)
        if not block:
            # Refcount sanity check for non-blocking sends, disabled upstream.
            pass #assert sys.getrefcount(a) > 3
        return self.comm.send(a, dest, tag, block)

    def ssend(self, a, dest, tag=123):
        """Synchronously send array *a* to rank *dest*."""
        assert 0 <= dest < self.size
        assert dest != self.rank
        assert is_contiguous(a)
        return self.comm.ssend(a, dest, tag)

    def receive(self, a, src, tag=123, block=True):
        """Receive into array *a* from rank *src*; request when block=False."""
        assert 0 <= src < self.size
        assert src != self.rank
        assert is_contiguous(a)
        return self.comm.receive(a, src, tag, block)
    def test(self, request):
        """Test whether a non-blocking MPI operation has completed. A boolean
        is returned immediately and the request is not modified in any way.

        Parameters:

        request: MPI request
            Request e.g. returned from send/receive when block=False is used.
        """
        return self.comm.test(request)

    def testall(self, requests):
        """Test whether non-blocking MPI operations have completed. A boolean
        is returned immediately but requests may have been deallocated as a
        result, provided they have completed before or during this invocation.

        Parameters:

        request: MPI request
            Request e.g. returned from send/receive when block=False is used.
        """
        return self.comm.testall(requests)  # may deallocate requests!

    def wait(self, request):
        """Wait for a non-blocking MPI operation to complete before returning.

        Parameters:

        request: MPI request
            Request e.g. returned from send/receive when block=False is used.
        """
        self.comm.wait(request)

    def waitall(self, requests):
        """Wait for non-blocking MPI operations to complete before returning.

        Parameters:

        requests: list
            List of MPI requests e.g. aggregated from returned requests of
            multiple send/receive calls where block=False was used.
        """
        self.comm.waitall(requests)

    def abort(self, errcode):
        """Terminate MPI execution environment of all tasks in the group.
        This function only returns in the event of an error occurring.

        Parameters:

        errcode: int
            Error code to return to the invoking environment.
        """
        return self.comm.abort(errcode)

    def name(self):
        """Return the name of the processor as a string."""
        return self.comm.name()

    def barrier(self):
        """Block execution until all process have reached this point."""
        self.comm.barrier()
    def get_members(self):
        """Return the subset of processes which are members of this MPI group
        in terms of the ranks they are assigned on the parent communicator.
        For the world communicator, this is all integers up to ``size``.

        Example::

          >>> world.rank, world.size
          (3, 4)
          >>> world.get_members()
          array([0, 1, 2, 3])
          >>> comm = world.new_communicator(array([2, 3]))
          >>> comm.rank, comm.size
          (1, 2)
          >>> comm.get_members()
          array([2, 3])
          >>> comm.get_members()[comm.rank] == world.rank
          True
        """
        return self.comm.get_members()

    def get_c_object(self):
        """Return the C-object wrapped by this debug interface.

        Whenever a communicator object is passed to C code, that object
        must be a proper C-object - *not* e.g. this debug wrapper.  For
        this reason, the C-communicator object has a get_c_object()
        implementation which returns itself; thus, always call
        comm.get_c_object() and pass the resulting object to the C code.
        """
        c_obj = self.comm.get_c_object()
        # Sanity check: the unwrapped object must be the real C communicator.
        assert type(c_obj) is _gpaw.Communicator
        return c_obj
# Serial communicator
class SerialCommunicator:
    """Stand-in communicator for single-process runs.

    Mimics the MPI communicator interface with ``size == 1``, so the
    collective operations degenerate to identities or local copies.
    """
    size = 1
    rank = 0

    def __init__(self, parent=None):
        self.parent = parent

    def sum(self, array, root=-1):
        # Summing over a single rank is the identity: scalars are
        # returned, arrays are left untouched (in-place semantics).
        if isinstance(array, (int, float, complex)):
            return array

    def min(self, value, root=-1):
        return value

    def max(self, value, root=-1):
        return value

    def scatter(self, s, r, root):
        r[:] = s

    def gather(self, a, root, b):
        b[:] = a

    def all_gather(self, a, b):
        b[:] = a

    def broadcast(self, buf, root):
        pass

    def send(self, buff, root, tag=123, block=True):
        pass

    def barrier(self):
        pass

    def new_communicator(self, ranks):
        # Only rank 0 exists; groups not containing it get no communicator.
        if self.rank not in ranks:
            return None
        return SerialCommunicator(parent=self)

    def test(self, request):
        return 1

    def testall(self, requests):
        return 1

    def wait(self, request):
        raise NotImplementedError('Calls to mpi wait should not happen in '
                                  'serial mode')

    def waitall(self, requests):
        if not requests:
            return
        raise NotImplementedError('Calls to mpi waitall should not happen in '
                                  'serial mode')

    def get_members(self):
        return np.array([0])

    def get_c_object(self):
        raise NotImplementedError('Should not get C-object for serial comm')
# Module-level communicator singletons.
serial_comm = SerialCommunicator()

try:
    world = _gpaw.Communicator()  # available when compiled with MPI support
except AttributeError:
    world = serial_comm  # serial build: fall back to the dummy communicator
class DryRunCommunicator(SerialCommunicator):
    # Fakes an arbitrary communicator size without doing any communication;
    # used when simulating a parallel run on a single process.
    def __init__(self, size=1, parent=None):
        self.size = size
        self.parent = parent

    def new_communicator(self, ranks):
        return DryRunCommunicator(len(ranks), parent=self)

    def get_c_object(self):
        return None  # won't actually be passed to C
if dry_run_size > 1:
    world = DryRunCommunicator(dry_run_size)

if debug:
    # Wrap the communicators in the argument-checking debug proxy so that
    # every MPI call is validated before reaching the C layer.
    serial_comm = _Communicator(serial_comm)
    world = _Communicator(world)

# Convenience module-level aliases for the world communicator.
size = world.size
rank = world.rank
parallel = (size > 1)
def distribute_cpus(parsize_domain, parsize_bands,
                    nspins, nibzkpts, comm=world,
                    idiotproof=True, mode='fd'):
    """Distribute k-points/spins to processors.

    Construct communicators for parallelization over
    k-points/spins and for parallelization using domain
    decomposition."""
    size = comm.size
    rank = comm.rank
    # Total number of spin/k-point work units:
    nsk = nspins * nibzkpts
    if mode in ['fd', 'lcao']:
        if parsize_bands is None:
            parsize_bands = 1
        if parsize_domain is not None:
            if type(parsize_domain) is int:
                ndomains = parsize_domain
            else:
                # Domain decomposition given as 3 grid divisions.
                ndomains = (parsize_domain[0] *
                            parsize_domain[1] *
                            parsize_domain[2])
            assert (size // parsize_bands) % ndomains == 0
        else:
            # Pick the largest domain count consistent with an even split
            # of the nsk * parsize_bands work units over all ranks.
            ntot = nsk * parsize_bands
            ndomains = size // gcd(ntot, size)
    else:
        # Plane wave mode:
        ndomains = 1
        if parsize_bands is None:
            parsize_bands = size // gcd(nsk, size)
    assert size % parsize_bands == 0

    # How many spin/k-point combinations do we get per node:
    nu, x = divmod(nsk, size // parsize_bands // ndomains)
    assert x == 0 or nu >= 2 or not idiotproof, 'load imbalance!'

    # Domain group: ndomains consecutive ranks.
    r0 = (rank // ndomains) * ndomains
    ranks = np.arange(r0, r0 + ndomains)
    domain_comm = comm.new_communicator(ranks)

    # k-point group: ranks separated by one full domain*band block.
    r0 = rank % (ndomains * parsize_bands)
    ranks = np.arange(r0, r0 + size, ndomains * parsize_bands)
    kpt_comm = comm.new_communicator(ranks)

    # Band group: ranks within one block, separated by the domain count.
    r0 = rank % ndomains + kpt_comm.rank * (ndomains * parsize_bands)
    ranks = np.arange(r0, r0 + (ndomains * parsize_bands), ndomains)
    band_comm = comm.new_communicator(ranks)

    # The three group sizes must tile the whole communicator exactly.
    assert size == domain_comm.size * kpt_comm.size * band_comm.size

    return domain_comm, kpt_comm, band_comm
def compare_atoms(atoms, comm=world):
    """Check whether atoms objects are identical on all processors.

    Returns True when positions, cell, periodic boundary conditions and
    initial magnetic moments agree across *comm*.  Rank 0's positions are
    broadcast to every rank in either case, so positions end up consistent.
    """
    # Construct fingerprint:
    # ASE may return slightly different atomic positions (e.g. due
    # to MKL) so compare only first 8 decimals of positions
    fingerprint = np.array([md5_array(array, numeric=True) for array in
                            [atoms.positions.round(8),
                             atoms.cell,
                             atoms.pbc * 1.0,
                             atoms.get_initial_magnetic_moments()]])

    # Compare fingerprints:
    fingerprints = np.empty((comm.size, 4), fingerprint.dtype)
    comm.all_gather(fingerprint, fingerprints)
    # Peak-to-peak across ranks: a nonzero entry marks a disagreeing item.
    mismatches = fingerprints.ptp(0)

    if debug:
        dumpfile = 'compare_atoms'
        for i in np.argwhere(mismatches).ravel():
            itemname = ['positions', 'cell', 'pbc', 'magmoms'][i]
            itemfps = fingerprints[:, i]
            itemdata = [atoms.positions,
                        atoms.cell,
                        atoms.pbc * 1.0,
                        atoms.get_initial_magnetic_moments()][i]
            if comm.rank == 0:
                print 'DEBUG: compare_atoms failed for %s' % itemname
                itemfps.dump('%s_fps_%s.pickle' % (dumpfile, itemname))
            # Every rank dumps its own copy of the disagreeing data.
            itemdata.dump('%s_r%04d_%s.pickle' % (dumpfile, comm.rank,
                                                  itemname))

    # Use only the atomic positions from rank 0
    comm.broadcast(atoms.positions, 0)
    return not mismatches.any()
def broadcast(obj, root=0, comm=world):
    """Pickle *obj* on *root*, broadcast it, and return it on every rank.

    On the root rank the original object is returned unchanged; all other
    ranks must pass ``obj=None`` and get an unpickled copy back.
    """
    if comm.rank == root:
        assert obj is not None
        payload = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    else:
        assert obj is None
        payload = None
    payload = broadcast_string(payload, root, comm)
    if comm.rank == root:
        # No need to round-trip the sender's object through pickle.
        return obj
    else:
        return pickle.loads(payload)
def broadcast_string(string=None, root=0, comm=world):
    """Broadcast a Python string across an MPI communicator and return it.

    NB: Strings are immutable objects in Python, so the input is unchanged."""
    if comm.rank == root:
        assert isinstance(string, str)
        # Broadcast the length first so receivers can size their buffers.
        n = np.array(len(string), int)
    else:
        assert string is None
        n = np.zeros(1, int)
    comm.broadcast(n, root)
    # Then broadcast the raw bytes as an int8 array.
    if comm.rank == root:
        string = np.fromstring(string, np.int8)
    else:
        string = np.zeros(n, np.int8)
    comm.broadcast(string, root)
    return string.tostring()
def send_string(string, rank, comm=world):
    """Send a Python string to *rank*; must be matched by receive_string."""
    # Length first, then the raw bytes.
    comm.send(np.array(len(string)), rank)
    comm.send(np.fromstring(string, np.int8), rank)


def receive_string(rank, comm=world):
    """Receive a Python string from *rank*; matches send_string."""
    n = np.array(0)
    comm.receive(n, rank)
    string = np.empty(n, np.int8)
    comm.receive(string, rank)
    return string.tostring()
def alltoallv_string(send_dict, comm=world):
    """Exchange one string with every rank in *comm*.

    Parameters:

    send_dict: dict
        Maps destination rank -> string to send to that rank.  Ranks
        absent from the dict are sent a zero-length message.
    comm: MPI communicator

    Returns a dict mapping source rank -> string received from that rank.
    """
    # Byte counts and displacements of the outgoing messages.
    scounts = np.zeros(comm.size, dtype=np.int)
    sdispls = np.zeros(comm.size, dtype=np.int)
    stotal = 0
    for proc in range(comm.size):
        if proc in send_dict:
            data = np.fromstring(send_dict[proc], np.int8)
            scounts[proc] = data.size
            sdispls[proc] = stotal
            stotal += scounts[proc]

    # Exchange the counts so every rank knows how many bytes to expect.
    rcounts = np.zeros(comm.size, dtype=np.int)
    comm.alltoallv(scounts, np.ones(comm.size, dtype=np.int),
                   np.arange(comm.size, dtype=np.int),
                   rcounts, np.ones(comm.size, dtype=np.int),
                   np.arange(comm.size, dtype=np.int))
    rdispls = np.zeros(comm.size, dtype=np.int)
    rtotal = 0
    for proc in range(comm.size):
        rdispls[proc] = rtotal
        # BUGFIX: this increment used to appear twice, doubling the
        # displacements and allocating a receive buffer twice too large.
        rtotal += rcounts[proc]

    # Pack the outgoing strings into one contiguous byte buffer.
    # BUGFIX: iterate only over ranks present in send_dict -- iterating
    # range(comm.size) raised KeyError for ranks with nothing to send.
    sbuffer = np.zeros(stotal, dtype=np.int8)
    for proc in send_dict:
        sbuffer[sdispls[proc]:(sdispls[proc] + scounts[proc])] = \
            np.fromstring(send_dict[proc], np.int8)

    rbuffer = np.zeros(rtotal, dtype=np.int8)
    comm.alltoallv(sbuffer, scounts, sdispls, rbuffer, rcounts, rdispls)

    # Unpack the received bytes back into per-rank strings.
    rdict = {}
    for proc in range(comm.size):
        rdict[proc] = rbuffer[rdispls[proc]:(rdispls[proc] + rcounts[proc])].tostring()
    return rdict
def ibarrier(timeout=None, root=0, tag=123, comm=world):
    """Non-blocking barrier returning a list of requests to wait for.

    An optional time-out may be given, turning the call into a blocking
    barrier with an upper time limit, beyond which an exception is raised."""
    requests = []
    byte = np.ones(1, dtype=np.int8)
    if comm.rank == root:
        # Root exchanges a one-byte token with every other rank.
        for rank in range(0, root) + range(root + 1, comm.size):  # everybody else
            rbuf, sbuf = np.empty_like(byte), byte.copy()
            requests.append(comm.send(sbuf, rank, tag=2 * tag + 0,
                                      block=False))
            requests.append(comm.receive(rbuf, rank, tag=2 * tag + 1,
                                         block=False))
    else:
        # Non-root ranks exchange a single token with the root.
        rbuf, sbuf = np.empty_like(byte), byte
        requests.append(comm.receive(rbuf, root, tag=2 * tag + 0, block=False))
        requests.append(comm.send(sbuf, root, tag=2 * tag + 1, block=False))

    if comm.size == 1 or timeout is None:
        return requests

    # Blocking variant: poll until all requests complete or time runs out.
    t0 = time.time()
    while not comm.testall(requests):  # automatic clean-up upon success
        if time.time() - t0 > timeout:
            raise RuntimeError('MPI barrier timeout.')
    return []
def run(iterators):
    """Run through a list of iterators in lockstep, one step at a time.

    Returns the list of values produced by the last complete step, or
    None when *iterators* is empty, contains an already-exhausted
    iterator, or is a single (non-list) iterator that is simply drained.
    """
    if not isinstance(iterators, list):
        # It's a single iterator - empty it:
        for i in iterators:
            pass
        return
    if len(iterators) == 0:
        return
    # BUGFIX: initialize before the loop; previously an iterator that was
    # empty on the very first step caused an UnboundLocalError on return.
    results = None
    while True:
        try:
            # next(it) replaces Python-2-only it.next() (works on 2.6+).
            results = [next(it) for it in iterators]
        except StopIteration:
            return results
class Parallelization:
    """Book-keeping for splitting a communicator into k-point, band and
    domain groups.

    The three group sizes must multiply to the communicator size; any
    size left unspecified is chosen automatically by autofinalize().
    """
    def __init__(self, comm, nspinkpts):
        self.comm = comm
        self.size = comm.size
        self.nspinkpts = nspinkpts

        # Group sizes; None means "not decided yet".
        self.kpt = None
        self.domain = None
        self.band = None

        self.nclaimed = 1
        self.navail = comm.size

    def set(self, kpt=None, domain=None, band=None):
        """Fix one or more group sizes, validating divisibility.

        Raises ValueError if a requested size does not divide the
        communicator size.
        """
        if kpt is not None:
            self.kpt = kpt
        if domain is not None:
            self.domain = domain
        if band is not None:
            self.band = band

        nclaimed = 1
        for group, name in zip([self.kpt, self.domain, self.band],
                               ['k-point', 'domain', 'band']):
            if group is not None:
                if self.size % group != 0:
                    # FIX: corrected 'paralllize' typo in the message.
                    msg = ('Cannot parallelize as the '
                           'communicator size %d is not divisible by the '
                           'requested number %d of ranks for %s '
                           'parallelization' % (self.size, group, name))
                    raise ValueError(msg)
                nclaimed *= group
        navail = self.size // nclaimed

        assert self.size % nclaimed == 0
        assert self.size % navail == 0

        self.navail = navail
        self.nclaimed = nclaimed

    def get_communicator_sizes(self, kpt=None, domain=None, band=None):
        """Return the finalized (kpt, domain, band) group sizes."""
        self.set(kpt=kpt, domain=domain, band=band)
        self.autofinalize()
        return self.kpt, self.domain, self.band

    def build_communicators(self, kpt=None, domain=None, band=None):
        """Return (domain_comm, kpt_comm, band_comm) built hierarchically."""
        self.set(kpt=kpt, domain=domain, band=band)
        self.autofinalize()

        comm = self.comm
        rank = comm.rank
        communicators = {}
        parent_stride = self.size
        offset = 0

        # Build communicators in hierarchical manner
        # The ranks in the first group have largest separation while
        # the ranks in the last group are next to each other
        for group, name in zip([self.kpt, self.band, self.domain],
                               ['k-point', 'band', 'domain']):
            stride = parent_stride // group
            # First rank in this group
            r0 = rank % stride + offset
            # Last rank in this group
            r1 = r0 + stride * group
            ranks = np.arange(r0, r1, stride)
            communicators[name] = comm.new_communicator(ranks)
            parent_stride = stride
            # Offset for the next communicator
            offset += communicators[name].rank * stride

        # BUGFIX: removed an unreachable second return statement that
        # referenced undefined names (domain_comm, kpt_comm, band_comm).
        return (communicators['domain'], communicators['k-point'],
                communicators['band'])

    def autofinalize(self):
        """Fill in any undecided group sizes so every CPU is claimed."""
        if self.kpt is None:
            self.set(kpt=self.get_optimal_kpt_parallelization())
        if self.domain is None:
            self.set(domain=self.navail)
        if self.band is None:
            self.set(band=self.navail)

        if self.navail > 1:
            raise RuntimeError('All the CPUs must be used')

    def get_optimal_kpt_parallelization(self, kptprioritypower=1.4):
        """Choose a k-point group size, favouring many k-point ranks."""
        if self.domain and self.band:
            # Try to use all the CPUs for k-point parallelization
            ncpus = min(self.nspinkpts, self.navail)
            return ncpus
        ncpuvalues, wastevalues = self.find_kpt_parallelizations()
        # Score each candidate: more k-point CPUs are favoured
        # (power > 1), wasteful splits are penalized via the exponent.
        scores = ((self.navail // ncpuvalues)
                  * ncpuvalues**kptprioritypower)**(1.0 - wastevalues)
        arg = np.argmax(scores)
        ncpus = ncpuvalues[arg]
        return ncpus

    def find_kpt_parallelizations(self):
        """Enumerate candidate k-point CPU counts and their waste fractions."""
        nspinkpts = self.nspinkpts
        ncpuvalues = []
        wastevalues = []

        ncpus = nspinkpts
        while ncpus > 0:
            if self.navail % ncpus == 0:
                # Ceil division: the largest number of k-points any one
                # rank group has to carry.
                nkptsmax = -(-nspinkpts // ncpus)
                effort = nkptsmax * ncpus
                efficiency = nspinkpts / float(effort)
                waste = 1.0 - efficiency
                wastevalues.append(waste)
                ncpuvalues.append(ncpus)
            ncpus -= 1
        return np.array(ncpuvalues), np.array(wastevalues)
def cleanup():
    """Exit handler: abort MPI when the script died from an exception.

    Registered with atexit; without this, one crashed rank would leave
    the remaining ranks waiting for it indefinitely.
    """
    error = getattr(sys, 'last_type', None)
    if error is not None:  # else: Python script completed or raise SystemExit
        if parallel and not (dry_run_size > 1):
            sys.stdout.flush()
            sys.stderr.write(('GPAW CLEANUP (node %d): %s occurred. '
                              'Calling MPI_Abort!\n') % (world.rank, error))
            sys.stderr.flush()
            # Give other nodes a moment to crash by themselves (perhaps
            # producing helpful error messages)
            time.sleep(10)
            world.abort(42)
def exit(error='Manual exit'):
    """Shut down all MPI tasks in an orderly fashion.

    Note that exit must be called on *all* MPI tasks.
    """
    atexit._exithandlers = []  # not needed because we are intentionally exiting
    if parallel and not (dry_run_size > 1):
        sys.stdout.flush()
        sys.stderr.write(('GPAW CLEANUP (node %d): %s occurred. ' +
                          'Calling MPI_Finalize!\n') % (world.rank, error))
        sys.stderr.flush()
    else:
        # BUGFIX: this used to call cleanup(error), but cleanup() takes no
        # arguments and the call raised TypeError on the serial path.
        cleanup()
    world.barrier()  # sync up before exiting
    sys.exit()  # quit for serial case, return to _gpaw.c for parallel case

atexit.register(cleanup)
| robwarm/gpaw-symm | gpaw/mpi/__init__.py | Python | gpl-3.0 | 33,158 | [
"ASE",
"GPAW"
] | 446d7c32fdc1883c52eefcbea072ce151d647926f5b2b2da8ba5aee9252e10a1 |
""" TokenAgent
This agent inspect all elements, and resets their tokens if necessary.
The following options can be set for the TokenAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN TokenAgent
:end-before: ##END
:dedent: 2
:caption: TokenAgent options
"""
__RCSID__ = '$Id$'
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
AGENT_NAME = 'ResourceStatus/TokenAgent'
class TokenAgent( AgentModule ):
  '''
    TokenAgent is in charge of checking tokens assigned on resources.
    Notifications are sent to those users owning expiring tokens.
  '''

  # Token owner used by RSS itself; entries owned by it are never "user tokens"
  __rssToken = 'rs_svc'

  def __init__( self, *args, **kwargs ):
    ''' c'tor
    '''

    AgentModule.__init__( self, *args, **kwargs )

    # Hours before expiration at which owners are warned
    self.notifyHours = 12
    # Optional address that receives a summary of all expired/expiring tokens
    self.adminMail = ''

    self.rsClient = None
    self.tokenDict = {}
    self.diracAdmin = None

  def initialize( self ):
    ''' TokenAgent initialization
    '''

    self.notifyHours = self.am_getOption( 'notifyHours', self.notifyHours )
    self.adminMail = self.am_getOption( 'adminMail', self.adminMail )

    self.rsClient = ResourceStatusClient()
    self.diracAdmin = DiracAdmin()

    return S_OK()

  def execute( self ):
    '''
      Looks for user tokens. If they are expired, or expiring, it notifies users.
    '''

    # Initialized here, as it is needed empty at the beginning of the execution
    self.tokenDict = {}

    elements = ( 'Site', 'Resource', 'Node' )

    for element in elements:

      self.log.info( 'Processing %s' % element )

      interestingTokens = self._getInterestingTokens( element )
      if not interestingTokens[ 'OK' ]:
        self.log.error( interestingTokens[ 'Message' ] )
        continue
      interestingTokens = interestingTokens[ 'Value' ]

      processTokens = self._processTokens( element, interestingTokens )
      if not processTokens[ 'OK' ]:
        self.log.error( processTokens[ 'Message' ] )
        continue

    notificationResult = self._notifyOfTokens()
    if not notificationResult[ 'OK' ]:
      self.log.error( notificationResult[ 'Message' ] )

    return S_OK()

  def _getInterestingTokens( self, element ):
    '''
      Given an element, picks all the entries with TokenExpiration < now + X<hours>
      If the TokenOwner is not the rssToken ( rs_svc ), it is selected.
    '''

    tokenExpLimit = datetime.utcnow() + timedelta( hours = self.notifyHours )

    tokenElements = self.rsClient.selectStatusElement( element, 'Status',
                                                       meta = { 'older' : ( 'TokenExpiration', tokenExpLimit ) } )
    if not tokenElements[ 'OK' ]:
      return tokenElements

    tokenColumns = tokenElements[ 'Columns' ]
    tokenElements = tokenElements[ 'Value' ]

    interestingTokens = []
    for tokenElement in tokenElements:
      tokenElement = dict( zip( tokenColumns, tokenElement ) )
      # Tokens held by RSS itself do not need a user notification
      if tokenElement[ 'TokenOwner' ] != self.__rssToken:
        interestingTokens.append( tokenElement )

    return S_OK( interestingTokens )

  def _processTokens( self, element, tokenElements ):
    '''
      Given an element and a list of interesting token elements, updates the
      database if the token is expired, logs a message and adds
    '''

    never = datetime.max

    for tokenElement in tokenElements:

      try:
        name = tokenElement[ 'Name' ]
        statusType = tokenElement[ 'StatusType' ]
        status = tokenElement[ 'Status' ]
        tokenOwner = tokenElement[ 'TokenOwner' ]
        tokenExpiration = tokenElement[ 'TokenExpiration' ]
      except KeyError as e:
        return S_ERROR( e )

      # If token has already expired, RSS takes it back
      if tokenExpiration < datetime.utcnow():
        _msg = '%s with statusType "%s" and owner %s EXPIRED'
        self.log.info( _msg % ( name, statusType, tokenOwner ) )

        result = self.rsClient.addOrModifyStatusElement( element, 'Status', name = name,
                                                         statusType = statusType,
                                                         tokenOwner = self.__rssToken,
                                                         tokenExpiration = never )
        if not result[ 'OK' ]:
          return result

      else:
        _msg = '%s with statusType "%s" and owner %s -> %s'
        self.log.info( _msg % ( name, statusType, tokenOwner, tokenExpiration ) )

      # Collect per-owner for the notification step
      if tokenOwner not in self.tokenDict:
        self.tokenDict[ tokenOwner ] = []

      self.tokenDict[ tokenOwner ].append( [ tokenOwner, element, name, statusType, status, tokenExpiration ] )

    return S_OK()

  def _notifyOfTokens( self ):
    '''
      Splits interesting tokens between expired and expiring. Also splits them
      among users. It ends sending notifications to the users.
    '''

    now = datetime.utcnow()

    adminExpired = []
    adminExpiring = []

    for tokenOwner, tokenLists in self.tokenDict.items():

      expired = []
      expiring = []

      for tokenList in tokenLists:
        # Index 5 is the TokenExpiration timestamp
        if tokenList[ 5 ] < now:
          expired.append( tokenList )
          adminExpired.append( tokenList )
        else:
          expiring.append( tokenList )
          adminExpiring.append( tokenList )

      resNotify = self._notify( tokenOwner, expired, expiring )
      if not resNotify[ 'OK' ]:
        self.log.error( 'Failed to notify token owner', resNotify[ 'Message' ] )

    if (adminExpired or adminExpiring) and self.adminMail:
      return self._notify(self.adminMail, adminExpired, adminExpiring)

    return S_OK()

  def _notify( self, tokenOwner, expired, expiring ):
    '''
      Given a token owner and a list of expired and expiring tokens, sends an
      email to the user.
    '''

    subject = 'RSS token summary for tokenOwner %s' % tokenOwner

    mail = '\nEXPIRED tokens ( RSS has taken control of them )\n'
    for tokenList in expired:
      mail += ' '.join( [ str(x) for x in tokenList ] )
      mail += '\n'

    # BUGFIX: this line used plain assignment ( mail = ... ), silently
    # discarding the whole EXPIRED section assembled just above.
    mail += '\nEXPIRING tokens ( RSS will take control of them )\n'
    for tokenList in expiring:
      mail += ' '.join( [ str(x) for x in tokenList ] )
      mail += '\n'

    mail += "\n\n You can extend for another 24 hours using the web interface (Set token -> Acquire)\n"
    mail += " Or you can use the dirac-rss-set-token script\n\n"
    mail += "Through the same interfaces you can release the token any time\n"

    # FIXME: you can re-take control of them using this or that...

    resEmail = self.diracAdmin.sendMail( tokenOwner, subject, mail )
    if not resEmail[ 'OK' ]:
      return S_ERROR( 'Cannot send email to user "%s"' % tokenOwner )

    return resEmail
| andresailer/DIRAC | ResourceStatusSystem/Agent/TokenAgent.py | Python | gpl-3.0 | 6,969 | [
"DIRAC"
] | fb7f7eb1524d598512b96e1d9cb07685537c3c090069f09397f6a730e83c90a1 |
# Settings for Zulip Voyager
### MANDATORY SETTINGS
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.
# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com
EXTERNAL_HOST = 'zulip.example.com'
# The email address for the person or team who maintains the Zulip
# Voyager installation; it will also receive support emails (e.g. zulip-admin@example.com).
ZULIP_ADMINISTRATOR = 'zulip-admin@example.com'
# The domain for your organization, e.g. example.com
ADMIN_DOMAIN = 'example.com'
# Enable at least one of the following authentication backends.
AUTHENTICATION_BACKENDS = (
# 'zproject.backends.EmailAuthBackend', # Email and password; see SMTP setup below
# 'zproject.backends.ZulipRemoteUserBackend', # Local SSO
# 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below
# 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
)
# Google Oauth requires a bit of configuration; you will need to go to
# do the following:
#
# (1) Visit https://console.developers.google.com, setup an
# Oauth2 client ID that allows redirects to
# e.g. https://zulip.example.com/accounts/login/google/done/.
#
# (2) Then click into the APIs and Auth section (in the sidebar on the
# left side of the page), APIs, then under "Social APIs" click on
# "Google+ API" and click the button to enable the API.
#
# (3) put your client secret as "google_oauth2_client_secret" in
# zulip-secrets.conf, and your client ID right here:
# GOOGLE_OAUTH2_CLIENT_ID=<your client ID from Google>
# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "username@example.com", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None
# Configure the outgoing SMTP server below. For testing, you can skip
# sending emails entirely by commenting out EMAIL_HOST, but you will
# want to configure this to support email address confirmation emails,
# missed message emails, onboarding follow-up emails, etc. To
# configure SMTP, you will need to complete the following steps:
#
# (1) Fill out the outgoing email sending configuration below.
#
# (2) Put the SMTP password for EMAIL_HOST_USER in
# /etc/zulip/zulip-secrets.conf as email_password.
#
# (3) If you are using a gmail account to send outgoing email, you
# will likely need to read this Google support answer and configure
# that account as "less secure":
# https://support.google.com/mail/answer/14257.
#
# You can quickly test your sending email configuration using:
# ./manage.py send_test_email username@example.com
#
# A common problem is hosting providers that block outgoing SMTP traffic.
#
# With the exception of reading EMAIL_HOST_PASSWORD from
# email_password in the Zulip secrets file, Zulip uses Django's
# standard EmailBackend, so if you're having issues, you may want to
# search for documentation on using your email provider with Django.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ''
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# The email From address to be used for automatically generated emails
DEFAULT_FROM_EMAIL = "Zulip <zulip@example.com>"
# The noreply address to be used as Reply-To for certain generated emails.
# Messages sent to this address should not be delivered anywhere.
NOREPLY_EMAIL_ADDRESS = "noreply@example.com"
# A list of strings representing the host/domain names that this
# Django site can serve. You should reset it to be a list of
# domains/IP addresses for your site. This is a security measure to
# prevent an attacker from poisoning caches and triggering password
# reset emails with links to malicious hosts by submitting requests
# with a fake HTTP Host header. You must include 'localhost' here.
ALLOWED_HOSTS = ['*']
### OPTIONAL SETTINGS
# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks
# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False
# By default, the feedback button will submit feedback to the Zulip
# developers. If you set FEEDBACK_EMAIL to be an email address
# (e.g. ZULIP_ADMINISTRATOR), feedback sent by your users will instead
# be sent to that email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR
# Controls whether or not error reports are sent to Zulip. Error
# reports are used to improve the quality of the product and do not
# include message contents; please contact Zulip support with any
# questions.
ERROR_REPORTING = True
# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message.
INLINE_IMAGE_PREVIEW = True
# By default, files uploaded by users and user avatars are stored
# directly on the Zulip server. If file storage in Amazon S3 is
# desired, you can configure that as follows:
#
# (1) Set s3_key and s3_secret_key in /etc/zulip/zulip-secrets.conf to
# be the S3 access and secret keys that you want to use, and setting
# the S3_AUTH_UPLOADS_BUCKET and S3_AVATAR_BUCKET to be the S3 buckets
# you've created to store file uploads and user avatars, respectively.
# Then restart Zulip (scripts/restart-zulip).
#
# (2) Edit /etc/nginx/sites-available/zulip-enterprise to comment out
# the nginx configuration for /user_uploads and /user_avatars (see
# https://github.com/zulip/zulip/issues/291 for discussion of a better
# solution that won't be automatically reverted by the Zulip upgrade
# script), and then restart nginx.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"
#S3_AUTH_UPLOADS_BUCKET = ""
#S3_AVATAR_BUCKET = ""
# Maximum allowed size of uploaded files, in megabytes. DO NOT SET
# ABOVE 80MB. The file upload implementation doesn't support chunked
# uploads, so browsers will crash if you try uploading larger files.
MAX_FILE_UPLOAD_SIZE = 25
# Controls whether name changes are completely disabled for this installation
# This is useful in settings where you're syncing names from an integrated LDAP/Active Directory
NAME_CHANGES_DISABLED = False
# Controls whether users who have not uploaded an avatar will receive an avatar
# from gravatar.com.
ENABLE_GRAVATAR = True
# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'
# To access an external postgres database you should define the host name in
# REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the
# property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE
# Different options are:
# disable: I don't care about security, and I don't want to pay the overhead of encryption.
# allow: I don't care about security, but I will pay the overhead of encryption if the server insists on it.
# prefer: I don't care about encryption, but I wish to pay the overhead of encryption if the server supports it.
# require: I want my data to be encrypted, and I accept the overhead. I trust that the network will make sure I always connect to the server I want.
# verify-ca: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server that I trust.
# verify-full: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server I trust, and that it's the one I specify.
#REMOTE_POSTGRES_HOST = 'dbserver.example.com'
#REMOTE_POSTGRES_SSLMODE = 'require'
### TWITTER INTEGRATION
# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page, create a new application.
# 3. Click on the application you created and click "create my access token".
# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.
### EMAIL GATEWAY INTEGRATION
# The Email gateway integration supports sending messages into Zulip
# by sending an email. This is useful for receiving notifications
# from third-party services that only send outgoing notifications via
# email. Once this integration is configured, each stream will have
# an email address documented on the stream settings page, and emails
# sent to that address will be delivered into the stream.
#
# There are two ways to configure email mirroring in Zulip:
# 1. Local delivery: A MTA runs locally and passes mail directly to Zulip
# 2. Polling: Checks an IMAP inbox every minute for new messages.
#
# The local delivery configuration is preferred for production because
# it supports nicer looking email addresses and has no cron delay,
# while the polling mechanism is better for testing/developing this
# feature because it doesn't require a public-facing IP/DNS setup.
#
# The main email mirror setting is the email address pattern, where
# you specify the email address format you'd like the integration to
# use. It should be one of the following:
# %s@zulip.example.com (for local delivery)
# username+%s@example.com (for polling if EMAIL_GATEWAY_LOGIN=username@example.com)
EMAIL_GATEWAY_PATTERN = ""
#
# If you are using local delivery, EMAIL_GATEWAY_PATTERN is all you need
# to change in this file. You will also need to enable the Zulip postfix
# configuration to support local delivery by adding
# , zulip::postfix_localmail
# to puppet_classes in /etc/zulip/zulip.conf and then running
# `scripts/zulip-puppet-apply -f` to do the installation.
#
# If you are using polling, you will need to setup an IMAP email
# account dedicated to Zulip email gateway messages. The model is
# that users will send emails to that account via an address of the
# form username+%s@example.com (which is what you will set as
# EMAIL_GATEWAY_PATTERN); your email provider should deliver those
# emails to the username@example.com inbox. Then you run in a cron
# job `./manage.py email-mirror` (see puppet/zulip/files/cron.d/email-mirror),
# which will check that inbox and batch-process any new messages.
#
# You will need to configure authentication for the email mirror
# command to access the IMAP mailbox below and in zulip-secrets.conf.
#
# The IMAP login; username here and password as email_gateway_login in
# zulip-secrets.conf.
EMAIL_GATEWAY_LOGIN = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above
# must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"
### LDAP integration configuration
# Zulip supports retrieving information about users via LDAP, and
# optionally using LDAP as an authentication mechanism.
#
# In either configuration, you will need to do the following:
#
# * Fill in the LDAP configuration options below so that Zulip can
# connect to your LDAP server
#
# * Setup the mapping between email addresses (used as login names in
# Zulip) and LDAP usernames. There are two supported ways to setup
# the username mapping:
#
# (A) If users' email addresses are in LDAP, set
# LDAP_APPEND_DOMAIN = None
# AUTH_LDAP_USER_SEARCH to lookup users by email address
#
# (B) If LDAP only has usernames but email addresses are of the form
# username@example.com, you should set:
# LDAP_APPEND_DOMAIN = example.com and
# AUTH_LDAP_USER_SEARCH to lookup users by username
#
# You can quickly test whether your configuration works by running:
# ./manage.py query_ldap username@example.com
# From the root of your Zulip installation; if your configuration is working
# that will output the full name for your user.
#
# -------------------------------------------------------------
#
# If you are using LDAP for authentication, you will need to enable
# the zproject.backends.ZulipLDAPAuthBackend auth backend in
# AUTHENTICATION_BACKENDS above. After doing so, you should be able
# to login to Zulip by entering your email address and LDAP password
# on the Zulip login form.
#
# If you are using LDAP to populate names in Zulip, once you finish
# configuring this integration, you will need to run:
# ./manage.py sync_ldap_user_data
# To sync names for existing users; you may want to run this in a cron
# job to pick up name changes made on your LDAP server.
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
# URI of your LDAP server. If set, LDAP is used to prepopulate a user's name in
# Zulip. Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""
# This DN will be used to bind to your server. If unset, anonymous
# binds are performed. If set, you need to specify the password as
# 'auth_ldap_bind_password' in zulip-secrets.conf.
AUTH_LDAP_BIND_DN = ""
# Specify the search base and the property to filter on that corresponds to the
# username.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
# If the value of a user's "uid" (or similar) property is not their email
# address, specify the domain to append here.
LDAP_APPEND_DOMAIN = None
# This map defines how to populate attributes of a Zulip user from LDAP.
AUTH_LDAP_USER_ATTR_MAP = {
# Populate the Django user's name from the LDAP directory.
"full_name": "cn",
}
# The default CAMO_URI of '/external_content/' is served by the camo
# setup in the default Voyager nginx configuration. Setting CAMO_URI
# to '' will disable the Camo integration.
CAMO_URI = '/external_content/'
# RabbitMQ configuration
#
# By default, Zulip connects to rabbitmq running locally on the machine,
# but Zulip also supports connecting to RabbitMQ over the network;
# to use a remote RabbitMQ instance, set RABBITMQ_HOST here.
# RABBITMQ_HOST = "localhost"
# To use another rabbitmq user than the default 'zulip', set RABBITMQ_USERNAME here.
# RABBITMQ_USERNAME = 'zulip'
# Memcached configuration
#
# By default, Zulip connects to memcached running locally on the machine,
# but Zulip also supports connecting to memcached over the network;
# to use a remote Memcached instance, set MEMCACHED_LOCATION here.
# Format HOST:PORT
# MEMCACHED_LOCATION = 127.0.0.1:11211
# Redis configuration
#
# By default, Zulip connects to redis running locally on the machine,
# but Zulip also supports connecting to redis over the network;
# to use a remote RabbitMQ instance, set REDIS_HOST here.
# REDIS_HOST = '127.0.0.1'
# For a different redis port set the REDIS_PORT here.
# REDIS_PORT = 6379
# Controls whether Zulip will rate-limit user requests.
# RATE_LIMITING = True
| Frouk/zulip | zproject/local_settings_template.py | Python | apache-2.0 | 15,172 | [
"VisIt"
] | 99f6d13f233db15a2102aa1a203b87cd014f667d8c7d7fe19b46b103ed48d96b |
import nrrd
import json
import numpy as np
import pickle
import os
import scipy.stats as stats
from scipy.optimize import fsolve
from scipy.special import iv
from scipy.special import factorial2, factorial
from scipy.special import hyp1f1
from sklearn.neighbors.kde import KernelDensity
from sklearn.model_selection import GridSearchCV
#from dask.distributed import Client
#client = Client('128.95.156.220:8785')
#import joblib
import matplotlib.pyplot as plt
from matplotlib import patches
import pandas as pd
def rician_eqn(p, measured_mean, measured_variance):
    """Residuals of the Rician moment-matching equations, for use with fsolve.

    @param: p tuple (A, sigma) -- Rician signal amplitude and Gaussian noise level
    @param: measured_mean the sample mean of the magnitude data
    @param: measured_variance the sample variance of the magnitude data
    @return: (mean_residual, variance_residual); both are zero when (A, sigma)
        reproduce the measured moments
    """
    A, sigma = p
    nu = A**2 / (4.0 * sigma**2.0)
    # b carries the Laguerre-polynomial factor L_{1/2}(-A^2/(2 sigma^2)) up to exp(-nu)
    b = (1 + 2.0 * nu) * iv(0, nu) + 2.0 * nu * iv(1, nu)
    mean = sigma * np.sqrt(np.pi / 2.0) * np.exp(-nu) * b - measured_mean
    # BUG FIX: the Rice-distribution variance is A**2 + 2*sigma**2 - mean**2;
    # the original used a bare `A` in place of `A**2`.
    var = A**2.0 + 2.0 * sigma**2.0 - np.pi * sigma**2.0 / 2.0 * np.exp(-2.0 * nu) * b**2.0 - measured_variance
    return (mean, var)
def beta_N(N):
    """Correction coefficient beta_N of the N-channel magnitude signal.

    @param: N the number of MRA channels
    @return: sqrt(pi/2) * (2N-1)!! / (2**(N-1) * (N-1)!)
    """
    numerator = np.sqrt(0.5 * np.pi) * factorial2(2 * N - 1)
    denominator = (2 ** (N - 1)) * factorial(N - 1)
    return numerator / denominator
def xi(theta, N):
    """Correction factor of the multi-channel MRI magnitude signal.

    @param: theta the SNR of the underlying Gaussian
    @param: N the number of MRA channels
    """
    correction = beta_N(N) * hyp1f1(-0.5, N, -0.5 * theta**2)
    return 2.0 * N + theta**2 - correction**2
def g_theta(theta, N, r):
    """Gaussian SNR expressed as a fixed-point function of itself.

    @param: theta the Gaussian SNR
    @param: N the number of MRA channels
    @param: r the measured signal-to-noise ratio
    """
    snr_squared = xi(theta, N) * (1.0 + r**2.0) - 2.0 * N
    return np.sqrt(snr_squared)
def koay_next(t_n, N, r):
    """One update step of the Koay iteration for the Gaussian SNR.

    @param: t_n current estimate of the Gaussian SNR
    @param: N the number of MRA channels
    @param: r the measured signal-to-noise ratio
    @return: the next estimate t_{n+1}
    """
    g_val = g_theta(t_n, N, r)
    beta = beta_N(N)
    hyp_a = hyp1f1(-0.5, N, -0.5 * t_n**2.0)
    hyp_b = hyp1f1(0.5, N + 1, -0.5 * t_n**2.0)
    denominator = t_n * (1.0 + r**2.0) * (1.0 - (0.5 * beta**2.0 / N) * hyp_a * hyp_b) - g_val
    return t_n - (g_val * (g_val - t_n)) / denominator
def koay_test(M, s_r, theta):
    """Scale measured magnitude mean/std back to Gaussian noise estimates.

    NOTE: the original author flagged this variant as not working.
    @param: M measured magnitude mean
    @param: s_r measured magnitude standard deviation
    @param: theta assumed Gaussian SNR
    @return: (sigma from the mean equation, sigma from the variance equation)
    """
    bessel_term = (1.0 + 0.5 * theta**2.0) * iv(0, 0.25 * theta**2.0) \
        + 0.5 * theta**2.0 * iv(1, 0.25 * theta**2.0)
    psi = 0.5 * (np.sqrt(0.5 * np.pi) * np.exp(-0.25 * theta**2.0) * bessel_term)
    sigma_from_mean = M / psi
    variance_root = np.sqrt(theta**2.0 + 2.0
                            - 0.125 * np.pi * np.exp(-0.5 * theta**2.0) * bessel_term**2.0)
    sigma_from_std = s_r / variance_root
    return sigma_from_mean, sigma_from_std
def koay_test2(M, s_r, theta, N):
    """N-channel variant mapping measured mean/std to Gaussian sigma estimates.

    @param: M measured magnitude mean
    @param: s_r measured magnitude standard deviation
    @param: theta assumed Gaussian SNR
    @param: N the number of MRA channels
    @return: (sigma from the mean equation, sigma from the variance equation)
    """
    hyp_term = hyp1f1(-0.5, N, -0.5 * theta**2)
    beta = beta_N(N)
    sigma_from_mean = M / (beta * hyp_term)
    variance_root = np.sqrt(2.0 * N + theta**2.0 - (beta * hyp_term)**2.0)
    sigma_from_std = s_r / variance_root
    return sigma_from_mean, sigma_from_std
def koay_test3(M, s_r, theta, N):
    """Bias-corrected magnitude mean and the implied Gaussian sigma.

    @param: M measured magnitude mean
    @param: s_r measured magnitude standard deviation
    @param: theta assumed Gaussian SNR
    @param: N the number of MRA channels
    @return: (corrected mean, corrected mean / theta)
    """
    hyp_term = hyp1f1(-0.5, N, -0.5 * theta**2)
    xi_val = 2.0 * N + theta**2.0 - beta_N(N)**2.0 * hyp_term**2.0
    corrected_mean = np.sqrt(M**2.0 + (1.0 - 2.0 * N / xi_val) * s_r**2.0)
    corrected_sigma = corrected_mean / theta
    return corrected_mean, corrected_sigma
def lower_bound(N):
    """Smallest measured SNR for which the Koay inversion is defined.

    @param: N the number of MRA channels
    """
    ratio = 2.0 * N / xi(0.0, N)
    return np.sqrt(ratio - 1.0)
def newton_koay(r, N, iterations=500, tolerance = 1.0E-9):
    """ returns newton iteration solve to the Koay derived noise estimator
    @param: r the measured signal to noise ratio (scalar only -- the array
        path below is commented out, so a non-scalar r leaves t_1/err
        unset and the final return raises NameError)
    @param: N the number of MRA channels
    @param: iterations the maximum iterations for the newton solve
    @param tolerance the numerical tolerance of the newton iterations
    @return: (t_1, err) -- converged Gaussian SNR estimate and the size of
        the final update
    """
    #
    # Reference (Koay & Basser 2006):
    #https://www.sciencedirect.com/science/article/pii/S109078070500019X/
    it_default = np.copy(iterations)  # keep the original budget for the warning message
    lb = lower_bound(N)  # below this measured SNR the inversion has no solution
    if (np.isscalar(r)):
        if (r <= lb):
            # Below the identifiable region: clamp the estimate to zero.
            t_1 = 0.0
            err = 0.0
        else:
            t_0 = r - lb # initial guess of the guassian SNR theta
            t_1 = koay_next(t_0, N, r)
            err = np.absolute( t_1 - t_0)
            # Iterate until the update falls below tolerance or the budget runs out.
            while (err > tolerance and iterations >= 0):
                t_0 = np.copy(t_1)
                t_1 = koay_next(t_0, N, r)
                err = np.absolute( t_1 - t_0)
                iterations -= 1
            if (iterations < 0):
                print("{0} iterations before reaching error tolerance, error: {1} tolerance:{2}".format(it_default, err, tolerance ))
    # Vectorized (array-input) variant, currently disabled:
    #else:
        #t_1 = np.empty(r.shape)
        #err = np.ones(r.shape)
        #indx = np.where(r <= lb)
        #t_1[indx] = 0.0
        #t_0 = r - lb
        #t_1[indx] = koay_next(t_0[indx], N, r[indx])
        #while (err.any() > tolerance and iterations >= 0):
            #t_0 = np.copy(t_1)
            #t_1[indx] = koay_next(t_0[indx], N, r[indx])
            #err = np.absolute( t_1 - t_0)
            #iterations -= 1
        #if (iterations < 0):
            #print("{0} iterations before reaching error tolerance, error: {1} tolerance:{2}".format(it_default, err, tolerance ))
    return t_1, err
def bootstrap_resample(X, n=None, percent=0.01):
    """ Bootstrap resample an array_like (sampling with replacement).

    Parameters
    ----------
    X : numpy array_like
        data to resample
    n : int, optional
        explicit number of samples to draw; takes precedence over `percent`
    percent: float, optional
        fraction of len(X) to draw when `n` is None (falls back to 1% when
        both `n` and `percent` are None)

    Returns
    -------
    X_resample : the resampled values
    p_n : int, the number of samples actually drawn
    """
    if n is None:
        # BUG FIX: the original left np.floor()'s float result uncast in the
        # both-None branch, which np.random.choice rejects as a size.
        frac = 0.01 if percent is None else percent
        p_n = int(np.floor(frac * X.shape[0]))
    else:
        p_n = n
    X_resample = np.random.choice(X, p_n)  # sampling with replacement
    return X_resample, p_n
def VWI_Enhancement(post, pre, mean_post_vent, mean_pre_vent, kind = "E1",
        std_post_vent = None, std_pre_vent = None, return_parts = False):
    """ Compute a vessel-wall-imaging enhancement measure.

    Parameters
    ----------
    post : numpy array_like post contrast VWI
    pre : numpy array_like pre contrast VWI
    mean_post_vent : mean of post contrast ventricle
    mean_pre_vent : mean of pre contrast ventricle
    kind : which enhancement to calculate ("E1".."E5")
    std_post_vent : std of post contrast ventricle (needed for E3/E4/E5)
    std_pre_vent : std of pre contrast ventricle (needed for E3/E4/E5)
    return_parts : also return the normalized post/pre terms ("E5" excluded)
    -------
    returns E, or (E, post_term, pre_term) when return_parts is True
    """
    if kind == "E5":
        # ratio of normalized quantities, similar in spirit to E3;
        # always returns E alone
        return (std_pre_vent / std_post_vent) * \
            (post - mean_post_vent) / (pre - mean_pre_vent)
    if kind == "E1":
        # E = xi_vent / eta_vent * eta - xi
        post_term = mean_pre_vent / mean_post_vent * post
        pre_term = pre
    elif kind == "E2":
        # E = eta / eta_vent - xi / xi_vent
        post_term = post / mean_post_vent
        pre_term = pre / mean_pre_vent
    elif kind == "E3":
        # z-score each image against its own ventricle statistics
        post_term = (post - mean_post_vent) / std_post_vent
        pre_term = (pre - mean_pre_vent) / std_pre_vent
    elif kind == "E4":
        # shared normalization by the pooled ventricle spread
        pooled = np.sqrt(std_post_vent**2 + std_pre_vent**2)
        post_term = (post - mean_post_vent) / pooled
        pre_term = (pre - mean_pre_vent) / pooled
    else:
        raise Exception("undefined enhancement kind {0}".format(kind))
    diff = post_term - pre_term
    if return_parts:
        return diff, post_term, pre_term
    return diff
def div0(a, b, value=0):
    """Element-wise a / b with every non-finite result replaced by `value`.

    Example: div0([-1, 0, 1], 0) -> [0, 0, 0]
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        quotient = np.true_divide(a, b)
    bad = ~np.isfinite(quotient)
    quotient[bad] = value  # replaces -inf, inf and NaN entries
    return quotient
def uncertainty(eta, xi, conf=2.0, kind = 'E'):
    """
    use global variables to get what I want, this is pretty bad practice I think

    Propagates ventricle/background (co)variances into an uncertainty for the
    enhancement measure selected by `kind`.

    NOTE(review): relies on module-level globals assigned in the analysis
    script (u_eta_back_2, u_xi_back_2, u_eta_xi_back, u_eta_vent_2,
    u_xi_vent_2, u_eta_xi_vent, eta_vent, xi_vent, and the *_term values
    used by 'E1') -- calling it before those are set raises NameError.

    @param: eta post-contrast intensities (used directly only by 'E2')
    @param: xi pre-contrast intensities (used directly only by 'E2')
    @param: conf coverage factor applied to the standard uncertainty
        (2.0 ~ Gaussian 95% interval)
    @param: kind which enhancement definition to propagate
    @return: (conf * u, u, u**2) for the selected kind
    """
    if (kind == 'E'):
        # plain difference: variances add, minus twice the covariance
        u_E_2 = (u_eta_back_2 + u_xi_back_2 - 2.0 * u_eta_xi_back )
        u_E = np.sqrt(u_E_2)
        u_E_confidence = conf * u_E # gaussian 95% confidence interval
        return u_E_confidence, u_E, u_E_2
    elif(kind == 'E1'):
        # terms are pre-assembled globals for the ventricle-scaled difference
        u_E1_2 = eta_vent_term + xi_vent_term + eta_term + xi_term + eta_xi_term + eta_xi_vent_term
        u_E1 = np.sqrt(u_E1_2)
        u_E1_confidence = conf * u_E1 # gaussian 95% confidence interval
        return u_E1_confidence, u_E1, u_E1_2
    elif (kind == 'E2'):
        # ratio form: first-order propagation through eta/eta_vent - xi/xi_vent
        u_E2_2 = (1.0 / (eta_vent**2.0) * u_eta_back_2 +
                  1.0 / (xi_vent**2.0) * u_xi_back_2 +
                  np.square( eta / (eta_vent**2.0)) * u_eta_vent_2 +
                  np.square( xi / (xi_vent**2.0)) * u_xi_vent_2 -
                  2.0 / (eta_vent * xi_vent) * u_eta_xi_back -
                  2.0 * (eta * xi) / ( (eta_vent * xi_vent)**2.0 ) * u_eta_xi_vent
                  )
        u_E2 = np.sqrt(u_E2_2)
        u_E2_confidence = conf * u_E2 # gaussian 95% confidence interval
        return u_E2_confidence, u_E2, u_E2_2
    elif(kind == 'E3'):
        # z-scored difference: normalized by the ventricle variances
        u_E3_2 = (1.0 / (u_eta_vent_2) * u_eta_back_2 +
                  1.0 / (u_xi_vent_2) * u_xi_back_2 -
                  2.0 / (np.sqrt(u_eta_vent_2 *u_xi_vent_2)) * u_eta_xi_back +
                  2.0 -
                  2.0 / (np.sqrt(u_eta_vent_2 *u_xi_vent_2)) * u_eta_xi_vent
                  )
        u_E3 = np.sqrt(u_E3_2)
        u_E3_confidence = conf * u_E3 # gaussian 95% confidence interval
        return u_E3_confidence, u_E3, u_E3_2
    elif(kind == 'E4'):
        # pooled-normalization difference
        u_E4_2 = ( 1.0 / (u_eta_vent_2 + u_xi_vent_2) * (
                   u_eta_back_2 + u_xi_back_2 -
                   2.0 * u_eta_xi_back +
                   u_eta_vent_2 + u_xi_vent_2 -
                   2.0 * u_eta_xi_vent
                   )
                 )
        u_E4 = np.sqrt(u_E4_2)
        u_E4_confidence = conf * u_E4 # gaussian 95% confidence interval
        return u_E4_confidence, u_E4, u_E4_2
    else:
        raise Exception("undefined enhancement kind {0}".format(kind))
def kernel_fit(data, min_size=20, cross_v=2, n_jobs_def=22):
    """Cross-validated Gaussian KDE fit to 1-D data.

    @param: data array whose flattened values are fitted
    @param: min_size histograms with fewer bin edges than this get the
        evaluation grid padded by 20% on each side
    @param: cross_v number of cross-validation folds for the bandwidth search
    @param: n_jobs_def parallel jobs passed to GridSearchCV
    @return: (pdf on the grid, the grid, best bandwidth parameters)
    """
    flat = data.ravel()
    hist_counts, bin_edges = np.histogram(flat, bins='sqrt', density=True)
    std_data = data.std()
    # rule-of-thumb starting bandwidth; candidates span bw/4 .. std_data
    bw = (flat.shape[0] * (std_data + 2) / 4.)**(-1. / (std_data + 4))
    bw_candidates = np.geomspace(bw / 4.0, std_data, 20, endpoint=True)
    N_bins = bin_edges.shape[0]
    pad = 0.2 if N_bins < min_size else 0.0
    # evaluation grid over the histogram range, padded for small samples
    lo = bin_edges[0] - pad * abs(bin_edges[0])
    hi = bin_edges[-1] + pad * abs(bin_edges[0])
    x_grid = np.linspace(lo, hi, N_bins)
    grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                        {'bandwidth': bw_candidates},
                        cv=cross_v,
                        n_jobs=n_jobs_def)
    grid.fit(flat[:, None])
    print("kernel fit: ", grid.best_params_)
    kde = grid.best_estimator_
    pdf = np.exp(kde.score_samples(x_grid[:, None]))
    return pdf, x_grid, grid.best_params_
def kernel_fit_single(data, bw=None, min_size=20, kern='gaussian'):
    """KDE fit to 1-D data with a single, fixed bandwidth.

    @param: data array whose flattened values are fitted
    @param: bw kernel bandwidth; when None a rule-of-thumb value derived from
        the sample size and standard deviation is used
    @param: min_size histograms with fewer bin edges than this get the
        evaluation grid padded by 20% on each side
    @param: kern kernel name forwarded to sklearn's KernelDensity
    @return: (pdf on the grid, the grid)
    """
    res = np.histogram(data.ravel(), bins='sqrt', density=True)
    std_data = data.std()
    # FIX: `is None` instead of `== None` -- identity is the correct None
    # test and avoids elementwise comparison if an array is ever passed.
    if bw is None:
        bw = (data.ravel().shape[0] * (std_data + 2) / 4.)**(-1. / (std_data + 4))
    N_bins = res[1].shape[0]
    if N_bins < min_size:
        extra = 0.2
    else:
        extra = 0.0
    # evaluation grid over the histogram range, padded by 20% for small samples
    x_grid = np.linspace(res[1][0] - extra * abs(res[1][0]),
                         res[1][-1] + extra * abs(res[1][0]), N_bins)
    kde = KernelDensity(bandwidth=bw, kernel=kern)
    kde.fit(data.ravel()[:, None])
    pdf = np.exp(kde.score_samples(x_grid[:, None]))
    return pdf, x_grid
'''
A matplotlib-based function to overplot an elliptical error contour from the covariance matrix.
Copyright 2017 Megan Bedell (Flatiron).
Citations: Joe Kington (https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py),
Vincent Spruyt (http://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix/)
'''
def error_ellipse(ax, xc, yc, cov, label, sigma=1, **kwargs):
    '''
    Plot an error ellipse contour over your data.
    Inputs:
        ax : matplotlib Axes() object
        xc : x-coordinate of ellipse center
        yc : y-coordinate of ellipse center
        cov : covariance matrix
        label : legend label for the ellipse (pass None to skip labelling)
        sigma : # sigma to plot (default 1)
        additional kwargs passed to matplotlib.patches.Ellipse()
    '''
    w, v = np.linalg.eigh(cov)  # eigh assumes a symmetric matrix
    # sort eigenvalues descending so the major axis comes first
    order = w.argsort()[::-1]
    w, v = w[:, None][order].ravel() if False else w[order], v[:, order]
    # orientation of the major axis in degrees
    theta = np.degrees(np.arctan2(*v[:, 0][::-1]))
    ellipse = patches.Ellipse(xy=(xc, yc),
                              width=2. * sigma * np.sqrt(w[0]),
                              height=2. * sigma * np.sqrt(w[1]),
                              angle=theta, **kwargs)
    ellipse.set_facecolor('none')
    # FIX: `is not None` is the correct None test (identity, not equality)
    if label is not None:
        ellipse.set_label(label)
    ax.add_artist(ellipse)
def covariance_mat(A, B):
    """2x2 sample covariance matrix of the two 1-D samples A and B."""
    stacked = np.vstack((A, B))
    return np.cov(stacked)
def gumbel_r_fit(input_data, n_pts=1000, tail=0.05):
    """Fit a right-skewed Gumbel distribution and evaluate its pdf on a grid.

    @param: input_data sample to fit (row or columnar format accepted)
    @param: n_pts number of grid points
    @param: tail probability mass excluded at each end of the grid
    @return: (fitted pdf values, the evaluation grid)
    """
    params = stats.gumbel_r.fit(input_data)
    shape_args, loc, scale = params[:-2], params[-2], params[-1]
    # sane start/end points: the tail and (1 - tail) quantiles of the fit
    if shape_args:
        start = stats.gumbel_r.ppf(tail, *shape_args, loc=loc, scale=scale)
        end = stats.gumbel_r.ppf(1.0 - tail, *shape_args, loc=loc, scale=scale)
    else:
        start = stats.gumbel_r.ppf(tail, loc=loc, scale=scale)
        end = stats.gumbel_r.ppf(1.0 - tail, loc=loc, scale=scale)
    grid = np.linspace(start, end, n_pts)
    fitted_pdf = stats.gumbel_r.pdf(grid, *shape_args, loc=loc, scale=scale)
    return fitted_pdf, grid
#vars
# Analysis-wide configuration constants.
conf = 2.0 #95% confidence interval
percent_boot = 0.01  # fraction of voxels drawn per bootstrap resample
font_size = 10  # font size used across all figures
figure_1 = True  # toggle for figure set 1
figure_2 = True  # toggle for figure set 2
dpi_value = 200  # resolution for saved figures
# Input/output locations (machine-specific absolute paths).
json_file = "/home/sansomk/caseFiles/mri/VWI_proj/step3_normalization.json"
pickle_file = "/home/sansomk/caseFiles/mri/VWI_proj/step3_pickle.pkl"
enhancement_file = "enhancement_pickle.pkl"
bw_file = "bw_pickle.pkl"
write_file_dir = "/home/sansomk/caseFiles/mri/VWI_proj"
write_dir = "VWI_analysis"
plots_dir = "plots"
overwrite = 0  # 1 forces the nrrd image cache pickle to be rebuilt
overwrite_out = True
skip_bootstrap = True  # skip the (slow) bootstrap error analysis
skip_write = True
# Load the per-case image-path manifest produced by the normalization step.
with open(json_file, 'r') as f:
    data = json.load(f)
# All image labels present in the manifest (kept for reference).
labels = ["post_float","pre_float", "VWI_post_masked_vent", "VWI_post_vent",
          "pre2post", "level_set", "VWI_pre2post_masked_vent",
          "VWI_background_post_masked", "VWI_background_post",
          "VWI_background_pre_masked", "VWI_background_pre",
          "model-label_cropped", "model_post_masked",
          "model_pre2post_masked", "VWI_post_float_cropped", "VWI_background_intersection",
          "VWI_background_post_intersection", "VWI_background_pre_intersection"]
subset_labels = ["VWI_post_masked_vent", "VWI_pre2post_masked_vent",
                 "VWI_background_post_masked", "VWI_background_pre_masked",
                 "model_post_masked", "model_pre2post_masked"]
# (post, pre) label pairs actually read below.
groups = [("post_float", "pre2post"),
          ("VWI_post_masked_vent", "VWI_pre2post_masked_vent"),
          ("model_post_masked", "model_pre2post_masked"),
          ("VWI_background_post_masked", "VWI_background_pre_masked"),
          ("VWI_background_post_intersection", "VWI_background_pre_intersection"),
          ("VWI_post_PI_masked", "VWI_pre2post_PI_masked")
          ]
image_dict = {}
# Build an on-disk cache of the nrrd images, or refresh stale entries:
# each entry is the (array, header) tuple returned by nrrd.read.
if ((not os.path.exists(pickle_file)) or overwrite == 1):
    # No cache yet (or forced rebuild): read every image and pickle the dict.
    for case_id, images in data.items():
        image_dict[case_id] = {}
        print(case_id)
        for post_label, pre_label in groups:
            #print(pre_label, post_label)
            pre_path = images[pre_label]
            post_path = images[post_label]
            #vwi_mask, vwi_mask_header = nrrd.read(vwi_mask_path)
            image_tuple_pre = nrrd.read(pre_path)
            image_tuple_post = nrrd.read(post_path)
            image_dict[case_id][pre_label] = image_tuple_pre
            image_dict[case_id][post_label] = image_tuple_post
    pickle.dump(image_dict, open(pickle_file, "wb"))
else:
    # Cache exists: load it, then re-read only images newer than the pickle.
    with open(pickle_file, "rb") as pkl_f:
        image_dict = pickle.load(pkl_f)
    pickle_time = os.path.getmtime(pickle_file)
    dump = False
    for case_id, images in data.items():
        print(case_id)
        for post_label, pre_label in groups:
            #print(pre_label, post_label)
            pre_path = images[pre_label]
            pre_time = os.path.getmtime(pre_path)
            post_path = images[post_label]
            post_time = os.path.getmtime(post_path)
            #vwi_mask, vwi_mask_header = nrrd.read(vwi_mask_path)
            if ( pre_label not in image_dict[case_id].keys() or pre_time > pickle_time):
                image_tuple_pre = nrrd.read(pre_path)
                image_dict[case_id][pre_label] = image_tuple_pre
                dump = True
            if (post_label not in image_dict[case_id].keys() or post_time > pickle_time):
                image_tuple_post = nrrd.read(post_path)
                image_dict[case_id][post_label] = image_tuple_post
                dump = True
    # Rewrite the cache only if something actually changed.
    if (dump ):
        pickle.dump(image_dict, open(pickle_file, "wb"))
# Disabled sanity plot of the Koay lower bound vs. channel count.
#test = np.linspace(1,54,54, dtype=np.int)
#lb_test = lower_bound(test)
#plt.plot(test, lb_test)
#plt.show()
channels = int(1)  # number of MRA channels assumed for the Koay correction
# Column names for the per-case results table assembled from the lists below.
pi_columns = ["Case ID", "Enhancement Type" , "Label", "Average"]
# Accumulators filled inside the per-case analysis loop.
image_path_list = []
bootstrap_fig_list = []
case_id_list = []
e_type_list = []
label_list = []
average_list = []
uncertainty_list = []
params_list = {}  # NOTE: despite the name, this is a dict keyed later in the loop
# Default matplotlib color cycle, reused for consistent per-case colors.
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
# create a plot of all the histograms
# fig8: 5x3 grid -- rows are enhancement types E0..E4, columns are
# ventricle / wall / pituitary-infundibulum regions.
gs8 = plt.GridSpec(5,3, wspace=0.2, hspace=0.8)
fig8 = plt.figure(figsize=(13, 17))
ax1_8 = fig8.add_subplot(gs8[0, 0])
ax1_8.set_title("Compare Ventricle histograms", fontsize = font_size)
ax1_8.set_ylabel('Density, $E_0$', fontsize = font_size)
ax2_8 = fig8.add_subplot(gs8[1, 0])
ax2_8.set_ylabel('Density, $E_1$', fontsize = font_size)
ax3_8 = fig8.add_subplot(gs8[2, 0])
ax3_8.set_ylabel('Density $E_2$', fontsize = font_size)
ax4_8 = fig8.add_subplot(gs8[3, 0])
ax4_8.set_ylabel('Density $E_3$', fontsize = font_size)
ax5_8 = fig8.add_subplot(gs8[4, 0])
ax5_8.set_xlabel('Intensity Values', fontsize = font_size)
ax5_8.set_ylabel('Density $E_4$', fontsize = font_size)
ax6_8 = fig8.add_subplot(gs8[0, 1])
ax6_8.set_title("Compare Wall histograms", fontsize = font_size)
ax7_8 = fig8.add_subplot(gs8[1, 1])
ax8_8 = fig8.add_subplot(gs8[2, 1])
ax9_8 = fig8.add_subplot(gs8[3, 1])
ax10_8 = fig8.add_subplot(gs8[4, 1])
ax10_8.set_xlabel('Intensity Values', fontsize = font_size)
ax11_8 = fig8.add_subplot(gs8[0, 2])
ax11_8.set_title("Compare Pituitary Infundibulum histograms", fontsize = font_size)
ax12_8 = fig8.add_subplot(gs8[1, 2])
ax13_8 = fig8.add_subplot(gs8[2, 2])
ax14_8 = fig8.add_subplot(gs8[3, 2])
ax15_8 = fig8.add_subplot(gs8[4, 2])
ax15_8.set_xlabel('Intensity Values', fontsize = font_size)
# create a plot of all the pre vs post
# fig6: same row/column layout, scatter of pre vs. post intensities.
gs6 = plt.GridSpec(6,3, wspace=0.2, hspace=0.6)
fig6 = plt.figure(figsize=(13, 17))
ax1_6 = fig6.add_subplot(gs6[0, 0])
ax1_6.set_title("Compare Ventricle Pre- vs. Post-VWI", fontsize = font_size)
ax1_6.set_ylabel(r'Post: $E_0$', fontsize = font_size)
ax1_6.set_xlabel(r'Pre: $E_0$', fontsize = font_size)
ax2_6 = fig6.add_subplot(gs6[1, 0])
ax2_6.set_ylabel(r'Post: $E_1$', fontsize = font_size)
ax2_6.set_xlabel(r'Pre: $E_1$', fontsize = font_size)
ax3_6 = fig6.add_subplot(gs6[2, 0])
ax3_6.set_ylabel(r'Post: $E_2$', fontsize = font_size)
ax3_6.set_xlabel(r'Pre: $E_2$', fontsize = font_size)
ax4_6 = fig6.add_subplot(gs6[3, 0])
ax4_6.set_ylabel(r'Post: $E_3$', fontsize = font_size)
ax4_6.set_xlabel(r'Pre: $E_3$', fontsize = font_size)
ax5_6 = fig6.add_subplot(gs6[4, 0])
ax5_6.set_ylabel(r'Post: $E_4$', fontsize = font_size)
ax5_6.set_xlabel(r'Pre: $E_4$', fontsize = font_size)
ax6_6 = fig6.add_subplot(gs6[0, 1])
ax6_6.set_title("Compare Wall Pre- vs. Post-VWI", fontsize = font_size)
ax6_6.set_xlabel(r'Pre: $E_0$', fontsize = font_size)
ax7_6 = fig6.add_subplot(gs6[1, 1])
ax7_6.set_xlabel(r'Pre: $E_1$', fontsize = font_size)
ax8_6 = fig6.add_subplot(gs6[2, 1])
ax8_6.set_xlabel(r'Pre: $E_2$', fontsize = font_size)
ax9_6 = fig6.add_subplot(gs6[3, 1])
ax9_6.set_xlabel(r'Pre: $E_3$', fontsize = font_size)
ax10_6 = fig6.add_subplot(gs6[4, 1])
ax10_6.set_xlabel(r'Pre: $E_4$', fontsize = font_size)
ax11_6 = fig6.add_subplot(gs6[0, 2])
ax11_6.set_title("Compare Pituitary Infundibulum Pre- vs. Post-VWI", fontsize = font_size)
ax11_6.set_xlabel(r'Pre: $E_0$', fontsize = font_size)
ax12_6 = fig6.add_subplot(gs6[1, 2])
ax12_6.set_xlabel(r'Pre: $E_1$', fontsize = font_size)
ax13_6 = fig6.add_subplot(gs6[2, 2])
ax13_6.set_xlabel(r'Pre: $E_2$', fontsize = font_size)
ax14_6 = fig6.add_subplot(gs6[3, 2])
ax14_6.set_xlabel(r'Pre: $E_3$', fontsize = font_size)
ax15_6 = fig6.add_subplot(gs6[4, 2])
ax15_6.set_xlabel(r'Pre: $E_4$', fontsize = font_size)
# create a plot of all the pre vs post
# fig5: same layout as fig6 on a 5x3 grid.
gs5 = plt.GridSpec(5,3, wspace=0.2, hspace=0.5)
fig5 = plt.figure(figsize=(13, 17))
ax1_5 = fig5.add_subplot(gs5[0, 0])
ax1_5.set_title("Compare Ventricle Pre- vs. Post-VWI", fontsize = font_size)
ax1_5.set_ylabel(r'Post: $E_0$', fontsize = font_size)
ax1_5.set_xlabel(r'Pre: $E_0$', fontsize = font_size)
ax2_5 = fig5.add_subplot(gs5[1, 0])
ax2_5.set_ylabel(r'Post: $E_1$', fontsize = font_size)
ax2_5.set_xlabel(r'Pre: $E_1$', fontsize = font_size)
ax3_5 = fig5.add_subplot(gs5[2, 0])
ax3_5.set_ylabel(r'Post: $E_2$', fontsize = font_size)
ax3_5.set_xlabel(r'Pre: $E_2$', fontsize = font_size)
ax4_5 = fig5.add_subplot(gs5[3, 0])
ax4_5.set_ylabel(r'Post: $E_3$', fontsize = font_size)
ax4_5.set_xlabel(r'Pre: $E_3$', fontsize = font_size)
ax5_5 = fig5.add_subplot(gs5[4, 0])
ax5_5.set_ylabel(r'Post: $E_4$', fontsize = font_size)
ax5_5.set_xlabel(r'Pre: $E_4$', fontsize = font_size)
ax6_5 = fig5.add_subplot(gs5[0, 1])
ax6_5.set_title("Compare Wall Pre- vs. Post-VWI", fontsize = font_size)
ax6_5.set_xlabel(r'Pre: $E_0$', fontsize = font_size)
ax7_5 = fig5.add_subplot(gs5[1, 1])
ax7_5.set_xlabel(r'Pre: $E_1$', fontsize = font_size)
ax8_5 = fig5.add_subplot(gs5[2, 1])
ax8_5.set_xlabel(r'Pre: $E_2$', fontsize = font_size)
ax9_5 = fig5.add_subplot(gs5[3, 1])
ax9_5.set_xlabel(r'Pre: $E_3$', fontsize = font_size)
ax10_5 = fig5.add_subplot(gs5[4, 1])
ax10_5.set_xlabel(r'Pre: $E_4$', fontsize = font_size)
ax11_5 = fig5.add_subplot(gs5[0, 2])
ax11_5.set_title("Compare Pituitary Infundibulum Pre- vs. Post-VWI", fontsize = font_size)
ax11_5.set_xlabel(r'Pre: $E_0$', fontsize = font_size)
ax12_5 = fig5.add_subplot(gs5[1, 2])
ax12_5.set_xlabel(r'Pre: $E_1$', fontsize = font_size)
ax13_5 = fig5.add_subplot(gs5[2, 2])
ax13_5.set_xlabel(r'Pre: $E_2$', fontsize = font_size)
ax14_5 = fig5.add_subplot(gs5[3, 2])
ax14_5.set_xlabel(r'Pre: $E_3$', fontsize = font_size)
ax15_5 = fig5.add_subplot(gs5[4, 2])
ax15_5.set_xlabel(r'Pre: $E_4$', fontsize = font_size)
for case_id, case_imgs in image_dict.items():
#full_image = np.zeros_like(case_imgs["post_float"][0])
img_post_vent = case_imgs["VWI_post_masked_vent"][0][case_imgs["VWI_post_masked_vent"][0] >= 0.0]
img_post_back = case_imgs["VWI_background_post_masked"][0][case_imgs["VWI_background_post_masked"][0] >= 0.0]
img_post_model = case_imgs["model_post_masked"][0][case_imgs["model_post_masked"][0] >= 0.0]
img_post_back_inter = case_imgs["VWI_background_post_intersection"][0][case_imgs["VWI_background_post_intersection"][0] >= 0.0]
img_pre_vent = case_imgs["VWI_pre2post_masked_vent"][0][case_imgs["VWI_pre2post_masked_vent"][0] >= 0.0]
img_pre_back = case_imgs["VWI_background_pre_masked"][0][case_imgs["VWI_background_pre_masked"][0] >= 0.0]
img_pre_model = case_imgs["model_pre2post_masked"][0][case_imgs["model_pre2post_masked"][0] >= 0.0]
# --- Extract flat voxel samples per region -------------------------------
# Each case_imgs[key] entry is indexed [0] for the image array; the boolean
# mask `>= 0.0` keeps only valid (non-negative) voxels, yielding 1-D arrays.
# Naming: eta = post-contrast intensities, xi = pre-contrast (registered to
# post).  img_post_vent / img_pre_vent / img_post_back / img_pre_back are
# defined earlier in the file — TODO confirm their masks match these.
img_pre_back_inter = case_imgs["VWI_background_pre_intersection"][0][case_imgs["VWI_background_pre_intersection"][0] >= 0.0]
eta = case_imgs["post_float"][0][case_imgs["post_float"][0] >= 0.0]
xi = case_imgs["pre2post"][0][case_imgs["pre2post"][0] >= 0.0]
# Indices of valid post-contrast voxels (presumably for writing results back
# into image space later — verify against downstream use).
non_zero_indx = np.where(case_imgs["post_float"][0] >= 0.0)
eta_model = case_imgs["model_post_masked"][0][case_imgs["model_post_masked"][0] >= 0.0]
xi_model = case_imgs["model_pre2post_masked"][0][case_imgs["model_pre2post_masked"][0] >= 0.0]
#non_zero_indx_model= np.where(case_imgs["model_post_masked"][0] >= 0.0)
eta_PI = case_imgs["VWI_post_PI_masked"][0][case_imgs["VWI_post_PI_masked"][0] >= 0.0]
xi_PI = case_imgs["VWI_pre2post_PI_masked"][0][case_imgs["VWI_pre2post_PI_masked"][0] >= 0.0]
#scale_factor = np.sqrt(2.0-np.pi/2.0)
#post_back_noise = np.mean(img_post_back) / np.sqrt(np.pi/2.0) # rayleigh distribution
#pre_back_noise = np.mean(img_pre_back) / np.sqrt(np.pi/2.0) # rayleigh distribution
# Background noise estimated directly as the sample standard deviation
# (the commented Rayleigh-based estimate above was abandoned).
back_std_pre = np.std(img_pre_back)
back_std_post = np.std(img_post_back)
# Per-case summary statistics for logging / sanity checking.
print(case_id, "post vent MEAN: {0:.4f} pre vent MEAN {1:.4f}".format(np.mean(img_post_vent), np.mean(img_pre_vent)))
print(case_id, "post vent STD: {0:.4f} pre vent STD {1:.4f}".format(np.std(img_post_vent) , np.std(img_pre_vent) ))
print(case_id, "post back MEAN: {0:.4f} pre back MEAN {1:.4f}".format(np.mean(img_post_back) , np.mean(img_pre_back) ))
print(case_id, "post inter MEAN: {0:.4f} pre inter MEAN {1:.4f}".format(np.mean(img_post_back_inter) , np.mean(img_pre_back_inter) ))
#print(case_id, "post vent inter shape: {0} pre vent inter shape {1}".format(img_post_back_inter.shape ,img_pre_back_inter.shape ))
print(case_id, "post back STD: {0:.4f} pre back STD {1:.4f}".format(back_std_post, back_std_pre))
print(case_id, "post inter STD: {0:.4f} pre inter STD {1:.4f}".format(np.std(img_post_back_inter) , np.std(img_pre_back_inter) ))
print(case_id, "post PI Mean: {0:.4f} pre PI mean {1:.4f}".format(np.mean(eta_PI), np.mean(xi_PI)))
print(case_id, "post PI STD: {0:.4f} pre PI STD {1:.4f}".format(np.std(eta_PI), np.std(xi_PI)))
#koay_result_post, err = newton_koay(SNR_post_vent, channels)
## ventricle portion
# 2x2 covariance of (post, pre) ventricle samples; rows are variables for
# np.cov, so [0,0]=var(post), [1,1]=var(pre), [0,1]=cov(post,pre).
cov_vent = np.cov(np.vstack((img_post_vent, img_pre_vent)))
#N_vent = float(img_post_vent.shape[0])
#print(np.sqrt(2.0*np.trace(cov_vent) - np.sum(cov_vent)))
eta_vent = np.mean(img_post_vent) # mean ventricle post
xi_vent = np.mean(img_pre_vent) # mean ventricle pre
u_eta_vent_2 = cov_vent[0,0] #/ N_vent # uncertainty vent post square
u_xi_vent_2 = cov_vent[1,1] #/ N_vent # uncertainty vent pre square
u_eta_xi_vent = cov_vent[0,1] #/ N_vent # covariance between pre and post
# --- Whole-volume enhancement metrics ------------------------------------
# Compute enhancement E (unnormalized), and E1..E4 (different ventricle-based
# normalizations) over all valid voxels, recording the mean of each into the
# parallel bookkeeping lists (case_id_list / e_type_list / label_list /
# average_list) under label "Volume".
# E: kind="E1" with unit reference means acts as the raw difference metric.
E = VWI_Enhancement(eta, xi, 1.0, 1.0, kind = "E1")
case_id_list.append(case_id)
e_type_list.append("E")
label_list.append("Volume")
average_list.append(E.mean())
E1 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E1")
case_id_list.append(case_id)
e_type_list.append("E1")
label_list.append("Volume")
average_list.append(E1.mean())
E2 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E2")
case_id_list.append(case_id)
e_type_list.append("E2")
label_list.append("Volume")
average_list.append(E2.mean())
# E3/E4 additionally normalize by the ventricle standard deviations.
E3 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2))
case_id_list.append(case_id)
e_type_list.append("E3")
label_list.append("Volume")
average_list.append(E3.mean())
E4 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2))
case_id_list.append(case_id)
e_type_list.append("E4")
label_list.append("Volume")
average_list.append(E4.mean())
# --- Vessel-wall (model) enhancement metrics -----------------------------
# Same E/E1..E4 metrics restricted to the wall-model masked voxels.
# return_parts=True also yields the separate post/pre components used later
# for the pre-vs-post scatter plots and error ellipses.
E_model, E_post_model, E_pre_model = VWI_Enhancement(eta_model, xi_model,
1.0, 1.0, kind = "E1", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E")
label_list.append("Wall")
average_list.append(E_model.mean())
E1_model, E1_post_model, E1_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E1", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E1")
label_list.append("Wall")
average_list.append(E1_model.mean())
E2_model, E2_post_model, E2_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E2", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E2")
label_list.append("Wall")
average_list.append(E2_model.mean())
E3_model, E3_post_model, E3_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E3")
label_list.append("Wall")
average_list.append(E3_model.mean())
E4_model, E4_post_model, E4_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E4")
label_list.append("Wall")
average_list.append(E4_model.mean())
# --- Ventricle enhancement metrics ---------------------------------------
# Same E/E1..E4 metrics over the ventricle samples themselves; since the
# reference means/stds come from this same region, E1..E4 here quantify the
# self-normalized spread of the reference region.
E_vent, E_post_vent, E_pre_vent = VWI_Enhancement(img_post_vent, img_pre_vent,
1.0, 1.0, kind = "E1", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E")
label_list.append("Ventricle")
average_list.append(E_vent.mean())
E1_vent, E1_post_vent, E1_pre_vent = VWI_Enhancement(img_post_vent, img_pre_vent,
eta_vent, xi_vent, kind = "E1", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E1")
label_list.append("Ventricle")
average_list.append(E1_vent.mean())
E2_vent, E2_post_vent, E2_pre_vent = VWI_Enhancement(img_post_vent, img_pre_vent,
eta_vent, xi_vent, kind = "E2", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E2")
label_list.append("Ventricle")
average_list.append(E2_vent.mean())
E3_vent, E3_post_vent, E3_pre_vent = VWI_Enhancement(img_post_vent, img_pre_vent,
eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E3")
label_list.append("Ventricle")
average_list.append(E3_vent.mean())
E4_vent, E4_post_vent, E4_pre_vent = VWI_Enhancement(img_post_vent, img_pre_vent,
eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E4")
label_list.append("Ventricle")
average_list.append(E4_vent.mean())
# --- Pituitary infundibulum enhancement metrics --------------------------
# Same E/E1..E4 metrics over the PI-masked samples (expected strongly
# enhancing region, used as a positive control in the plots below).
E_PI, E_post_PI, E_pre_PI = VWI_Enhancement(eta_PI, xi_PI,
1.0, 1.0, kind = "E1", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E")
label_list.append("Pituitary Infundibulum")
average_list.append(E_PI.mean())
E1_PI, E1_post_PI, E1_pre_PI = VWI_Enhancement(eta_PI, xi_PI,
eta_vent, xi_vent, kind = "E1", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E1")
label_list.append("Pituitary Infundibulum")
average_list.append(E1_PI.mean())
E2_PI, E2_post_PI, E2_pre_PI = VWI_Enhancement(eta_PI, xi_PI,
eta_vent, xi_vent, kind = "E2", return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E2")
label_list.append("Pituitary Infundibulum")
average_list.append(E2_PI.mean())
E3_PI, E3_post_PI, E3_pre_PI = VWI_Enhancement(eta_PI, xi_PI,
eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E3")
label_list.append("Pituitary Infundibulum")
average_list.append(E3_PI.mean())
E4_PI, E4_post_PI, E4_pre_PI = VWI_Enhancement(eta_PI, xi_PI,
eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
case_id_list.append(case_id)
e_type_list.append("E4")
label_list.append("Pituitary Infundibulum")
average_list.append(E4_PI.mean())
# --- Background covariance and manual E1 error-propagation terms ---------
# 2x2 covariance of (post, pre) background samples restricted to the
# intersection mask; same index convention as cov_vent above.
cov_back = np.cov(np.vstack((img_post_back_inter, img_pre_back_inter)))
#print(np.sqrt(2.0*np.trace(cov_vent) - np.sum(cov_vent)))
u_eta_back_2 = cov_back[0,0] # uncertainty in post measures square
u_xi_back_2 = cov_back[1,1] # uncertainty in pre measures square
u_eta_xi_back = cov_back[0,1] # covariance estimate
# NOTE(review): the six *_term variables below spell out a first-order
# uncertainty propagation for the E1 metric term-by-term; within this chunk
# they are never read again (the uncertainty() helper is used instead at the
# lines that follow) — verify they are unused downstream before removing.
eta_vent_term = np.square((xi_vent / (eta_vent**2)) * eta) * u_eta_vent_2
xi_vent_term = np.square( eta / eta_vent) * u_xi_vent_2
eta_term = np.square(xi_vent / eta_vent) * u_eta_back_2
xi_term = u_xi_back_2
eta_xi_term = -2.0 * xi_vent / eta_vent * u_eta_xi_back
eta_xi_vent_term = -2.0 * xi_vent / (eta_vent ** 3.0) * np.square(eta) * u_eta_xi_vent
# determine which term is the driver for uncertainty
# --- Uncertainty of each metric, per region ------------------------------
# uncertainty() returns (variance, std, confidence-interval half-width) for
# the requested metric kind; `conf` is the confidence multiplier defined
# earlier in the file.  Mean uncertainties are appended per region in the
# same E,E1..E4 order as the averages recorded above.
# Whole volume:
u_E_2, u_E, u_E_confidence = uncertainty(eta, xi, conf, kind='E')
u_E1_2, u_E1, u_E1_confidence = uncertainty(eta, xi, conf, kind='E1')
u_E2_2, u_E2, u_E2_confidence = uncertainty(eta, xi, conf, kind='E2')
u_E3_2, u_E3, u_E3_confidence = uncertainty(eta, xi, conf, kind='E3')
u_E4_2, u_E4, u_E4_confidence = uncertainty(eta, xi, conf, kind='E4')
uncertainty_list.extend([u_E.mean(), u_E1.mean(), u_E2.mean(), u_E3.mean(), u_E4.mean()])
# Wall (model) region:
u_E_2_m, u_E_m, u_E_conf_m = uncertainty(eta_model, xi_model, conf, kind='E')
u_E1_2_m, u_E1_m, u_E1_conf_m = uncertainty(eta_model, xi_model, conf, kind='E1')
u_E2_2_m, u_E2_m, u_E2_conf_m = uncertainty(eta_model, xi_model, conf, kind='E2')
u_E3_2_m, u_E3_m, u_E3_conf_m = uncertainty(eta_model, xi_model, conf, kind='E3')
u_E4_2_m, u_E4_m, u_E4_conf_m = uncertainty(eta_model, xi_model, conf, kind='E4')
uncertainty_list.extend([u_E_m.mean(), u_E1_m.mean(), u_E2_m.mean(), u_E3_m.mean(), u_E4_m.mean()])
# Ventricle reference region (scalar means passed in here):
u_E_2_v, u_E_v, u_E_conf_v = uncertainty(eta_vent, xi_vent, conf, kind='E')
u_E1_2_v, u_E1_v, u_E1_conf_v = uncertainty(eta_vent, xi_vent, conf, kind='E1')
u_E2_2_v, u_E2_v, u_E2_conf_v = uncertainty(eta_vent, xi_vent, conf, kind='E2')
u_E3_2_v, u_E3_v, u_E3_conf_v = uncertainty(eta_vent, xi_vent, conf, kind='E3')
u_E4_2_v, u_E4_v, u_E4_conf_v = uncertainty(eta_vent, xi_vent, conf, kind='E4')
uncertainty_list.extend([u_E_v.mean(), u_E1_v.mean(), u_E2_v.mean(), u_E3_v.mean(), u_E4_v.mean()])
# Pituitary infundibulum:
u_E_2_pi, u_E_pi, u_E_conf_pi = uncertainty(eta_PI, xi_PI, conf, kind='E')
u_E1_2_pi, u_E1_pi, u_E1_conf_pi = uncertainty(eta_PI, xi_PI, conf, kind='E1')
u_E2_2_pi, u_E2_pi, u_E2_conf_pi = uncertainty(eta_PI, xi_PI, conf, kind='E2')
u_E3_2_pi, u_E3_pi, u_E3_conf_pi = uncertainty(eta_PI, xi_PI, conf, kind='E3')
u_E4_2_pi, u_E4_pi, u_E4_conf_pi = uncertainty(eta_PI, xi_PI, conf, kind='E4')
uncertainty_list.extend([u_E_pi.mean(), u_E1_pi.mean(), u_E2_pi.mean(), u_E3_pi.mean(), u_E4_pi.mean()])
# --- Record raw post/pre component means per region ----------------------
# Six records (Post/Pre x Wall/Ventricle/PI) appended to the same parallel
# bookkeeping lists; no uncertainty is defined for these raw components, so
# six NaN placeholders keep uncertainty_list aligned with the other lists.
case_id_list.append(case_id)
e_type_list.append("Post")
label_list.append("Wall")
average_list.append(E_post_model.mean())
case_id_list.append(case_id)
e_type_list.append("Pre")
label_list.append("Wall")
average_list.append(E_pre_model.mean())
case_id_list.append(case_id)
e_type_list.append("Post")
label_list.append("Ventricle")
# BUG FIX: this record previously appended E_post_model.mean() (the Wall
# value) under the "Ventricle" label; use the ventricle post component,
# mirroring the "Pre"/"Ventricle" record below which uses E_pre_vent.
average_list.append(E_post_vent.mean())
case_id_list.append(case_id)
e_type_list.append("Pre")
label_list.append("Ventricle")
average_list.append(E_pre_vent.mean())
case_id_list.append(case_id)
e_type_list.append("Post")
label_list.append("Pituitary Infundibulum")
average_list.append(E_post_PI.mean())
case_id_list.append(case_id)
e_type_list.append("Pre")
label_list.append("Pituitary Infundibulum")
average_list.append(E_pre_PI.mean())
uncertainty_list.extend([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
#u_E_2 = (u_eta_back_2 + u_xi_back_2 - 2.0 * u_eta_xi_back )
#u_E = np.sqrt(u_E_2)
#u_E_confidence = conf * u_E # gaussian 95% confidence interval
#u_E1_2 = eta_vent_term + xi_vent_term + eta_term + xi_term + eta_xi_term + eta_xi_vent_term
#u_E1 = np.sqrt(u_E1_2)
#u_E1_confidence = conf * u_E1 # gaussian 95% confidence interval
#u_E2_2 = (1.0 / (eta_vent**2.0) * u_eta_back_2 +
#1.0 / (xi_vent**2.0) * u_xi_back_2 +
#np.square( eta / (eta_vent**2.0)) * u_eta_vent_2 +
#np.square( xi / (xi_vent**2.0)) * u_xi_vent_2 -
#2.0 / (eta_vent * xi_vent) * u_eta_xi_back -
#2.0 * (eta * xi) / ( (eta_vent * xi_vent)**2.0 ) * u_eta_xi_vent
#)
#u_E2 = np.sqrt(u_E2_2)
#u_E2_confidence = conf * u_E2 # gaussian 95% confidence interval
#u_E3_2 = (1.0 / (u_eta_vent_2) * u_eta_back_2 +
#1.0 / (u_xi_vent_2) * u_xi_back_2 -
#2.0 / (np.sqrt(u_eta_vent_2 *u_xi_vent_2)) * u_eta_xi_back +
#2.0 -
#2.0 / (np.sqrt(u_eta_vent_2 *u_xi_vent_2)) * u_eta_xi_vent
#)
#u_E3 = np.sqrt(u_E3_2)
#u_E3_confidence = conf * u_E3 # gaussian 95% confidence interval
#u_E4_2 = ( 1.0 / (u_eta_vent_2 + u_xi_vent_2) * (
#u_eta_back_2 + u_xi_back_2 -
#2.0 * u_eta_xi_back +
#u_eta_vent_2 + u_xi_vent_2 -
#2.0 * u_eta_xi_vent
#)
#)
#u_E4 = np.sqrt(u_E4_2)
#u_E4_confidence = conf * u_E4 # gaussian 95% confidence interval
# Ensure the per-case plots directory exists before saving figures.
# Path construction is hoisted out of the try so only os.mkdir — the call
# that can actually raise FileExistsError — is guarded; this also guarantees
# `test` is bound when the except branch prints it.
test = os.path.join(write_file_dir, case_id, plots_dir)
try:
    os.mkdir(test)
    print("Directory " , test , " Created ")
except FileExistsError:
    print("Directory " , test , " already exists")
# --- Figure 4: per-case pre-vs-post scatter grid -------------------------
# A 3x4 gridspec holding five panels (E0 centered on the top row, E1..E4 in
# pairs below), each scattering pre vs post components for the three regions.
# font_size and dpi_value are defined earlier in the file.
alpha_4 = 1.0
gs4 = plt.GridSpec(3,4, wspace=0.2, hspace=0.8)
fig4 = plt.figure(figsize=(13, 15))
fig4.suptitle(r'{0}: pre: $\xi$ vs. post: $\eta$ comparison'.format(case_id), fontsize=font_size)
# E0 panel (raw components):
ax1_4 = fig4.add_subplot(gs4[0, 1:3])
ax1_4.set_title("Compare $E_0$ pre vs post", fontsize = font_size)
ax1_4.scatter(E_pre_model.ravel(), E_post_model.ravel(), label="Wall", alpha=alpha_4)
ax1_4.scatter(E_pre_vent.ravel(), E_post_vent.ravel(), label="Ventricle", alpha=alpha_4)
ax1_4.scatter(E_pre_PI.ravel(), E_post_PI.ravel(), label="Pituitary Infundibulum", alpha=alpha_4)
ax1_4.set_xlabel(r'pre $\xi$', fontsize = font_size)
ax1_4.set_ylabel(r'post $\eta$', fontsize = font_size)
ax1_4.legend()
# E1 panel (ventricle-mean ratio normalization):
ax2_4 = fig4.add_subplot(gs4[1, 0:2])
ax2_4.set_title("Compare $E_1$ pre vs post", fontsize = font_size)
ax2_4.scatter(E1_pre_model.ravel(), E1_post_model.ravel(), label="Wall", alpha=alpha_4)
ax2_4.scatter(E1_pre_vent.ravel(), E1_post_vent.ravel(), label="Ventricle", alpha=alpha_4)
ax2_4.scatter(E1_pre_PI.ravel(), E1_post_PI.ravel(), label="Pituitary Infundibulum", alpha=alpha_4)
ax2_4.set_xlabel(r'pre $\xi$', fontsize = font_size)
ax2_4.set_ylabel(r'post $\frac{ \bar{\xi}_{vent}}{ \bar{\eta}_{vent}} \eta$', fontsize = font_size)
ax2_4.legend()
# E2 panel (per-region ventricle-mean normalization):
ax3_4 = fig4.add_subplot(gs4[1, 2:4])
ax3_4.set_title("Compare $E_2$ pre vs post", fontsize = font_size)
ax3_4.scatter(E2_pre_model.ravel(), E2_post_model.ravel(), label="Wall", alpha=alpha_4)
ax3_4.scatter(E2_pre_vent.ravel(), E2_post_vent.ravel(), label="Ventricle", alpha=alpha_4)
ax3_4.scatter(E2_pre_PI.ravel(), E2_post_PI.ravel(), label="Pituitary Infundibulum", alpha=alpha_4)
ax3_4.set_xlabel(r'pre $\frac{\xi}{\bar{\xi}_{vent}}$', fontsize = font_size)
ax3_4.set_ylabel(r'post $\frac{\eta}{\bar{\eta}_{vent}}$', fontsize = font_size)
ax3_4.legend()
# E3 panel (z-score style normalization per modality):
ax4_4 = fig4.add_subplot(gs4[2, 0:2])
ax4_4.set_title("Compare $E_3$ pre vs post", fontsize = font_size)
ax4_4.scatter(E3_pre_model.ravel(), E3_post_model.ravel(), label="Wall", alpha=alpha_4)
ax4_4.scatter(E3_pre_vent.ravel(), E3_post_vent.ravel(), label="Ventricle", alpha=alpha_4)
ax4_4.scatter(E3_pre_PI.ravel(), E3_post_PI.ravel(), label="Pituitary Infundibulum", alpha=alpha_4)
ax4_4.set_xlabel(r'pre $ \frac{\xi - \bar{\xi}_{vent}}{ \sigma_{ \xi_{vent} } }$', fontsize = font_size)
ax4_4.set_ylabel(r'post $\frac{\eta - \bar{\eta}_{vent}}{\sigma_{ \eta_{vent} } }$', fontsize = font_size)
ax4_4.legend()
# E4 panel (pooled-std normalization):
ax5_4 = fig4.add_subplot(gs4[2, 2:4])
ax5_4.set_title("Compare $E_4$ pre vs post", fontsize = font_size)
ax5_4.scatter(E4_pre_model.ravel(), E4_post_model.ravel(), label="Wall", alpha=alpha_4)
ax5_4.scatter(E4_pre_vent.ravel(), E4_post_vent.ravel(), label="Ventricle", alpha=alpha_4)
ax5_4.scatter(E4_pre_PI.ravel(), E4_post_PI.ravel(), label="Pituitary Infundibulum", alpha=alpha_4)
ax5_4.set_xlabel(r'pre $ \frac{\xi - \bar{\xi}_{vent}}{ \sqrt{\sigma^2_{ \eta_{vent}} + \sigma^2_{ \xi_{vent} } } }$', fontsize = font_size)
ax5_4.set_ylabel(r'post $\frac{\eta - \bar{\eta}_{vent}}{\sqrt{\sigma^2_{ \eta_{vent}} + \sigma^2_{ \xi_{vent} } } }$', fontsize = font_size)
ax5_4.legend()
# Save and release the figure to keep memory bounded across cases.
path_E3_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Enhancement.png")
fig4.savefig(path_E3_model, dpi=dpi_value)
plt.close(fig4)
del fig4
# Lower alpha for the dense cross-case scatter plots that follow; ellipses
# stay fully opaque.
alpha_4 = 0.1
alpha_ellipse = 1.0
#gs5 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig5 = plt.figure(figsize=(13, 13))
#ax1_5 = fig5.add_subplot(gs5[0, 0])
#ax1_5.set_title("Compare Model post vs. pre", fontsize = font_size)
#ax1_5.scatter(E_pre_model.ravel() / E_pre_model.max(),
#E_post_model.ravel() / E_post_model.max(), label=r'$\frac{E}{max(E_0)}$', alpha=alpha_4)
#ax1_5.scatter(E1_pre_model.ravel() / E1_pre_model.max(),
#E1_post_model.ravel() / E1_post_model.max(), label=r'$\frac{E_1}{max(E_1)}$', alpha=alpha_4)
#ax1_5.scatter(E2_pre_model.ravel() / E2_pre_model.max(),
#E2_post_model.ravel() / E2_post_model.max(), label=r'$\frac{E_2}{max(E_2)}$', alpha=alpha_4)
#ax1_5.scatter(E3_pre_model.ravel() / E3_pre_model.max(),
#E3_post_model.ravel() / E3_post_model.max(), label=r'$\frac{E_3}{max(E_3)}$', alpha=alpha_4)
#ax1_5.scatter(E4_pre_model.ravel() / E4_pre_model.max(),
#E4_post_model.ravel() / E4_post_model.max(), label=r'$\frac{E_4}{max(E_4)}$', alpha=alpha_4)
#ax1_5.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_5.set_ylabel(r'Post: $E$', fontsize = font_size)
#ax1_5.legend()
#path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Model_normed.png")
#fig5.savefig(path_ventricle_model, dpi=dpi_value)
#plt.close(fig5)
#del fig5
#gs6 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig6 = plt.figure(figsize=(13, 13))
#ax1_6 = fig6.add_subplot(gs6[0, 0])
#ax1_6.set_title("Compare Pituitary Infundibulum post vs. pre", fontsize = font_size)
#ax1_6.scatter(E_pre_PI.ravel() / E_pre_PI.max(),
#E_post_PI.ravel()/E_post_PI.max(), label=r'$\frac{E}{max(E_0)}$', alpha=alpha_4)
#ax1_6.scatter(E1_pre_PI.ravel()/E1_pre_PI.max(),
#E1_post_PI.ravel()/E1_post_PI.max(), label=r'$\frac{E_1}{max(E_1)}$', alpha=alpha_4)
#ax1_6.scatter(E2_pre_PI.ravel() /E2_pre_PI.max(),
#E2_post_PI.ravel() / E2_post_PI.max(), label=r'$\frac{E_2}{max(E_2)}$', alpha=alpha_4)
#ax1_6.scatter(E3_pre_PI.ravel() / E3_pre_PI.max(),
#E3_post_PI.ravel() / E3_post_PI.max(), label=r'$\frac{E_3}{max(E_3)}$', alpha=alpha_4)
#ax1_6.scatter(E4_pre_PI.ravel() / E4_pre_PI.max(),
#E4_post_PI.ravel() / E4_post_PI.max(), label=r'$\frac{E_4}{max(E_4)}$', alpha=alpha_4)
#ax1_6.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_6.set_ylabel(r'Post: $E$', fontsize = font_size)
#ax1_6.legend()
#path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_PI_normed.png")
#fig6.savefig(path_ventricle_model, dpi=dpi_value)
#plt.close(fig6)
#del fig6
#gs7 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig7 = plt.figure(figsize=(13, 13))
#ax1_7 = fig7.add_subplot(gs7[0, 0])
#ax1_7.set_title("Compare Ventricle post vs. pre ", fontsize = font_size)
#ax1_7.scatter(E_pre_vent.ravel() / E_pre_vent.max(),
#E_post_vent.ravel() / E_post_vent.max(), label=r'$\frac{E}{max(E_0)}$', alpha=alpha_4)
#ax1_7.scatter(E1_pre_vent.ravel() / E1_pre_vent.max(),
#E1_post_vent.ravel() / E1_post_vent.max(), label=r'$\frac{E_1}{max(E_1)}$', alpha=alpha_4)
#ax1_7.scatter(E2_pre_vent.ravel() / E2_pre_vent.max(),
#E2_post_vent.ravel() / E2_post_vent.max(), label=r'$\frac{E_2}{max(E_2)}$', alpha=alpha_4)
#ax1_7.scatter(E3_pre_vent.ravel() / E3_pre_vent.max(),
#E3_post_vent.ravel() / E3_post_vent.max(), label=r'$\frac{E_3}{max(E_3)}$', alpha=alpha_4)
#ax1_7.scatter(E4_pre_vent.ravel() / E4_pre_vent.max(),
#E4_post_vent.ravel() / E4_post_vent.max(), label=r'$\frac{E_4}{max(E_4)}$', alpha=alpha_4)
#ax1_7.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_7.set_ylabel(r'Post: $E$', fontsize = font_size)
#ax1_7.legend()
#path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Ventricle_normed.png")
#fig7.savefig(path_ventricle_model, dpi=dpi_value)
#plt.close(fig7)
#del fig7
# --- Accumulate this case onto the shared cross-case axes ----------------
# ax1_5..ax15_5 are created outside this chunk and collect all cases: rows of
# five panels (E0..E4) for Ventricle (1-5), Wall (6-10), and PI (11-15).
# The facecolor of the first scatter in each row is reused so every panel of
# a region shares one color per case.
sc = ax1_5.scatter(E_pre_vent.ravel(), E_post_vent.ravel(), alpha=alpha_4, label="{0}".format(case_id))
sc_color = sc.get_facecolors()[0].tolist()
ax2_5.scatter(E1_pre_vent.ravel(), E1_post_vent.ravel() , alpha=alpha_4, color=sc_color, label="{0}".format(case_id))
ax3_5.scatter(E2_pre_vent.ravel(), E2_post_vent.ravel() , alpha=alpha_4, color=sc_color, label="{0}".format(case_id))
ax4_5.scatter(E3_pre_vent.ravel(), E3_post_vent.ravel() , alpha=alpha_4, color=sc_color, label="{0}".format(case_id))
ax5_5.scatter(E4_pre_vent.ravel(), E4_post_vent.ravel() , alpha=alpha_4, color=sc_color, label="{0}".format(case_id))
sc2 =ax6_5.scatter(E_pre_model.ravel(), E_post_model.ravel() , alpha=alpha_4, label="{0}".format(case_id))
sc_color2 = sc2.get_facecolors()[0].tolist()
ax7_5.scatter(E1_pre_model.ravel(), E1_post_model.ravel() , alpha=alpha_4, color=sc_color2, label="{0}".format(case_id))
ax8_5.scatter(E2_pre_model.ravel(), E2_post_model.ravel() , alpha=alpha_4, color=sc_color2, label="{0}".format(case_id))
ax9_5.scatter(E3_pre_model.ravel(), E3_post_model.ravel() , alpha=alpha_4, color=sc_color2, label="{0}".format(case_id))
ax10_5.scatter(E4_pre_model.ravel(), E4_post_model.ravel() , alpha=alpha_4, color=sc_color2, label="{0}".format(case_id))
sc3 =ax11_5.scatter(E_pre_PI.ravel(), E_post_PI.ravel() , alpha=alpha_4, label="{0}".format(case_id))
sc_color3 = sc3.get_facecolors()[0].tolist()
ax12_5.scatter(E1_pre_PI.ravel(), E1_post_PI.ravel() , alpha=alpha_4, color=sc_color3, label="{0}".format(case_id))
ax13_5.scatter(E2_pre_PI.ravel(), E2_post_PI.ravel() , alpha=alpha_4, color=sc_color3, label="{0}".format(case_id))
ax14_5.scatter(E3_pre_PI.ravel(), E3_post_PI.ravel() , alpha=alpha_4, color=sc_color3, label="{0}".format(case_id))
ax15_5.scatter(E4_pre_PI.ravel(), E4_post_PI.ravel() , alpha=alpha_4, color=sc_color3, label="{0}".format(case_id))
# --- Error ellipses on the scatter axes ----------------------------------
# For each metric/region panel, draw a covariance error ellipse centered on
# the (pre, post) means; covariance_mat and error_ellipse are helpers defined
# elsewhere in the file.  `None` suppresses the ellipse label; edge color
# matches this case's scatter color.
error_ellipse(ax1_5, E_pre_vent.mean(), E_post_vent.mean(),
covariance_mat(E_pre_vent.ravel(), E_post_vent.ravel()), None,
alpha=alpha_ellipse, ec = sc_color)
error_ellipse(ax2_5, E1_pre_vent.mean(), E1_post_vent.mean(),
covariance_mat(E1_pre_vent.ravel(), E1_post_vent.ravel()), None,
alpha=alpha_ellipse, ec = sc_color)
error_ellipse(ax3_5, E2_pre_vent.mean(), E2_post_vent.mean(),
covariance_mat(E2_pre_vent.ravel(), E2_post_vent.ravel()), None,
alpha=alpha_ellipse, ec = sc_color)
error_ellipse(ax4_5, E3_pre_vent.mean(), E3_post_vent.mean(),
covariance_mat(E3_pre_vent.ravel(), E3_post_vent.ravel()), None,
alpha=alpha_ellipse, ec = sc_color)
error_ellipse(ax5_5, E4_pre_vent.mean(), E4_post_vent.mean(),
covariance_mat(E4_pre_vent.ravel(), E4_post_vent.ravel()), None,
alpha=alpha_ellipse, ec = sc_color)
error_ellipse(ax6_5, E_pre_model.mean(), E_post_model.mean(),
covariance_mat(E_pre_model.ravel(), E_post_model.ravel()), None,
alpha=alpha_ellipse, ec = sc_color2)
error_ellipse(ax7_5, E1_pre_model.mean(), E1_post_model.mean(),
covariance_mat(E1_pre_model.ravel(), E1_post_model.ravel()), None,
alpha=alpha_ellipse, ec = sc_color2)
error_ellipse(ax8_5, E2_pre_model.mean(), E2_post_model.mean(),
covariance_mat(E2_pre_model.ravel(), E2_post_model.ravel()), None,
alpha=alpha_ellipse, ec = sc_color2)
error_ellipse(ax9_5, E3_pre_model.mean(), E3_post_model.mean(),
covariance_mat(E3_pre_model.ravel(), E3_post_model.ravel()),None,
alpha=alpha_ellipse, ec = sc_color2)
error_ellipse(ax10_5, E4_pre_model.mean(), E4_post_model.mean(),
covariance_mat(E4_pre_model.ravel(), E4_post_model.ravel()), None,
alpha=alpha_ellipse, ec = sc_color2)
error_ellipse(ax11_5, E_pre_PI.mean(), E_post_PI.mean(),
covariance_mat(E_pre_PI.ravel(), E_post_PI.ravel()), None,
alpha=alpha_ellipse, ec = sc_color3)
error_ellipse(ax12_5, E1_pre_PI.mean(), E1_post_PI.mean(),
covariance_mat(E1_pre_PI.ravel(), E1_post_PI.ravel()), None,
alpha=alpha_ellipse, ec = sc_color3)
error_ellipse(ax13_5, E2_pre_PI.mean(), E2_post_PI.mean(),
covariance_mat(E2_pre_PI.ravel(), E2_post_PI.ravel()), None,
alpha=alpha_ellipse, ec = sc_color3)
error_ellipse(ax14_5, E3_pre_PI.mean(), E3_post_PI.mean(),
covariance_mat(E3_pre_PI.ravel(), E3_post_PI.ravel()), None,
alpha=alpha_ellipse, ec = sc_color3)
error_ellipse(ax15_5, E4_pre_PI.mean(), E4_post_PI.mean(),
covariance_mat(E4_pre_PI.ravel(), E4_post_PI.ravel()),None,
alpha=alpha_ellipse, ec = sc_color3)
# --- Ellipse-only summary axes -------------------------------------------
# Mirror of the above on the ax*_6 axes: ellipses only (labeled with the
# case_id, no raw scatter), with each panel's limits synced to its scatter
# counterpart so ellipses are directly comparable across the two figures.
error_ellipse(ax1_6, E_pre_vent.mean(), E_post_vent.mean(),
covariance_mat(E_pre_vent.ravel(), E_post_vent.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color)
ax1_6.set_xlim(ax1_5.get_xlim())
ax1_6.set_ylim(ax1_5.get_ylim())
error_ellipse(ax2_6, E1_pre_vent.mean(), E1_post_vent.mean(),
covariance_mat(E1_pre_vent.ravel(), E1_post_vent.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color)
ax2_6.set_xlim(ax2_5.get_xlim())
ax2_6.set_ylim(ax2_5.get_ylim())
error_ellipse(ax3_6, E2_pre_vent.mean(), E2_post_vent.mean(),
covariance_mat(E2_pre_vent.ravel(), E2_post_vent.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color)
ax3_6.set_xlim(ax3_5.get_xlim())
ax3_6.set_ylim(ax3_5.get_ylim())
error_ellipse(ax4_6, E3_pre_vent.mean(), E3_post_vent.mean(),
covariance_mat(E3_pre_vent.ravel(), E3_post_vent.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color)
ax4_6.set_xlim(ax4_5.get_xlim())
ax4_6.set_ylim(ax4_5.get_ylim())
error_ellipse(ax5_6, E4_pre_vent.mean(), E4_post_vent.mean(),
covariance_mat(E4_pre_vent.ravel(), E4_post_vent.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color)
ax5_6.set_xlim(ax5_5.get_xlim())
ax5_6.set_ylim(ax5_5.get_ylim())
error_ellipse(ax6_6, E_pre_model.mean(), E_post_model.mean(),
covariance_mat(E_pre_model.ravel(), E_post_model.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color2)
ax6_6.set_xlim(ax6_5.get_xlim())
ax6_6.set_ylim(ax6_5.get_ylim())
error_ellipse(ax7_6, E1_pre_model.mean(), E1_post_model.mean(),
covariance_mat(E1_pre_model.ravel(), E1_post_model.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color2)
ax7_6.set_xlim(ax7_5.get_xlim())
ax7_6.set_ylim(ax7_5.get_ylim())
error_ellipse(ax8_6, E2_pre_model.mean(), E2_post_model.mean(),
covariance_mat(E2_pre_model.ravel(), E2_post_model.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color2)
ax8_6.set_xlim(ax8_5.get_xlim())
ax8_6.set_ylim(ax8_5.get_ylim())
error_ellipse(ax9_6, E3_pre_model.mean(), E3_post_model.mean(),
covariance_mat(E3_pre_model.ravel(), E3_post_model.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color2)
ax9_6.set_xlim(ax9_5.get_xlim())
ax9_6.set_ylim(ax9_5.get_ylim())
error_ellipse(ax10_6, E4_pre_model.mean(), E4_post_model.mean(),
covariance_mat(E4_pre_model.ravel(), E4_post_model.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color2)
ax10_6.set_xlim(ax10_5.get_xlim())
ax10_6.set_ylim(ax10_5.get_ylim())
error_ellipse(ax11_6, E_pre_PI.mean(), E_post_PI.mean(),
covariance_mat(E_pre_PI.ravel(), E_post_PI.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color3)
ax11_6.set_xlim(ax11_5.get_xlim())
ax11_6.set_ylim(ax11_5.get_ylim())
error_ellipse(ax12_6, E1_pre_PI.mean(), E1_post_PI.mean(),
covariance_mat(E1_pre_PI.ravel(), E1_post_PI.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color3)
ax12_6.set_xlim(ax12_5.get_xlim())
ax12_6.set_ylim(ax12_5.get_ylim())
error_ellipse(ax13_6, E2_pre_PI.mean(), E2_post_PI.mean(),
covariance_mat(E2_pre_PI.ravel(), E2_post_PI.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color3)
ax13_6.set_xlim(ax13_5.get_xlim())
ax13_6.set_ylim(ax13_5.get_ylim())
error_ellipse(ax14_6, E3_pre_PI.mean(), E3_post_PI.mean(),
covariance_mat(E3_pre_PI.ravel(), E3_post_PI.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color3)
ax14_6.set_xlim(ax14_5.get_xlim())
ax14_6.set_ylim(ax14_5.get_ylim())
error_ellipse(ax15_6, E4_pre_PI.mean(), E4_post_PI.mean(),
covariance_mat(E4_pre_PI.ravel(), E4_post_PI.ravel()), case_id,
alpha=alpha_ellipse, ec = sc_color3)
ax15_6.set_xlim(ax15_5.get_xlim())
ax15_6.set_ylim(ax15_5.get_ylim())
#gs5 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig5 = plt.figure(figsize=(13, 13))
#ax1_5 = fig5.add_subplot(gs5[0, 0])
#ax1_5.set_title("Compare Wall post vs. pre", fontsize = font_size)
#ax1_5.scatter(E_pre_model.ravel(), E_post_model.ravel() , label=r'$E_0$', alpha=alpha_4)
#ax1_5.scatter(E1_pre_model.ravel(), E1_post_model.ravel(), label=r'$E_1$', alpha=alpha_4)
#ax1_5.scatter(E2_pre_model.ravel(), E2_post_model.ravel(), label=r'$E_2$', alpha=alpha_4)
#ax1_5.scatter(E3_pre_model.ravel(), E3_post_model.ravel(), label=r'$E_3$', alpha=alpha_4)
#ax1_5.scatter(E4_pre_model.ravel(), E4_post_model.ravel(), label=r'$E_4$', alpha=alpha_4)
#ax1_5.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_5.set_ylabel(r'Post: $E$', fontsize = font_size)
#ax1_5.legend()
#path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Model_data.png")
#fig5.savefig(path_ventricle_model, dpi=dpi_value)
#plt.close(fig5)
#del fig5
#gs6 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig6 = plt.figure(figsize=(13, 13))
#ax1_6 = fig6.add_subplot(gs6[0, 0])
#ax1_6.set_title("Compare Pituitary Infundibulum post vs. pre", fontsize = font_size)
#ax1_6.scatter(E_pre_PI.ravel(), E_post_PI.ravel(), label=r'$E_0$', alpha=alpha_4)
#ax1_6.scatter(E1_pre_PI.ravel(), E1_post_PI.ravel(), label=r'$E_1$', alpha=alpha_4)
#ax1_6.scatter(E2_pre_PI.ravel(), E2_post_PI.ravel(), label=r'$E_2$', alpha=alpha_4)
#ax1_6.scatter(E3_pre_PI.ravel(), E3_post_PI.ravel(), label=r'$E_3$', alpha=alpha_4)
#ax1_6.scatter(E4_pre_PI.ravel(), E4_post_PI.ravel(), label=r'$E_4$', alpha=alpha_4)
#ax1_6.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_6.set_ylabel(r'Post: $E$', fontsize = font_size)
#ax1_6.legend()
#path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_PI_data.png")
#fig6.savefig(path_ventricle_model, dpi=dpi_value)
#plt.close(fig6)
#del fig6
#gs7 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig7 = plt.figure(figsize=(13, 13))
#ax1_7 = fig7.add_subplot(gs7[0, 0])
#ax1_7.set_title("Compare Ventricle post vs. pre ", fontsize = font_size)
#ax1_7.scatter(E_pre_vent.ravel(), E_post_vent.ravel(), label=r'$E_0$', alpha=alpha_4)
#ax1_7.scatter(E1_pre_vent.ravel(), E1_post_vent.ravel(), label=r'$E_1$', alpha=alpha_4)
#ax1_7.scatter(E2_pre_vent.ravel(), E2_post_vent.ravel(), label=r'$E_2$', alpha=alpha_4)
#ax1_7.scatter(E3_pre_vent.ravel(), E3_post_vent.ravel(), label=r'$E_3$', alpha=alpha_4)
#ax1_7.scatter(E4_pre_vent.ravel(),E4_post_vent.ravel(), label=r'$E_4$', alpha=alpha_4)
#ax1_7.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_7.set_ylabel(r'Post: $E$', fontsize = font_size)
#ax1_7.legend()
#path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Ventricle_data.png")
#fig7.savefig(path_ventricle_model, dpi=dpi_value)
#plt.close(fig7)
#del fig7
# --- Figure z: overlaid ventricle enhancement histograms -----------------
gsz = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
figz = plt.figure(figsize=(13, 13))
ax1_z = figz.add_subplot(gsz[0, 0])
ax1_z.set_title("Compare Histogram Ventricle for $E$ ", fontsize = font_size)
# estimate bandwidth
# NOTE(review): this resembles Scott's KDE rule n**(-1/(d+4)) but with the
# sample std substituted where the dimensionality d would normally appear —
# confirm the intended formula before reusing these bandwidths.  They are
# only printed here; the KDE code that used them is commented out below.
bw = (E_vent.ravel().shape[0] * (E_vent.std() + 2) / 4.)**(-1. / (E_vent.std() + 4))
bw2 = (E_model.ravel().shape[0] * (E_model.std() + 2) / 4.)**(-1. / (E_model.std() + 4))
bw3 = (E_PI.ravel().shape[0] * (E_PI.std() + 2) / 4.)**(-1. / (E_PI.std() + 4))
print("bw: ", bw, bw2, bw3)
#E_kde = KernelDensity(kernel='gaussian', bandwidth=bw).fit(E_vent.ravel()[:, None])
#res1 = ax1_z.hist(E_vent.ravel(), bins='sqrt', label="{0}:{1}".format(case_id, "Ventricle"), alpha=alpha_4, density=True, histtype="step")
#res1 = np.histogram(E_vent.ravel(), bins='sqrt', density=True)
#E_kde_res = E_kde.score_samples(res1[1][:, None])
#res2 = ax1_z.fill_between(res1[1], np.exp(E_kde_res), alpha=alpha_4, label=r'$E_0$')
# Density-normalized histograms of all five metrics over the ventricle.
ax1_z.hist(E_vent.ravel(), bins='sqrt', label=r'$E_0$', alpha=alpha_4, density=True)
ax1_z.hist(E1_vent.ravel(), bins='sqrt', label=r'$E_1$', alpha=alpha_4, density=True)
ax1_z.hist(E2_vent.ravel(), bins='sqrt', label=r'$E_2$', alpha=alpha_4, density=True)
ax1_z.hist(E3_vent.ravel(), bins='sqrt', label=r'$E_3$', alpha=alpha_4, density=True)
ax1_z.hist(E4_vent.ravel(), bins='sqrt', label=r'$E_4$', alpha=alpha_4, density=True)
#ax1_z.set_xlabel(r'Pre: $E$', fontsize = font_size)
#ax1_z.set_ylabel(r'Post: $E$', fontsize = font_size)
#ylims = ax1_z.get_ylim()
# Zoom: clamp density to 0.3 and shrink x-range to 20% of the auto limits.
xlims = ax1_z.get_xlim()
ax1_z.set_ylim([0.0, 0.3])
ax1_z.set_xlim([0.2*xlims[0], 0.2*xlims[1]])
ax1_z.legend()
path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_smooth_hist_data.png")
figz.savefig(path_ventricle_model, dpi=dpi_value)
plt.close(figz)
del figz
# --- KDE fits per metric/region + parameter bookkeeping ------------------
# kernel_fit returns (log-density values, evaluation grid, fit parameters).
# The log-density is exponentiated when plotted on the shared ax*_8 axes
# (rows: ventricle, wall model, PI), one curve per case; plot colors are
# reused within a region so all five panels of a region match.
# params_list[case_id] is a nested dict: {metric: {region: params}}.
alpha_5 = 0.8
params_list[case_id] = {} # have to create the empty dictionary
# Ventricle fits (establish this case's color via the first plot call):
E_vent_fit, E_grid, E_vent_params = kernel_fit(E_vent)
params_list[case_id].update( {"E" : {"vent": E_vent_params}})
res2 = ax1_8.plot(E_grid, np.exp(E_vent_fit), alpha=alpha_5, label="{0}".format(case_id))
plot_color = res2[0].get_color()
E1_vent_fit, E1_grid, E1_vent_params= kernel_fit(E1_vent)
params_list[case_id].update({"E1" : {"vent" : E1_vent_params}})
res = ax2_8.plot(E1_grid, np.exp(E1_vent_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color)
E2_vent_fit, E2_grid, E2_vent_params= kernel_fit(E2_vent)
params_list[case_id].update( {"E2" : {"vent" : E2_vent_params}})
res = ax3_8.plot(E2_grid, np.exp(E2_vent_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color)
E3_vent_fit, E3_grid, E3_vent_params= kernel_fit(E3_vent)
params_list[case_id].update( {"E3" : {"vent" : E3_vent_params}})
res = ax4_8.plot(E3_grid, np.exp(E3_vent_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color)
E4_vent_fit, E4_grid, E4_vent_params = kernel_fit(E4_vent)
params_list[case_id].update( {"E4" : {"vent" : E4_vent_params}})
res = ax5_8.plot(E4_grid, np.exp(E4_vent_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color)
# Wall (model) fits:
E_model_fit, E_grid, E_model_params = kernel_fit(E_model)
# BUG FIX: previously stored E_vent_params (the ventricle fit) under the
# "model" key; store the wall-model parameters computed just above.
params_list[case_id]["E"].update({'model' : E_model_params})
res3 = ax6_8.plot(E_grid, np.exp(E_model_fit), alpha=alpha_5, label="{0}".format(case_id))
plot_color2= res3[0].get_color()
E1_model_fit, E1_grid, E1_model_params = kernel_fit(E1_model)
params_list[case_id]["E1"].update({"model" : E1_model_params})
res = ax7_8.plot(E1_grid, np.exp(E1_model_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color2)
E2_model_fit, E2_grid, E2_model_params = kernel_fit(E2_model)
params_list[case_id]["E2"].update({"model": E2_model_params})
res = ax8_8.plot(E2_grid, np.exp(E2_model_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color2)
E3_model_fit, E3_grid, E3_model_params = kernel_fit(E3_model)
params_list[case_id]["E3"].update({"model": E3_model_params})
res = ax9_8.plot(E3_grid, np.exp(E3_model_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color2)
E4_model_fit, E4_grid, E4_model_params = kernel_fit(E4_model)
params_list[case_id]["E4"].update({"model": E4_model_params})
res = ax10_8.plot(E4_grid, np.exp(E4_model_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color2)
# Pituitary infundibulum fits:
E_PI_fit, E_grid, E_PI_params = kernel_fit(E_PI)
params_list[case_id]["E"].update({"PI": E_PI_params})
res4 = ax11_8.plot(E_grid, np.exp(E_PI_fit), alpha=alpha_5, label="{0}".format(case_id))
plot_color3= res4[0].get_color()
E1_PI_fit, E1_grid, E1_PI_params = kernel_fit(E1_PI)
params_list[case_id]["E1"].update({"PI" : E1_PI_params})
res = ax12_8.plot(E1_grid, np.exp(E1_PI_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color3)
E2_PI_fit, E2_grid, E2_PI_params = kernel_fit(E2_PI)
params_list[case_id]["E2"].update( {"PI" : E2_PI_params})
res = ax13_8.plot(E2_grid, np.exp(E2_PI_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color3)
E3_PI_fit, E3_grid, E3_PI_params = kernel_fit(E3_PI)
params_list[case_id]["E3"].update( {"PI" : E3_PI_params})
res = ax14_8.plot(E3_grid, np.exp(E3_PI_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color3)
E4_PI_fit, E4_grid, E4_PI_params = kernel_fit(E4_PI)
params_list[case_id]["E4"].update({"PI" : E4_PI_params})
res = ax15_8.plot(E4_grid, np.exp(E4_PI_fit), alpha=alpha_5, label="{0}".format(case_id), color=plot_color3)
#res1 = ax1_8.hist(E_vent.ravel(), bins='sqrt', label="{0}".format(case_id), alpha=alpha_5, density=True, histtype='step')
#hist_color = res1[2][0].get_facecolor()
#res = ax2_8.hist(E1_vent.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color, alpha=alpha_5, density=True, histtype='step')
#res = ax3_8.hist(E2_vent.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color, alpha=alpha_5, density=True, histtype='step')
#res = ax4_8.hist(E3_vent.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color, alpha=alpha_5, density=True, histtype='step')
#res = ax5_8.hist(E4_vent.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color, alpha=alpha_5, density=True, histtype='step')
#res2 = ax6_8.hist(E_model.ravel(), bins='sqrt', label="{0}".format(case_id), alpha=alpha_5, density=True, histtype='step')
#hist_color2 = res2[2][0].get_facecolor()
#res = ax7_8.hist(E1_model.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color2, alpha=alpha_5, density=True, histtype='step')
#res = ax8_8.hist(E2_model.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color2, alpha=alpha_5, density=True, histtype='step')
#res = ax9_8.hist(E3_model.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color2, alpha=alpha_5, density=True, histtype='step')
#res = ax10_8.hist(E4_model.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color2, alpha=alpha_5, density=True, histtype='step')
#res3 = ax11_8.hist(E_PI.ravel(), bins='sqrt', label="{0}".format(case_id), alpha=alpha_5, density=True, histtype='step')
#hist_color3 = res3[2][0].get_facecolor()
#res = ax12_8.hist(E1_PI.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color3, alpha=alpha_5, density=True, histtype='step')
#res = ax13_8.hist(E2_PI.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color3, alpha=alpha_5, density=True, histtype='step')
#res = ax14_8.hist(E3_PI.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color3, alpha=alpha_5, density=True, histtype='step')
#res = ax15_8.hist(E4_PI.ravel(), bins='sqrt', label="{0}".format(case_id), color=hist_color3, alpha=alpha_5, density=True, histtype='step')
# create histogram plots of image and model
n_bins2 = 4000
n_bins3 = 100
shrink_y = 0.2 # fraction of ylim to view distribution
shrink_x = 0.2
gs2 = plt.GridSpec(5,4, wspace=0.2, hspace=0.8)
#gs2 = plt.GridSpec(4,4, wspace=0.2, hspace=0.8)
# Create a figure
fig2 = plt.figure(figsize=(17, 19))
# SUBFIGURE 1
# Create subfigure 1 (takes over two rows (0 to 1) and column 0 of the grid)
ax1a_2 = fig2.add_subplot(gs2[0, 0])
ax1a_2.set_title("{0}: $E_0$ Volume".format(case_id), fontsize = font_size)
ax1a_2.set_ylabel("count", fontsize = font_size)
#ax1a_2.set_xlabel("Enhancement", fontsize = font_size)
ax1b_2 = fig2.add_subplot(gs2[0,1])
ax1b_2.set_title("{0}: $E_0$ Vessel Wall".format(case_id), fontsize = font_size)
#ax1b_2.set_ylabel("count", fontsize = font_size)
ax1c_2 = fig2.add_subplot(gs2[0, 2])
ax1c_2.set_title("{0}: $E_0$ Ventricle".format(case_id), fontsize = font_size)
#ax1b_2.set_ylabel("count", fontsize = font_size)
ax1d_2 = fig2.add_subplot(gs2[0, 3])
ax1d_2.set_title("{0}: $E_0$ Pituitary Infundibulum".format(case_id), fontsize = font_size)
#ax1d_2.set_title(r'{0}: $\bar{U}_{{E}}$ Volume'.format(case_id), fontsize = font_size)
ax2a_2 = fig2.add_subplot(gs2[1, 0])
ax2a_2.set_title("{0}: $E_1$ Volume".format(case_id), fontsize = font_size)
ax2a_2.set_ylabel("count", fontsize = font_size)
ax2b_2 = fig2.add_subplot(gs2[1, 1])
ax2b_2.set_title("{0}: $E_1$ Vessel Wall".format(case_id), fontsize = font_size)
ax2c_2 = fig2.add_subplot(gs2[1, 2])
ax2c_2.set_title("{0}: $E_1$ Ventricle".format(case_id), fontsize = font_size)
ax2d_2 = fig2.add_subplot(gs2[1, 3])
ax2d_2.set_title("{0}: $E_1$ Pituitary Infundibulum".format(case_id), fontsize = font_size)
#ax2d_2.set_title(r'{0}: $\bar{U}_{{E_1}}$ Volume'.format(case_id), fontsize = font_size)
ax3a_2 = fig2.add_subplot(gs2[2, 0])
ax3a_2.set_title("{0}: $E_2$ Volume".format(case_id), fontsize = font_size)
ax3a_2.set_ylabel("count", fontsize = font_size)
ax3b_2 = fig2.add_subplot(gs2[2, 1])
ax3b_2.set_title("{0}: $E_2$ Vessel Wall".format(case_id), fontsize = font_size)
ax3c_2 = fig2.add_subplot(gs2[2, 2])
ax3c_2.set_title("{0}: $E_2$ Ventricle".format(case_id), fontsize = font_size)
ax3d_2 = fig2.add_subplot(gs2[2, 3])
ax3d_2.set_title("{0}: $E_2$ Pituitary Infundibulum".format(case_id), fontsize = font_size)
#ax3d_2.set_title(r'{0}: $\bar{U}_{{E_2}}$ Volume'.format(case_id), fontsize = font_size)
ax4a_2 = fig2.add_subplot(gs2[3, 0])
ax4a_2.set_title("{0}: $E_3$ Volume".format(case_id), fontsize = font_size)
ax4a_2.set_ylabel("count", fontsize = font_size)
ax4b_2 = fig2.add_subplot(gs2[3, 1])
ax4b_2.set_title("{0}: $E_3$ Vessel Wall".format(case_id), fontsize = font_size)
ax4c_2 = fig2.add_subplot(gs2[3, 2])
ax4c_2.set_title("{0}: $E_3$ Ventricle".format(case_id), fontsize = font_size)
ax4d_2 = fig2.add_subplot(gs2[3, 3])
ax4d_2.set_title("{0}: $E_3$ Pituitary Infundibulum".format(case_id), fontsize = font_size)
#ax4d_2.set_title(r'{0}: $\bar{U}_{{E_3}}$ Volume'.format(case_id), fontsize = font_size)
ax5a_2 = fig2.add_subplot(gs2[4, 0])
ax5a_2.set_title("{0}: $E_4$ Volume".format(case_id), fontsize = font_size)
ax5a_2.set_ylabel("count", fontsize = font_size)
ax5a_2.set_xlabel("Enhancement", fontsize = font_size)
ax5b_2 = fig2.add_subplot(gs2[4, 1])
ax5b_2.set_title("{0}: $E_4$ Vessel Wall".format(case_id), fontsize = font_size)
ax5b_2.set_xlabel("Enhancement", fontsize = font_size)
ax5c_2 = fig2.add_subplot(gs2[4, 2])
ax5c_2.set_title("{0}: $E_4$ Ventricle".format(case_id), fontsize = font_size)
ax5c_2.set_xlabel("Enhancement", fontsize = font_size)
ax5d_2 = fig2.add_subplot(gs2[4, 3])
ax5d_2.set_title("{0}: $E_4$ Pituitary Infundibulum".format(case_id), fontsize = font_size)
#ax5d_2.set_title(r'{0}: $\bar{U}_{{E_4}}$ Volume'.format(case_id), fontsize = font_size)
ax1a_2.hist(E.ravel(), bins='auto', label="$E_0$")
ax1a_2.axvline(x=np.mean(E), color='r', label="mean")
ax1a_2.axvline(x=u_E.mean(), color='k', label=r'$+\bar{U}_{E_0}$')
#ax1a_2.axvline(x=-u_E.mean(), color='k', label=r'$-\bar{U}_{E_0}$')
ymin, ymax = ax1a_2.get_ylim()
ax1a_2.set_ylim([ymin, shrink_y*ymax])
xmin, xmax = ax1a_2.get_xlim()
ax1a_2.set_xlim([shrink_x*xmin, shrink_x*xmax])
ax1a_2.legend()
ax1b_2.hist(E_model.ravel(), bins='auto', label="$E_0$")
ax1b_2.axvline(x=np.mean(E_model), color='r', label="mean")
ax1b_2.axvline(x=u_E_m.mean(), color='k', label=r'$+\bar{U}_{E_0}$')
#ax1b_2.axvline(x=-u_E_m.mean(), color='k', label=r'$-\bar{U}_{E_0}$')
ax1b_2.legend()
ax1c_2.hist(E_vent.ravel(), bins='auto', label="$E_0$")
ax1c_2.axvline(x=np.mean(E_vent), color='r', label="mean")
ax1c_2.axvline(x=u_E_v.mean(), color='k', label=r'$+\bar{U}_{E_0}$')
#ax1c_2.axvline(x=-u_E_v.mean(), color='k', label=r'$-\bar{U}_{E_0}$')
ax1c_2.legend()
ax1d_2.hist(E_PI.ravel(), bins='auto', label="$E_0$")
ax1d_2.axvline(x=np.mean(E_PI), color='r', label="mean")
ax1d_2.axvline(x=u_E_pi.mean(), color='k', label=r'$+\bar{U}_{E_0}$')
#ax1d_2.axvline(x=-u_E_pi.mean(), color='k', label=r'$-\bar{U}_{E_0}$')
ax1d_2.legend()
#ax1d_2.hist(u_E.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E}$ Volume", density=True)
#ax1d_2.hist(u_E_m.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E}$ Wall", density=True)
#ax1d_2.hist(u_E_v.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E}$ Ventricle", density=True)
#ax1d_2.legend()
ax2a_2.hist(E1.ravel(), bins='auto', label="$E_1$")
ax2a_2.axvline(x=np.mean(E), color='r', label="mean")
ax2a_2.axvline(x=u_E1.mean(), color='k', label=r'$+\bar{U}_{E_1}$')
#ax2a_2.axvline(x=-u_E1.mean(), color='k', label=r'$-\bar{U}_{E_1}$')
ymin, ymax = ax2a_2.get_ylim()
ax2a_2.set_ylim([ymin, shrink_y*ymax])
xmin, xmax = ax2a_2.get_xlim()
ax2a_2.set_xlim([shrink_x*xmin, shrink_x*xmax])
ax2a_2.legend()
ax2b_2.hist(E1_model.ravel(), bins='auto', label="$E_1$")
ax2b_2.axvline(x=np.mean(E1_model), color='r', label="mean")
ax2b_2.axvline(x=u_E1_m.mean(), color='k', label=r'$+\bar{U}_{E_1}$')
#ax2b_2.axvline(x=-u_E1_m.mean(), color='k', label=r'$-\bar{U}_{E_1}$')
ax2b_2.legend()
ax2c_2.hist(E1_vent.ravel(), bins='auto', label="$E_1$t")
ax2c_2.axvline(x=np.mean(E1_vent), color='r', label="mean")
ax2c_2.axvline(x=u_E1_v.mean(), color='k', label=r'$+\bar{U}_{E_1}$')
#ax2c_2.axvline(x=-u_E1_v.mean(), color='k', label=r'$-\bar{U}_{E_1}$')
ax2c_2.legend()
ax2d_2.hist(E1_PI.ravel(), bins='auto', label="$E_1$")
ax2d_2.axvline(x=np.mean(E1_PI), color='r', label="mean")
ax2d_2.axvline(x=u_E1_pi.mean(), color='k', label=r'$+\bar{U}_{E_1}$')
#ax2d_2.axvline(x=-u_E1_pi.mean(), color='k', label=r'$-\bar{U}_{E_1}$')
ax2d_2.legend()
#ax2d_2.hist(u_E1.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_1}$ Volume", density=True)
#ax2d_2.hist(u_E1_m.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_1}$ Wall", density=True)
#ax2d_2.hist(u_E1_v.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_1}$ Ventricle", density=True)
#ax2d_2.legend()
ax3a_2.hist(E2.ravel(), bins='auto', label="$E_2$")
ax3a_2.axvline(x=np.mean(E2), color='r', label="mean")
ax3a_2.axvline(x=u_E2.mean(), color='k', label=r'$+\bar{U}_{E_2}$')
#ax3a_2.axvline(x=-u_E2.mean(), color='k', label=r'$-\bar{U}_{E_2}$')
ymin, ymax = ax3a_2.get_ylim()
ax3a_2.set_ylim([ymin, shrink_y*ymax])
xmin, xmax = ax3a_2.get_xlim()
ax3a_2.set_xlim([shrink_x*xmin, shrink_x*xmax])
ax3a_2.legend()
ax3b_2.hist(E2_model.ravel(), bins='auto', label="$E_2$")
ax3b_2.axvline(x=np.mean(E2_model), color='r', label="mean")
ax3b_2.axvline(x=u_E2_m.mean(), color='k', label=r'$+\bar{U}_{E_2}$')
#ax3b_2.axvline(x=-u_E2_m.mean(), color='k', label=r'$-\bar{U}_{E_2}$')
ax3b_2.legend()
ax3c_2.hist(E2_vent.ravel(), bins='auto', label="$E_2$")
ax3c_2.axvline(x=np.mean(E2_vent), color='r', label="mean")
ax3c_2.axvline(x=u_E2_v.mean(), color='k', label=r'$+\bar{U}_{E_2}$')
#ax3c_2.axvline(x=-u_E2_v.mean(), color='k', label=r'$-\bar{U}_{E_2}$')
ax3c_2.legend()
ax3d_2.hist(E2_PI.ravel(), bins='auto', label="$E_2$")
ax3d_2.axvline(x=np.mean(E2_PI), color='r', label="mean")
ax3d_2.axvline(x=u_E2_pi.mean(), color='k', label=r'$+\bar{U}_{E_2}$')
#ax3d_2.axvline(x=-u_E2_pi.mean(), color='k', label=r'$-\bar{U}_{E_2}$')
ax3d_2.legend()
#ax3d_2.hist(u_E2.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_2}$ Volume", density=True)
#ax3d_2.hist(u_E2_m.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_2}$ Wall", density=True)
#ax3d_2.hist(u_E2_v.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_2}$ Ventricle", density=True)
#ax3d_2.legend()
ax4a_2.hist(E3.ravel(), bins='auto', label="$E_3$")
ax4a_2.axvline(x=np.mean(E3), color='r', label="mean")
ax4a_2.axvline(x=u_E3.mean(), color='k', label=r'$+\bar{U}_{E_3}$')
#ax4a_2.axvline(x=-u_E3.mean(), color='k', label=r'$-\bar{U}_{E_3}$')
ymin, ymax = ax4a_2.get_ylim()
ax4a_2.set_ylim([ymin, shrink_y*ymax])
xmin, xmax = ax4a_2.get_xlim()
ax4a_2.set_xlim([shrink_x*xmin, shrink_x*xmax])
ax4a_2.legend()
ax4b_2.hist(E3_model.ravel(), bins='auto', label="$E_3$")
ax4b_2.axvline(x=np.mean(E3_model), color='r', label="mean")
ax4b_2.axvline(x=u_E3_m.mean(), color='k', label=r'$+\bar{U}_{E_3}$')
#ax4b_2.axvline(x=-u_E3_m.mean(), color='k', label=r'$-\bar{U}_{E_3}$')
ax4b_2.legend()
ax4c_2.hist(E3_vent.ravel(), bins='auto', label="$E_1$")
ax4c_2.axvline(x=np.mean(E3_vent), color='r', label="mean")
ax4c_2.axvline(x=u_E3_v.mean(), color='k', label=r'$+\bar{U}_{E_3}$')
#ax4c_2.axvline(x=-u_E3_v.mean(), color='k', label=r'$-\bar{U}_{E_3}$')
ax4c_2.legend()
ax4d_2.hist(E3_PI.ravel(), bins='auto', label="$E_3$")
ax4d_2.axvline(x=np.mean(E3_PI), color='r', label="mean")
ax4d_2.axvline(x=u_E3_pi.mean(), color='k', label=r'$+\bar{U}_{E_3}$')
#ax4d_2.axvline(x=-u_E3_pi.mean(), color='k', label=r'$-\bar{U}_{E_3}$')
ax4d_2.legend()
#ax4d_2.hist(u_E3.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_3}$ Volume", density=True)
#ax4d_2.hist(u_E3_m.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_3}$ Wall", density=True)
#ax4d_2.hist(u_E3_v.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_3}$ Ventricle", density=True)
#ax4d_2.legend()
ax5a_2.hist(E4.ravel(), bins='auto', label="$E_4$")
ax5a_2.axvline(x=np.mean(E4), color='r', label="mean")
ax5a_2.axvline(x=u_E4.mean(), color='k', label=r'$+\bar{U}_{E_4}$')
#ax5a_2.axvline(x=-u_E4.mean(), color='k', label=r'$-\bar{U}_{E_4}$')
ymin, ymax = ax5a_2.get_ylim()
ax5a_2.set_ylim([ymin, shrink_y*ymax])
xmin, xmax = ax5a_2.get_xlim()
ax5a_2.set_xlim([shrink_x*xmin, shrink_x*xmax])
ax5a_2.legend()
ax5b_2.hist(E4_model.ravel(), bins='auto', label="$E_4$ model")
ax5b_2.axvline(x=np.mean(E4_model), color='r', label="mean")
ax5b_2.axvline(x=u_E4_m.mean(), color='k', label=r'$+\bar{U}_{E_4}$')
#ax5b_2.axvline(x=-u_E4_m.mean(), color='k', label=r'$-\bar{U}_{E_4}$')
ax5b_2.legend()
ax5c_2.hist(E4_vent.ravel(), bins='auto', label="$E_4$ vent")
ax5c_2.axvline(x=np.mean(E4_vent), color='r', label="mean")
ax5c_2.axvline(x=u_E4_v.mean(), color='k', label=r'$+\bar{U}_{E_4}$')
#ax5c_2.axvline(x=-u_E4_v.mean(), color='k', label=r'$-\bar{U}_{E_4}$')
ax5c_2.legend()
ax5d_2.hist(E4_PI.ravel(), bins='auto', label="$E_4$")
ax5d_2.axvline(x=np.mean(E4_PI), color='r', label="mean")
ax5d_2.axvline(x=u_E4_pi.mean(), color='k', label=r'$+\bar{U}_{E_2}$')
#ax5d_2.axvline(x=-u_E4_pi.mean(), color='k', label=r'$-\bar{U}_{E_2}$')
ax5d_2.legend()
#ax5d_2.hist(u_E4.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_4}$ Volume", density=True)
#ax5d_2.hist(u_E4_m.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_4}$ Wall", density=True)
#ax5d_2.hist(u_E4_v.ravel(), bins='auto', alpha=alpha_4, label="$\bar{U}_{E_4}$ Ventricle", density=True)
#ax5d_2.legend()
#ax8_2.hist(u_E_confidence.ravel(), bins=n_bins3)
#ax8_2.axvline(x=np.mean(u_E_confidence), color='r')
#ax8_2.set_ylabel("count", fontsize = font_size)
path_images = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Enhancement_Distribution.png")
image_path_list.append(path_images)
fig2.savefig(path_images, dpi=dpi_value)
plt.close(fig2)
del fig2
if (skip_write) :
pass
else:
print(np.mean(u_E_confidence))
print(np.mean(u_E1_confidence))
print(np.mean(u_E2_confidence))
print(np.mean(u_E3_confidence))
print(np.mean(u_E4_confidence))
print("pink")
write_list = {}
E_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], 1.0, 1.0, kind = "E1")
write_list["E.nrrd"] = E_full
E1_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E1")
write_list["E1.nrrd"] = E1_full
E2_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E2")
write_list["E2.nrrd"] = E2_full
E3_full= VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=False)
write_list["E3.nrrd"] = E3_full
E4_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=False)
write_list["E4.nrrd"] = E4_full
#gs3 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig3 = plt.figure(figsize=(13, 13))
#ax1_3 = fig3.add_subplot(gs3[0, 0])
#ax1_3.set_title("Compare E3 pre vs post", fontsize = font_size)
#ax1_3.scatter(E3_pre.ravel(), E3_post.ravel())
#ax1_3.set_xlabel(r'pre $ \frac{\xi - \bar{\xi}_{vent}}{std(\xi_{vent})}$', fontsize = font_size)
#ax1_3.set_ylabel(r'post $\frac{\eta - \bar{\eta}_{vent}}{std(\eta_{vent})}$', fontsize = font_size)
#path_E3 = os.path.join(write_file_dir, case_id, plots_dir, "Compare_E3.png")
#fig3.savefig(path_E3)
#del fig3
#E_non-zeros = np.where(E == 0.0)
E_term_frac = np.divide(E, u_E_confidence.mean(), dtype=np.float)
E1_term_frac = np.divide(E1, u_E1_confidence.mean(), dtype=np.float)
E2_term_frac = np.divide(E2, u_E2_confidence.mean(), dtype=np.float)
E3_term_frac = np.divide(E3, u_E3_confidence.mean(), dtype=np.float)
E4_term_frac = np.divide(E4, u_E4_confidence.mean(), dtype=np.float)
# assuming the null hypothesis is that the pixel value for E is zero
P_E = 1.0 - stats.norm.cdf( E_term_frac / conf )
P_E1 = 1.0 - stats.norm.cdf( E1_term_frac / conf )
P_E2 = 1.0 - stats.norm.cdf( E2_term_frac / conf)
P_E3 = 1.0 - stats.norm.cdf( E3_term_frac / conf)
P_E4 = 1.0 - stats.norm.cdf( E4_term_frac / conf)
# create confidence arrays
pE = np.zeros_like(E_full)
pE[non_zero_indx] = P_E
write_list["pE.nrrd"] = pE
uE = -1.0*np.ones_like(E_full)
uE[non_zero_indx] = u_E_confidence
write_list["UE.nrrd"] = uE
#percent_uE = 1.0*np.ones_like(E_full)
#percent_uE[non_zero_indx] = np.abs(div0(1.0, E_term_frac, value=-1.0) )
#percent_uE[percent_uE > 1.0] = 1.0
#write_list["percent_error_E.nrrd"] = percent_uE
pE1 = np.zeros_like(E1_full)
pE1[non_zero_indx] = P_E1
write_list["pE1.nrrd"] = pE1
uE1 = -1.0*np.ones_like(E1_full)
uE1[non_zero_indx] = u_E1_confidence
write_list["UE1.nrrd"] = uE1
#percent_uE1 = -1.0*np.ones_like(E1_full)
#percent_uE1[non_zero_indx] = np.abs(div0(1.0, E1_term_frac, value=-1.0))
#percent_uE1[percent_uE1 > 1.0] = 1.0
#write_list["percent_error_E1.nrrd"] = percent_uE1
pE2 = np.zeros_like(E2_full)
pE2[non_zero_indx] = P_E2
write_list["pE2.nrrd"] = pE2
uE2 = -1.0*np.ones_like(E2_full)
uE2[non_zero_indx] = u_E2_confidence
write_list["UE2.nrrd"] = uE2
#percent_uE2 = -1.0*np.ones_like(E2_full)
#percent_uE2[non_zero_indx] = np.abs(div0(1.0, E2_term_frac, value=-1.0 ) )
#percent_uE2[percent_uE2 > 1.0] = 1.0
#write_list["percent_error_E2.nrrd"] = percent_uE2
pE3= np.zeros_like(E3_full)
pE3[non_zero_indx] = P_E3
write_list["pE1.nrrd"] = pE3
uE3 = -1.0*np.ones_like(E3_full)
uE3[non_zero_indx] = u_E3_confidence
write_list["UE3.nrrd"] = uE3
#percent_uE3 = -1.0*np.ones_like(E3_full)
#percent_uE3[non_zero_indx] = np.abs(div0(1.0, E3_term_frac, value=-1.0) )
#percent_uE3[percent_uE3 > 1.0] = 1.0
#write_list["percent_error_E3.nrrd"] = percent_uE3
pE4 = np.zeros_like(E4_full)
pE4[non_zero_indx] = P_E4
write_list["pE4.nrrd"] = pE4
uE4 = -1.0*np.ones_like(E4_full)
uE4[non_zero_indx] = u_E4_confidence
write_list["UE4.nrrd"] = uE4
#percent_uE4 = -1.0*np.ones_like(E4_full)
#percent_uE4[non_zero_indx] = np.abs(div0(1.0, E4_term_frac, value=-1.0 ) )
#percent_uE4[percent_uE4 > 1.0] = 1.0
#write_list["percent_error_E4.nrrd"] = percent_uE4
# threshold
#indx_E = np.where(E_full > uE)
#E_thresh = np.zeros_like(E_full)
#E_thresh[indx_E] = E_full[indx_E]
#write_list["E_thresh.nrrd"] = E_thresh
#indx_E1 = np.where(E1_full > uE1)
#E1_thresh = np.zeros_like(E1_full)
#E1_thresh[indx_E1] = E1_full[indx_E1]
#write_list["E1_thresh.nrrd"] = E1_thresh
#indx_E2 = np.where(E2_full > uE2)
#E2_thresh = np.zeros_like(E2_full)
#E2_thresh[indx_E2] = E2_full[indx_E2]
#write_list["E2_thresh.nrrd"] = E2_thresh
#indx_E3 = np.where(E3_full > uE3)
#E3_thresh = np.zeros_like(E3_full)
#E3_thresh[indx_E3] = E3_full[indx_E3]
#write_list["E3_thresh.nrrd"] = E3_thresh
#indx_E4 = np.where(E4_full > uE4)
#E4_thresh = np.zeros_like(E4_full)
#E4_thresh[indx_E4] = E4_full[indx_E4]
#write_list["E4_thresh.nrrd"] = E4_thresh
## threshold 2
#indx_E = np.where(E_full > 0.0)
#E_thresh2= np.zeros_like(E_full)
#E_thresh2[indx_E] = E_full[indx_E]
#write_list["E_thresh2.nrrd"] = E_thresh2
#indx_E1 = np.where(E1_full > 0.0)
#E1_thresh2 = np.zeros_like(E1_full)
#E1_thresh2[indx_E1] = E1_full[indx_E1]
#write_list["E1_thresh2.nrrd"] = E1_thresh2
#indx_E2 = np.where(E2_full > 0.0)
#E2_thresh2 = np.zeros_like(E2_full)
#E2_thresh2[indx_E2] = E2_full[indx_E2]
#write_list["E2_thresh2.nrrd"] = E2_thresh2
#indx_E3 = np.where(E3_full > 0.0)
#E3_thresh2= np.zeros_like(E3_full)
#E3_thresh2[indx_E3] = E3_full[indx_E3]
#write_list["E3_thresh2.nrrd"] = E3_thresh2
#indx_E4 = np.where(E4_full > 0.0)
#E4_thresh2 = np.zeros_like(E4_full)
#E4_thresh2[indx_E4] = E4_full[indx_E4]
#write_list["E4_thresh2.nrrd"] = E4_thresh2
for file_name, np_image in write_list.items():
path_test = os.path.join(write_file_dir, case_id, write_dir, file_name)
if ( not os.path.exists(path_test) or overwrite_out == True):
nrrd.write(path_test, np_image, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E.nrrd"), E_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E1.nrrd"), E1_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E2.nrrd"), E2_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E3.nrrd"), E3_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E4.nrrd"), E4_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE.nrrd"), uE, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE1.nrrd"), uE1, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE2.nrrd"), uE2, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE3.nrrd"), uE3, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE4.nrrd"), uE4, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E_thresh.nrrd"), E_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E1_thresh.nrrd"), E1_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E2_thresh.nrrd"), E2_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E3_thresh.nrrd"), E3_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E4_thresh.nrrd"), E4_thresh, case_imgs["VWI_post_masked_vent"][1])
if (skip_bootstrap):
pass
else:
boot_size = 10000
#n_bins = 30
pre_dist_std = np.zeros(boot_size)
post_dist_std = np.zeros(boot_size)
pre_dist_mean = np.zeros(boot_size)
post_dist_mean = np.zeros(boot_size)
for i in range(boot_size):
X_resample_pre, ns = bootstrap_resample(img_pre_vent, n=None, percent=percent_boot)
X_resample_post, ns = bootstrap_resample(img_post_vent, n=None, percent=percent_boot)
pre_dist_std[i] = X_resample_pre.std()
post_dist_std[i] = X_resample_post.std()
pre_dist_mean[i] = X_resample_pre.mean()
post_dist_mean[i] = X_resample_post.mean()
#print( 'original mean:', X.mean()
#print(case_id, "post vent resample MEAN: {0:.4f} pre vent resample MEAN {1:.4f}".format(
# X_resample_pre.mean() , X_resample_post.mean() ))
print ("ha")
gs = plt.GridSpec(3,2, wspace=0.2, hspace=0.8)
# Create a figure
fig = plt.figure(figsize=(13, 13))
fig.suptitle("{0} Comparison of bootstrapped means".format(case_id), fontsize = font_size)
# SUBFIGURE 1
# Create subfigure 1 (takes over two rows (0 to 1) and column 0 of the grid)
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_title("{0}: pre vs post bootstrap".format(case_id), fontsize = font_size)
ax3 = fig.add_subplot(gs[0, 1])
ax3.set_title(r'$E_0 = \eta - \xi $ bootstrap', fontsize = font_size)
ax4 = fig.add_subplot(gs[1, 0])
ax4.set_title(r'$E_1 = \frac{\overline{\xi}_{v}}{\overline{\eta}_{v}} post - pre$', fontsize = font_size)
ax5 = fig.add_subplot(gs[1, 1])
ax5.set_title(r'$E_2 = \frac{\eta}{\overline{\eta}_{v}} - \frac{\xi}{\overline{\xi}_{v}}$', fontsize = font_size)
ax6 = fig.add_subplot(gs[2, 0])
ax6.set_title(r'$E_3 = \frac{\eta - \overline{\eta}_{v}}{s_{\eta_{v}}} - \frac{\xi - \overline{\xi}_{v}}{s_{\xi_{v}}}$', fontsize = font_size)
ax2 = fig.add_subplot(gs[2, 1])
ax2.set_title("VWI_post_masked_vent bootstrap", fontsize = font_size)
ax2.set_title(r'$ E_4 = \frac{ \left ( \eta - \overline{\eta}_{v} \right ) - \left ( \xi - \overline{\xi}_{v} \right ) } {\sqrt{s^2_{\eta_{v}} + s^2_{\eta_{v}}}} $', fontsize = font_size)
ax1.scatter(pre_dist_mean, post_dist_mean)
ax1.set_xlabel(r'$\eta$', fontsize = font_size)
ax1.set_ylabel(r'$\xi$', fontsize = font_size)
test_E = VWI_Enhancement(post_dist_mean, pre_dist_mean, 1.0, 1.0, kind = "E1")
ax3.hist(test_E, bins="auto")
ax3.axvline(x=(eta_vent - xi_vent), color='r')
#ax3.axvline(x=test_E.mean(), color='b')
ax3.set_xlabel("mean $E_0$", fontsize = font_size)
ax3.set_ylabel("count", fontsize = font_size)
test_E1 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E1")
ax4.hist(test_E1, bins="auto")
ax4.axvline(x=0.0, color='r')
#ax4.axvline(x=xi_vent/eta_vent*post_dist_mean.mean() - pre_dist_mean.mean(), color='r')
#ax4.axvline(x=test_E1.mean(), color='b')
ax4.set_xlabel("mean $E_1$", fontsize = font_size)
ax4.set_ylabel("count", fontsize = font_size)
test_E2 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E2")
ax5.hist(test_E2, bins="auto")
ax5.axvline(x=0.0, color='r')
#ax5.axvline(x=(post_dist_mean.mean() / eta_vent) - (pre_dist_mean.mean() / xi_vent), color='r')
#ax5.axvline(x=test_E2.mean(), color='b')
ax5.set_xlabel("mean $E_2$", fontsize = font_size)
ax5.set_ylabel("count", fontsize = font_size)
test_E3 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E3",
std_post_vent = post_dist_std, std_pre_vent = pre_dist_std)
ax6.hist(test_E3, bins="auto")
ax6.axvline(x=0.0 , color='r')
#ax6.axvline(x=(post_dist_mean.mean() - eta_vent) / post_dist_std.mean() - (pre_dist_mean.mean() - xi_vent) / pre_dist_std.mean() , color='b')
ax6.set_xlabel("mean $E_3$", fontsize = font_size)
ax6.set_ylabel("count", fontsize = font_size)
test_E4 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E4",
std_post_vent = post_dist_std, std_pre_vent = pre_dist_std)
ax2.hist(test_E4, bins="auto")
ax2.axvline(x=0.0 , color='r')
#ax6.axvline(x=(post_dist_mean.mean() - eta_vent) / post_dist_std.mean() - (pre_dist_mean.mean() - xi_vent) / pre_dist_std.mean() , color='b')
ax2.set_xlabel("mean $E_4$", fontsize = font_size)
ax2.set_ylabel("count", fontsize = font_size)
path_bootstrap = os.path.join(write_file_dir, case_id, plots_dir, "Compare_bootstrap.png")
bootstrap_fig_list.append(path_bootstrap)
fig.savefig(path_bootstrap, dpi=dpi_value)
#plt.show()
plt.close(fig)
del fig
print("hehe")
pickle.dump( params_list, open(os.path.join(write_file_dir, bw_file), 'wb') )
print("saved another pickle!")
data_cases = {'Case ID': case_id_list, "Enhancement Type": e_type_list, "Region": label_list, "Average": average_list, "Uncertainty": uncertainty_list}
# Create DataFrame
df_cases = pd.DataFrame(data_cases)
df_cases.to_pickle(os.path.join(write_file_dir, enhancement_file))
print("saved the pickle!")
print(image_path_list)
print(bootstrap_fig_list)
lg_8 = ax11_8.legend(bbox_to_anchor=(1.04,1), loc="upper left")
for lg in lg_8.legendHandles:
lg.set_alpha(1.0)
path_ventricle_model = os.path.join(write_file_dir, "Compare_histograms.png")
fig8.savefig(path_ventricle_model, dpi=dpi_value)
plt.close(fig8)
del fig8
lg_5 = ax11_5.legend(bbox_to_anchor=(1.04,1), loc="upper left")
for lg in lg_5.legendHandles:
lg.set_alpha(1.0)
path_ventricle_model = os.path.join(write_file_dir, "Compare_pre2post.png")
fig5.savefig(path_ventricle_model, dpi=dpi_value)
plt.close(fig5)
del fig5
lg_6 = ax11_6.legend(bbox_to_anchor=(1.04,1), loc="upper left")
for lg in lg_6.legendHandles:
lg.set_alpha(1.0)
path_ventricle_model = os.path.join(write_file_dir, "Compare_pre2post_ellipse.png")
fig6.savefig(path_ventricle_model, dpi=dpi_value)
plt.close(fig6)
del fig6
| kayarre/Tools | process_VWI_nrrd_4.py | Python | bsd-2-clause | 97,925 | [
"Gaussian"
] | a526c42f228114670215eda577778ee5d43f5ef873a13e355200e36a3078d37c |
########################################################################
# $HeadURL$
# File : MightyOptimizer.py
# Author : Adria Casajus
########################################################################
"""
SuperOptimizer
One optimizer to rule them all, one optimizer to find them,
one optimizer to bring them all, and in the darkness bind them.
"""
__RCSID__ = "$Id$"
import os
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.Core.Utilities import ThreadSafe
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
gOptimizerLoadSync = ThreadSafe.Synchronizer()
class MightyOptimizer( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
__jobStates = [ 'Received', 'Checking' ]
def initialize( self ):
""" Standard constructor
"""
self.jobDB = JobDB()
self.jobLoggingDB = JobLoggingDB()
self._optimizers = {}
self.am_setOption( "PollingTime", 30 )
return S_OK()
def execute( self ):
""" The method call by AgentModule on each iteration
"""
jobTypeCondition = self.am_getOption( "JobTypeRestriction", [] )
jobCond = { 'Status': self.__jobStates }
if jobTypeCondition:
jobCond[ 'JobType' ] = jobTypeCondition
result = self.jobDB.selectJobs( jobCond )
if not result[ 'OK' ]:
return result
jobsList = result[ 'Value' ]
self.log.info( "Got %s jobs for this iteration" % len( jobsList ) )
if not jobsList:
return S_OK()
result = self.jobDB.getAttributesForJobList( jobsList )
if not result[ 'OK' ]:
return result
jobsToProcess = result[ 'Value' ]
for jobId in jobsToProcess:
self.log.info( "== Processing job %s == " % jobId )
jobAttrs = jobsToProcess[ jobId ]
jobDef = False
jobOptimized = False
jobOK = True
while not jobOptimized:
result = self.optimizeJob( jobId, jobAttrs, jobDef )
if not result[ 'OK' ]:
self.log.error( "Optimizer %s error" % jobAttrs[ 'MinorStatus' ], "Job %s: %s" % ( str(jobID), result[ 'Message' ] ) )
jobOK = False
break
optResult = result[ 'Value' ]
jobOptimized = optResult[ 'done' ]
if 'jobDef' in optResult:
jobDef = optResult[ 'jobDef' ]
if jobOK:
self.log.info( "Finished optimizing job %s" % jobId )
return S_OK()
def optimizeJob( self, jobId, jobAttrs, jobDef ):
""" The method call for each Job to be optimized
"""
#Get the next optimizer
result = self._getNextOptimizer( jobAttrs )
if not result[ 'OK' ]:
return result
optimizer = result[ 'Value' ]
if not optimizer:
return S_OK( { 'done' : True } )
#If there's no job def then get it
if not jobDef:
result = optimizer.getJobDefinition( jobId, jobDef )
if not result['OK']:
optimizer.setFailedJob( jobId, result[ 'Message' ] )
return result
jobDef = result[ 'Value' ]
#Does the optimizer require a proxy?
shifterEnv = False
if optimizer.am_getModuleParam( 'shifterProxy' ):
shifterEnv = True
result = setupShifterProxyInEnv( optimizer.am_getModuleParam( 'shifterProxy' ),
optimizer.am_getShifterProxyLocation() )
if not result[ 'OK' ]:
return result
#Call the initCycle function
result = self.am_secureCall( optimizer.beginExecution, name = "beginExecution" )
if not result[ 'OK' ]:
return result
#Do the work
result = optimizer.optimizeJob( jobId, jobDef[ 'classad' ] )
if not result[ 'OK' ]:
return result
nextOptimizer = result[ 'Value' ]
#If there was a shifter proxy, unset it
if shifterEnv:
del( os.environ[ 'X509_USER_PROXY' ] )
#Check if the JDL has changed
newJDL = jobDef[ 'classad' ].asJDL()
if newJDL != jobDef[ 'jdl' ]:
jobDef[ 'jdl' ] = newJDL
#If there's a new optimizer set it!
if nextOptimizer:
jobAttrs[ 'Status' ] = 'Checking'
jobAttrs[ 'MinorStatus' ] = nextOptimizer
return S_OK( { 'done' : False, 'jobDef' : jobDef } )
return S_OK( { 'done' : True, 'jobDef' : jobDef } )
def _getNextOptimizer( self, jobAttrs ):
  """ Determine the next Optimizer in the Path for this job.

      Jobs in 'Received' state always start with the JobPath optimizer;
      otherwise the job's current MinorStatus names the optimizer to run.
      Loaded optimizers are cached in self._optimizers.

      :return: S_OK( optimizer instance ), S_OK( False ) when the optimizer
               is filtered out, or S_ERROR on load failure
  """
  if jobAttrs[ 'Status' ] == 'Received':
    nextOptimizer = "JobPath"
  else:
    nextOptimizer = jobAttrs[ 'MinorStatus' ]
  # NOTE(review): if am_getOption returns the default as the *string*
  # "InputData, BKInputData", the 'in' test below is a substring match
  # (e.g. "Input" would also be filtered) -- confirm the option is
  # returned as a list of names.
  if nextOptimizer in self.am_getOption( "FilteredOptimizers", "InputData, BKInputData" ):
    return S_OK( False )
  gLogger.info( "Next optimizer for job %s is %s" % ( jobAttrs['JobID'], nextOptimizer ) )
  # Optimizers are instantiated once and reused across jobs
  if nextOptimizer not in self._optimizers:
    result = self.__loadOptimizer( nextOptimizer )
    if not result[ 'OK' ]:
      return result
    self._optimizers[ nextOptimizer ] = result[ 'Value' ]
  return S_OK( self._optimizers[ nextOptimizer ] )
@gOptimizerLoadSync
def __loadOptimizer( self, optimizerName ):
  """ Import, instantiate and initialize the agent for *optimizerName*.

      The agent module is expected at
      DIRAC.WorkloadManagementSystem.Agent.<optimizerName>Agent.

      :return: S_OK( optimizer instance ) or S_ERROR
  """
  gLogger.info( "Loading optimizer %s" % optimizerName )
  try:
    agentName = "%sAgent" % optimizerName
    optimizerModule = __import__( 'DIRAC.WorkloadManagementSystem.Agent.%s' % agentName,
                                  globals(),
                                  locals(), agentName )
    optimizerClass = getattr( optimizerModule, agentName )
    optimizer = optimizerClass( "WorkloadManagement/%s" % agentName, self.am_getModuleParam( 'fullName' ) )
    result = optimizer.am_initialize( self.jobDB, self.jobLoggingDB )
    if not result[ 'OK' ]:
      return S_ERROR( "Can't initialize optimizer %s: %s" % ( optimizerName, result[ 'Message' ] ) )
  except Exception as e:
    # 'except ... as' works on Python 2.6+ and Python 3; the previous
    # 'except Exception, e' form is a syntax error on Python 3.
    gLogger.exception( "LOADERROR" )
    return S_ERROR( "Can't load optimizer %s: %s" % ( optimizerName, str( e ) ) )
  return S_OK( optimizer )
| calancha/DIRAC | WorkloadManagementSystem/Agent/MightyOptimizer.py | Python | gpl-3.0 | 6,295 | [
"DIRAC"
] | c5d4954c7cbb851baf1f4f025239611b90b7ea48d34f67b83044fe06caf3414a |
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# ----------------------------------------
# USAGE:
# ----------------------------------------
# PREAMBLE:
import MDAnalysis
from MDAnalysis.analysis.align import *
import sys
import os
from sel_list import *
from distance_functions import *
# ----------------------------------------
# VARIABLE DECLARATION
ref_list = []
# Each entry: [system name, index of first trajectory file, index of last trajectory file]
ref_list.append(['AMBER_apo', 21, 150])
ref_list.append(['AMBER_atp', 21, 150])
ref_list.append(['AMBER_ssrna', 21, 150])
ref_list.append(['AMBER_ssrna_atp', 21, 150])
ref_list.append(['AMBER_ssrna_adp_pi', 21, 150])
ref_list.append(['AMBER_ssrna_adp', 21, 150])
ref_list.append(['AMBER_ssrna_pi', 21, 150])
ref_file = sys.argv[1]  # pointer to the pdb file to be used as the reference structure
traj_loc = sys.argv[2]  # pointer to the trajectory positions (or really the position where all systems are stored); look at this variable's use in the script
number = int(sys.argv[3])  # integer identifying which system is used as the reference structure; use python indexing for the ref_list variable
flush = sys.stdout.flush  # shorthand used by ffprint below
# CA atoms of structurally stable residues; used for least-squares alignment
alignment = 'protein and name CA and (resid 20:25 50:55 73:75 90:94 112:116 142:147 165:169 190:194 214:218 236:240 253:258 303:307)'
# Atom group that gets translated/rotated before the RMSD measurements
important = 'protein'
nSys = len(ref_list)  # number of simulated systems to analyze
nSel = len(sel)       # number of atom selections (sel comes from sel_list)
# ----------------------------------------
# FUNCTIONS:
def ffprint(string):
    """Print *string* and flush stdout immediately.

    Uses the print() function form, which is valid on both Python 2
    (a single parenthesized expression prints identically) and Python 3;
    the previous 'print expr' statement form is a syntax error on Python 3.
    """
    print('%s' % (string))
    flush()
def summary(nSteps):
    """Write a .rmsd.summary file describing this analysis run.

    Records the MDAnalysis version, the exact command line, the output
    file names, the total number of frames analyzed, the atom selections,
    and the systems/trajectory ranges.

    NOTE(review): the output filename uses a global 'system' name that is
    not defined anywhere in this script (unless sel_list exports it) --
    confirm; as written this would raise NameError when called.
    """
    sum_file = open('%s.rmsd.summary' %(system),'w')
    sum_file.write('Using MDAnalysis version: %s\n' %(MDAnalysis.version.__version__))
    sum_file.write('To recreate this analysis, run this line in terminal:\n')
    for i in range(len(sys.argv)):
        sum_file.write('%s ' %(sys.argv[i]))
    sum_file.write('\n\n')
    sum_file.write('output is written to:\n')
    sum_file.write('	%s.output\n' %(ref_list[number][0]))
    sum_file.write('	%s.rmsd.dat\n' %(ref_list[number][0]))
    sum_file.write('\nTotal number of steps analyzed: %d\n' %(nSteps))
    sum_file.write('\nAtom selections analyzed:\n')
    for i in range(nSel):
        sum_file.write('	%02d	%s	%s\n' %(i,sel[i][0],sel[i][1]))
    sum_file.write('\nSystems analyzed:\n')
    for i in range(nSys):
        sum_file.write('	%s, Trajectories %03d to %03d\n' %(ref_list[i][0],ref_list[i][1],ref_list[i][2]))
    sum_file.close()
# ----------------------------------------
# MAIN:
out1 = open('%s.output' %(ref_list[number][0]),'w',1)  # line-buffered progress log
out2 = open('%s.rmsd.dat' %(ref_list[number][0]),'w')  # one row of RMSD values per frame
out1.write('Reference structure: %s\n' %(ref_file))
# Load the reference structure and center it on the alignment selection
ref = MDAnalysis.Universe(ref_file)
ref_all = ref.select_atoms('all')
ref_align = ref.select_atoms(alignment)
ref_all.translate(-ref_align.center_of_mass())
pos0 = ref_align.positions
# SAVE COORDINATES FOR ALL SELECTIONS...
pos_list = []
for i in range(nSel):
    selection = sel[i][1]
    temp_sel = ref.select_atoms(selection)
    temp_pos = temp_sel.positions
    pos_list.append(temp_pos)
out1.write('Finished collecting the reference structure data\n')
nSteps = 0
# INITIALIZING UNIVERSES, LOADING TRAJECTORIES IN, ANALYZING, ETC...
for i in range(nSys):
    out1.write('Loading in Trajectories from %s\n' %(ref_list[i][0]))
    u = MDAnalysis.Universe('%s%s/truncated.pdb' %(traj_loc,ref_list[i][0]))
    u_align = u.select_atoms(alignment)
    u_important = u.select_atoms(important)
    u_selection_list = []
    for a in range(nSel):
        selection = sel[a][1]
        temp_sel = u.select_atoms(selection)
        u_selection_list.append([int(temp_sel.n_atoms),temp_sel])
        out1.write('%s corresponds to %s atom selection\n' %(sel[a][0],u_selection_list[a][1]))
    # NOTE(review): 'count' is never incremented below, so the per-system
    # frame count reported at the end is always 0 -- confirm intended.
    count = 0
    out1.write('Beginning trajectory analysis from system %s\n' %(ref_list[i][0]))
    a = ref_list[i][1]
    while a <= ref_list[i][2]:
        u.load_new('%s%s/Trajectories/production.%s/production.%s.dcd' %(traj_loc,ref_list[i][0],a,a))
        nSteps += len(u.trajectory)
        for ts in u.trajectory:
            # Align each frame onto the reference via the alignment selection
            u_important.translate(-u_align.center_of_mass())
            R, rmsd = rotation_matrix(u_align.positions,pos0)
            u_important.rotate(R)
            # RMSD of every selection against the stored reference coordinates
            for j in range(nSel):
                temp_pos = u_selection_list[j][1].positions
                rmsd = RMSD(temp_pos,pos_list[j],u_selection_list[j][0])
                out2.write('%f   ' %(rmsd))
            out2.write('\n')
        out1.write('Finished analyzing Trajectory: %s%s/Trajectories/production.%s/production.%s.dcd\n' %(traj_loc,ref_list[i][0],a,a))
        a +=1
    out1.write('Analyzed %d frames from system %s\n' %(count,ref_list[i][0]))
out1.close()
out2.close()
summary(nSteps)
| rbdavid/RMSD_analyses | PCA_RMSD_One_Ref/RMSD_analysis.py | Python | gpl-3.0 | 4,428 | [
"MDAnalysis"
] | 4fe3a6090fd6c4dc1ef22bebd23a33026dd91d3269e91a4555d6ba05e156fbb3 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Converting code parameters and components from python (PsychoPy)
to JS (ES6/PsychoJS)
"""
import ast
import astunparse
import esprima
from os import path
from psychopy.constants import PY3
from psychopy import logging
if PY3:
from past.builtins import unicode
from io import StringIO
else:
from StringIO import StringIO
from psychopy.experiment.py2js_transpiler import translatePythonToJavaScript
class NamesJS(dict):
    """dict mapping Python names to their JS equivalents.

    Lookup of an unknown name returns the name itself, so identifiers
    without a registered translation pass through unchanged.
    """
    def __getitem__(self, name):
        try:
            return dict.__getitem__(self, name)
        except KeyError:
            # Catch only the missing-key case; the previous bare 'except:'
            # also swallowed unrelated errors (e.g. unhashable keys).
            return "{}".format(name)
namesJS = NamesJS()
# Registered translations: Math.* come from the JS standard library,
# util.* from the PsychoJS util module.
namesJS['sin'] = 'Math.sin'
namesJS['cos'] = 'Math.cos'
namesJS['tan'] = 'Math.tan'
namesJS['pi'] = 'Math.PI'
namesJS['rand'] = 'Math.random'
namesJS['random'] = 'Math.random'
namesJS['sqrt'] = 'Math.sqrt'
namesJS['abs'] = 'Math.abs'
namesJS['randint'] = 'util.randint'
namesJS['round'] = 'util.round'  # better than Math.round, supports n DPs arg
namesJS['sum'] = 'util.sum'
class TupleTransformer(ast.NodeTransformer):
    """AST transformer that rewrites tuple literals into list literals.

    JavaScript has no tuple type, so ``(1, 2)`` must become ``[1, 2]``.
    Only the visited Tuple node itself is converted; expression2js walks
    every node of the tree and applies this visitor to each, so nested
    tuples are handled there.

    :returns: an ast.List node replacing the visited ast.Tuple
    """

    def visit_Tuple(self, node):
        # Reuse the tuple's element nodes and load/store context unchanged.
        return ast.List(elts=node.elts, ctx=node.ctx)
class Unparser(astunparse.Unparser):
    """astunparse had buried the future_imports option underneath its init()
    so we need to override that method and change it."""

    def __init__(self, tree, file):
        """Unparser(tree, file=sys.stdout) -> None.
        Print the source for tree to file."""
        self.f = file
        # Force 'unicode_literals' so emitted string constants are treated
        # as unicode under Python 2 as well.
        self.future_imports = ['unicode_literals']
        self._indent = 0
        self.dispatch(tree)
        self.f.flush()
def unparse(tree):
    """Return the source-code string for AST *tree*, via our Unparser."""
    v = StringIO()
    Unparser(tree, file=v)
    return v.getvalue()
def expression2js(expr):
    """Convert a short expression (e.g. a Component Parameter) Python to JS.

    Returns the translated string, or None when *expr* cannot be parsed
    as Python (the error is logged).
    """
    # if the code contains a tuple (anywhere), convert parenths to be list.
    # This now works for compounds like `(2*(4, 5))` where the inner
    # parenths becomes a list and the outer parens indicate priority.
    # This works by running an ast transformer class to swap the contents of the tuple
    # into a list for the number of tuples in the expression.
    try:
        syntaxTree = ast.parse(expr)
    except Exception:
        try:
            # Python 2 fallback: retry with an explicit unicode string
            syntaxTree = ast.parse(unicode(expr))
        except Exception as err:
            logging.error(err)
            return
    for node in ast.walk(syntaxTree):
        TupleTransformer().visit(node)  # Transform tuples to list
        # for py2 using 'unicode_literals' we don't want
        if isinstance(node, ast.Str) and type(node.s)==bytes:
            node.s = unicode(node.s, 'utf-8')
        elif isinstance(node, ast.Str) and node.s.startswith("u'"):
            node.s = node.s[1:]
        if isinstance(node, ast.Name):
            if node.id == 'undefined':
                continue
            # Map known Python names (sin, pi, ...) to their JS equivalents
            node.id = namesJS[node.id]
    jsStr = unparse(syntaxTree).strip()
    # Only run the full transpiler on single expressions (no assignments,
    # statement separators or multiple lines)
    if not any(ch in jsStr for ch in ("=",";","\n")):
        try:
            jsStr = translatePythonToJavaScript(jsStr)
            if jsStr.endswith(';\n'):
                # Strip the implicit statement terminator added by the transpiler
                jsStr = jsStr[:-2]
        except:
            # If translation fails, just use old translation
            pass
    return jsStr
def snippet2js(expr):
    """Convert a multi-line Python snippet (e.g. a Code Component) to JS.

    Statement-level translation (if/for blocks and the like) is not
    implemented, so for now the snippet is returned unchanged.
    """
    return expr
def findUndeclaredVariables(ast, allUndeclaredVariables):
    """Detect undeclared variables.

    :param ast: list of esprima statement nodes to scan
        (NOTE: the parameter name shadows the imported ast module; the
        module is not used inside this function)
    :param allUndeclaredVariables: running list of names already found;
        mutated in place so recursive calls do not report duplicates
    :return: list of newly found undeclared variable names in this scope
    """
    undeclaredVariables = []
    for expression in ast:
        if expression.type == 'ExpressionStatement':
            expression = expression.expression
            # A plain 'name = value' assignment introduces a variable
            if expression.type == 'AssignmentExpression' and expression.operator == '=' and expression.left.type == 'Identifier':
                variableName = expression.left.name
                if variableName not in allUndeclaredVariables:
                    undeclaredVariables.append(variableName)
                    allUndeclaredVariables.append(variableName)
        elif expression.type == 'IfStatement':
            # Recurse into the consequent; it may be a single statement or a block
            if expression.consequent.body is None:
                consequentVariables = findUndeclaredVariables(
                    [expression.consequent], allUndeclaredVariables)
            else:
                consequentVariables = findUndeclaredVariables(
                    expression.consequent.body, allUndeclaredVariables)
            undeclaredVariables.extend(consequentVariables)
        elif expression.type == "ReturnStatement":
            # Descend into returned function expressions
            if expression.argument.type == "FunctionExpression":
                consequentVariables = findUndeclaredVariables(
                    expression.argument.body.body, allUndeclaredVariables)
                undeclaredVariables.extend(consequentVariables)
    return undeclaredVariables
def addVariableDeclarations(inputProgram, fileName):
    """Transform the input program by adding just before each function
    a declaration for its undeclared variables.

    :param inputProgram: JavaScript source code (string)
    :param fileName: original file name, used only in the error message
    :return: the transformed program, or the input unchanged on parse error
    """
    # parse Javascript code into abstract syntax tree:
    # NB: esprima: https://media.readthedocs.org/pdf/esprima/4.0/esprima.pdf
    try:
        ast = esprima.parseScript(inputProgram, {'range': True, 'tolerant': True})
    except esprima.error_handler.Error as err:
        logging.error("{0} in {1}".format(err, path.split(fileName)[1]))
        return inputProgram  # So JS can be written to file
    # find undeclared vars in functions and declare them before the function
    outputProgram = inputProgram
    # running character offset caused by the text inserted so far
    offset = 0
    allUndeclaredVariables = []
    for expression in ast.body:
        if expression.type == 'FunctionDeclaration':
            # find all undeclared variables:
            undeclaredVariables = findUndeclaredVariables(expression.body.body,
                                                          allUndeclaredVariables)
            # add declarations (var) just before the function:
            funSpacing = ['', '\n'][len(undeclaredVariables) > 0]  # for consistent function spacing
            declaration = funSpacing + '\n'.join(['var ' + variable + ';' for variable in
                                                  undeclaredVariables]) + '\n'
            startIndex = expression.range[0] + offset
            outputProgram = outputProgram[
                :startIndex] + declaration + outputProgram[
                startIndex:]
            offset += len(declaration)
    return outputProgram
if __name__ == '__main__':
    # Manual smoke test: print the Python -> JS conversion for tuple edge cases.
    for expr in ['sin(t)', 't*5',
                 '(3, 4)', '(5*-2)',  # tuple and not tuple
                 '(1,(2,3), (1,2,3), (-4,-5,-6))', '2*(2, 3)',  # combinations
                 '[1, (2*2)]',  # List with nested operations returns list + nested tuple
                 '(.7, .7)',  # A tuple returns list
                 '(-.7, .7)',  # A tuple with unary operators returns nested lists
                 '[-.7, -.7]',  # A list with unary operators returns list with nested tuple
                 '[-.7, (-.7 * 7)]']:  # List with unary operators and nested tuple with operations returns list + tuple
        print("{} -> {}".format(repr(expr), repr(expression2js(expr))))
| psychopy/versions | psychopy/experiment/py2js.py | Python | gpl-3.0 | 7,669 | [
"VisIt"
] | 9e283a61deebb36b1647d1b71184794f25cec4164417ccd20058dae945bd6230 |
import os
from setuptools import setup, find_packages
# Convert README.md to README.rst automagically
# https://packaging.python.org/en/latest/guides/making-a-pypi-friendly-readme/
# read the contents of your README file
from pathlib import Path
# Read README.md from the directory containing this setup.py so PyPI can
# render it as the long description.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

setup(
    name='methtuple',
    version='1.7.0',
    description='methtuple',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='MIT',
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "License :: OSI Approved :: MIT License",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
    ],
    author='Peter Hickey',
    author_email='peter.hickey@gmail.com',
    url='https://github.com/PeteHaitch/methtuple',
    keywords='bisulfite sequencing methylation bismark bioinformatics',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=['pysam >= 0.8.4'],  # earliest pysam with the required API
    test_suite="methtuple.tests",
    scripts = [
        'methtuple/scripts/methtuple'
    ]
)
| PeteHaitch/methtuple | setup.py | Python | mit | 1,263 | [
"pysam"
] | 49ddf7842551d9407f0d70662e8a54139ffe20f145907aeb618d6e98491ed7b0 |
# Copyright (C) 2006, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Wrappers for PSEA, a program for secondary structure assignment.
See this citation for P-SEA, PMID: 9183534
Labesse G, Colloc'h N, Pothier J, Mornon J-P: P-SEA: a new efficient
assignment of secondary structure from C_alpha.
Comput Appl Biosci 1997 , 13:291-295
ftp://ftp.lmcp.jussieu.fr/pub/sincris/software/protein/p-sea/
"""
import os
from Bio.PDB.Polypeptide import is_aa
def run_psea(fname):
    """Invoke the external ``psea`` program on *fname* and return the
    name of the output file it produces.

    Assumes the P-SEA binary is called "psea" and is on the PATH.
    P-SEA writes ``<basename>.sea`` into the current directory and its
    terminal output is not captured.
    """
    # NOTE: the command line is assembled from the raw filename with no
    # shell quoting; fname is assumed to be trusted.
    os.system("psea " + fname)
    basename = fname.split("/")[-1]
    stem = basename.split(".")[0]
    return stem + ".sea"
def psea(pname):
    """Run PSEA on structure file *pname* and return its output string.

    Parses the generated ``.sea`` file: lines after the one starting with
    '>p-sea' up to the first blank line are concatenated (newline stripped)
    into a single string of P-SEA secondary structure codes.
    """
    fname = run_psea(pname)
    start = 0
    ss = ""
    with open(fname, 'r') as fp:
        for l in fp.readlines():
            if l[0:6] == ">p-sea":
                start = 1
                continue
            if not start:
                continue
            if l[0] == "\n":
                break
            ss = ss + l[0:-1]
    return ss
def psea2HEC(pseq):
    """Translate a PSEA secondary structure string into HEC codes.

    'a' (helix) -> 'H', 'b' (strand) -> 'E', 'c' (coil) -> 'C'.

    :param pseq: iterable of single-character P-SEA codes
    :return: list of 'H'/'E'/'C' characters
    :raises ValueError: for any other code. (The previous implementation
        silently reused the letter from the preceding residue here, or
        raised NameError if the first code was unknown.)
    """
    mapping = {"a": "H", "b": "E", "c": "C"}
    try:
        return [mapping[ss] for ss in pseq]
    except KeyError:
        raise ValueError("Unknown P-SEA code in %r" % (pseq,))
def annotate(m, ss_seq):
    """Apply secondary structure information to residues in model.

    Stores each residue's code in ``residue.xtra["SS_PSEA"]``.

    :param m: model whose first chain is annotated
    :param ss_seq: list of per-residue codes (one per amino acid)
    :raises ValueError: when the number of amino-acid residues does not
        match len(ss_seq)
    """
    c = m.get_list()[0]
    # Keep amino acids only, dropping HOH and other hetero residues
    # (the previous version also shadowed the builtin 'all' here).
    residues = [res for res in c.get_list() if is_aa(res)]
    if len(residues) != len(ss_seq):
        raise ValueError("Length mismatch %i %i" % (len(residues), len(ss_seq)))
    for res, ss in zip(residues, ss_seq):
        res.xtra["SS_PSEA"] = ss
    # os.system("rm "+fname)
class PSEA(object):
    """Run P-SEA on a structure file and annotate a model with the result."""

    def __init__(self, model, filename):
        """
        :param model: Bio.PDB model whose residues get 'SS_PSEA' annotations
        :param filename: structure file passed to the external psea program
        """
        ss_seq = psea(filename)
        ss_seq = psea2HEC(ss_seq)
        annotate(model, ss_seq)
        self.ss_seq = ss_seq

    def get_seq(self):
        """
        Return secondary structure string.
        """
        return self.ss_seq
if __name__ == "__main__":
    import sys

    from Bio.PDB import PDBParser

    # Parse the PDB file given on the command line
    p = PDBParser()
    s = p.get_structure('X', sys.argv[1])

    # Annotate structure with PSEA secondary structure info
    PSEA(s[0], sys.argv[1])
| zjuchenyuan/BioWeb | Lib/Bio/PDB/PSEA.py | Python | mit | 2,832 | [
"Biopython"
] | 043bdb0fdbf8f577e171cd86796086bc635b1ce6e1c15996e2a9826da8748114 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import sys
from psi4 import core
from psi4.driver.util.filesystem import *
from psi4.driver.util import tty
def sanitize_name(name):
    """Return *name* in coded form, safe for filenames and C++ identifiers.

    Lowercases the name and maps '+' -> 'p', '*' -> 's', and each of
    '(', ')', ',', '-' -> '_'.  Aborts via tty.die when the name does
    not start with a letter or the coded result collides with a C++
    reserved keyword.
    """
    if name[0].isalpha():
        # Single-pass character translation instead of chained replace() calls.
        table = str.maketrans({'+': 'p', '*': 's',
                               '(': '_', ')': '_', ',': '_', '-': '_'})
        coded = name.lower().translate(table)
        # Taken from http://en.cppreference.com/w/cpp/keyword
        cpp_keywords = {
            "alignas", "alignof", "and", "and_eq", "asm", "atomic_cancel",
            "atomic_commit", "atomic_noexcept", "auto", "bitand", "bitor",
            "bool", "break", "case", "catch", "char", "char16_t", "char32_t",
            "class", "compl", "concept", "const", "constexpr", "const_cast",
            "continue", "decltype", "default", "delete", "do", "double",
            "dynamic_cast", "else", "enum", "explicit", "export", "extern",
            "false", "float", "for", "friend", "goto", "if", "import", "inline",
            "int", "long", "module", "mutable", "namespace", "new", "noexcept",
            "not", "not_eq", "nullptr", "operator", "or", "or_eq", "private",
            "protected", "public", "register", "reinterpret_cast", "requires",
            "return", "short", "signed", "sizeof", "static", "static_assert",
            "static_cast", "struct", "switch", "synchronized", "template",
            "this", "thread_local", "throw", "true", "try", "typedef", "typeid",
            "typename", "union", "unsigned", "using", "virtual", "void",
            "volatile", "wchar_t", "while", "xor", "xor_eq",
            # Identifiers with special meanings
            "override", "final", "transaction_safe", "transaction_safe_dynamic",
            # Preprocessor tokens
            "elif", "endif", "defined", "ifdef", "ifndef",
            "define", "undef", "include", "line", "error", "pragma",
            "_pragma"
        }
        if coded in cpp_keywords:
            tty.die("The plugin name you provided is a C++ reserved keyword. Please provide a different name.")
        return coded
    else:
        tty.die("Plugin name must begin with a letter.")
# Determine the available plugins by scanning the installed template
# directory; each subdirectory is one plugin template.
available_plugins = []
psidatadir = core.get_datadir()
plugin_path = join_path(psidatadir, "plugin")
# 'entry' instead of the previous loop variable 'dir', which shadowed the builtin
for entry in os.listdir(plugin_path):
    if os.path.isdir(join_path(plugin_path, entry)):
        available_plugins.append(entry)
def create_plugin(name, template):
    """Generate plugin in directory with sanitized *name* based upon *template*.

    Copies every file of the chosen template directory into a new
    ./<name> directory, substituting the @plugin@/@Plugin@/@PLUGIN@/
    @sources@ placeholders.  Exits the process when done (0) or on
    failure (1).
    """
    name = sanitize_name(name)
    template_path = join_path(plugin_path, template)
    # Create, but do not overwrite, plugin directory
    if os.path.exists(name):
        # NOTE(review): tty.error does not appear to abort; if it returns,
        # execution continues and os.mkdir below raises OSError -- confirm.
        tty.error("""Plugin directory "{}" already exists.""".format(name))
    # Do a first pass to determine the template temp_files
    template_files = os.listdir(template_path)
    source_files = []
    for temp_file in template_files:
        target_file = temp_file
        if temp_file.endswith('.template'):
            target_file = temp_file[0:-9]
        # .cc sources are collected so @sources@ can list them all
        if temp_file.endswith('.cc.template'):
            source_files.append(target_file)
    tty.hline("""Creating "{}" with "{}" template.""".format(name, template))
    os.mkdir(name)
    created_files = []
    for source_file in template_files:
        # Skip swp files
        if source_file.endswith(".swp"):
            continue
        target_file = source_file
        if source_file.endswith('.template'):
            target_file = source_file[0:-9]
        try:
            print(join_path(template_path, source_file))
            with open(join_path(template_path, source_file), 'r') as temp_file:
                contents = temp_file.read()
        except IOError as err:
            tty.error("""Unable to open {} template.""".format(source_file))
            tty.error(err)
            sys.exit(1)
        # Substitute the placeholder tokens with the sanitized plugin name
        contents = contents.replace('@plugin@', name)
        contents = contents.replace('@Plugin@', name.capitalize())
        contents = contents.replace('@PLUGIN@', name.upper())
        contents = contents.replace('@sources@', ' '.join(source_files))
        try:
            with open(join_path(name, target_file), 'w') as temp_file:
                temp_file.write(contents)
            created_files.append(target_file)
        except IOError as err:
            tty.error("""Unable to create {}""".format(target_file))
            tty.error(err)
            sys.exit(1)
    tty.info("Created plugin files (in {} as {}): ".format(name, template), ", ".join(created_files))
    sys.exit(0)
| jgonthier/psi4 | psi4/driver/pluginutil.py | Python | lgpl-3.0 | 5,886 | [
"Psi4"
] | 105dc0431ee93dd112aff2629ba2f53007fd90e3d2bf3618f8c36e61b8ecc996 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from horton import *
from horton.test.common import get_random_cell, tmpdir
def test_unravel_counter():
    """_unravel_counter maps a flat counter to 3-D grid indices, wrapping
    around once the counter exceeds the grid size."""
    from horton.io.vasp import _unravel_counter
    assert _unravel_counter(0, [3, 3, 3]) == [0, 0, 0]
    assert _unravel_counter(0, [2, 4, 3]) == [0, 0, 0]
    assert _unravel_counter(1, [2, 4, 3]) == [1, 0, 0]
    assert _unravel_counter(2, [2, 4, 3]) == [0, 1, 0]
    assert _unravel_counter(3, [2, 4, 3]) == [1, 1, 0]
    assert _unravel_counter(8, [2, 4, 3]) == [0, 0, 1]
    assert _unravel_counter(9, [2, 4, 3]) == [1, 0, 1]
    assert _unravel_counter(11, [2, 4, 3]) == [1, 1, 1]
    # 24 == 2*4*3, so the counter wraps back to the origin
    assert _unravel_counter(24, [2, 4, 3]) == [0, 0, 0]
def test_load_chgcar_oxygen():
    """Load a CHGCAR for a single O atom and check cell, grid and density.

    CHGCAR stores density*volume, hence the /mol.cell.volume in the asserts.
    """
    fn = context.get_fn('test/CHGCAR.oxygen')
    mol = IOData.from_file(fn)
    assert (mol.numbers == [8]).all()
    assert abs(mol.cell.volume - (10*angstrom)**3) < 1e-10
    ugrid = mol.grid
    assert len(ugrid.shape) == 3
    assert (ugrid.shape == 2).all()
    assert abs(ugrid.grid_rvecs - mol.cell.rvecs/2).max() < 1e-10
    assert abs(ugrid.origin).max() < 1e-10
    d = mol.cube_data
    assert abs(d[0,0,0] - 0.78406017013E+04/mol.cell.volume) < 1e-10
    assert abs(d[-1,-1,-1] - 0.10024522914E+04/mol.cell.volume) < 1e-10
    assert abs(d[1,0,0] - 0.76183317989E+04/mol.cell.volume) < 1e-10
def test_load_chgcar_water():
    """Load a water CHGCAR and check title, atoms, coordinates and grid.

    The expected coordinate converts VASP fractional positions to
    Cartesian using the 15 Angstrom cubic cell.
    """
    fn = context.get_fn('test/CHGCAR.water')
    mol = IOData.from_file(fn)
    assert mol.title == 'unknown system'
    assert (mol.numbers == [8, 1, 1]).all()
    assert abs(mol.coordinates[1] - np.array([0.074983*15+0.903122*1, 0.903122*15, 0.000000])*angstrom).max() < 1e-10
    assert abs(mol.cell.volume - (15*angstrom)**3) < 1e-10
    ugrid = mol.grid
    assert len(ugrid.shape) == 3
    assert (ugrid.shape == 3).all()
    assert abs(ugrid.grid_rvecs - mol.cell.rvecs/3).max() < 1e-10
    assert abs(ugrid.origin).max() < 1e-10
def test_load_locpot_oxygen():
    """Load a LOCPOT file and check the potential values (stored in eV)."""
    fn = context.get_fn('test/LOCPOT.oxygen')
    mol = IOData.from_file(fn)
    assert mol.title == 'O atom in a box'
    assert (mol.numbers[0] == [8]).all()
    assert abs(mol.cell.volume - (10*angstrom)**3) < 1e-10
    ugrid = mol.grid
    assert len(ugrid.shape) == 3
    assert (ugrid.shape == [1, 4, 2]).all()
    assert abs(ugrid.origin).max() < 1e-10
    d = mol.cube_data
    assert abs(d[0, 0, 0]/electronvolt - 0.35046350435E+01) < 1e-10
    assert abs(d[0, 1, 0]/electronvolt - 0.213732132354E+01) < 1e-10
    assert abs(d[0, 2, 0]/electronvolt - -.65465465497E+01) < 1e-10
    assert abs(d[0, 2, 1]/electronvolt - -.546876467887E+01) < 1e-10
def test_load_poscar_water():
    """Load a water POSCAR and check title, atoms, coordinates and cell."""
    fn = context.get_fn('test/POSCAR.water')
    mol = IOData.from_file(fn)
    assert mol.title == 'Water molecule in a box'
    assert (mol.numbers == [8, 1, 1]).all()
    # fractional position scaled by the 15 Angstrom cubic cell
    assert abs(mol.coordinates[1] - np.array([0.074983*15, 0.903122*15, 0.000000])*angstrom).max() < 1e-10
    assert abs(mol.cell.volume - (15*angstrom)**3) < 1e-10
def test_load_dump_consistency():
    """Round-trip a molecule through POSCAR dump/load and compare.

    The asserts compare coordinates[1] with coordinates[0] (and vice
    versa) -- the POSCAR writer appears to reorder atoms by element;
    the numbers check confirms the element order after the round trip.
    """
    mol0 = IOData.from_file(context.get_fn('test/water_element.xyz'))
    mol0.cell = get_random_cell(5.0, 3)
    with tmpdir('horton.io.test.test_vasp.test_load_dump_consistency') as dn:
        mol0.to_file('%s/POSCAR' % dn)
        mol1 = IOData.from_file('%s/POSCAR' % dn)
    assert mol0.title == mol1.title
    assert (mol1.numbers == [8, 1, 1]).all()
    assert abs(mol0.coordinates[1] - mol1.coordinates[0]).max() < 1e-10
    assert abs(mol0.coordinates[0] - mol1.coordinates[1]).max() < 1e-10
    assert abs(mol0.coordinates[2] - mol1.coordinates[2]).max() < 1e-10
    assert abs(mol0.cell.rvecs - mol1.cell.rvecs).max() < 1e-10
| eustislab/horton | horton/io/test/test_vasp.py | Python | gpl-3.0 | 4,443 | [
"VASP"
] | d1f36b31b846dd99071c29fe78cc39483d324034e4fe40be3129e78f964d7630 |
#!/usr/bin/env python
""" This template will become the job wrapper that's actually executed.
The JobWrapperTemplate is completed and invoked by the jobAgent and uses functionalities from JobWrapper module.
It has to be an executable.
The JobWrapperTemplate will reschedule the job according to certain criteria:
- the working directory could not be created
- the jobWrapper initialization phase failed
- the inputSandbox download failed
- the resolution of the inpt data failed
- the JobWrapper ended with the status DErrno.EWMSRESC
"""
import sys
import json
import ast
import os
import errno
sitePython = "@SITEPYTHON@"
if sitePython:
sys.path.insert( 0, "@SITEPYTHON@" )
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC import gLogger
from DIRAC.Core.Utilities import DErrno
from DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper import JobWrapper, rescheduleFailedJob
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
gJobReport = None
os.umask( 0o22 )
class JobWrapperError( Exception ):
  """ Custom exception for handling JobWrapper "genuine" errors
  """
  def __init__( self, value ):
    # Populate Exception.args via the base constructor so tuple-style
    # access to the exception (exc[1] under Python 2, used in execute())
    # and generic logging of .args keep working; the previous version
    # skipped this call.
    super( JobWrapperError, self ).__init__( value )
    self.value = value
  def __str__( self ):
    return str( self.value )
def execute( arguments ):
  """ The only real function executed here.

      Drives the whole job life cycle: working directory setup, JobWrapper
      initialization, input sandbox download, input data resolution, payload
      execution, output upload and finalization.

      :param arguments: dict with at least arguments['Job'] (job description)
                        and optionally 'WorkingDirectory'
      :return: 0 on success, 1 on (rescheduled) failure, 2 on output/final
               phase failure
  """
  global gJobReport

  jobID = arguments['Job']['JobID']
  os.environ['JOBID'] = jobID
  jobID = int( jobID )

  # Move to the requested working directory, creating it if needed;
  # any failure here triggers a reschedule of the job.
  if 'WorkingDirectory' in arguments:
    wdir = os.path.expandvars( arguments['WorkingDirectory'] )
    if os.path.isdir( wdir ):
      os.chdir( wdir )
    else:
      try:
        os.makedirs( wdir ) # this will raise an exception if wdir already exists (which is ~OK)
        if os.path.isdir( wdir ):
          os.chdir( wdir )
      except OSError as osError:
        if osError.errno == errno.EEXIST and os.path.isdir( wdir ):
          gLogger.exception( 'JobWrapperTemplate found that the working directory already exists' )
          rescheduleResult = rescheduleFailedJob( jobID, 'Working Directory already exists' )
        else:
          gLogger.exception( 'JobWrapperTemplate could not create working directory' )
          rescheduleResult = rescheduleFailedJob( jobID, 'Could Not Create Working Directory' )
        return 1

  gJobReport = JobReport( jobID, 'JobWrapper' )

  try:
    job = JobWrapper( jobID, gJobReport )
    job.initialize( arguments ) # initialize doesn't return S_OK/S_ERROR
  except Exception as exc: #pylint: disable=broad-except
    gLogger.exception( 'JobWrapper failed the initialization phase', lException = exc )
    rescheduleResult = rescheduleFailedJob( jobID, 'Job Wrapper Initialization', gJobReport )
    try:
      job.sendJobAccounting( rescheduleResult, 'Job Wrapper Initialization' )
    except Exception as exc: #pylint: disable=broad-except
      gLogger.exception( 'JobWrapper failed sending job accounting', lException = exc )
    return 1

  # Download the input sandbox; failures are rescheduled.
  if 'InputSandbox' in arguments['Job']:
    gJobReport.commit()
    try:
      result = job.transferInputSandbox( arguments['Job']['InputSandbox'] )
      if not result['OK']:
        gLogger.warn( result['Message'] )
        raise JobWrapperError( result['Message'] )
    except JobWrapperError:
      gLogger.exception( 'JobWrapper failed to download input sandbox' )
      rescheduleResult = rescheduleFailedJob( jobID, 'Input Sandbox Download', gJobReport )
      job.sendJobAccounting( rescheduleResult, 'Input Sandbox Download' )
      return 1
    except Exception as exc: #pylint: disable=broad-except
      gLogger.exception( 'JobWrapper raised exception while downloading input sandbox', lException = exc )
      rescheduleResult = rescheduleFailedJob( jobID, 'Input Sandbox Download', gJobReport )
      job.sendJobAccounting( rescheduleResult, 'Input Sandbox Download' )
      return 1
  else:
    gLogger.verbose( 'Job has no InputSandbox requirement' )

  gJobReport.commit()

  # Resolve input data (replica lookup etc.); failures are rescheduled.
  if 'InputData' in arguments['Job']:
    if arguments['Job']['InputData']:
      try:
        result = job.resolveInputData()
        if not result['OK']:
          gLogger.warn( result['Message'] )
          raise JobWrapperError( result['Message'] )
      except JobWrapperError:
        gLogger.exception( 'JobWrapper failed to resolve input data' )
        rescheduleResult = rescheduleFailedJob( jobID, 'Input Data Resolution', gJobReport )
        job.sendJobAccounting( rescheduleResult, 'Input Data Resolution' )
        return 1
      except Exception as exc: #pylint: disable=broad-except
        gLogger.exception( 'JobWrapper raised exception while resolving input data', lException = exc )
        rescheduleResult = rescheduleFailedJob( jobID, 'Input Data Resolution', gJobReport )
        job.sendJobAccounting( rescheduleResult, 'Input Data Resolution' )
        return 1
    else:
      gLogger.verbose( 'Job has a null InputData requirement:' )
      gLogger.verbose( arguments )
  else:
    gLogger.verbose( 'Job has no InputData requirement' )

  gJobReport.commit()

  # Execute the payload itself.
  try:
    result = job.execute( arguments )
    if not result['OK']:
      gLogger.error( 'Failed to execute job', result['Message'] )
      raise JobWrapperError( (result['Message'], result['Errno']) )
  except JobWrapperError as exc:
    # NOTE(review): exc[1]/exc[0] rely on Python 2 tuple-style indexing
    # of exceptions (via Exception.args) -- confirm before porting.
    if exc[1] == 0 or str(exc[0]) == '0':
      gLogger.verbose( 'JobWrapper exited with status=0 after execution' )
    if exc[1] == DErrno.EWMSRESC:
      gLogger.warn("Asked to reschedule job")
      rescheduleResult = rescheduleFailedJob( jobID, 'JobWrapper execution', gJobReport )
      job.sendJobAccounting( rescheduleResult, 'JobWrapper execution' )
      return 1
    else:
      gLogger.exception( 'Job failed in execution phase' )
      gJobReport.setJobParameter( 'Error Message', str( exc ), sendFlag = False )
      gJobReport.setJobStatus( 'Failed', 'Exception During Execution', sendFlag = False )
      job.sendFailoverRequest( 'Failed', 'Exception During Execution' )
      return 1
  except Exception as exc: #pylint: disable=broad-except
    gLogger.exception( 'Job raised exception during execution phase', lException = exc )
    gJobReport.setJobParameter( 'Error Message', str( exc ), sendFlag = False )
    gJobReport.setJobStatus( 'Failed', 'Exception During Execution', sendFlag = False )
    job.sendFailoverRequest( 'Failed', 'Exception During Execution' )
    return 1

  # Upload the outputs; note these failures return 2, not 1 (no reschedule).
  if 'OutputSandbox' in arguments['Job'] or 'OutputData' in arguments['Job']:
    try:
      result = job.processJobOutputs( arguments )
      if not result['OK']:
        gLogger.warn( result['Message'] )
        raise JobWrapperError( result['Message'] )
    except JobWrapperError as exc:
      gLogger.exception( 'JobWrapper failed to process output files' )
      gJobReport.setJobParameter( 'Error Message', str( exc ), sendFlag = False )
      gJobReport.setJobStatus( 'Failed', 'Uploading Job Outputs', sendFlag = False )
      job.sendFailoverRequest( 'Failed', 'Uploading Job Outputs' )
      return 2
    except Exception as exc:
      gLogger.exception( 'JobWrapper raised exception while processing output files', lException = exc )
      gJobReport.setJobParameter( 'Error Message', str( exc ), sendFlag = False )
      gJobReport.setJobStatus( 'Failed', 'Uploading Job Outputs', sendFlag = False )
      job.sendFailoverRequest( 'Failed', 'Uploading Job Outputs' )
      return 2
  else:
    gLogger.verbose( 'Job has no OutputData or OutputSandbox requirement' )

  try:
    # Failed jobs will return 1 / successful jobs will return 0
    return job.finalize()
  except Exception as exc: #pylint: disable=broad-except
    gLogger.exception( 'JobWrapper raised exception during the finalization phase', lException = exc )
    return 2
###################### Note ##############################
# The below arguments are automatically generated by the #
# JobAgent, do not edit them. #
##########################################################
# Exit codes: -3 never ran, -2 report commit failed, -1 wrapper raised,
# otherwise whatever execute() returned.
ret = -3
try:
  # The JobAgent drops the job description next to this script as
  # <script>.json; the single line is JSON wrapping a python literal dict.
  jsonFileName = os.path.realpath( __file__ ) + '.json'
  with open( jsonFileName, 'r' ) as f:
    jobArgsFromJSON = json.loads( f.readlines()[0] )
  jobArgs = ast.literal_eval(jobArgsFromJSON)
  if not isinstance(jobArgs, dict):
    # Python 2-only raise syntax below (this template targets Python 2)
    raise TypeError, "jobArgs is of type %s" %type(jobArgs)
  if 'Job' not in jobArgs:
    raise ValueError, "jobArgs does not contain 'Job' key: %s" %str(jobArgs)
  ret = execute( jobArgs )
  gJobReport.commit()
except Exception as exc: #pylint: disable=broad-except
  gLogger.exception("JobWrapperTemplate exception", lException = exc)
  try:
    gJobReport.commit()
    ret = -1
  except Exception as exc: #pylint: disable=broad-except
    gLogger.exception("Could not commit the job report", lException = exc)
    ret = -2
sys.exit( ret )
| hgiemza/DIRAC | WorkloadManagementSystem/JobWrapper/JobWrapperTemplate.py | Python | gpl-3.0 | 8,744 | [
"DIRAC"
] | 4772acc3b2546f46529b7e8fd9848d88d411ebf1970cfc8ad7a45f6144f59118 |
''' This file defines the columns for each table in the Postgres database.
We store our data in multiple schemas. We create one schema for each partner,
(ex: 'noble' and 'kipp_nj'), where each table contains the data from that
individual partner. We also create a 'common' schema where we combine the data
from all partners.
Each table has a standardized set of columns:
- Every table in a partner data schema contain the columns defined in 'partnerids' and
'data' of that table's dictionary below.
This means that corresponding tables in partner data schemas (ex: noble.students and
kipp_nj.students) contain identical columns.
- Every table in the 'common' schema contains the set of columns defined in 'commonids'
and 'data' of that table's dictionary below.
'''
template = {
'commonids': '''
''',
'partnerids': '''
''',
'data': '''
'''
}
studentid = {
'commonids': '''
studentid SERIAL PRIMARY KEY,''',
'partnerids': '',
'data': '''
noble_sf_id VARCHAR(18) UNIQUE,
CONSTRAINT check_length_id CHECK (length(noble_sf_id) = 18),
kipp_nj_sf_id VARCHAR(18) UNIQUE,
CONSTRAINT check_length CHECK (length(kipp_nj_sf_id) = 18),
kipp_nj_powerschool_id VARCHAR UNIQUE,
cps_id VARCHAR(18) UNIQUE,
CONSTRAINT check_cps_id CHECK (length(cps_id) = 8)'''
}
# Surrogate-key table mapping the internal school id to each partner's
# native identifiers (Salesforce ids are always exactly 18 characters).
schoolid = {
    'commonids': '''
    schoolid SERIAL PRIMARY KEY,''',
    'partnerids': '''
    ''',
    'data': '''
    kipp_nj_sf_school_id VARCHAR(18) UNIQUE CHECK (length(kipp_nj_sf_school_id) = 18),
    -- fixed: this length CHECK previously validated kipp_nj_sf_school_id
    -- instead of noble_sf_school_id (copy-paste error)
    noble_sf_school_id VARCHAR(18) UNIQUE CHECK (length(noble_sf_school_id) = 18),
    noble_powerschool_id VARCHAR(8) UNIQUE'''
}
collegeid = {
'commonids': '''
collegeid SERIAL PRIMARY KEY,''',
'partnerids': '''
''',
'data': '''
"noble_sf_college_id" CHAR(18) UNIQUE NOT NULL CHECK(length(noble_sf_college_id) = 18),
"ipedsid" INTEGER UNIQUE CHECK(ipedsid>=0 and ipedsid<=999999)'''
}
acttests = {
'commonids': '''
studentid INTEGER NOT NULL REFERENCES common.studentid(studentid),''',
'partnerids': '''
"kipp_nj_sf_id" VARCHAR(18) CHECK (length(kipp_nj_sf_id) = 18),
"cps_id" CHAR(8) CHECK(length(cps_id) = 8),''',
'data': '''
"date" DATE CHECK(date <= now()::date),
"test_level" VARCHAR,
CONSTRAINT valid_level CHECK (test_level IN ('PLAN', 'EXPLORE', 'ACT')),
"test_semester" VARCHAR,
CONSTRAINT valid_semester CHECK (test_semester in ('Fall', 'Spring')),
"score_composite" INTEGER CHECK(score_composite>=1 and score_composite<=36 and score_composite=round((score_english+score_math+score_reading+score_science+0.0001)/4)),
"score_english" INTEGER CHECK(score_english>=1 and score_english<=36),
"score_math" INTEGER CHECK(score_math>=1 and score_math<=36),
"score_reading" INTEGER CHECK(score_reading>=1 and score_reading<=36),
"score_science" INTEGER CHECK(score_science>=1 and score_science<=36),
"score_writing" INTEGER CHECK(score_writing>=1 and score_writing<=36),
"onlyhighestavailable" BOOLEAN NOT NULL'''
}
aptests = {
'commonids': '''
studentid INTEGER NOT NULL REFERENCES common.studentid(studentid),''',
'partnerids': '''
"noble_powerschool_id" CHAR(18) CHECK(length(noble_powerschool_id) = 8),''',
'data': '''
"date" DATE CHECK(date <= now()::date),
"score" INTEGER CHECK(score>=1 and score<=5),
"subject" VARCHAR(37) REFERENCES lookup.apsubjects(name)'''
}
attendance = {
'commonids': '''
studentid INTEGER REFERENCES common.studentid(studentid),
schoolid INTEGER REFERENCES common.schoolid(schoolid),''',
'partnerids': '''
student_cps_id CHAR(8) CHECK(length(student_cps_id) = 8),
school_noble_powerschoolid VARCHAR,
kipp_nj_sf_id VARCHAR(18) CHECK(length(kipp_nj_sf_id) = 18),
kipp_nj_sf_school_id VARCHAR(18) CHECK(length(kipp_nj_sf_school_id) = 18),''',
'data': '''
attendance_date DATE CHECK (attendance_date > DATE('1900-01-01') and attendance_date <= now()::date),
school_year INTEGER CHECK (school_year > 1950 and school_year < extract(year from now()) + 1),
attendance_type VARCHAR(15),
CONSTRAINT "valid_type" CHECK (attendance_type IN ('excused','unexcused','suspended','tardy','early_dismissal'))'''
}
students = {
'commonids': '''
studentid INTEGER UNIQUE NOT NULL REFERENCES common.studentid(studentid),''',
'partnerids': '''
kipp_nj_sf_id CHAR(18) CHECK (length(kipp_nj_sf_id) = 18),
noble_sf_id CHAR(18) CHECK (length(noble_sf_id) = 18),
cps_id CHAR(8) CHECK (length(cps_id) = 8),
kipp_nj_powerschool_id VARCHAR(20),''',
'data': '''
network VARCHAR,
date_of_birth DATE CHECK (date_of_birth > DATE('1970-01-01')),
ethnicity VARCHAR(25),
--constrain the categories of ethnicity to the standard set of nine
CONSTRAINT valid_ethnicity CHECK (ethnicity IN ('Hispanic', 'African American', 'Caucasian', 'Multicultural', 'Asian', 'American Indian', 'Pacific Islander', 'Unknown', 'Other')),
is_female BOOLEAN,
ever_special_ed BOOLEAN,
ever_free_lunch BOOLEAN,
family_income_bracket VARCHAR(15),
number_in_household INTEGER,
is_first_gen BOOLEAN,
street VARCHAR,
zip VARCHAR(5),
-- force zip to be five digits long
CONSTRAINT check_length_zip CHECK (length(zip) = 5),
fafsa_efc FLOAT'''
}
enrollments = {
'commonids': '''
enrollid SERIAL PRIMARY KEY,
studentid INTEGER REFERENCES common.studentid(studentid),
collegeid INTEGER REFERENCES common.collegeid(collegeid),''',
'partnerids': '''
noble_student_sf_id CHAR(18) CHECK (length(noble_student_sf_id) = 18),
kipp_nj_sf_id CHAR(18) CHECK (length(kipp_nj_sf_id) = 18),
"ipedsid" INTEGER CHECK(ipedsid>=0 and ipedsid<=999999),
noble_college_sf_id CHAR(18),
CONSTRAINT check_id CHECK (length(noble_college_sf_id) = 18),''',
'data': '''
"start_date" DATE,
"end_date" DATE,
"date_last_verified" DATE,
"status" VARCHAR(20) CHECK(status IN ('Transferred out', 'Matriculating', 'Attending', 'Did not matriculate', 'Withdrew', 'Graduated')),
"data_source" VARCHAR,
"living_on_campus" BOOLEAN,
"degree_type" VARCHAR CHECK(degree_type IN ('Associates','Associates or Certificate (TBD)', 'Masters', 'Bachelors', 'Certificate', 'Trade/Vocational', 'Employment')),
"degree_subject" VARCHAR REFERENCES lookup.collegedegrees (name),
"major" VARCHAR REFERENCES lookup.majors (name),
"withdrawal_reason_financial" BOOLEAN,
"withdrawal_reason_academic" BOOLEAN,
"withdrawal_reason_motivational" BOOLEAN,
"withdrawal_reason_family" BOOLEAN,
"withdrawal_reason_health" BOOLEAN,
"withdrawal_reason_social" BOOLEAN,
"withdrawal_reason_racial" BOOLEAN'''
}
enrollment_dummies = {
'commonids': '''
''',
'partnerids': '''
''',
'data': '''
"enrollid" INTEGER UNIQUE REFERENCES common.enrollments(enrollid),
"studentid" INTEGER REFERENCES common.studentid(studentid),
"collegeid" INTEGER REFERENCES common.collegeid(collegeid),
"persist_1_halfyear" BOOLEAN,
"persist_2_halfyear" BOOLEAN,
"persist_3_halfyear" BOOLEAN,
"persist_4_halfyear" BOOLEAN,
"persist_5_halfyear" BOOLEAN,
"persist_6_halfyear" BOOLEAN,
"persist_7_halfyear" BOOLEAN,
"persist_8_halfyear" BOOLEAN'''
}
contacts = {
'commonids': '''
studentid INTEGER NOT NULL REFERENCES common.studentid(studentid),''',
'partnerids': '''
kipp_nj_sf_id CHAR(18) CHECK (length(kipp_nj_sf_id) = 18),
noble_sf_id CHAR(18) CHECK (length(noble_sf_id) = 18),''',
'data': '''
contact_date DATE CHECK (contact_date > DATE('1900-01-01') and contact_date <= now()::date),
counselor_id VARCHAR,
contact_medium VARCHAR,
CONSTRAINT "valid_medium" CHECK (
contact_medium IN ('Call', 'Email', 'In Person', 'Text', 'Social Networking', 'School Visit', 'Mail', 'IM', 'Parent Contact', 'College Contact')),
initiated_by_student BOOLEAN,
was_outreach BOOLEAN,
was_successful BOOLEAN'''
}
schools = {
'commonids': '''
schoolid INTEGER UNIQUE NOT NULL REFERENCES common.schoolid(schoolid),''',
'partnerids': '''
"noble_sf_school_id" CHAR(18) CHECK (length(noble_sf_school_id) = 18),
"kipp_nj_sf_school_id" CHAR(18) CHECK (length(kipp_nj_sf_school_id) = 18),''',
'data': '''
"name" VARCHAR(120) NOT NULL,
"level" VARCHAR(15) NOT NULL,
CONSTRAINT "valid_level" CHECK (
"level" IN ('high school', 'middle school', '6 through 12', 'elementary')),
"kind" VARCHAR(10),
CONSTRAINT "valid_kind" CHECK (
"kind" IN ('charter', 'magnet', 'private', 'public', 'contract', 'military', 'selective', 'special', 'boarding')),
"parent_organization" VARCHAR(20),
"no_excuses" BOOLEAN,
"zip" INTEGER CHECK ("zip" >= 00501 AND "zip" <= 99950),
"year_of_first_graduating_class" DATE CHECK("year_of_first_graduating_class" > DATE('1000-01-01')),
"dropout_rate" DECIMAL(5,2) CHECK("dropout_rate" >= 0.00 AND "dropout_rate" <= 100.00 ),
"number_of_students" INTEGER CHECK("number_of_students" >= 0),
"avg_class_size" DECIMAL(6,2) CHECK("avg_class_size" >= 0.00),
"number_of_staff" INTEGER CHECK("number_of_staff" >= 0),
"number_of_counselors" INTEGER CHECK("number_of_counselors" >= 0),
"annual_budget" DECIMAL(11,2)'''
}
courses = {
'commonids': '''
studentid INTEGER REFERENCES common.studentid(studentid),
schoolid INTEGER REFERENCES common.schoolid(schoolid),''',
'partnerids': '''
cps_id VARCHAR(8),
noble_powerschool_school_id VARCHAR(18),
kipp_nj_sf_id VARCHAR(18) CHECK(length(kipp_nj_sf_id) = 18),
kipp_nj_sf_school_id VARCHAR(18) CHECK(length(kipp_nj_sf_school_id) = 18),''',
'data': '''
course_name VARCHAR,
course_number VARCHAR,
teacher_id VARCHAR,
year_taken INTEGER CHECK (year_taken > 1000 and year_taken < extract(year from now())),
semester_taken INTEGER CHECK (semester_taken >= 1 and semester_taken <=3),
grade_available DATE CHECK (grade_available > DATE('1900-01-01') and grade_available < (now() + INTERVAL '6 months')::date),
course_length VARCHAR,
CONSTRAINT valid_length CHECK (course_length IN ('year', 'quarter', 'semester', 'summer')),
course_level INTEGER,
--subject_type VARCHAR REFERENCES lookup.subjecttypes (name),
credit_hours NUMERIC,
was_honors BOOLEAN,
was_ap BOOLEAN,
percent_grade NUMERIC CHECK (percent_grade >= 0.0 and percent_grade <= 120.0),
letter_grade VARCHAR CHECK (letter_grade ~* '^[PAaBbCcDdFfIiWw][+-]?$')'''
}
gpa_by_year = {
'commonids': '''
studentid INTEGER REFERENCES common.studentid(studentid),
schoolid INTEGER REFERENCES common.schoolid(schoolid),''',
'partnerids': '''
cps_id VARCHAR(8) CHECK(length(cps_id) = 8),
noble_powerschool_school_id VARCHAR(4) CHECK(length(noble_powerschool_school_id)=4),
kipp_nj_sf_id VARCHAR CHECK(length(kipp_nj_sf_id) = 18),
kipp_nj_sf_school_id VARCHAR CHECK(length(kipp_nj_sf_school_id) = 18),''',
'data': '''
school_year INT,
weighted_gpa NUMERIC CHECK(weighted_gpa >= 0.0),
unweighted_gpa NUMERIC CHECK(unweighted_gpa >= 0.0)'''
}
gpa_cumulative = {
'commonids': '''
studentid INTEGER UNIQUE REFERENCES common.studentid(studentid),
schoolid INTEGER REFERENCES common.schoolid(schoolid),''',
'partnerids': '''
noble_student_sf_id VARCHAR CHECK(length(noble_student_sf_id) = 18),
noble_sf_school_id VARCHAR(18) CHECK(length(noble_sf_school_id)=18),
kipp_nj_sf_id VARCHAR CHECK(length(kipp_nj_sf_id) = 18),
kipp_nj_sf_school_id VARCHAR CHECK(length(kipp_nj_sf_school_id) = 18),''',
'data': '''
cumulative_weighted_gpa NUMERIC CHECK(cumulative_weighted_gpa >= 0.0),
cumulative_unweighted_gpa NUMERIC CHECK(cumulative_unweighted_gpa >= 0.0)'''
}
colleges = {
'commonids': '''
collegeid INTEGER UNIQUE NOT NULL REFERENCES common.collegeid(collegeid),''',
'partnerids': '''
"noble_sf_college_id" CHAR(18) CHECK (length(noble_sf_college_id) = 18),
"ipedid" CHAR(6) CHECK(length(ipedid)=6),''',
'data': '''
"isprivate" BOOLEAN,
"isforprofit" BOOLEAN,
"is4year" BOOLEAN,
"zip" INTEGER CHECK ("zip" >= 00501 AND "zip" <= 99950),
"name" VARCHAR(70),
"isrural" BOOLEAN,
"allmale" BOOLEAN,
"allfemale" BOOLEAN,
"graduationrate_6yr" FLOAT CHECK(graduationrate_6yr>=0 and graduationrate_6yr<=100),
"graduationrate_minority_6yr" FLOAT CHECK(graduationrate_minority_6yr>=0 and graduationrate_minority_6yr<=100),
"transferrate_6yr" FLOAT CHECK(transferrate_6yr>=0 and transferrate_6yr<=100),
"transferrate_minority_6yr" FLOAT CHECK(transferrate_minority_6yr>=0 and transferrate_minority_6yr<=100),
"historicallyblack" BOOLEAN,
"state" VARCHAR(2),
"longitude" NUMERIC,
"latitude" NUMERIC,
"dist_from_chicago" NUMERIC,
"barrons_rating" VARCHAR(30),
"perc_accepted" FLOAT,
"perc_accepted_enroll" FLOAT,
"perc_male" FLOAT,
"perc_female" FLOAT,
"perc_african_american" FLOAT,
"perc_hispanic" FLOAT,
"percentinstate" FLOAT,
"percentoutofstate" FLOAT,
"percentpellgrant" FLOAT,
"avgnetprice" FLOAT,
"netprice0_30" FLOAT,
"netprice30_48" FLOAT,
"netprice48_75" FLOAT,
"locale" VARCHAR,
"size_range" VARCHAR
'''
}
# High-school enrollment spells: one row per student per school attended.
hs_enrollment = {
    'commonids': '''
    studentid INTEGER REFERENCES common.studentid(studentid),
    schoolid INTEGER REFERENCES common.schoolid(schoolid),''',
    'partnerids': '''
    noble_sf_id CHAR(18) CHECK (length(noble_sf_id) = 18),
    noble_sf_school_id CHAR(18) CHECK (length(noble_sf_school_id) = 18),
    kipp_nj_sf_id CHAR(18) CHECK (length(kipp_nj_sf_id) = 18),
    kipp_nj_sf_school_id CHAR(18) CHECK (length(kipp_nj_sf_school_id) = 18),''',
    'data': '''
    start_date DATE CHECK (start_date > DATE('1900-01-01') and start_date < now()::date),
    -- fixed: the CHECK previously re-tested start_date instead of end_date
    end_date DATE CHECK (end_date > DATE('1900-01-01')),
    exit_type VARCHAR,
    CONSTRAINT valid_exit CHECK (exit_type IN ( 'Graduated', 'Attending', 'Transferred out', 'Withdrew', 'Matriculating', 'Deferred', 'Other')),
    high_school_class INTEGER,
    CONSTRAINT valid_class CHECK (high_school_class > 1950 AND high_school_class < extract(year from now()) + 10)'''
}
discipline = {
'commonids': '''
"studentid" INTEGER REFERENCES common.studentid(studentid) NOT NULL,
"schoolid" INTEGER REFERENCES common.schoolid(schoolid) NOT NULL,''',
'partnerids': '''
''',
'data': '''
"date" DATE,
CONSTRAINT valid_date CHECK(
"date" >= DATE('1980-01-01') AND "date" < CURRENT_DATE+1),
"type" VARCHAR REFERENCES lookup.disciplinetypes (name),
"severity" INTEGER,
CONSTRAINT valid_severity CHECK(
"severity" >= 0 AND "severity" <= 5),
"consequence_severity" INTEGER,
CONSTRAINT valid_consequence_severity CHECK(
"consequence_severity" >= 0 AND "consequence_severity" <= 5)
'''
}
# NWEA MAP test results; no partner id columns are defined yet.
maptests = {
    'commonids': '''
    ''',
    'partnerids': '''
    ''',
    'data': '''
    "academicyear" INTEGER CHECK(academicyear <= extract(year from now())),
    -- fixed: the season CHECK was missing its closing parenthesis,
    -- which made the generated DDL a SQL syntax error
    "season" VARCHAR(6) CHECK (season IN ( 'FALL', 'WINTER', 'SPRING' )),
    "subject" VARCHAR(27) CHECK (subject IN ('Reading','Mathematics','Language','Science General','Science Concepts and Processes')),
    "score" INTEGER,
    "nationalpercentileranking" INTEGER CHECK(nationalpercentileranking>0 and nationalpercentileranking<=100)'''
}
sattests = {
'commonids': '''
''',
'partnerids': '''
''',
'data': '''
"date" DATE CHECK(date <= now()::date),
"score_reading" INTEGER CHECK(score_reading>=200 and score_reading<=800 and score_reading%10=0),
"score_math" INTEGER CHECK(score_math>=200 and score_math<=800 and score_math%10=0),
"score_writing" INTEGER CHECK(score_writing>=200 and score_writing<=800 and score_writing%10=0),
"score_total" INTEGER CHECK(score_total = score_reading + score_math + score_writing),
"onlyhighestavailable" BOOLEAN NOT NULL'''
}
applications = {
'commonids': '''
studentid INTEGER NOT NULL REFERENCES common.studentid(studentid) ON DELETE RESTRICT,
collegeid INTEGER NOT NULL REFERENCES common.collegeid(collegeid) ON DELETE RESTRICT,''',
'partnerids': '''
''',
'data': '''
date_of_initial_interest DATE CHECK (date_of_initial_interest > DATE('1900-01-01') and date_of_initial_interest <= now()::date),
was_early_decision BOOLEAN,
was_accepted BOOLEAN, --this will blur over things like accepted, matriculated, deferred, etc
application_status VARCHAR,
CONSTRAINT "valid_status" CHECK (application_status IN ('Accepted', 'Denied', 'Submitted', 'Matriculated', 'Withdrew Application', 'In progress', 'Waitlist', 'Unknown', 'Deferred'))'''
} | dssg/education-college-public | code/etl/db_schema/SQLtables_cols.py | Python | mit | 16,343 | [
"VisIt"
] | f71624ee67f6b709db973943bb65898cfbbe92d29b68f09d74f11dd9487b335c |
# standard libraries
import os
import setuptools
# third party libraries
pass
# first party libraries
pass
project_name = 'dotedict'
author = 'Brian J Petersen'
author_email = None
def load_file(fname, default=None):
    """Return the text content of *fname*, or *default* if it cannot be read.

    Reads in text mode because the results (readme/history/roadmap) are
    concatenated with str literals below; returning bytes would break
    that concatenation under Python 3.
    """
    try:
        with open(fname, 'r') as f:
            return f.read()
    # fixed: was a bare 'except:' that swallowed every error (including
    # KeyboardInterrupt); only file-access failures should fall back.
    except (IOError, OSError):
        return default
# Derive package metadata from conventional files in the repo root.
readme = load_file('README.md', '')
history = load_file('HISTORY.md', '')
version = load_file('VERSION', None)
license = load_file('LICENSE', None)
roadmap = load_file('TODO.md', '')
# NOTE(review): `is not ValueError` looks like a template-sentinel pattern --
# presumably the project template ships with these names bound to ValueError
# so the asserts fire until real values are filled in; confirm upstream.
assert project_name is not ValueError, 'Please name your project.'
assert author is not ValueError, 'Please define the author\'s name.'
# Ship the VERSION file inside the package only when it actually exists.
if version is None:
    package_data = {}
else:
    package_data = {project_name: ['../VERSION', ]}
setuptools.setup(
    name = project_name,
    version = version,
    description = readme,
    long_description = readme + '\n\n' + history + '\n\n' + roadmap,
    license = license,
    author = author,
    author_email = author_email,
    packages = setuptools.find_packages(),
    package_data = package_data,
)
"Brian"
] | fae8f9ae7a307f8ff28f7440fcd5248059c0d9f2b5022cbe8d3f33ea27a6e1b7 |
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
"""List of know mimetypes"""
# List of known complex data formats
# you can use any other, but these are widely known and supported by popular
# software packages
# based on Web Processing Service Best Practices Discussion Paper, OGC 12-029
# http://opengeospatial.org/standards/wps
from collections import namedtuple
import mimetypes
_FORMATS = namedtuple('FORMATS', 'GEOJSON, JSON, SHP, GML, METALINK, META4, KML, KMZ, GEOTIFF,'
'WCS, WCS100, WCS110, WCS20, WFS, WFS100,'
'WFS110, WFS20, WMS, WMS130, WMS110,'
'WMS100, TEXT, DODS, NETCDF, NCML, LAZ, LAS, ZIP,'
'XML')
class Format(object):
"""Input/output format specification
Predefined Formats are stored in :class:`pywps.inout.formats.FORMATS`
:param str mime_type: mimetype definition
:param str schema: xml schema definition
:param str encoding: base64 or not
:param function validate: function, which will perform validation. e.g.
:param number mode: validation mode
:param str extension: file extension
"""
def __init__(self, mime_type,
schema=None, encoding=None,
validate=None,
extension=None):
"""Constructor
"""
self._mime_type = None
self._encoding = None
self._schema = None
self._extension = None
self.mime_type = mime_type
self.encoding = encoding
self.schema = schema
self.validate = validate
self.extension = extension
@property
def mime_type(self):
"""Get format mime type
:rtype: String
"""
return self._mime_type
@mime_type.setter
def mime_type(self, mime_type):
"""Set format mime type
"""
try:
# support Format('GML')
frmt = getattr(FORMATS, mime_type)
self._mime_type = frmt.mime_type
except AttributeError:
# if we don't have this as a shortcut, assume it's a real mime type
self._mime_type = mime_type
except NameError:
# TODO: on init of FORMATS, FORMATS is not available. Clean up code!
self._mime_type = mime_type
@property
def encoding(self):
"""Get format encoding
:rtype: String
"""
if self._encoding:
return self._encoding
else:
return ''
@encoding.setter
def encoding(self, encoding):
"""Set format encoding
"""
self._encoding = encoding
@property
def schema(self):
"""Get format schema
:rtype: String
"""
if self._schema:
return self._schema
else:
return ''
@schema.setter
def schema(self, schema):
"""Set format schema
"""
self._schema = schema
@property
def extension(self):
"""Get format extension
:rtype: String
"""
if self._extension:
return self._extension
else:
return ''
@extension.setter
def extension(self, extension):
"""Set format extension
"""
self._extension = extension
def same_as(self, frmt):
"""Check input frmt, if it seems to be the same as self
"""
if not isinstance(frmt, Format):
return False
return all([frmt.mime_type == self.mime_type,
frmt.encoding == self.encoding,
frmt.schema == self.schema])
def __eq__(self, other):
return self.same_as(other)
@property
def json(self):
"""Get format as json
:rtype: dict
"""
return {
'mime_type': self.mime_type,
'encoding': self.encoding,
'schema': self.schema,
'extension': self.extension
}
@json.setter
def json(self, jsonin):
"""Set format from json
:param jsonin:
"""
self.mime_type = jsonin['mime_type']
self.encoding = jsonin['encoding']
self.schema = jsonin['schema']
self.extension = jsonin['extension']
FORMATS = _FORMATS(
Format('application/vnd.geo+json', extension='.geojson'),
Format('application/json', extension='.json'),
Format('application/x-zipped-shp', extension='.zip', encoding='base64'),
Format('application/gml+xml', extension='.gml'),
Format('application/metalink+xml; version=3.0', extension='.metalink', schema="metalink/3.0/metalink.xsd"),
Format('application/metalink+xml; version=4.0', extension='.meta4', schema="metalink/4.0/metalink4.xsd"),
Format('application/vnd.google-earth.kml+xml', extension='.kml'),
Format('application/vnd.google-earth.kmz', extension='.kmz', encoding='base64'),
Format('image/tiff; subtype=geotiff', extension='.tiff', encoding='base64'),
Format('application/x-ogc-wcs', extension='.xml'),
Format('application/x-ogc-wcs; version=1.0.0', extension='.xml'),
Format('application/x-ogc-wcs; version=1.1.0', extension='.xml'),
Format('application/x-ogc-wcs; version=2.0', extension='.xml'),
Format('application/x-ogc-wfs', extension='.xml'),
Format('application/x-ogc-wfs; version=1.0.0', extension='.xml'),
Format('application/x-ogc-wfs; version=1.1.0', extension='.xml'),
Format('application/x-ogc-wfs; version=2.0', extension='.xml'),
Format('application/x-ogc-wms', extension='.xml'),
Format('application/x-ogc-wms; version=1.3.0', extension='.xml'),
Format('application/x-ogc-wms; version=1.1.0', extension='.xml'),
Format('application/x-ogc-wms; version=1.0.0', extension='.xml'),
Format('text/plain', extension='.txt'),
Format('application/x-ogc-dods', extension='.nc'),
Format('application/x-netcdf', extension='.nc', encoding='base64'),
Format('application/ncML+xml', extension='.ncml', schema="ncml/2.2/ncml-2.2.xsd"),
Format('application/octet-stream', extension='.laz'),
Format('application/octet-stream', extension='.las'),
Format('application/zip', extension='.zip', encoding='base64'),
Format('application/xml', extension='.xml'),
)
def _get_mimetypes():
    """Register every predefined FORMATS entry with the system-wide
    mimetypes registry so extension/mime lookups know about them."""
    mimetypes.init()
    for entry in FORMATS:
        mimetypes.add_type(entry.mime_type, entry.extension, True)


_get_mimetypes()
def get_format(frmt, validator=None):
    """Return Format instance based on given pywps.inout.FORMATS keyword

    :param frmt: attribute name of one of the predefined FORMATS entries
        (e.g. ``'GEOJSON'``); anything else yields a placeholder format
    :param validator: validation callable attached to the returned format
    """
    # TODO this should be probably removed, it's used only in tests
    outfrmt = None
    if frmt in FORMATS._asdict():
        outfrmt = FORMATS._asdict()[frmt]
        # NOTE(review): this mutates the shared FORMATS singleton entry, so
        # every later user of that predefined format sees this validator --
        # confirm whether that side effect is intended before changing it.
        outfrmt.validate = validator
        return outfrmt
    else:
        # Unknown keyword: fall back to a placeholder with mime type 'None'.
        return Format('None', validate=validator)
| bird-house/PyWPS | pywps/inout/formats/__init__.py | Python | mit | 7,158 | [
"NetCDF"
] | f290937869558fa7814d1a9fc633a3bba67f23768b81e07a133a8c072366f64e |
######################################################################
# Copyright 2016, 2021 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""
Web Steps
Steps file for web interactions with Silenium
For information on Waiting until elements are present in the HTML see:
https://selenium-python.readthedocs.io/waits.html
"""
import logging
from behave import when, then
from compare import expect, ensure
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions
ID_PREFIX = 'pet_'
@when('I visit the "home page"')
def step_impl(context):
    """ Make a call to the base URL """
    # context.base_url is presumably configured in the behave environment
    # setup before scenarios run -- confirm against environment.py
    context.driver.get(context.base_url)
    # Uncomment next line to take a screenshot of the web page
    #context.driver.save_screenshot('home_page.png')
@then('I should see "{message}" in the title')
def step_impl(context, message):
""" Check the document title for a message """
expect(context.driver.title).to_contain(message)
@then('I should not see "{message}"')
def step_impl(context, message):
error_msg = "I should not see '%s' in '%s'" % (message, context.resp.text)
ensure(message in context.resp.text, False, error_msg)
@when('I set the "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = ID_PREFIX + element_name.lower()
element = context.driver.find_element_by_id(element_id)
element.clear()
element.send_keys(text_string)
@when('I select "{text}" in the "{element_name}" dropdown')
def step_impl(context, text, element_name):
element_id = ID_PREFIX + element_name.lower()
element = Select(context.driver.find_element_by_id(element_id))
element.select_by_visible_text(text)
@then('I should see "{text}" in the "{element_name}" dropdown')
def step_impl(context, text, element_name):
element_id = ID_PREFIX + element_name.lower()
element = Select(context.driver.find_element_by_id(element_id))
expect(element.first_selected_option.text).to_equal(text)
@then('the "{element_name}" field should be empty')
def step_impl(context, element_name):
element_id = ID_PREFIX + element_name.lower()
element = context.driver.find_element_by_id(element_id)
expect(element.get_attribute('value')).to_be(u'')
##################################################################
# These two function simulate copy and paste
##################################################################
@when('I copy the "{element_name}" field')
def step_impl(context, element_name):
element_id = ID_PREFIX + element_name.lower()
element = WebDriverWait(context.driver, context.WAIT_SECONDS).until(
expected_conditions.presence_of_element_located((By.ID, element_id))
)
context.clipboard = element.get_attribute('value')
logging.info('Clipboard contains: %s', context.clipboard)
@when('I paste the "{element_name}" field')
def step_impl(context, element_name):
element_id = ID_PREFIX + element_name.lower()
element = WebDriverWait(context.driver, context.WAIT_SECONDS).until(
expected_conditions.presence_of_element_located((By.ID, element_id))
)
element.clear()
element.send_keys(context.clipboard)
##################################################################
# This code works because of the following naming convention:
# The buttons have an id in the html hat is the button text
# in lowercase followed by '-btn' so the Clean button has an id of
# id='clear-btn'. That allows us to lowercase the name and add '-btn'
# to get the element id of any button
##################################################################
@when('I press the "{button}" button')
def step_impl(context, button):
button_id = button.lower() + '-btn'
context.driver.find_element_by_id(button_id).click()
@then('I should see "{name}" in the results')
def step_impl(context, name):
found = WebDriverWait(context.driver, context.WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'search_results'),
name
)
)
expect(found).to_be(True)
@then('I should not see "{name}" in the results')
def step_impl(context, name):
element = context.driver.find_element_by_id('search_results')
error_msg = "I should not see '%s' in '%s'" % (name, element.text)
ensure(name in element.text, False, error_msg)
@then('I should see the message "{message}"')
def step_impl(context, message):
found = WebDriverWait(context.driver, context.WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'flash_message'),
message
)
)
expect(found).to_be(True)
##################################################################
# This code works because of the following naming convention:
# The id field for text input in the html is the element name
# prefixed by ID_PREFIX so the Name field has an id='pet_name'
# We can then lowercase the name and prefix with pet_ to get the id
##################################################################
@then('I should see "{text_string}" in the "{element_name}" field')
def step_impl(context, text_string, element_name):
element_id = ID_PREFIX + element_name.lower()
found = WebDriverWait(context.driver, context.WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element_value(
(By.ID, element_id),
text_string
)
)
expect(found).to_be(True)
@when('I change "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = ID_PREFIX + element_name.lower()
element = WebDriverWait(context.driver, context.WAIT_SECONDS).until(
expected_conditions.presence_of_element_located((By.ID, element_id))
)
element.clear()
element.send_keys(text_string)
| nyu-devops/lab-flask-bdd | features/steps/web_steps.py | Python | apache-2.0 | 6,569 | [
"VisIt"
] | ab8bbeae2b06ee1d091e3dac60e1dadad6d19f25cbfa1eba36704d162da3fb7e |
from vtk import *
addStringLabel = vtkProgrammableFilter()
def computeLabel():
    """Execute callback for the addStringLabel programmable filter: pass the
    input graph through and attach a per-vertex string array named "label"
    holding each vertex's zero-padded index ("00", "01", ...)."""
    # 'input' shadows the builtin of the same name; kept for fidelity
    input = addStringLabel.GetInput()
    output = addStringLabel.GetOutput()
    output.ShallowCopy(input)
    # Create output array
    vertexArray = vtkStringArray()
    vertexArray.SetName("label")
    vertexArray.SetNumberOfTuples(output.GetNumberOfVertices())
    # Loop through all the vertices, storing each vertex's zero-padded index
    # (previous comment said "degree", but a label string is stored here)
    for i in range(output.GetNumberOfVertices()):
        label = '%02d' % (i)
        vertexArray.SetValue(i, label)
    # Add the new attribute array to the output graph
    output.GetVertexData().AddArray(vertexArray)
addStringLabel.SetExecuteMethod(computeLabel)
source = vtkRandomGraphSource()
source.SetNumberOfVertices(15)
source.SetIncludeEdgeWeights(True)
addStringLabel.SetInputConnection(source.GetOutputPort())
conn_comp = vtkBoostConnectedComponents()
bi_conn_comp = vtkBoostBiconnectedComponents()
conn_comp.SetInputConnection(addStringLabel.GetOutputPort())
bi_conn_comp.SetInputConnection(conn_comp.GetOutputPort())
# Cleave off part of the graph
vertexDataTable = vtkDataObjectToTable()
vertexDataTable.SetInputConnection(bi_conn_comp.GetOutputPort())
vertexDataTable.SetFieldType(3) # Vertex data
# Make a tree out of connected/biconnected components
toTree = vtkTableToTreeFilter()
toTree.AddInputConnection(vertexDataTable.GetOutputPort())
tree1 = vtkGroupLeafVertices()
tree1.AddInputConnection(toTree.GetOutputPort())
tree1.SetInputArrayToProcess(0,0, 0, 4, "component")
tree1.SetInputArrayToProcess(1,0, 0, 4, "label")
tree2 = vtkGroupLeafVertices()
tree2.AddInputConnection(tree1.GetOutputPort())
tree2.SetInputArrayToProcess(0,0, 0, 4, "biconnected component")
tree2.SetInputArrayToProcess(1,0, 0, 4, "label")
# Create a tree ring view on connected/biconnected components
view1 = vtkTreeRingView()
view1.SetTreeFromInputConnection(tree2.GetOutputPort())
view1.SetGraphFromInputConnection(bi_conn_comp.GetOutputPort())
view1.SetLabelPriorityArrayName("GraphVertexDegree")
view1.SetAreaColorArrayName("VertexDegree")
view1.SetAreaLabelArrayName("label")
view1.SetAreaHoverArrayName("label")
view1.SetAreaLabelVisibility(True)
view1.SetBundlingStrength(.5)
view1.SetLayerThickness(.5)
view1.Update()
view1.SetColorEdges(True)
view1.SetEdgeColorArrayName("edge weight")
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(bi_conn_comp.GetOutputPort())
view2.SetVertexLabelArrayName("label")
view2.SetVertexLabelVisibility(True)
view2.SetVertexColorArrayName("label")
view2.SetColorVertices(True)
view2.SetLayoutStrategyToSimple2D()
# Apply a theme to the views
theme = vtkViewTheme.CreateOceanTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view1.GetInteractor().Start()
| collects/VTK | Examples/Infovis/Python/graph_tree_ring.py | Python | bsd-3-clause | 2,928 | [
"VTK"
] | d6758d7fa5ba2c4ac9ac004cf3d5ac7c24c0efe6dc156a862277d84f62c6d2dc |
import os
import theano
import theano.tensor as T
import numpy as np
from mozi.datasets.mnist import Mnist
from mozi.model import Sequential
from mozi.layers.linear import Linear
from mozi.layers.activation import *
from mozi.layers.noise import Dropout, Gaussian
from mozi.log import Log
from mozi.train_object import TrainObject
from mozi.cost import mse, error, entropy
from mozi.learning_method import *
from mozi.weight_init import *
from sklearn.metrics import accuracy_score
def setenv():
    """Ensure the MOZI_* environment variables are set, then echo them.

    Each variable defaults to a sub-directory of the project root (the
    parent of this script's directory); variables already present in the
    environment are left untouched.
    """
    project_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    defaults = [
        ('MOZI_DATA_PATH', '/data'),          # directory to save all the dataset
        ('MOZI_DATABASE_PATH', '/database'),  # database used for logging the results
        ('MOZI_SAVE_PATH', '/save'),          # trained models and outputs
    ]
    for var, subdir in defaults:
        if not os.getenv(var):
            os.environ[var] = project_root + subdir
    print('MOZI_DATA_PATH = ' + os.environ['MOZI_DATA_PATH'])
    print('MOZI_SAVE_PATH = ' + os.environ['MOZI_SAVE_PATH'])
    print('MOZI_DATABASE_PATH = ' + os.environ['MOZI_DATABASE_PATH'])
def train():
    """Train a denoising autoencoder on MNIST.

    The input is corrupted with Gaussian noise, encoded 784 -> 200 -> 50,
    and decoded back 50 -> 200 -> 784 with tied (transposed) weights; a
    cross-entropy reconstruction cost is minimised with AdaGrad.
    """
    # build dataset
    data = Mnist(batch_size=64, train_valid_test_ratio=[5,1,1])
    # for autoencoder, the output will be equal to input
    data.set_train(X=data.get_train().X, y=data.get_train().X)
    data.set_valid(X=data.get_valid().X, y=data.get_valid().X)
    # build model
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    # build encoder: noise layer first makes this a *denoising* autoencoder
    model.add(Gaussian())
    encode_layer1 = Linear(prev_dim=28*28, this_dim=200)
    model.add(encode_layer1)
    model.add(RELU())
    encode_layer2 = Linear(prev_dim=200, this_dim=50)
    model.add(encode_layer2)
    model.add(Tanh())
    # build decoder: weights are tied to the encoder layers (transposed)
    decode_layer1 = Linear(prev_dim=50, this_dim=200, W=encode_layer2.W.T)
    model.add(decode_layer1)
    model.add(RELU())
    decode_layer2 = Linear(prev_dim=200, this_dim=28*28, W=encode_layer1.W.T)
    model.add(decode_layer2)
    model.add(Sigmoid())
    # build learning method
    learning_method = AdaGrad(learning_rate=0.01, momentum=0.9,
                              lr_decay_factor=0.9, decay_batch=10000)
    # put everything into the train object; stop after 10 epochs or once the
    # validation cost improves by less than 1% over the last 5 epochs
    train_object = TrainObject(model = model,
                               log = None,
                               dataset = data,
                               train_cost = entropy,
                               valid_cost = entropy,
                               learning_method = learning_method,
                               stop_criteria = {'max_epoch' : 10,
                                                'epoch_look_back' : 5,
                                                'percent_decrease' : 0.01}
                               )
    # finally run the code
    train_object.setup()
    train_object.run()
# Script entry point: configure the MOZI_* environment, then run training.
if __name__ == '__main__':
    setenv()
    train()
| dksahuji/Mozi | example/mnist_dae.py | Python | mit | 3,060 | [
"Gaussian"
] | a0e6d1f03599b3e7d5b9309565937671a8177416f17ef66d769f548a83f0bea9 |
"""
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
import errno
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize, breakListIntoChunks
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # RSCID
__RCSID__ = "$Id$"
def _isOlderThan(stringTime, days):
""" Check if a time stamp is older than a given number of days """
timeDelta = timedelta(days=days)
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
def _initialiseAccountingObject(operation, se, files):
  """ Build a DataOperation accounting record pre-filled for `files`
      successful transfers of type `operation` towards SE `se`.

      :param str operation: operation type label (e.g. 'putAndRegister')
      :param str se: destination storage element name
      :param int files: number of files involved
  """
  result = getProxyInfo()
  userName = result['Value'].get('username', 'unknown') if result['OK'] else 'system'
  accountingDict = {
      'OperationType': operation,
      'User': userName,
      'Protocol': 'DataManager',
      'RegistrationTime': 0.0,
      'RegistrationOK': 0,
      'RegistrationTotal': 0,
      'Destination': se,
      'TransferTotal': files,
      'TransferOK': files,
      # NOTE(review): TransferSize is initialised with the file *count*, as in
      # the original code -- callers overwrite it with the real size when known
      'TransferSize': files,
      'TransferTime': 0.0,
      'FinalStatus': 'Successful',
      'Source': DIRAC.siteName(),
  }
  oDataOperation = DataOperation()
  oDataOperation.setValuesFromDict(accountingDict)
  return oDataOperation
class DataManager(object):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
  def __init__(self, catalogs=None, masterCatalogOnly=False, vo=False):
    """ c'tor

        :param self: self reference
        :param catalogs: the list of catalog in which to perform the operations. This
                         list will be ignored if masterCatalogOnly is set to True
        :param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
                                  The catalogs parameter will be ignored.
        :param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
    """
    self.log = gLogger.getSubLogger(self.__class__.__name__, True)
    self.voName = vo
    if catalogs is None:
      catalogs = []
    # masterCatalogOnly overrides any explicit catalog list
    catalogsToUse = FileCatalog(vo=self.voName).getMasterCatalogNames()[
        'Value'] if masterCatalogOnly else catalogs
    self.fileCatalog = FileCatalog(catalogs=catalogsToUse, vo=self.voName)
    # injected later via setAccountingClient()
    self.accountingClient = None
    self.resourceStatus = ResourceStatus()
    # Operations options steering failure handling and URL resolution
    self.ignoreMissingInFC = Operations(vo=self.voName).getValue(
        'DataManagement/IgnoreMissingInFC', False)
    self.useCatalogPFN = Operations(vo=self.voName).getValue(
        'DataManagement/UseCatalogPFN', True)
    self.dmsHelper = DMSHelpers(vo=vo)
    # protocol lists used for replica registration and third-party copies
    self.registrationProtocol = self.dmsHelper.getRegistrationProtocols()
    self.thirdPartyProtocols = self.dmsHelper.getThirdPartyProtocols()
def setAccountingClient(self, client):
""" Set Accounting Client instance
"""
self.accountingClient = client
def __hasAccess(self, opType, path):
""" Check if we have permission to execute given operation on the given file (if exists) or its directory
"""
if isinstance(path, basestring):
paths = [path]
else:
paths = list(path)
res = self.fileCatalog.hasAccess(paths, opType)
if not res['OK']:
return res
result = {'Successful': list(), 'Failed': list()}
for path in paths:
isAllowed = res['Value']['Successful'].get(path, False)
if isAllowed:
result['Successful'].append(path)
else:
result['Failed'].append(path)
return S_OK(result)
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory(self, lfnDir):
""" Clean the logical directory from the catalog and storage
"""
log = self.log.getSubLogger('cleanLogicalDirectory')
if isinstance(lfnDir, basestring):
lfnDir = [lfnDir]
retDict = {"Successful": {}, "Failed": {}}
for folder in lfnDir:
res = self.__cleanDirectory(folder)
if not res['OK']:
log.debug("Failed to clean directory.", "%s %s" %
(folder, res['Message']))
retDict["Failed"][folder] = res['Message']
else:
log.debug("Successfully removed directory.", folder)
retDict["Successful"][folder] = res['Value']
return S_OK(retDict)
  def __cleanDirectory(self, folder):
    """ delete all files from directory :folder: in FileCatalog and StorageElement

        :param self: self reference
        :param str folder: directory name
        :return: S_OK() once catalog entries and storage copies are gone, S_ERROR otherwise
    """
    log = self.log.getSubLogger('__cleanDirectory')
    # Refuse to act unless the credential may remove this directory
    res = self.__hasAccess('removeDirectory', folder)
    if not res['OK']:
      return res
    if folder not in res['Value']['Successful']:
      errStr = "Write access not permitted for this credential."
      log.debug(errStr, folder)
      return S_ERROR(errStr)
    # First remove every file registered below the directory
    res = self.__getCatalogDirectoryContents([folder])
    if not res['OK']:
      return res
    res = self.removeFile(res['Value'])
    if not res['OK']:
      return res
    for lfn, reason in res['Value']['Failed'].iteritems():
      log.error("Failed to remove file found in the catalog",
                "%s %s" % (lfn, reason))
    # Legacy placeholder file may legitimately be absent: "No such file" is benign
    res = returnSingleResult(self.removeFile(['%s/dirac_directory' % folder]))
    if not res['OK']:
      if not "No such file" in res['Message']:
        log.warn('Failed to delete dirac_directory placeholder file')
    # Physically remove the (now empty) directory from every configured SE
    storageElements = gConfig.getValue(
        'Resources/StorageElementGroups/SE_Cleaning_List', [])
    failed = False
    for storageElement in sorted(storageElements):
      res = self.__removeStorageDirectory(folder, storageElement)
      if not res['OK']:
        failed = True
    if failed:
      return S_ERROR("Failed to clean storage directory at all SEs")
    # Finally drop the directory entry itself from the catalog
    res = returnSingleResult(
        self.fileCatalog.removeDirectory(folder, recursive=True))
    if not res['OK']:
      return res
    return S_OK()
def __removeStorageDirectory(self, directory, storageElement):
""" delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
se = StorageElement(storageElement, vo=self.voName)
res = returnSingleResult(se.exists(directory))
log = self.log.getSubLogger('__removeStorageDirectory')
if not res['OK']:
log.debug("Failed to obtain existance of directory", res['Message'])
return res
exists = res['Value']
if not exists:
log.debug("The directory %s does not exist at %s " %
(directory, storageElement))
return S_OK()
res = returnSingleResult(se.removeDirectory(directory, recursive=True))
if not res['OK']:
log.debug("Failed to remove storage directory", res['Message'])
return res
log.debug("Successfully removed %d files from %s at %s" % (res['Value']['FilesRemoved'],
directory,
storageElement))
return S_OK()
def __getCatalogDirectoryContents(self, directories):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
log = self.log.getSubLogger('__getCatalogDirectoryContents')
log.debug('Obtaining the catalog contents for %d directories:' %
len(directories))
activeDirs = directories
allFiles = {}
while len(activeDirs) > 0:
currentDir = activeDirs[0]
res = returnSingleResult(
self.fileCatalog.listDirectory(currentDir, verbose=True))
activeDirs.remove(currentDir)
if not res['OK']:
log.debug("Problem getting the %s directory content" %
currentDir, res['Message'])
else:
dirContents = res['Value']
activeDirs.extend(dirContents['SubDirs'])
allFiles.update(dirContents['Files'])
log.debug("Found %d files" % len(allFiles))
return S_OK(allFiles)
def getReplicasFromDirectory(self, directory):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if isinstance(directory, basestring):
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents(directories)
if not res['OK']:
return res
allReplicas = dict((lfn, metadata['Replicas'])
for lfn, metadata in res['Value'].iteritems())
return S_OK(allReplicas)
def getFilesFromDirectory(self, directory, days=0, wildcard='*'):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if isinstance(directory, basestring):
directories = [directory]
else:
directories = directory
log = self.log.getSubLogger('getFilesFromDirectory')
log.debug("Obtaining the files older than %d days in %d directories:" %
(days, len(directories)))
for folder in directories:
log.debug(folder)
activeDirs = directories
allFiles = []
while len(activeDirs) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = returnSingleResult(
self.fileCatalog.listDirectory(currentDir, verbose=(days != 0)))
activeDirs.remove(currentDir)
if not res['OK']:
log.debug("Error retrieving directory contents", "%s %s" %
(currentDir, res['Message']))
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
log.debug("%s: %d files, %d sub-directories" %
(currentDir, len(files), len(subdirs)))
for subdir in subdirs:
if (not days) or _isOlderThan(subdirs[subdir]['CreationDate'], days):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append(subdir)
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get('Metadata', fileInfo)
if (not days) or not fileInfo.get('CreationDate') or _isOlderThan(fileInfo['CreationDate'], days):
if wildcard == '*' or fnmatch.fnmatch(fileName, wildcard):
fileName = fileInfo.get('LFN', fileName)
allFiles.append(fileName)
return S_OK(allFiles)
##########################################################################
#
# These are the data transfer methods
#
  def getFile(self, lfn, destinationDir='', sourceSE=None):
    """ Get a local copy of a LFN from Storage Elements.

        'lfn' is the logical file name for the desired file
        :param str destinationDir: local directory the copy is written to (default: cwd)
        :param sourceSE: if given, only this SE is tried as the source
        :return: S_OK with 'Successful' {lfn: localPath} and 'Failed' {lfn: reason}
    """
    log = self.log.getSubLogger('getFile')
    # Accept a single LFN or a list of LFNs
    if isinstance(lfn, list):
      lfns = lfn
    elif isinstance(lfn, basestring):
      lfns = [lfn]
    else:
      errStr = "Supplied lfn must be string or list of strings."
      log.debug(errStr)
      return S_ERROR(errStr)
    log.debug("Attempting to get %s files." % len(lfns))
    # presumably restricts to replicas on usable SEs -- see getActiveReplicas
    res = self.getActiveReplicas(lfns, getUrl=False)
    if not res['OK']:
      return res
    failed = res['Value']['Failed']
    lfnReplicas = res['Value']['Successful']
    # Catalog size/checksum are needed by __getFile to validate the download
    res = self.fileCatalog.getFileMetadata(lfnReplicas.keys())
    if not res['OK']:
      return res
    failed.update(res['Value']['Failed'])
    fileMetadata = res['Value']['Successful']
    successful = {}
    for lfn in fileMetadata:
      res = self.__getFile(
          lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir, sourceSE=sourceSE)
      if not res['OK']:
        failed[lfn] = res['Message']
      else:
        successful[lfn] = res['Value']
    return S_OK({'Successful': successful, 'Failed': failed})
def __getFile(self, lfn, replicas, metadata, destinationDir, sourceSE=None):
"""
Method actually doing the job to get a file from storage
"""
log = self.log.getSubLogger('__getFile')
if not replicas:
errStr = "No accessible replicas found"
log.debug(errStr)
return S_ERROR(errStr)
# Determine the best replicas
errTuple = ("No SE", "found")
if sourceSE is None:
sortedSEs = self._getSEProximity(replicas)
else:
if sourceSE not in replicas:
return S_ERROR('No replica at %s' % sourceSE)
else:
sortedSEs = [sourceSE]
for storageElementName in sortedSEs:
se = StorageElement(storageElementName, vo=self.voName)
res = returnSingleResult(se.getFile(
lfn, localPath=os.path.realpath(destinationDir)))
if not res['OK']:
errTuple = ("Error getting file from storage:", "%s from %s, %s" %
(lfn, storageElementName, res['Message']))
errToReturn = res
else:
localFile = os.path.realpath(os.path.join(
destinationDir, os.path.basename(lfn)))
localAdler = fileAdler(localFile)
if metadata['Size'] != res['Value']:
errTuple = ("Mismatch of sizes:", "downloaded = %d, catalog = %d" %
(res['Value'], metadata['Size']))
errToReturn = S_ERROR(DErrno.EFILESIZE, errTuple[1])
elif (metadata['Checksum']) and (not compareAdler(metadata['Checksum'], localAdler)):
errTuple = ("Mismatch of checksums:", "downloaded = %s, catalog = %s" %
(localAdler, metadata['Checksum']))
errToReturn = S_ERROR(DErrno.EBADCKS, errTuple[1])
else:
return S_OK(localFile)
# If we are here, there was an error, log it debug level
log.debug(errTuple[0], errTuple[1])
log.verbose("Failed to get local copy from any replicas:",
"\n%s %s" % errTuple)
return errToReturn
def _getSEProximity(self, replicas):
""" get SE proximity """
siteName = DIRAC.siteName()
self.__filterTapeSEs(replicas)
localSEs = [se for se in self.dmsHelper.getSEsAtSite(
siteName).get('Value', []) if se in replicas]
countrySEs = []
countryCode = str(siteName).split('.')[-1]
res = self.dmsHelper.getSEsAtCountry(countryCode)
if res['OK']:
countrySEs = [se for se in res['Value']
if se in replicas and se not in localSEs]
sortedSEs = randomize(localSEs) + randomize(countrySEs)
sortedSEs += randomize(se for se in replicas if se not in sortedSEs)
return sortedSEs
def putAndRegister(self, lfn, fileName, diracSE, guid=None, path=None,
checksum=None, overwrite=False):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
'file' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
'overwrite' removes file from the file catalogue and SE before attempting upload
"""
res = self.__hasAccess('addFile', lfn)
if not res['OK']:
return res
log = self.log.getSubLogger('putAndRegister')
if lfn not in res['Value']['Successful']:
errStr = "Write access not permitted for this credential."
log.debug(errStr, lfn)
return S_ERROR(errStr)
# Check that the local file exists
if not os.path.exists(fileName):
errStr = "Supplied file does not exist."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname(lfn)
# Obtain the size of the local file
size = getSize(fileName)
if size == 0:
errStr = "Supplied file is zero size."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid(fileName)
if not checksum:
log.debug("Checksum information not provided. Calculating adler32.")
checksum = fileAdler(fileName)
# Make another try
if not checksum:
log.debug("Checksum calculation failed, try again")
checksum = fileAdler(fileName)
if checksum:
log.debug("Checksum calculated to be %s." % checksum)
else:
return S_ERROR(DErrno.EBADCKS, "Unable to calculate checksum")
res = self.fileCatalog.exists({lfn: guid})
if not res['OK']:
errStr = "Completely failed to determine existence of destination LFN."
log.debug(errStr, lfn)
return res
if lfn not in res['Value']['Successful']:
errStr = "Failed to determine existence of destination LFN."
log.debug(errStr, lfn)
return S_ERROR(errStr)
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
if overwrite:
resRm = self.removeFile(lfn, force=True)
if not resRm['OK']:
errStr = "Failed to prepare file for overwrite"
log.debug(errStr, lfn)
return resRm
if lfn not in resRm['Value']['Successful']:
errStr = "Failed to either delete file or LFN"
log.debug(errStr, lfn)
return S_ERROR("%s %s" % (errStr, lfn))
else:
errStr = "The supplied LFN already exists in the File Catalog."
log.debug(errStr, lfn)
return S_ERROR("%s %s" % (errStr, res['Value']['Successful'][lfn]))
else:
# If the returned LFN is different, this is the name of a file
# with the same GUID
errStr = "This file GUID already exists for another file"
log.debug(errStr, res['Value']['Successful'][lfn])
return S_ERROR("%s %s" % (errStr, res['Value']['Successful'][lfn]))
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement(diracSE, vo=self.voName)
res = storageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.debug(errStr, "%s %s" % (diracSE, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
fileDict = {lfn: fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = _initialiseAccountingObject('putAndRegister', diracSE, 1)
oDataOperation.setStartTime()
oDataOperation.setValueByKey('TransferSize', size)
startTime = time.time()
res = returnSingleResult(storageElement.putFile(fileDict))
putTime = time.time() - startTime
oDataOperation.setValueByKey('TransferTime', putTime)
if not res['OK']:
# We don't consider it a failure if the SE is not valid
if not DErrno.cmpError(res, errno.EACCES):
oDataOperation.setValueByKey('TransferOK', 0)
oDataOperation.setValueByKey('FinalStatus', 'Failed')
oDataOperation.setEndTime()
gDataStoreClient.addRegister(oDataOperation)
gDataStoreClient.commit()
startTime = time.time()
log.debug('putAndRegister: Sending accounting took %.1f seconds' %
(time.time() - startTime))
errStr = "Failed to put file to Storage Element."
log.debug(errStr, "%s: %s" % (fileName, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
destinationSE = storageElement.storageElementName()
res = returnSingleResult(storageElement.getURL(
lfn, protocol=self.registrationProtocol))
if not res['OK']:
errStr = "Failed to generate destination PFN."
log.debug(errStr, res['Message'])
return S_ERROR("%s %s" % (errStr, res['Message']))
destUrl = res['Value']
oDataOperation.setValueByKey('RegistrationTotal', 1)
fileTuple = (lfn, destUrl, size, destinationSE, guid, checksum)
registerDict = {'LFN': lfn, 'PFN': destUrl, 'Size': size,
'TargetSE': destinationSE, 'GUID': guid, 'Addler': checksum}
startTime = time.time()
res = self.registerFile(fileTuple)
registerTime = time.time() - startTime
oDataOperation.setValueByKey('RegistrationTime', registerTime)
if not res['OK']:
errStr = "Completely failed to register file."
log.debug(errStr, res['Message'])
failed[lfn] = {'register': registerDict}
oDataOperation.setValueByKey('FinalStatus', 'Failed')
elif lfn in res['Value']['Failed']:
errStr = "Failed to register file."
log.debug(errStr, "%s %s" % (lfn, res['Value']['Failed'][lfn]))
oDataOperation.setValueByKey('FinalStatus', 'Failed')
failed[lfn] = {'register': registerDict}
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey('RegistrationOK', 1)
oDataOperation.setEndTime()
gDataStoreClient.addRegister(oDataOperation)
startTime = time.time()
gDataStoreClient.commit()
log.debug('Sending accounting took %.1f seconds' %
(time.time() - startTime))
return S_OK({'Successful': successful, 'Failed': failed})
  def replicateAndRegister(self, lfn, destSE, sourceSE='', destPath='', localCache='', catalog=''):
    """ Replicate a LFN to a destination SE and register the replica.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is the local file system location to be used as a temporary cache
        :param str catalog: restrict registration to this catalog (default: all configured)
        :return: S_OK with 'Successful' {lfn: {'replicate': t, 'register': t}}
                 and 'Failed' dictionaries
    """
    log = self.log.getSubLogger('replicateAndRegister')
    successful = {}
    failed = {}
    log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
    startReplication = time.time()
    res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
    replicationTime = time.time() - startReplication
    if not res['OK']:
      errStr = "Completely failed to replicate file."
      log.debug(errStr, res['Message'])
      return S_ERROR("%s %s" % (errStr, res['Message']))
    if not res['Value']:
      # The file was already present at the destination SE
      log.debug("%s already present at %s." % (lfn, destSE))
      successful[lfn] = {'replicate': 0, 'register': 0}
      resDict = {'Successful': successful, 'Failed': failed}
      return S_OK(resDict)
    successful[lfn] = {'replicate': replicationTime}
    # Register the new replica at the (possibly alias-resolved) destination
    destPfn = res['Value']['DestPfn']
    destSE = res['Value']['DestSE']
    log.debug("Attempting to register %s at %s." % (destPfn, destSE))
    replicaTuple = (lfn, destPfn, destSE)
    startRegistration = time.time()
    res = self.registerReplica(replicaTuple, catalog=catalog)
    registrationTime = time.time() - startRegistration
    if not res['OK']:
      # Need to return to the client that the file was replicated but not
      # registered
      errStr = "Completely failed to register replica."
      log.debug(errStr, res['Message'])
      failed[lfn] = {'Registration': {
          'LFN': lfn, 'TargetSE': destSE, 'PFN': destPfn}}
    else:
      if lfn in res['Value']['Successful']:
        log.debug("Successfully registered replica.")
        successful[lfn]['register'] = registrationTime
      else:
        errStr = "Failed to register replica."
        log.debug(errStr, res['Value']['Failed'][lfn])
        failed[lfn] = {'Registration': {
            'LFN': lfn, 'TargetSE': destSE, 'PFN': destPfn}}
    return S_OK({'Successful': successful, 'Failed': failed})
def replicate(self, lfn, destSE, sourceSE='', destPath='', localCache=''):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
log = self.log.getSubLogger('replicate')
log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
if not res['OK']:
errStr = "Replication failed."
log.debug(errStr, "%s %s" % (lfn, destSE))
return res
if not res['Value']:
# The file was already present at the destination SE
log.debug("%s already present at %s." % (lfn, destSE))
return res
return S_OK(lfn)
def __getSERealName(self, storageName):
""" get the base name of an SE possibly defined as an alias"""
rootConfigPath = '/Resources/StorageElements'
configPath = '%s/%s' % (rootConfigPath, storageName)
res = gConfig.getOptions(configPath)
if not res['OK']:
errStr = "Failed to get storage options"
return S_ERROR(errStr)
if not res['Value']:
errStr = "Supplied storage doesn't exist."
return S_ERROR(errStr)
if 'Alias' in res['Value']:
configPath += '/Alias'
aliasName = gConfig.getValue(configPath)
result = self.__getSERealName(aliasName)
if not result['OK']:
return result
resolvedName = result['Value']
else:
resolvedName = storageName
return S_OK(resolvedName)
def __isSEInList(self, seName, seList):
""" Check whether an SE is in a list of SEs... All could be aliases """
seSet = set()
for se in seList:
res = self.__getSERealName(se)
if res['OK']:
seSet.add(res['Value'])
return self.__getSERealName(seName).get('Value') in seSet
  def __replicate(self, lfn, destSEName, sourceSEName='', destPath='', localCache=''):
    """ Replicate a LFN to a destination SE.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' if cannot do third party transfer, we do get and put through this local directory

        :return: S_OK({'DestSE': ..., 'DestPfn': ...}) on success,
                 S_OK() (no value) when the replica is already registered at
                 the destination, S_ERROR otherwise
    """
    log = self.log.getSubLogger('__replicate', True)
    ###########################################################
    # Check that we have write permissions to this directory.
    res = self.__hasAccess('addReplica', lfn)
    if not res['OK']:
      return res
    if lfn not in res['Value']['Successful']:
      errStr = "__replicate: Write access not permitted for this credential."
      log.debug(errStr, lfn)
      return S_ERROR(errStr)
    # Check that the destination storage element is sane and resolve its name
    log.debug("Verifying destination StorageElement validity (%s)." %
              (destSEName))
    destStorageElement = StorageElement(destSEName, vo=self.voName)
    res = destStorageElement.isValid()
    if not res['OK']:
      errStr = "The storage element is not currently valid."
      log.debug(errStr, "%s %s" % (destSEName, res['Message']))
      return S_ERROR("%s %s" % (errStr, res['Message']))
    # Get the real name of the SE
    destSEName = destStorageElement.storageElementName()
    ###########################################################
    # Check whether the destination storage element is banned
    log.verbose(
        "Determining whether %s ( destination ) is Write-banned." % destSEName)
    if not destStorageElement.status()['Write']:
      infoStr = "Supplied destination Storage Element is not currently allowed for Write."
      log.debug(infoStr, destSEName)
      return S_ERROR(infoStr)
    # Get the LFN replicas from the file catalog
    log.debug("Attempting to obtain replicas for %s." % (lfn))
    res = returnSingleResult(self.getReplicas(lfn, getUrl=False))
    if not res['OK']:
      errStr = "Failed to get replicas for LFN."
      log.debug(errStr, "%s %s" % (lfn, res['Message']))
      return S_ERROR("%s %s" % (errStr, res['Message']))
    log.debug("Successfully obtained replicas for LFN.")
    lfnReplicas = res['Value']
    ###########################################################
    # If the file catalog size is zero fail the transfer
    log.debug("Attempting to obtain size for %s." % lfn)
    res = returnSingleResult(self.fileCatalog.getFileSize(lfn))
    if not res['OK']:
      errStr = "Failed to get size for LFN."
      log.debug(errStr, "%s %s" % (lfn, res['Message']))
      return S_ERROR("%s %s" % (errStr, res['Message']))
    catalogSize = res['Value']
    if catalogSize == 0:
      errStr = "Registered file size is 0."
      log.debug(errStr, lfn)
      return S_ERROR(errStr)
    log.debug("File size determined to be %s." % catalogSize)
    ###########################################################
    # If the LFN already exists at the destination we have nothing to do
    if self.__isSEInList(destSEName, lfnReplicas):
      log.debug("__replicate: LFN is already registered at %s." % destSEName)
      return S_OK()
    ###########################################################
    # If the source is specified, check that it is in the replicas
    if sourceSEName:
      log.debug("Determining whether source Storage Element specified is sane.")
      if sourceSEName not in lfnReplicas:
        errStr = "LFN does not exist at supplied source SE."
        log.error(errStr, "%s %s" % (lfn, sourceSEName))
        return S_ERROR(errStr)
    # If sourceSE is specified, then we consider this one only, otherwise
    # we consider them all
    possibleSourceSEs = [sourceSEName] if sourceSEName else lfnReplicas
    # We sort the possibileSourceSEs with the SEs that are on the same site than the destination first
    # reverse = True because True > False
    possibleSourceSEs = sorted(possibleSourceSEs,
                               key=lambda x: self.dmsHelper.isSameSiteSE(
                                   x, destSEName).get('Value', False),
                               reverse=True)
    # In case we manage to find SEs that would work as a source, but we can't negotiate a protocol
    # we will do a get and put using one of this sane SE
    possibleIntermediateSEs = []
    # Take into account the destination path
    if destPath:
      destPath = '%s/%s' % (destPath, os.path.basename(lfn))
    else:
      destPath = lfn
    for candidateSEName in possibleSourceSEs:
      log.debug("Consider %s as a source" % candidateSEName)
      # Check that the candidate is active
      if not self.__checkSEStatus(candidateSEName, status='Read'):
        log.debug("%s is currently not allowed as a source." % candidateSEName)
        continue
      else:
        log.debug("%s is available for use." % candidateSEName)
      candidateSE = StorageElement(candidateSEName, vo=self.voName)
      # Check that the SE is valid
      res = candidateSE.isValid()
      if not res['OK']:
        log.debug("The storage element is not currently valid.",
                  "%s %s" % (candidateSEName, res['Message']))
        continue
      else:
        log.debug("The storage is currently valid", candidateSEName)
      # Check that the file size corresponds to the one in the FC
      res = returnSingleResult(candidateSE.getFileSize(lfn))
      if not res['OK']:
        log.debug("could not get fileSize on %s" %
                  candidateSEName, res['Message'])
        continue
      seFileSize = res['Value']
      if seFileSize != catalogSize:
        log.debug("Catalog size and physical file size mismatch.",
                  "%s %s" % (catalogSize, seFileSize))
        continue
      else:
        log.debug("Catalog size and physical size match")
      res = destStorageElement.negociateProtocolWithOtherSE(
          candidateSE, protocols=self.thirdPartyProtocols)
      if not res['OK']:
        log.debug("Error negotiating replication protocol", res['Message'])
        continue
      replicationProtocols = res['Value']
      if not replicationProtocols:
        # Keep the SE as a fallback for the get/put path below
        possibleIntermediateSEs.append(candidateSE)
        log.debug("No protocol suitable for replication found")
        continue
      log.debug('Found common protocols', replicationProtocols)
      # THIS WOULD NOT WORK IF PROTO == file !!
      # Why did I write that comment ?!
      # We try the protocols one by one
      # That obviously assumes that there is an overlap and not only
      # a compatibility between the output protocols of the source
      # and the input protocols of the destination.
      # But that is the only way to make sure we are not replicating
      # over ourselves.
      for compatibleProtocol in replicationProtocols:
        # Compare the urls to make sure we are not overwriting
        res = returnSingleResult(candidateSE.getURL(
            lfn, protocol=compatibleProtocol))
        if not res['OK']:
          log.debug("Cannot get sourceURL", res['Message'])
          continue
        sourceURL = res['Value']
        destURL = ''
        res = returnSingleResult(destStorageElement.getURL(
            destPath, protocol=compatibleProtocol))
        if not res['OK']:
          # for some protocols, in particular srm
          # you might get an error because the file does not exist
          # which is exactly what we want
          # in that case, we just keep going with the comparison
          # since destURL will be an empty string
          if not DErrno.cmpError(res, errno.ENOENT):
            log.debug("Cannot get destURL", res['Message'])
            continue
          else:
            log.debug("File does not exist: Expected error for TargetSE !!")
        else:
          destURL = res['Value']
        if sourceURL == destURL:
          log.debug("Same source and destination, give up")
          continue
        # Attempt the transfer
        res = returnSingleResult(destStorageElement.replicateFile({destPath: sourceURL},
                                                                  sourceSize=catalogSize,
                                                                  inputProtocol=compatibleProtocol))
        if not res['OK']:
          log.debug("Replication failed", "%s from %s to %s." %
                    (lfn, candidateSEName, destSEName))
          continue
        log.debug("Replication successful.", res['Value'])
        res = returnSingleResult(destStorageElement.getURL(
            destPath, protocol=self.registrationProtocol))
        if not res['OK']:
          log.debug('Error getting the registration URL', res['Message'])
          # it's maybe pointless to try the other candidateSEs...
          continue
        registrationURL = res['Value']
        return S_OK({'DestSE': destSEName, 'DestPfn': registrationURL})
    # If we are here, that means that we could not make a third party transfer.
    # Check if we have some sane SEs from which we could do a get/put
    localDir = os.path.realpath(localCache if localCache else '.')
    localFile = os.path.join(localDir, os.path.basename(lfn))
    log.debug("Will try intermediate transfer from %s sources" %
              len(possibleIntermediateSEs))
    for candidateSE in possibleIntermediateSEs:
      res = returnSingleResult(candidateSE.getFile(lfn, localPath=localDir))
      if not res['OK']:
        log.debug('Error getting the file from %s' %
                  candidateSE.name, res['Message'])
        continue
      res = returnSingleResult(
          destStorageElement.putFile({destPath: localFile}))
      # Remove the local file whatever happened
      try:
        os.remove(localFile)
      except OSError as e:
        log.error('Error removing local file', '%s %s' % (localFile, e))
      if not res['OK']:
        log.debug('Error putting file coming from %s' %
                  candidateSE.name, res['Message'])
        # if the put is the problem, it's maybe pointless to try the other
        # candidateSEs...
        continue
      # get URL with default protocol to return it
      res = returnSingleResult(destStorageElement.getURL(
          destPath, protocol=self.registrationProtocol))
      if not res['OK']:
        log.debug('Error getting the registration URL', res['Message'])
        # it's maybe pointless to try the other candidateSEs...
        continue
      registrationURL = res['Value']
      return S_OK({'DestSE': destSEName, 'DestPfn': registrationURL})
    # If here, we are really doomed
    errStr = "Failed to replicate with all sources."
    log.debug(errStr, lfn)
    return S_ERROR(errStr)
###################################################################
#
# These are the file catalog write methods
#
def registerFile(self, fileTuple, catalog=''):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
log = self.log.getSubLogger('registerFile')
if isinstance(fileTuple, (list, set)):
fileTuples = fileTuple
elif isinstance(fileTuple, tuple):
fileTuples = [fileTuple]
for fileTuple in fileTuples:
if not isinstance(fileTuple, tuple):
errStr = "Supplied file info must be tuple or list of tuples."
log.debug(errStr)
return S_ERROR(errStr)
if not fileTuples:
return S_OK({'Successful': [], 'Failed': {}})
log.debug("Attempting to register %s files." % len(fileTuples))
res = self.__registerFile(fileTuples, catalog)
if not res['OK']:
errStr = "Completely failed to register files."
log.debug(errStr, res['Message'])
return res
return res
  def __registerFile(self, fileTuples, catalog):
    """ register file to catalog

        :param list fileTuples: list of (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum)
        :param str catalog: catalog name; when empty, this DataManager's default FileCatalog is used

        :return: result of FileCatalog.addFile (S_OK with Successful/Failed dict, or S_ERROR)
    """
    # Build the registration dict expected by FileCatalog.addFile
    fileDict = {}
    for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
      fileDict[lfn] = {'PFN': physicalFile,
                       'Size': fileSize,
                       'SE': storageElementName,
                       'GUID': fileGuid,
                       'Checksum': checksum}
    if catalog:
      # A specific catalog was requested: instantiate it for this VO
      fileCatalog = FileCatalog(catalog, vo=self.voName)
      if not fileCatalog.isOK():
        return S_ERROR("Can't get FileCatalog %s" % catalog)
    else:
      # Fall back on the DataManager's default catalog
      fileCatalog = self.fileCatalog
    res = fileCatalog.addFile(fileDict)
    if not res['OK']:
      errStr = "Completely failed to register files."
      self.log.getSubLogger('__registerFile').debug(errStr, res['Message'])
    return res
def registerReplica(self, replicaTuple, catalog=''):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
log = self.log.getSubLogger('registerReplica')
if isinstance(replicaTuple, (list, set)):
replicaTuples = replicaTuple
elif isinstance(replicaTuple, tuple):
replicaTuples = [replicaTuple]
for replicaTuple in replicaTuples:
if not isinstance(replicaTuple, tuple):
errStr = "Supplied file info must be tuple or list of tuples."
log.debug(errStr)
return S_ERROR(errStr)
if not replicaTuples:
return S_OK({'Successful': [], 'Failed': {}})
log.debug("Attempting to register %s replicas." % len(replicaTuples))
res = self.__registerReplica(replicaTuples, catalog)
if not res['OK']:
errStr = "Completely failed to register replicas."
log.debug(errStr, res['Message'])
return res
return res
  def __registerReplica(self, replicaTuples, catalog):
    """ register replica to catalogue

        :param list replicaTuples: list of (lfn, url, storageElementName)
        :param str catalog: catalog name; when empty, the default FileCatalog is used

        :return: S_OK with Successful/Failed dict, or S_ERROR on complete failure
    """
    log = self.log.getSubLogger('__registerReplica')
    # Group the replicas per storage element so URLs are resolved SE by SE
    seDict = {}
    for lfn, url, storageElementName in replicaTuples:
      seDict.setdefault(storageElementName, []).append((lfn, url))
    failed = {}
    replicaTuples = []
    for storageElementName, replicaTuple in seDict.iteritems():
      destStorageElement = StorageElement(storageElementName, vo=self.voName)
      res = destStorageElement.isValid()
      if not res['OK']:
        # Invalid SE: every replica destined for it fails
        errStr = "The storage element is not currently valid."
        log.debug(errStr, "%s %s" % (storageElementName, res['Message']))
        for lfn, url in replicaTuple:
          failed[lfn] = errStr
      else:
        # Use the canonical SE name as resolved by the StorageElement object
        storageElementName = destStorageElement.storageElementName()
        for lfn, url in replicaTuple:
          # Resolve the URL to register using the configured registration protocol
          res = returnSingleResult(destStorageElement.getURL(
              lfn, protocol=self.registrationProtocol))
          if not res['OK']:
            failed[lfn] = res['Message']
          else:
            # The trailing False marks the replica as non-master
            replicaTuple = (lfn, res['Value'], storageElementName, False)
            replicaTuples.append(replicaTuple)
    log.debug("Successfully resolved %s replicas for registration." %
              len(replicaTuples))
    # HACK!
    # Re-pack the tuples into the dict structure expected by addReplica
    replicaDict = {}
    for lfn, url, se, _master in replicaTuples:
      replicaDict[lfn] = {'SE': se, 'PFN': url}
    if catalog:
      fileCatalog = FileCatalog(catalog, vo=self.voName)
      res = fileCatalog.addReplica(replicaDict)
    else:
      res = self.fileCatalog.addReplica(replicaDict)
    if not res['OK']:
      errStr = "Completely failed to register replicas."
      log.debug(errStr, res['Message'])
      return S_ERROR("%s %s" % (errStr, res['Message']))
    failed.update(res['Value']['Failed'])
    successful = res['Value']['Successful']
    resDict = {'Successful': successful, 'Failed': failed}
    return S_OK(resDict)
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
  def removeFile(self, lfn, force=None):
    """ Remove the file (all replicas) from Storage Elements and file catalogue

        'lfn' is the file to be removed

        :param mixed lfn: single LFN string, or list/dict/set/tuple of LFNs
        :param bool force: if True (or if self.ignoreMissingInFC when None),
            files missing from the catalog are counted as successfully removed

        :return: S_OK with {'Successful': {lfn: True}, 'Failed': {lfn: reason}}
    """
    log = self.log.getSubLogger('removeFile')
    if not lfn:
      return S_OK({'Successful': {}, 'Failed': {}})
    if force is None:
      force = self.ignoreMissingInFC
    # Normalize to a list of LFN strings
    if isinstance(lfn, (list, dict, set, tuple)):
      lfns = list(lfn)
    else:
      lfns = [lfn]
    for lfn in lfns:
      if not isinstance(lfn, basestring):
        errStr = "Supplied lfns must be string or list of strings."
        log.debug(errStr)
        return S_ERROR(errStr)
    successful = {}
    failed = {}
    if not lfns:
      return S_OK({'Successful': successful, 'Failed': failed})
    # First check if the file exists in the FC
    res = self.fileCatalog.exists(lfns)
    if not res['OK']:
      return res
    success = res['Value']['Successful']
    # Keep only the LFNs that actually exist in the catalog
    lfns = [lfn for lfn in success if success[lfn]]
    if force:
      # Files that don't exist are removed successfully
      successful = dict.fromkeys(
          (lfn for lfn in success if not success[lfn]), True)
    else:
      failed = dict.fromkeys(
          (lfn for lfn in success if not success[lfn]), 'No such file or directory')
    # Check that we have write permissions to this directory and to the file.
    if lfns:
      res = self.__hasAccess('removeFile', lfns)
      if not res['OK']:
        return res
      if res['Value']['Failed']:
        errStr = "Write access not permitted for this credential."
        log.debug(errStr, 'for %d files' % len(res['Value']['Failed']))
        failed.update(dict.fromkeys(res['Value']['Failed'], errStr))
        lfns = res['Value']['Successful']
      if lfns:
        log.debug(
            "Attempting to remove %d files from Storage and Catalogue. Get replicas first" % len(lfns))
        res = self.fileCatalog.getReplicas(lfns, allStatus=True)
        if not res['OK']:
          errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
          log.debug(errStr, res['Message'])
          return res
        lfnDict = res['Value']['Successful']
        for lfn, reason in res['Value']['Failed'].iteritems():
          # Ignore files missing in FC if force is set
          if reason == 'No such file or directory' and force:
            successful[lfn] = True
          elif reason == 'File has zero replicas':
            # Still remove the catalog entry: pass an empty replica map
            lfnDict[lfn] = {}
          else:
            failed[lfn] = reason
        res = self.__removeFile(lfnDict)
        if not res['OK']:
          # This can never happen
          return res
        failed.update(res['Value']['Failed'])
        successful.update(res['Value']['Successful'])
    # Flush the accounting records produced by the removals
    gDataStoreClient.commit()
    return S_OK({'Successful': successful, 'Failed': failed})
  def __removeFile(self, lfnDict):
    """ remove file

        :param dict lfnDict: { lfn : { se : catalog url } } replica cache

        :return: S_OK with {'Successful': ..., 'Failed': ...}
    """
    # Invert the mapping to { se : [lfn, ...] } so replicas are removed SE by SE
    storageElementDict = {}
    # # sorted and reversed
    for lfn, repDict in sorted(lfnDict.items(), reverse=True):
      for se in repDict:
        storageElementDict.setdefault(se, []).append(lfn)
    failed = {}
    successful = {}
    for storageElementName in sorted(storageElementDict):
      lfns = storageElementDict[storageElementName]
      res = self.__removeReplica(storageElementName, lfns, replicaDict=lfnDict)
      if not res['OK']:
        # Whole-SE failure: blame every LFN hosted there (errors accumulate per lfn)
        errStr = res['Message']
        for lfn in lfns:
          failed[lfn] = failed.setdefault(lfn, '') + " %s" % errStr
      else:
        for lfn, errStr in res['Value']['Failed'].iteritems():
          failed[lfn] = failed.setdefault(lfn, '') + " %s" % errStr
    # Only files whose every replica was removed are dropped from the catalog
    completelyRemovedFiles = set(lfnDict) - set(failed)
    if completelyRemovedFiles:
      res = self.fileCatalog.removeFile(list(completelyRemovedFiles))
      if not res['OK']:
        failed.update(dict.fromkeys(completelyRemovedFiles,
                                    "Failed to remove file from the catalog: %s" % res['Message']))
      else:
        failed.update(res['Value']['Failed'])
        successful = res['Value']['Successful']
    return S_OK({'Successful': successful, 'Failed': failed})
  def removeReplica(self, storageElementName, lfn):
    """ Remove replica at the supplied Storage Element from Storage Element then file catalogue

       'storageElementName' is the storage where the file is to be removed
       'lfn' is the file to be removed

        :return: S_OK with {'Successful': {lfn: True}, 'Failed': {lfn: reason}}
    """
    log = self.log.getSubLogger('removeReplica')
    # Normalize to a set of LFN strings
    if isinstance(lfn, (list, dict, set, tuple)):
      lfns = set(lfn)
    else:
      lfns = set([lfn])
    for lfn in lfns:
      if not isinstance(lfn, basestring):
        errStr = "Supplied lfns must be string or list of strings."
        log.debug(errStr)
        return S_ERROR(errStr)
    successful = {}
    failed = {}
    if not lfns:
      return S_OK({'Successful': successful, 'Failed': failed})
    # Check that we have write permissions to this file.
    res = self.__hasAccess('removeReplica', lfns)
    if not res['OK']:
      log.debug('Error in __verifyWritePermisison', res['Message'])
      return res
    if res['Value']['Failed']:
      errStr = "Write access not permitted for this credential."
      log.debug(errStr, 'for %d files' % len(res['Value']['Failed']))
      failed.update(dict.fromkeys(res['Value']['Failed'], errStr))
      lfns -= set(res['Value']['Failed'])
    if not lfns:
      log.debug('Permission denied for all files')
    else:
      log.debug("Will remove %s lfns at %s." % (len(lfns), storageElementName))
      res = self.fileCatalog.getReplicas(list(lfns), allStatus=True)
      if not res['OK']:
        errStr = "Completely failed to get replicas for lfns."
        log.debug(errStr, res['Message'])
        return res
      failed.update(res['Value']['Failed'])
      replicaDict = res['Value']['Successful']
      lfnsToRemove = set()
      for lfn, repDict in replicaDict.iteritems():
        if storageElementName not in repDict:
          # The file doesn't exist at the storage element so don't have to
          # remove it
          successful[lfn] = True
        elif len(repDict) == 1:
          # The file has only a single replica so don't remove
          log.debug("The replica you are trying to remove is the only one.",
                    "%s @ %s" % (lfn, storageElementName))
          failed[lfn] = "Failed to remove sole replica"
        else:
          lfnsToRemove.add(lfn)
      if lfnsToRemove:
        res = self.__removeReplica(
            storageElementName, lfnsToRemove, replicaDict=replicaDict)
        if not res['OK']:
          log.debug("Failed in __removeReplica", res['Message'])
          return res
        failed.update(res['Value']['Failed'])
        successful.update(res['Value']['Successful'])
    # Flush the accounting records produced by the removals
    gDataStoreClient.commit()
    return S_OK({'Successful': successful, 'Failed': failed})
  def __removeReplica(self, storageElementName, lfns, replicaDict=None):
    """ remove replica
        Remove the replica from the storageElement, and then from the catalog

        :param storageElementName : The name of the storage Element
        :param lfns : list of lfn we want to remove
        :param replicaDict : cache of fc.getReplicas(lfns) : { lfn { se : catalog url } }

        :return: S_OK with {'Successful': ..., 'Failed': ...}
    """
    log = self.log.getSubLogger('__removeReplica')
    failed = {}
    successful = {}
    replicaDict = replicaDict if replicaDict else {}
    # Re-check write permission per LFN before deleting anything
    lfnsToRemove = set()
    for lfn in lfns:
      res = self.__hasAccess('removeReplica', lfn)
      if not res['OK']:
        log.debug('Error in __verifyWritePermission', res['Message'])
        return res
      if lfn not in res['Value']['Successful']:
        errStr = "Write access not permitted for this credential."
        log.debug(errStr, lfn)
        failed[lfn] = errStr
      else:
        lfnsToRemove.add(lfn)
    # Remove physical replicas first
    res = self.__removePhysicalReplica(
        storageElementName, lfnsToRemove, replicaDict=replicaDict)
    if not res['OK']:
      errStr = "Failed to remove physical replicas."
      log.debug(errStr, res['Message'])
      return res
    failed.update(res['Value']['Failed'])
    # Here we use the FC PFN...
    # Only replicas physically removed are unregistered from the catalog
    replicaTuples = [(lfn, replicaDict[lfn][storageElementName], storageElementName)
                     for lfn in res['Value']['Successful']]
    if replicaTuples:
      res = self.__removeCatalogReplica(replicaTuples)
      if not res['OK']:
        errStr = "Completely failed to remove physical files."
        log.debug(errStr, res['Message'])
        # Catalog removal failed wholesale: nothing can be declared successful
        failed.update(dict.fromkeys(
            (lfn for lfn, _pfn, _se in replicaTuples), res['Message']))
        successful = {}
      else:
        failed.update(res['Value']['Failed'])
        successful = res['Value']['Successful']
    return S_OK({'Successful': successful, 'Failed': failed})
def removeReplicaFromCatalog(self, storageElementName, lfn):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# FIXME: this method is dangerous ans should eventually be removed as well
# as the script dirac-dms-remove-catalog-replicas
log = self.log.getSubLogger('removeReplicaFromCatalog')
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to
# be removed
if isinstance(lfn, (list, dict, set, tuple)):
lfns = list(lfn)
else:
lfns = [lfn]
for lfn in lfns:
if not isinstance(lfn, basestring):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
log.debug("Will remove catalogue entry for %s lfns at %s." %
(len(lfns), storageElementName))
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res['OK']:
errStr = "Completely failed to get replicas for lfns."
log.debug(errStr, res['Message'])
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].iteritems():
if reason in ('No such file or directory', 'File has zero replicas'):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].iteritems():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove
# it
successful[lfn] = True
else:
replicaTuples.append(
(lfn, repDict[storageElementName], storageElementName))
log.debug("Resolved %s pfns for catalog removal at %s." % (len(replicaTuples),
storageElementName))
res = self.__removeCatalogReplica(replicaTuples)
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
  def __removeCatalogReplica(self, replicaTuples):
    """ remove replica form catalogue

        :param replicaTuples : list of (lfn, catalogPFN, se)

        :return: result of FileCatalog.removeReplica with 'no such file' errors
                 reclassified as successes, or S_ERROR on complete failure
    """
    log = self.log.getSubLogger('__removeCatalogReplica')
    # Accounting record for this bulk catalog removal
    oDataOperation = _initialiseAccountingObject(
        'removeCatalogReplica', '', len(replicaTuples))
    oDataOperation.setStartTime()
    start = time.time()
    # HACK!
    # Re-pack the tuples into the dict structure expected by removeReplica
    replicaDict = {}
    for lfn, pfn, se in replicaTuples:
      replicaDict[lfn] = {'SE': se, 'PFN': pfn}
    res = self.fileCatalog.removeReplica(replicaDict)
    oDataOperation.setEndTime()
    oDataOperation.setValueByKey('RegistrationTime', time.time() - start)
    if not res['OK']:
      oDataOperation.setValueByKey('RegistrationOK', 0)
      oDataOperation.setValueByKey('FinalStatus', 'Failed')
      gDataStoreClient.addRegister(oDataOperation)
      errStr = "Completely failed to remove replica: "
      log.debug(errStr, res['Message'])
      return S_ERROR("%s %s" % (errStr, res['Message']))
    success = res['Value']['Successful']
    failed = res['Value']['Failed']
    # NOTE: .items() snapshots (python 2 lists) make the pops below safe
    for lfn, error in failed.items():
      # Ignore error if file doesn't exist
      # This assumes all catalogs return an error as { catalog : error }
      for catalog, err in error.items():
        if 'no such file' in err.lower():
          success.setdefault(lfn, {}).update({catalog: True})
          error.pop(catalog)
      if not failed[lfn]:
        failed.pop(lfn)
      else:
        log.error("Failed to remove replica.", "%s %s" % (lfn, error))
    # Only for logging information
    if success:
      log.debug("Removed %d replicas" % len(success))
      for lfn in success:
        log.debug("Successfully removed replica.", lfn)
    oDataOperation.setValueByKey('RegistrationOK', len(success))
    gDataStoreClient.addRegister(oDataOperation)
    return res
  def __removePhysicalReplica(self, storageElementName, lfnsToRemove, replicaDict=None):
    """ remove replica from storage element

        :param storageElementName : name of the storage Element
        :param lfnsToRemove : set of lfn to removes
        :param replicaDict : cache of fc.getReplicas, to be passed to the SE

        :return: result of StorageElement.removeFile, with 'No such file'
                 failures reclassified as successes
    """
    log = self.log.getSubLogger('__removePhysicalReplica')
    log.debug("Attempting to remove %s pfns at %s." %
              (len(lfnsToRemove), storageElementName))
    storageElement = StorageElement(storageElementName, vo=self.voName)
    res = storageElement.isValid()
    if not res['OK']:
      errStr = "The storage element is not currently valid."
      log.debug(errStr, "%s %s" % (storageElementName, res['Message']))
      return S_ERROR("%s %s" % (errStr, res['Message']))
    # Accounting record for this bulk physical removal
    oDataOperation = _initialiseAccountingObject('removePhysicalReplica',
                                                 storageElementName,
                                                 len(lfnsToRemove))
    oDataOperation.setStartTime()
    start = time.time()
    lfnsToRemove = list(lfnsToRemove)
    # Query sizes first so the accounting can report the deleted volume
    ret = storageElement.getFileSize(lfnsToRemove, replicaDict=replicaDict)
    deletedSizes = ret.get('Value', {}).get('Successful', {})
    res = storageElement.removeFile(lfnsToRemove, replicaDict=replicaDict)
    oDataOperation.setEndTime()
    oDataOperation.setValueByKey('TransferTime', time.time() - start)
    if not res['OK']:
      oDataOperation.setValueByKey('TransferOK', 0)
      oDataOperation.setValueByKey('FinalStatus', 'Failed')
      gDataStoreClient.addRegister(oDataOperation)
      log.debug("Failed to remove replicas.", res['Message'])
    else:
      # A file already absent on the SE counts as successfully removed
      # (.items() snapshot makes the pop safe in python 2)
      for lfn, value in res['Value']['Failed'].items():
        if 'No such file or directory' in value:
          res['Value']['Successful'][lfn] = lfn
          res['Value']['Failed'].pop(lfn)
      for lfn in res['Value']['Successful']:
        res['Value']['Successful'][lfn] = True
      deletedSize = sum(deletedSizes.get(lfn, 0)
                        for lfn in res['Value']['Successful'])
      oDataOperation.setValueByKey('TransferSize', deletedSize)
      oDataOperation.setValueByKey(
          'TransferOK', len(res['Value']['Successful']))
      gDataStoreClient.addRegister(oDataOperation)
      infoStr = "Successfully issued accounting removal request."
      log.debug(infoStr)
    return res
#########################################################################
#
# File transfer methods
#
def put(self, lfn, fileName, diracSE, path=None):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
log = self.log.getSubLogger('put')
# Check that the local file exists
if not os.path.exists(fileName):
errStr = "Supplied file does not exist."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname(lfn)
# Obtain the size of the local file
size = getSize(fileName)
if size == 0:
errStr = "Supplied file is zero size."
log.debug(errStr, fileName)
return S_ERROR(errStr)
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement(diracSE, vo=self.voName)
res = storageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.debug(errStr, "%s %s" % (diracSE, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
fileDict = {lfn: fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = returnSingleResult(storageElement.putFile(fileDict))
putTime = time.time() - startTime
if not res['OK']:
errStr = "Failed to put file to Storage Element."
failed[lfn] = res['Message']
log.debug(errStr, "%s: %s" % (fileName, res['Message']))
else:
log.debug("Put file to storage in %s seconds." % putTime)
successful[lfn] = res['Value']
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
#########################################################################
#
# File catalog methods
#
def getActiveReplicas(self, lfns, getUrl=True, diskOnly=False, preferDisk=False):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
return self.getReplicas(lfns, allStatus=False, getUrl=getUrl, diskOnly=diskOnly,
preferDisk=preferDisk, active=True)
  def __filterTapeReplicas(self, replicaDict, diskOnly=False):
    """
    Check a replica dictionary for disk replicas:
    If there is a disk replica, remove tape replicas, else keep all
    The input argument is modified

    :param dict replicaDict: {'Successful': {lfn: {se: url}}, 'Failed': {lfn: reason}}
    :param bool diskOnly: if True, files left without any replica are moved to 'Failed'
    """
    seList = set(
        se for ses in replicaDict['Successful'].itervalues() for se in ses)
    # Get a cache of SE statuses for long list of replicas
    seStatus = dict((se,
                     (self.__checkSEStatus(se, status='DiskSE'),
                      self.__checkSEStatus(se, status='TapeSE'))) for se in seList)
    # Beware, there is a del below
    for lfn, replicas in replicaDict['Successful'].items():
      self.__filterTapeSEs(replicas, diskOnly=diskOnly, seStatus=seStatus)
      # If diskOnly, one may not have any replica in the end, set Failed
      if diskOnly and not replicas:
        del replicaDict['Successful'][lfn]
        replicaDict['Failed'][lfn] = 'No disk replicas'
    return
  def __filterReplicasForJobs(self, replicaDict):
    """ Remove the SEs that are not to be used for jobs, and archive SEs if there are others
        The input argument is modified

        :param dict replicaDict: {'Successful': {lfn: {se: url}}, 'Failed': {lfn: reason}}
    """
    seList = set(
        se for ses in replicaDict['Successful'].itervalues() for se in ses)
    # Get a cache of SE statuses for long list of replicas
    # seStatus[se] == (usable for jobs, is an archive SE)
    seStatus = dict((se, (self.dmsHelper.isSEForJobs(
        se), self.dmsHelper.isSEArchive(se))) for se in seList)
    # Beware, there is a del below
    for lfn, replicas in replicaDict['Successful'].items():
      otherThanArchive = set(se for se in replicas if not seStatus[se][1])
      # .keys() snapshot (python 2 list) makes the pop below safe
      for se in replicas.keys():
        # Remove the SE if it should not be used for jobs or if it is an
        # archive and there are other SEs
        if not seStatus[se][0] or (otherThanArchive and seStatus[se][1]):
          replicas.pop(se)
      # If in the end there is no replica, set Failed
      if not replicas:
        del replicaDict['Successful'][lfn]
        replicaDict['Failed'][lfn] = 'No replicas for jobs'
    return
  def __filterTapeSEs(self, replicas, diskOnly=False, seStatus=None):
    """ Remove the tape SEs as soon as there is one disk SE or diskOnly is requested
        The input argument is modified

        :param dict replicas: { se : url } for a single LFN
        :param bool diskOnly: remove tape replicas unconditionally
        :param dict seStatus: optional cache { se : (isDiskSE, isTapeSE) }
    """
    # Build the SE status cache if not existing
    if seStatus is None:
      seStatus = dict((se,
                       (self.__checkSEStatus(se, status='DiskSE'),
                        self.__checkSEStatus(se, status='TapeSE'))) for se in replicas)
    for se in replicas:  # There is a del below but we then return!
      # First find a disk replica, otherwise do nothing unless diskOnly is set
      if diskOnly or seStatus[se][0]:
        # There is one disk replica, remove tape replicas and exit loop
        # (inner loop shadows 'se' deliberately; .keys() snapshot keeps pop safe)
        for se in replicas.keys():  # Beware: there is a pop below
          if seStatus[se][1]:
            replicas.pop(se)
        return
    return
def checkActiveReplicas(self, replicaDict):
"""
Check a replica dictionary for active replicas, and verify input structure first
"""
if not isinstance(replicaDict, dict):
return S_ERROR('Wrong argument type %s, expected a dictionary' % type(replicaDict))
for key in ['Successful', 'Failed']:
if key not in replicaDict:
return S_ERROR('Missing key "%s" in replica dictionary' % key)
if not isinstance(replicaDict[key], dict):
return S_ERROR('Wrong argument type %s, expected a dictionary' % type(replicaDict[key]))
activeDict = {'Successful': {}, 'Failed': replicaDict['Failed'].copy()}
for lfn, replicas in replicaDict['Successful'].iteritems():
if not isinstance(replicas, dict):
activeDict['Failed'][lfn] = 'Wrong replica info'
else:
activeDict['Successful'][lfn] = replicas.copy()
self.__filterActiveReplicas(activeDict)
return S_OK(activeDict)
  def __filterActiveReplicas(self, replicaDict):
    """
    Check a replica dictionary for active replicas
    The input dict is modified, no returned value

    :param dict replicaDict: {'Successful': {lfn: {se: url}}, 'Failed': {...}}
    """
    seList = set(
        se for ses in replicaDict['Successful'].itervalues() for se in ses)
    # Get a cache of SE statuses for long list of replicas
    seStatus = dict((se, self.__checkSEStatus(se, status='Read'))
                    for se in seList)
    # Drop every replica on an SE that is not readable
    for replicas in replicaDict['Successful'].itervalues():
      for se in replicas.keys():  # Beware: there is a pop below
        if not seStatus[se]:
          replicas.pop(se)
    return
def __checkSEStatus(self, se, status='Read'):
""" returns the value of a certain SE status flag (access or other) """
return StorageElement(se, vo=self.voName).status().get(status, False)
def getReplicas(self, lfns, allStatus=True, getUrl=True, diskOnly=False, preferDisk=False, active=False):
""" get replicas from catalogue and filter if requested
Warning: all filters are independent, hence active and preferDisk should be set if using forJobs
"""
catalogReplicas = {}
failed = {}
for lfnChunk in breakListIntoChunks(lfns, 1000):
res = self.fileCatalog.getReplicas(lfnChunk, allStatus=allStatus)
if res['OK']:
catalogReplicas.update(res['Value']['Successful'])
failed.update(res['Value']['Failed'])
else:
return res
if not getUrl:
for lfn in catalogReplicas:
catalogReplicas[lfn] = dict.fromkeys(catalogReplicas[lfn], True)
elif not self.useCatalogPFN:
if res['OK']:
se_lfn = {}
# We group the query to getURL by storage element to gain in speed
for lfn in catalogReplicas:
for se in catalogReplicas[lfn]:
se_lfn.setdefault(se, []).append(lfn)
for se in se_lfn:
seObj = StorageElement(se, vo=self.voName)
succPfn = seObj.getURL(se_lfn[se],
protocol=self.registrationProtocol).get('Value', {}).get('Successful', {})
for lfn in succPfn:
# catalogReplicas still points res["value"]["Successful"] so res
# will be updated
catalogReplicas[lfn][se] = succPfn[lfn]
result = {'Successful': catalogReplicas, 'Failed': failed}
if active:
self.__filterActiveReplicas(result)
if diskOnly or preferDisk:
self.__filterTapeReplicas(result, diskOnly=diskOnly)
return S_OK(result)
def getReplicasForJobs(self, lfns, allStatus=False, getUrl=True, diskOnly=False):
""" get replicas useful for jobs
"""
# Call getReplicas with no filter and enforce filters in this method
result = self.getReplicas(lfns, allStatus=allStatus, getUrl=getUrl)
if not result['OK']:
return result
replicaDict = result['Value']
# For jobs replicas must be active
self.__filterActiveReplicas(replicaDict)
# For jobs, give preference to disk replicas but not only
self.__filterTapeReplicas(replicaDict, diskOnly=diskOnly)
# don't use SEs excluded for jobs (e.g. Failover)
self.__filterReplicasForJobs(replicaDict)
return S_OK(replicaDict)
  ###################################################################
  #
  # Methods from the catalogToStorage. These would all work with a direct call
  # to the SE, but they first check that the replica is known to the catalog.
  #
  def __executeIfReplicaExists(self, storageElementName, lfn, method, **kwargs):
    """ a simple wrapper that allows replica querying then perform the StorageElement operation

    :param self: self reference
    :param str storageElementName: DIRAC SE name
    :param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
    :param str method: name of the StorageElement method to call on the LFNs
    :param dict kwargs: extra keyword arguments forwarded to the SE method

    :return: S_OK with {'Successful': ..., 'Failed': ...}, or S_ERROR
    """
    log = self.log.getSubLogger('__executeIfReplicaExists')
    # # default value
    kwargs = kwargs if kwargs else {}
    # # get replicas for lfn
    res = FileCatalog(vo=self.voName).getReplicas(lfn)
    if not res["OK"]:
      errStr = "Completely failed to get replicas for LFNs."
      log.debug(errStr, res["Message"])
      return res
    # # returned dict, get failed replicase
    retDict = {"Failed": res["Value"]["Failed"],
               "Successful": {}}
    # # print errors
    for lfn, reason in retDict["Failed"].iteritems():
      log.error("_callReplicaSEFcn: Failed to get replicas for file.",
                "%s %s" % (lfn, reason))
    # # good replicas
    lfnReplicas = res["Value"]["Successful"]
    # # store PFN to LFN mapping
    # Only LFNs that actually have a replica at the requested SE are processed
    lfnList = []
    for lfn, replicas in lfnReplicas.iteritems():
      if storageElementName in replicas:
        lfnList.append(lfn)
      else:
        errStr = "File hasn't got replica at supplied Storage Element."
        log.error(errStr, "%s %s" % (lfn, storageElementName))
        retDict["Failed"][lfn] = errStr
    # Pass the replica cache down to the SE to avoid a second catalog query
    if 'replicaDict' not in kwargs:
      kwargs['replicaDict'] = lfnReplicas
    # # call StorageElement function at least
    se = StorageElement(storageElementName, vo=self.voName)
    fcn = getattr(se, method)
    res = fcn(lfnList, **kwargs)
    # # check result
    if not res["OK"]:
      errStr = "Failed to execute %s StorageElement method." % method
      log.error(errStr, res["Message"])
      return res
    # # filter out failed and successful
    retDict["Successful"].update(res["Value"]["Successful"])
    retDict["Failed"].update(res["Value"]["Failed"])
    return S_OK(retDict)
def getReplicaIsFile(self, lfn, storageElementName):
""" determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "isFile")
def getReplicaSize(self, lfn, storageElementName):
""" get the size of files for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFileSize")
def getReplicaAccessUrl(self, lfn, storageElementName, protocol=False):
""" get the access url for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getURL", protocol=protocol)
def getReplicaMetadata(self, lfn, storageElementName):
""" get the file metadata for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFileMetadata")
def prestageReplica(self, lfn, storageElementName, lifetime=86400):
""" issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn,
"prestageFile", lifetime=lifetime)
def pinReplica(self, lfn, storageElementName, lifetime=86400):
""" pin the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn,
"pinFile", lifetime=lifetime)
def releaseReplica(self, lfn, storageElementName):
""" release pins for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "releaseFile")
def getReplica(self, lfn, storageElementName, localPath=False):
""" copy replicas from DIRAC SE to local directory
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system, if False, os.getcwd() will be used
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn,
"getFile", localPath=localPath)
| Andrew-McNab-UK/DIRAC | DataManagementSystem/Client/DataManager.py | Python | gpl-3.0 | 74,437 | [
"DIRAC"
] | fa09e1725033e5de091292dce0dc455a1fa71641a9e77231419859f3f5c983f0 |
# Lint as: python2, python3
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
import six
from six.moves import range
# Some regular expressions we will need for parsing
# Matches a line whose first non-whitespace character opens a list literal.
FIND_OPEN = re.compile(r"^\s*(\[).*$")
# Matches either kind of string quote character.
FIND_STRING_CHARS = re.compile(r"['\"]")
# Severity labels used in the (severity, lineno, col, msg) log tuples.
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
# Rename rule for an import: the replacement module name, plus module-name
# prefixes that are exempt from the rename.
ImportRename = collections.namedtuple(
    "ImportRename", ["new_name", "excluded_prefixes"])
def full_name_node(name, ctx=ast.Load()):
  """Build the AST for a dotted name.

  Converts a dotted string such as "a.b.c" into the equivalent chain of
  Attribute nodes rooted at a Name node (the inverse of `_get_full_name`).

  Args:
    name: The name to translate to a node.
    ctx: What context this name is used in. Defaults to Load()

  Returns:
    A Name or Attribute node.
  """
  parts = six.ensure_str(name).split(".")
  node = ast.Name(id=parts[0], ctx=ast.Load())
  for part in parts[1:]:
    node = ast.Attribute(value=node, attr=part, ctx=ast.Load())
  # Only the outermost node carries the caller-supplied context; every inner
  # node is always a load.
  node.ctx = ctx
  return node
def get_arg_value(node, arg_name, arg_pos=None):
  """Look up an argument that was passed in an ast.Call node.

  Scans the call's keyword arguments for `arg_name` and, failing that, its
  positional arguments for position `arg_pos`. Starred (*args) entries cannot
  be introspected; on Python 3.5+ they are skipped when counting positions.

  Args:
    node: The ast.Call node to extract arg values from.
    arg_name: The name of the argument to extract.
    arg_pos: The position of the argument (in case it's passed as a positional
      argument).

  Returns:
    A tuple (arg_present, arg_value) containing a boolean indicating whether
    the argument is present, and its value in case it is.
  """
  # Keyword arguments take priority.
  if arg_name is not None:
    keyword_matches = [kw.value for kw in node.keywords if kw.arg == arg_name]
    if keyword_matches:
      return (True, keyword_matches[0])
  # Fall back to positional arguments, skipping *args entries.
  if arg_pos is not None:
    position = 0
    for candidate in node.args:
      if sys.version_info[:2] >= (3, 5) and isinstance(candidate, ast.Starred):
        continue  # Can't parse Starred
      if position == arg_pos:
        return (True, candidate)
      position += 1
  return (False, None)
def uses_star_args_in_call(node):
  """Check if an ast.Call node uses arbitrary-length positional *args.

  This function works with the AST call node format of Python3.5+
  as well as the different AST format of earlier versions of Python.

  Args:
    node: The ast.Call node to check arg values for.

  Returns:
    True if the node uses starred variadic positional args or keyword args.
    False if it does not.
  """
  if sys.version_info[:2] >= (3, 5):
    # Python 3.5+ represents *args as Starred entries inside node.args.
    return any(isinstance(arg, ast.Starred) for arg in node.args)
  # Earlier ASTs carry a dedicated starargs field instead.
  return bool(node.starargs)
def uses_star_kwargs_in_call(node):
  """Check if an ast.Call node uses arbitrary-length **kwargs.

  This function works with the AST call node format of Python3.5+
  as well as the different AST format of earlier versions of Python.

  Args:
    node: The ast.Call node to check arg values for.

  Returns:
    True if the node uses starred variadic positional args or keyword args.
    False if it does not.
  """
  if sys.version_info[:2] >= (3, 5):
    # Python 3.5+ represents **kwargs as a keyword entry with no arg name.
    return any(keyword.arg is None for keyword in node.keywords)
  # Earlier ASTs carry a dedicated kwargs field instead.
  return bool(node.kwargs)
def uses_star_args_or_kwargs_in_call(node):
  """Check if an ast.Call node uses arbitrary-length *args or **kwargs.

  This function works with the AST call node format of Python3.5+
  as well as the different AST format of earlier versions of Python.

  Args:
    node: The ast.Call node to check arg values for.

  Returns:
    True if the node uses starred variadic positional args or keyword args.
    False if it does not.
  """
  # Short-circuit: a positional *args hit makes the keyword check unnecessary.
  if uses_star_args_in_call(node):
    return True
  return uses_star_kwargs_in_call(node)
def excluded_from_module_rename(module, import_rename_spec):
  """Check if this module import should not be renamed.

  Args:
    module: (string) module name.
    import_rename_spec: ImportRename instance.

  Returns:
    True if this import should not be renamed according to the
    import_rename_spec.
  """
  # The module is exempt as soon as any excluded prefix matches.
  return any(
      module.startswith(excluded)
      for excluded in import_rename_spec.excluded_prefixes)
class APIChangeSpec(object):
  """This class defines the transformations that need to happen.
  This class must provide the following fields:
  * `function_keyword_renames`: maps function names to a map of old -> new
    argument names
  * `symbol_renames`: maps function names to new function names
  * `change_to_function`: a set of function names that have changed (for
    notifications)
  * `function_reorders`: maps functions whose argument order has changed to the
    list of arguments in the new order
  * `function_warnings`: maps full names of functions to warnings that will be
    printed out if the function is used. (e.g. tf.nn.convolution())
  * `function_transformers`: maps function names to custom handlers
  * `module_deprecations`: maps module names to warnings that will be printed
    if the module is still used after all other transformations have run
  * `import_renames`: maps import name (must be a short name without '.')
    to ImportRename instance.
  For an example, see `TFAPIChangeSpec`.
  """
  def preprocess(self, root_node): # pylint: disable=unused-argument
    """Preprocess a parse tree. Return any produced logs and errors."""
    # Default implementation inspects nothing and produces no logs/errors.
    return [], []
  def clear_preprocessing(self):
    """Restore this APIChangeSpec to before it preprocessed a file.
    This is needed if preprocessing a file changed any rewriting rules.
    """
    # Default implementation is stateless, so there is nothing to restore.
    pass
class NoUpdateSpec(APIChangeSpec):
  """A specification of an API change which doesn't change anything."""
  def __init__(self):
    # Every transformation category is an empty mapping: with no rules
    # registered, visiting a tree logs nothing and rewrites nothing.
    self.function_handle = {}
    self.function_reorders = {}
    self.function_keyword_renames = {}
    self.symbol_renames = {}
    self.function_warnings = {}
    self.change_to_function = {}
    self.module_deprecations = {}
    self.function_transformers = {}
    self.import_renames = {}
class _PastaEditVisitor(ast.NodeVisitor):
  """AST Visitor that processes function calls.
  Updates function calls from old API version to new API version using a given
  change spec.
  """
  def __init__(self, api_change_spec):
    self._api_change_spec = api_change_spec
    self._log = []  # Holds 4-tuples: severity, line, col, msg.
    self._stack = []  # Allow easy access to parents.
  # Overridden to maintain a stack of nodes to allow for parent access
  def visit(self, node):
    self._stack.append(node)
    super(_PastaEditVisitor, self).visit(node)
    self._stack.pop()
  @property
  def errors(self):
    # Subset of the log with ERROR severity.
    return [log for log in self._log if log[0] == ERROR]
  @property
  def warnings(self):
    # Subset of the log with WARNING severity.
    return [log for log in self._log if log[0] == WARNING]
  @property
  def warnings_and_errors(self):
    # Subset of the log with WARNING or ERROR severity.
    return [log for log in self._log if log[0] in (WARNING, ERROR)]
  @property
  def info(self):
    # Subset of the log with INFO severity.
    return [log for log in self._log if log[0] == INFO]
  @property
  def log(self):
    return self._log
  def add_log(self, severity, lineno, col, msg):
    # Record a single (severity, lineno, col, msg) entry and echo it.
    self._log.append((severity, lineno, col, msg))
    print("%s line %d:%d: %s" % (severity, lineno, col, msg))
  def add_logs(self, logs):
    """Record a log and print it.
    The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
    be printed and recorded. It is part of the log available in the `self.log`
    property.
    Args:
      logs: The logs to add. Must be a list of tuples
        `(severity, lineno, col_offset, msg)`.
    """
    self._log.extend(logs)
    for log in logs:
      print("%s line %d:%d: %s" % log)
  def _get_applicable_entries(self, transformer_field, full_name, name):
    """Get all list entries indexed by name that apply to full_name or name."""
    # Transformers are indexed to full name, name, or no name
    # as a performance optimization.
    function_transformers = getattr(self._api_change_spec,
                                    transformer_field, {})
    glob_name = "*." + six.ensure_str(name) if name else None
    transformers = []
    # Most specific match first: exact full name, then "*.<name>", then "*".
    if full_name in function_transformers:
      transformers.append(function_transformers[full_name])
    if glob_name in function_transformers:
      transformers.append(function_transformers[glob_name])
    if "*" in function_transformers:
      transformers.append(function_transformers["*"])
    return transformers
  def _get_applicable_dict(self, transformer_field, full_name, name):
    """Get all dict entries indexed by name that apply to full_name or name."""
    # Transformers are indexed to full name, name, or no name
    # as a performance optimization.
    function_transformers = getattr(self._api_change_spec,
                                    transformer_field, {})
    glob_name = "*." + six.ensure_str(name) if name else None
    # Later updates win, so entries for the exact full name override
    # "*.<name>" entries, which in turn override "*" entries.
    transformers = function_transformers.get("*", {}).copy()
    transformers.update(function_transformers.get(glob_name, {}))
    transformers.update(function_transformers.get(full_name, {}))
    return transformers
  def _get_full_name(self, node):
    """Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".
    This is the inverse of `full_name_node`.
    Args:
      node: A Node of type Attribute.
    Returns:
      a '.'-delimited full-name or None if node was not Attribute or Name.
      i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
    """
    curr = node
    items = []
    # Walk down the Attribute chain; anything other than Attribute/Name
    # (e.g. a Call) means there is no simple dotted name.
    while not isinstance(curr, ast.Name):
      if not isinstance(curr, ast.Attribute):
        return None
      items.append(curr.attr)
      curr = curr.value
    items.append(curr.id)
    return ".".join(reversed(items))
  def _maybe_add_warning(self, node, full_name):
    """Adds an error to be printed about full_name at node."""
    function_warnings = self._api_change_spec.function_warnings
    if full_name in function_warnings:
      level, message = function_warnings[full_name]
      message = six.ensure_str(message).replace("<function name>", full_name)
      self.add_log(level, node.lineno, node.col_offset,
                   "%s requires manual check. %s" % (full_name, message))
      return True
    else:
      return False
  def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):
    """Adds a warning if full_name is a deprecated module."""
    warnings = self._api_change_spec.module_deprecations
    if full_name in warnings:
      level, message = warnings[full_name]
      message = six.ensure_str(message).replace("<function name>",
                                                six.ensure_str(whole_name))
      self.add_log(level, node.lineno, node.col_offset,
                   "Using member %s in deprecated module %s. %s" % (whole_name,
                                                                    full_name,
                                                                    message))
      return True
    else:
      return False
  def _maybe_add_call_warning(self, node, full_name, name):
    """Print a warning when specific functions are called with selected args.
    The function _print_warning_for_function matches the full name of the called
    function, e.g., tf.foo.bar(). This function matches the function name that
    is called, as long as the function is an attribute. For example,
    `tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.
    Args:
      node: ast.Call object
      full_name: The precomputed full name of the callable, if one exists, None
        otherwise.
      name: The precomputed name of the callable, if one exists, None otherwise.
    Returns:
      Whether an error was recorded.
    """
    # Only look for *.-warnings here, the other will be handled by the Attribute
    # visitor. Also, do not warn for bare functions, only if the call func is
    # an attribute.
    warned = False
    if isinstance(node.func, ast.Attribute):
      warned = self._maybe_add_warning(node, "*." + six.ensure_str(name))
    # All arg warnings are handled here, since only we have the args
    arg_warnings = self._get_applicable_dict("function_arg_warnings",
                                             full_name, name)
    variadic_args = uses_star_args_or_kwargs_in_call(node)
    for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):
      # NOTE(review): get_arg_value always returns a 2-tuple, which is truthy,
      # so the `or variadic_args` fallback can never be selected here and
      # `present` always comes from get_arg_value — confirm this is intended.
      present, _ = get_arg_value(node, kwarg, arg) or variadic_args
      if present:
        warned = True
        warning_message = six.ensure_str(warning).replace(
            "<function name>", six.ensure_str(full_name or name))
        template = "%s called with %s argument, requires manual check: %s"
        if variadic_args:
          template = ("%s called with *args or **kwargs that may include %s, "
                      "requires manual check: %s")
        self.add_log(level, node.lineno, node.col_offset,
                     template % (full_name or name, kwarg, warning_message))
    return warned
  def _maybe_rename(self, parent, node, full_name):
    """Replace node (Attribute or Name) with a node representing full_name."""
    new_name = self._api_change_spec.symbol_renames.get(full_name, None)
    if new_name:
      self.add_log(INFO, node.lineno, node.col_offset,
                   "Renamed %r to %r" % (full_name, new_name))
      new_node = full_name_node(new_name, node.ctx)
      ast.copy_location(new_node, node)
      pasta.ast_utils.replace_child(parent, node, new_node)
      return True
    else:
      return False
  def _maybe_change_to_function_call(self, parent, node, full_name):
    """Wraps node (typically, an Attribute or Expr) in a Call."""
    if full_name in self._api_change_spec.change_to_function:
      if not isinstance(parent, ast.Call):
        # ast.Call's constructor is really picky about how many arguments it
        # wants, and also, it changed between Py2 and Py3.
        if six.PY2:
          new_node = ast.Call(node, [], [], None, None)
        else:
          new_node = ast.Call(node, [], [])
        pasta.ast_utils.replace_child(parent, node, new_node)
        ast.copy_location(new_node, node)
        self.add_log(INFO, node.lineno, node.col_offset,
                     "Changed %r to a function call" % full_name)
        return True
    return False
  def _maybe_add_arg_names(self, node, full_name):
    """Make args into keyword args if function called full_name requires it."""
    function_reorders = self._api_change_spec.function_reorders
    if full_name in function_reorders:
      if uses_star_args_in_call(node):
        self.add_log(WARNING, node.lineno, node.col_offset,
                     "(Manual check required) upgrading %s may require "
                     "re-ordering the call arguments, but it was passed "
                     "variable-length positional *args. The upgrade "
                     "script cannot handle these automatically." % full_name)
      reordered = function_reorders[full_name]
      new_keywords = []
      idx = 0
      # Convert each positional arg into a keyword arg named by the
      # spec-supplied order; Starred entries cannot be converted.
      for arg in node.args:
        if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
          continue  # Can't move Starred to keywords
        keyword_arg = reordered[idx]
        keyword = ast.keyword(arg=keyword_arg, value=arg)
        new_keywords.append(keyword)
        idx += 1
      if new_keywords:
        self.add_log(INFO, node.lineno, node.col_offset,
                     "Added keywords to args of function %r" % full_name)
        node.args = []
        # Converted keywords go before the keywords the call already had.
        node.keywords = new_keywords + (node.keywords or [])
      return True
    return False
  def _maybe_modify_args(self, node, full_name, name):
    """Rename keyword args if the function called full_name requires it."""
    renamed_keywords = self._get_applicable_dict("function_keyword_renames",
                                                 full_name, name)
    if not renamed_keywords:
      return False
    if uses_star_kwargs_in_call(node):
      self.add_log(WARNING, node.lineno, node.col_offset,
                   "(Manual check required) upgrading %s may require "
                   "renaming or removing call arguments, but it was passed "
                   "variable-length *args or **kwargs. The upgrade "
                   "script cannot handle these automatically." %
                   (full_name or name))
    modified = False
    new_keywords = []
    for keyword in node.keywords:
      argkey = keyword.arg
      if argkey in renamed_keywords:
        modified = True
        # A rename target of None means "delete this keyword": the keyword is
        # simply not appended to new_keywords.
        if renamed_keywords[argkey] is None:
          lineno = getattr(keyword, "lineno", node.lineno)
          col_offset = getattr(keyword, "col_offset", node.col_offset)
          self.add_log(INFO, lineno, col_offset,
                       "Removed argument %s for function %s" % (
                           argkey, full_name or name))
        else:
          keyword.arg = renamed_keywords[argkey]
          lineno = getattr(keyword, "lineno", node.lineno)
          col_offset = getattr(keyword, "col_offset", node.col_offset)
          self.add_log(INFO, lineno, col_offset,
                       "Renamed keyword argument for %s from %s to %s" % (
                           full_name, argkey, renamed_keywords[argkey]))
          new_keywords.append(keyword)
      else:
        new_keywords.append(keyword)
    if modified:
      node.keywords = new_keywords
    return modified
  def visit_Call(self, node):  # pylint: disable=invalid-name
    """Handle visiting a call node in the AST.
    Args:
      node: Current Node
    """
    assert self._stack[-1] is node
    # Get the name for this call, so we can index stuff with it.
    full_name = self._get_full_name(node.func)
    if full_name:
      name = full_name.split(".")[-1]
    elif isinstance(node.func, ast.Name):
      name = node.func.id
    elif isinstance(node.func, ast.Attribute):
      name = node.func.attr
    else:
      name = None
    # Call standard transformers for this node.
    # Make sure warnings come first, since args or names triggering warnings
    # may be removed by the other transformations.
    self._maybe_add_call_warning(node, full_name, name)
    # Make all args into kwargs
    self._maybe_add_arg_names(node, full_name)
    # Argument name changes or deletions
    self._maybe_modify_args(node, full_name, name)
    # Call transformers. These have the ability to modify the node, and if they
    # do, will return the new node they created (or the same node if they just
    # changed it). The are given the parent, but we will take care of
    # integrating their changes into the parent if they return a new node.
    #
    # These are matched on the old name, since renaming is performed by the
    # Attribute visitor, which happens later.
    transformers = self._get_applicable_entries("function_transformers",
                                                full_name, name)
    parent = self._stack[-2]
    if transformers:
      if uses_star_args_or_kwargs_in_call(node):
        self.add_log(WARNING, node.lineno, node.col_offset,
                     "(Manual check required) upgrading %s may require "
                     "modifying call arguments, but it was passed "
                     "variable-length *args or **kwargs. The upgrade "
                     "script cannot handle these automatically." %
                     (full_name or name))
      for transformer in transformers:
        logs = []
        new_node = transformer(parent, node, full_name, name, logs)
        self.add_logs(logs)
        if new_node and new_node is not node:
          pasta.ast_utils.replace_child(parent, node, new_node)
          node = new_node
          # Keep the stack entry in sync with the replacement before
          # descending into the new node's children.
          self._stack[-1] = node
    self.generic_visit(node)
  def visit_Attribute(self, node):  # pylint: disable=invalid-name
    """Handle bare Attributes i.e. [tf.foo, tf.bar]."""
    assert self._stack[-1] is node
    full_name = self._get_full_name(node)
    if full_name:
      parent = self._stack[-2]
      # Make sure the warning comes first, otherwise the name may have changed
      self._maybe_add_warning(node, full_name)
      # Once we did a modification, node is invalid and not worth inspecting
      # further. Also, we only perform modifications for simple nodes, so
      # There'd be no point in descending further.
      if self._maybe_rename(parent, node, full_name):
        return
      if self._maybe_change_to_function_call(parent, node, full_name):
        return
      # The isinstance check is enough -- a bare Attribute is never root.
      i = 2
      while isinstance(self._stack[-i], ast.Attribute):
        i += 1
      whole_name = pasta.dump(self._stack[-(i-1)])
      self._maybe_add_module_deprecation_warning(node, full_name, whole_name)
    self.generic_visit(node)
  def visit_Import(self, node):  # pylint: disable=invalid-name
    """Handle visiting an import node in the AST.
    Args:
      node: Current Node
    """
    new_aliases = []
    import_updated = False
    import_renames = getattr(self._api_change_spec, "import_renames", {})
    max_submodule_depth = getattr(self._api_change_spec, "max_submodule_depth",
                                  1)
    inserts_after_imports = getattr(self._api_change_spec,
                                    "inserts_after_imports", {})
    # This loop processes imports in the format
    # import foo as f, bar as b
    for import_alias in node.names:
      all_import_components = six.ensure_str(import_alias.name).split(".")
      # Look for rename, starting with longest import levels.
      found_update = False
      for i in reversed(list(range(1, max_submodule_depth + 1))):
        # Rebuild the first i components of the dotted module name.
        import_component = all_import_components[0]
        for j in range(1, min(i, len(all_import_components))):
          import_component += "." + six.ensure_str(all_import_components[j])
        import_rename_spec = import_renames.get(import_component, None)
        if not import_rename_spec or excluded_from_module_rename(
            import_alias.name, import_rename_spec):
          continue
        new_name = (
            import_rename_spec.new_name +
            import_alias.name[len(import_component):])
        # If current import is
        # import foo
        # then new import should preserve imported name:
        # import new_foo as foo
        # This happens when module has just one component.
        new_asname = import_alias.asname
        if not new_asname and "." not in import_alias.name:
          new_asname = import_alias.name
        new_alias = ast.alias(name=new_name, asname=new_asname)
        new_aliases.append(new_alias)
        import_updated = True
        found_update = True
        # Insert any followup lines that should happen after this import.
        full_import = (import_alias.name, import_alias.asname)
        insert_offset = 1
        for line_to_insert in inserts_after_imports.get(full_import, []):
          assert self._stack[-1] is node
          parent = self._stack[-2]
          new_line_node = pasta.parse(line_to_insert)
          ast.copy_location(new_line_node, node)
          parent.body.insert(
              parent.body.index(node) + insert_offset, new_line_node)
          insert_offset += 1
          # Insert a newline after the import if necessary
          old_suffix = pasta.base.formatting.get(node, "suffix")
          if old_suffix is None:
            old_suffix = os.linesep
          if os.linesep not in old_suffix:
            pasta.base.formatting.set(node, "suffix",
                                      six.ensure_str(old_suffix) + os.linesep)
          # Apply indentation to new node.
          pasta.base.formatting.set(new_line_node, "prefix",
                                    pasta.base.formatting.get(node, "prefix"))
          pasta.base.formatting.set(new_line_node, "suffix", os.linesep)
          self.add_log(
              INFO, node.lineno, node.col_offset,
              "Adding `%s` after import of %s" %
              (new_line_node, import_alias.name))
        # Find one match, break
        if found_update:
          break
      # No rename is found for all levels
      if not found_update:
        new_aliases.append(import_alias)  # no change needed
    # Replace the node if at least one import needs to be updated.
    if import_updated:
      assert self._stack[-1] is node
      parent = self._stack[-2]
      new_node = ast.Import(new_aliases)
      ast.copy_location(new_node, node)
      pasta.ast_utils.replace_child(parent, node, new_node)
      self.add_log(
          INFO, node.lineno, node.col_offset,
          "Changed import from %r to %r." %
          (pasta.dump(node), pasta.dump(new_node)))
    self.generic_visit(node)
  def visit_ImportFrom(self, node):  # pylint: disable=invalid-name
    """Handle visiting an import-from node in the AST.
    Args:
      node: Current Node
    """
    if not node.module:
      self.generic_visit(node)
      return
    from_import = node.module
    # Look for rename based on first component of from-import.
    # i.e. based on foo in foo.bar.
    from_import_first_component = six.ensure_str(from_import).split(".")[0]
    import_renames = getattr(self._api_change_spec, "import_renames", {})
    import_rename_spec = import_renames.get(from_import_first_component, None)
    if not import_rename_spec:
      self.generic_visit(node)
      return
    # Split module aliases into the ones that require import update
    # and those that don't. For e.g. if we want to rename "a" to "b"
    # unless we import "a.c" in the following:
    # from a import c, d
    # we want to update import for "d" but not for "c".
    updated_aliases = []
    same_aliases = []
    for import_alias in node.names:
      full_module_name = "%s.%s" % (from_import, import_alias.name)
      if excluded_from_module_rename(full_module_name, import_rename_spec):
        same_aliases.append(import_alias)
      else:
        updated_aliases.append(import_alias)
    if not updated_aliases:
      self.generic_visit(node)
      return
    assert self._stack[-1] is node
    parent = self._stack[-2]
    # Replace first component of from-import with new name.
    new_from_import = (
        import_rename_spec.new_name +
        from_import[len(from_import_first_component):])
    updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
    ast.copy_location(updated_node, node)
    pasta.ast_utils.replace_child(parent, node, updated_node)
    # If some imports had to stay the same, add another import for them.
    additional_import_log = ""
    if same_aliases:
      same_node = ast.ImportFrom(from_import, same_aliases, node.level,
                                 col_offset=node.col_offset, lineno=node.lineno)
      ast.copy_location(same_node, node)
      parent.body.insert(parent.body.index(updated_node), same_node)
      # Apply indentation to new node.
      pasta.base.formatting.set(
          same_node, "prefix",
          pasta.base.formatting.get(updated_node, "prefix"))
      additional_import_log = " and %r" % pasta.dump(same_node)
    self.add_log(
        INFO, node.lineno, node.col_offset,
        "Changed import from %r to %r%s." %
        (pasta.dump(node),
         pasta.dump(updated_node),
         additional_import_log))
    self.generic_visit(node)
class AnalysisResult(object):
  """This class represents an analysis result and how it should be logged.
  This class must provide the following fields:
  * `log_level`: The log level to which this detection should be logged
  * `log_message`: The message that should be logged for this detection
  For an example, see `VersionedTFImport`.
  """
  # Intentionally empty: subclasses attach `log_level` and `log_message`.
class APIAnalysisSpec(object):
  """This class defines how `AnalysisResult`s should be generated.
  It specifies how to map imports and symbols to `AnalysisResult`s.
  This class must provide the following fields:
  * `symbols_to_detect`: maps function names to `AnalysisResult`s
  * `imports_to_detect`: maps imports represented as (full module name, alias)
    tuples to `AnalysisResult`s
  For an example, see `TFAPIImportAnalysisSpec`.
  """
  # Intentionally empty: subclasses attach the mappings described above.
class PastaAnalyzeVisitor(_PastaEditVisitor):
  """AST Visitor that looks for specific API usage without editing anything.
  This is used before any rewriting is done to detect if any symbols are used
  that require changing imports or disabling rewriting altogether.
  """
  def __init__(self, api_analysis_spec):
    # Run with a no-op change spec so the inherited visiting machinery logs
    # but never rewrites anything.
    super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())
    self._api_analysis_spec = api_analysis_spec
    self._results = []  # Holds AnalysisResult objects
  @property
  def results(self):
    return self._results
  def add_result(self, analysis_result):
    self._results.append(analysis_result)
  def visit_Attribute(self, node):  # pylint: disable=invalid-name
    """Handle bare Attributes i.e. [tf.foo, tf.bar]."""
    full_name = self._get_full_name(node)
    if full_name:
      detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None)
      if detection:
        self.add_result(detection)
        self.add_log(
            detection.log_level, node.lineno, node.col_offset,
            detection.log_message)
    self.generic_visit(node)
  def visit_Import(self, node):  # pylint: disable=invalid-name
    """Handle visiting an import node in the AST.
    Args:
      node: Current Node
    """
    for import_alias in node.names:
      # Detect based on the (full import name, alias) pair.
      full_import = (import_alias.name, import_alias.asname)
      detection = (self._api_analysis_spec
                   .imports_to_detect.get(full_import, None))
      if detection:
        self.add_result(detection)
        self.add_log(
            detection.log_level, node.lineno, node.col_offset,
            detection.log_message)
    self.generic_visit(node)
  def visit_ImportFrom(self, node):  # pylint: disable=invalid-name
    """Handle visiting an import-from node in the AST.
    Args:
      node: Current Node
    """
    if not node.module:
      self.generic_visit(node)
      return
    from_import = node.module
    for import_alias in node.names:
      # Detect based on the full dotted import name plus its alias.
      full_module_name = "%s.%s" % (from_import, import_alias.name)
      full_import = (full_module_name, import_alias.asname)
      detection = (self._api_analysis_spec
                   .imports_to_detect.get(full_import, None))
      if detection:
        self.add_result(detection)
        self.add_log(
            detection.log_level, node.lineno, node.col_offset,
            detection.log_message)
    self.generic_visit(node)
class ASTCodeUpgrader(object):
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self,
in_filename,
out_filename,
no_change_to_outfile_on_error=False):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
no_change_to_outfile_on_error: not modify the output file on errors
Returns:
A tuple representing number of files processed, log of actions, errors
"""
# Write to a temporary file, just in case we are doing an implace modify.
# pylint: disable=g-backslash-continuation
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(in_filename, in_file, out_filename,
temp_file)
# pylint: enable=g-backslash-continuation
if no_change_to_outfile_on_error and ret[0] == 0:
os.remove(temp_file.name)
else:
shutil.move(temp_file.name, out_filename)
return ret
def format_log(self, log, in_filename):
log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
if in_filename:
return six.ensure_str(in_filename) + ":" + log_string
else:
return log_string
def update_string_pasta(self, text, in_filename):
"""Updates a file using pasta."""
try:
t = pasta.parse(text)
except (SyntaxError, ValueError, TypeError):
log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
return 0, "", log, []
preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t)
visitor = _PastaEditVisitor(self._api_change_spec)
visitor.visit(t)
self._api_change_spec.clear_preprocessing()
logs = [self.format_log(log, None) for log in (preprocess_logs +
visitor.log)]
errors = [self.format_log(error, in_filename)
for error in (preprocess_errors +
visitor.warnings_and_errors)]
return 1, pasta.dump(t), logs, errors
def _format_log(self, log, in_filename, out_filename):
text = six.ensure_str("-" * 80) + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += six.ensure_str("-" * 80) + "\n\n"
text += "\n".join(log) + "\n"
text += six.ensure_str("-" * 80) + "\n\n"
return text
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
lines = in_file.readlines()
processed_file, new_file_content, log, process_errors = (
self.update_string_pasta("".join(lines), in_filename))
if out_file and processed_file:
out_file.write(new_file_content)
return (processed_file,
self._format_log(log, in_filename, out_filename),
process_errors)
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
Note that only Python files. If you have custom code in other languages,
you will need to manually upgrade those.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base.
copy_other_files: Copy files that are not touched by this converter.
Returns:
A tuple of files processed, the report string for all files, and a dict
mapping filenames to errors encountered in that file.
"""
if output_root_directory == root_directory:
return self.process_tree_inplace(root_directory)
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." %
(output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" %
(root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if six.ensure_str(f).endswith(".py")]
copy_files = [
f for f in file_list if not six.ensure_str(f).endswith(".py")
]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(fullpath,
root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(
fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = {}
report = ""
report += six.ensure_str(("=" * 80)) + "\n"
report += "Input tree: %r\n" % root_directory
report += six.ensure_str(("=" * 80)) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
if os.path.islink(input_path):
link_target = os.readlink(input_path)
link_target_output = os.path.join(
output_root_directory, os.path.relpath(link_target, root_directory))
if (link_target, link_target_output) in files_to_process:
# Create a link to the new location of the target file
os.symlink(link_target_output, output_path)
else:
report += "Copying symlink %s without modifying its target %s" % (
input_path, link_target)
os.symlink(link_target, output_path)
continue
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors[input_path] = l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
def process_tree_inplace(self, root_directory):
  """Process a directory of python files in place."""
  # Gather every *.py path up front so edits cannot perturb the walk.
  targets = []
  for current_dir, _, names in os.walk(root_directory):
    targets.extend(
        os.path.join(current_dir, name)
        for name in names
        if six.ensure_str(name).endswith(".py"))

  processed = 0
  errors_by_path = {}
  banner = six.ensure_str(("=" * 80)) + "\n"
  report = banner + ("Input tree: %r\n" % root_directory) + banner

  for target in targets:
    # Symlinks are skipped: rewriting through one could edit its target twice.
    if os.path.islink(target):
      report += "Skipping symlink %s.\n" % target
      continue
    processed += 1
    _, file_report, file_errors = self.process_file(target, target)
    errors_by_path[target] = file_errors
    report += file_report

  return processed, report, errors_by_path
| xzturn/tensorflow | tensorflow/tools/compatibility/ast_edits.py | Python | apache-2.0 | 40,392 | [
"VisIt"
] | 383f8a57481a7cfd04eb0f21bd2a8e0332be86834ce72532be17bd1b164b1f10 |
#!/usr/bin/env python
"""Copyright 2008 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.4'
__date__ = 'May 02 2008'
#Basic imports
from ctypes import *
import sys
#Phidget specific imports
from Phidgets.PhidgetException import *
from Phidgets.Events.Events import *
from Phidgets.Devices.PHSensor import *
#Create an PHSensor object
# NOTE: Python 2 source (print statements, `except X, e` syntax).
phSensor = PHSensor()

#Information Display Function
def displayDeviceInfo():
    # Prints an attach/type/serial/version table plus current sensor readings.
    print "|------------|----------------------------------|--------------|------------|"
    print "|- Attached -|- Type -|- Serial No. -|- Version -|"
    print "|------------|----------------------------------|--------------|------------|"
    print "|- %8s -|- %30s -|- %10d -|- %8d -|" % (phSensor.isAttached(), phSensor.getDeviceType(), phSensor.getSerialNum(), phSensor.getDeviceVersion())
    print "|------------|----------------------------------|--------------|------------|"
    print "PH Sensitivity: %f" % (phSensor.getPHChangeTrigger())
    print "Current Potential: %f" % (phSensor.getPotential())
    return 0

#Event Handler Callback Functions
def phSensorAttached(e):
    attached = e.device
    print "PHSensor %i Attached!" % (attached.getSerialNum())
    return 0

def phSensorDetached(e):
    detached = e.device
    print "PHSensor %i Detached!" % (detached.getSerialNum())
    return 0

def phSensorError(e):
    print "Phidget Error %i: %s" % (e.eCode, e.description)
    return 0

def phSensorPHChanged(e):
    # Reads the potential directly from the device at event time.
    potential = phSensor.getPotential()
    print "PH: %f -- Potential: %f" % (e.PH, potential)
    return 0

#Main Program Code
# Register callbacks before opening so no early events are missed.
try:
    phSensor.setOnAttachHandler(phSensorAttached)
    phSensor.setOnDetachHandler(phSensorDetached)
    phSensor.setOnErrorhandler(phSensorError)
    phSensor.setOnPHChangeHandler(phSensorPHChanged)
except PhidgetException, e:
    print "Phidget Exception %i: %s" % (e.code, e.message)
    print "Exiting...."
    exit(1)

print "Opening phidget object...."
try:
    phSensor.openPhidget()
except PhidgetException, e:
    print "Phidget Exception %i: %s" % (e.code, e.message)
    print "Exiting...."
    exit(1)

print "Waiting for attach...."
try:
    # Block up to 10 seconds for the device to attach.
    phSensor.waitForAttach(10000)
except PhidgetException, e:
    print "Phidget Exception %i: %s" % (e.code, e.message)
    # Best-effort close before bailing out.
    try:
        phSensor.closePhidget()
    except PhidgetException, e:
        print "Phidget Exception %i: %s" % (e.code, e.message)
        print "Exiting...."
        exit(1)
    print "Exiting...."
    exit(1)
else:
    displayDeviceInfo()

print "Increasing sensitivity to 10.00"
phSensor.setPHChangeTrigger(10.00)

print "Press Enter to quit...."
# NOTE(review): `chr` shadows the builtin of the same name; kept as-is.
chr = sys.stdin.read(1)
print "Closing..."

try:
    phSensor.closePhidget()
except PhidgetException, e:
    print "Phidget Exception %i: %s" % (e.code, e.message)
    print "Exiting...."
    exit(1)

print "Done."
exit(0) | jantman/tuxostat | fs_backup/home/tuxostat/devel/Python/PHSensor-simple.py | Python | gpl-3.0 | 3,154 | [
"VisIt"
] | fd32abcd31f856cc17245561515b1784707a951d86ffc9b58a88f94c887bf65a |
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from gmi.mostusedsites.backend.models import Base, User, Visit
import pytest
class TestModels:
    """Unit tests for the User and Visit SQLAlchemy models.

    Relies on a pytest ``session`` fixture supplying a SQLAlchemy session
    bound to a test database.
    """

    def test_user(self, session):
        user = User(unique_id='foo')
        session.add(user)
        assert session.flush() is None

    def test_user_id_is_unique(self, session):
        # Two users with the same unique_id must violate the unique constraint.
        user = User(unique_id='bar')
        user2 = User(unique_id='bar')
        session.add(user)
        session.add(user2)
        with pytest.raises(IntegrityError):
            session.flush()

    def test_visit(self, session):
        visit = Visit('http://foo', visited_at=1, duration=1, active=True)
        session.add(visit)
        assert session.flush() is None

    def test_visit_belongs_to_user(self):
        user = User(unique_id='foo')
        visit = Visit('http://foo', visited_at=1, duration=1, active=True)
        user.visits.append(visit)
        assert visit.user is user

    def test_visit_url_is_present(self):
        # Fix: the constructor call raises before anything can be bound, so
        # the previous dead `visit = ...` assignment has been dropped.
        with pytest.raises(TypeError):
            Visit(visited_at=1, duration=1, active=True)

    def test_visit_visited_at_is_present(self, session):
        visit = Visit('http://foo_visited', duration=1, active=True)
        session.add(visit)
        with pytest.raises(IntegrityError):
            session.flush()

    def test_visit_duration_is_present(self, session):
        visit = Visit('http://foo_duration', visited_at=1, active=True)
        session.add(visit)
        with pytest.raises(IntegrityError):
            session.flush()

    def test_visit_active_is_present(self, session):
        visit = Visit('http://foo_active', duration=1, visited_at=1)
        session.add(visit)
        with pytest.raises(IntegrityError):
            session.flush()

    def test_visit_id(self):
        # Visit ids look like deterministic SHA-1 digests of the visit data.
        visit = Visit('http://foo', visited_at=1, duration=1)
        assert visit.id == '1aa0598ba12ba82e6b6a88f97f010948f33a01d5'

    def test_visit_id_unique(self, session):
        visit = Visit('http://bar', visited_at=1, duration=1)
        visit1 = Visit(url='http://bar', visited_at=1, duration=1)
        session.add(visit)
        session.add(visit1)
        with pytest.raises(IntegrityError):
            session.flush()

    def test_visit_id_long_timestamp(self):
        # Millisecond-resolution timestamps must hash just as reliably.
        visit = Visit("http://bar", visited_at=1449073100894, duration=1)
        assert visit.id == 'cdd896d38b96f973e2d0f446daf5da090878f585'

    def test_visit_schema(self):
        visit = Visit('https://bar', visited_at=1, duration=1)
        assert visit.scheme == 'https'

    def test_visit_host(self):
        visit = Visit('https://bar', visited_at=1, duration=1)
        assert visit.host == 'bar'

    def test_visit_path(self):
        visit = Visit('https://bar/foo/baz', visited_at=1, duration=1)
        assert visit.path == '/foo/baz'

    def test_visit_no_params(self):
        # Query strings are stripped from the stored path.
        visit = Visit('https://bar/foo?a=1&b=2', visited_at=1, duration=1)
        assert visit.path == '/foo'
| geyer-moldfusz/gmi.most_used_sites.backend | src/gmi/mostusedsites/backend/tests/test_models.py | Python | gpl-3.0 | 3,037 | [
"VisIt"
] | c4105b1e7c92f12d27d4ee413a9561f7ec2eaf934397f8990120a47749dc2c93 |
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom formatting for Thunderbird."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
# pylint: disable-msg=C0301
import copy
import pyatspi
import orca.formatting
import orca.scripts.toolkits.Gecko.formatting as GeckoFormatting
# Speech-formatting overrides layered on top of the Gecko defaults: for
# document frames, whereAmI queries also speak the text content (and, in the
# detailed case, text attributes plus the tutorial string).
formatting = {
    'speech': {
        pyatspi.ROLE_DOCUMENT_FRAME: {
            'basicWhereAmI': 'label + readOnly + textRole + textContent + anyTextSelection + ' + orca.formatting.MNEMONIC,
            'detailedWhereAmI': 'label + readOnly + textRole + textContentWithAttributes + anyTextSelection + ' + orca.formatting.MNEMONIC + ' + ' + orca.formatting.TUTORIAL
        },
    }
}
class Formatting(GeckoFormatting.Formatting):
    """Thunderbird-specific formatting, layered on top of Gecko's."""

    def __init__(self, script):
        GeckoFormatting.Formatting.__init__(self, script)
        # Overlay this module's entries onto the inherited Gecko table; a deep
        # copy keeps the module-level dict pristine.
        self.update(copy.deepcopy(formatting))
        self._defaultFormatting = orca.formatting.Formatting(script)

    def getFormat(self, **args):
        # Callers may bypass the Gecko/Thunderbird customizations entirely.
        if args.get('useDefaultFormatting', False):
            return self._defaultFormatting.getFormat(**args)
        return GeckoFormatting.Formatting.getFormat(self, **args)
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/Thunderbird/formatting.py | Python | gpl-3.0 | 1,990 | [
"ORCA"
] | fb4a2ecac6f55f34b9d13457162825beb0876862a3091b3a65c861ed478f5ec5 |
#!/usr/bin/env python3
########################################################################
# Solves problem 131 from projectEuler.net.
# Finds the number of primes below 1 millon for which exists an n that
# n ** 3 + (n ** 2) * p is a perfect cube.
# Copyright (C) 2011 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.is-a-geek.com.ar
########################################################################
from CommonFunctions import find_primes_less_than
from itertools import count, takewhile
primes = find_primes_less_than(int(1000000 ** 0.5))
def is_prime(n):
    """Trial division of n by the precomputed (ascending) primes table."""
    limit = n ** 0.5
    # primes is sorted ascending, so stopping at sqrt(n) is equivalent to
    # the explicit early-return loop this replaces.
    divisors = takewhile(lambda p: p <= limit, primes)
    return all(n % p != 0 for p in divisors)
if __name__ == '__main__':
    # Candidate primes are differences of consecutive cubes,
    # (i + 1)**3 - i**3 = 3*i**2 + 3*i + 1; count those below one million
    # that are prime.  Note the inner generator rebinds 'i' to the cube
    # difference itself, so is_prime(i) tests the candidate.
    result = sum(1 for i in
                 takewhile(
                     lambda x: x < 1000000,
                     ((i + 1) ** 3 - i ** 3 for i in count(1))
                 )
                 if is_prime(i)
                 )
    print("The result is:", result)
| sanSS/programming-contests | project-euler/problem131.py | Python | gpl-3.0 | 1,762 | [
"VisIt"
] | 3b8d7b6413f4b764be25f26bd7354d837f6e71c9bec43439c4c2d9f861d7dc7a |
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_text
from ansible.playbook.role.requirement import RoleRequirement
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
    # Populated later: self.galaxy in parse(), self.api in run().
    self.api = None
    self.galaxy = None
    super(GalaxyCLI, self).__init__(args)
def set_action(self):
    """Configure usage text and per-action optparse options for self.action."""
    super(GalaxyCLI, self).set_action()

    # specific to actions
    if self.action == "delete":
        self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
    elif self.action == "import":
        self.parser.set_usage("usage: %prog import [options] github_user github_repo")
        self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
        self.parser.add_option('--branch', dest='reference',
                               help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
        self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
        self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
                               help='Check the status of the most recent import request for given github_user/github_repo.')
    elif self.action == "info":
        self.parser.set_usage("usage: %prog info [options] role_name[,version]")
    elif self.action == "init":
        self.parser.set_usage("usage: %prog init [options] role_name")
        self.parser.add_option('--init-path', dest='init_path', default="./",
                               help='The path in which the skeleton role will be created. The default is the current working directory.')
        self.parser.add_option('--type', dest='role_type', action='store', default='default',
                               help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
        self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
                               help='The path to a role skeleton that the new role should be based upon.')
    elif self.action == "install":
        self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
        self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                               help='Ignore errors and continue with the next specified role.')
        self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
        self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
        self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                               default=False, help='Use tar instead of the scm archive option when packaging the role')
    elif self.action == "remove":
        self.parser.set_usage("usage: %prog remove role1 role2 ...")
    elif self.action == "list":
        self.parser.set_usage("usage: %prog list [role_name]")
    elif self.action == "login":
        self.parser.set_usage("usage: %prog login [options]")
        self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
    elif self.action == "search":
        self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
                              "[--author username]")
        self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
        self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
        self.parser.add_option('--author', dest='author', help='GitHub username')
    elif self.action == "setup":
        self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
        self.parser.add_option('--remove', dest='remove_id', default=None,
                               help='Remove the integration matching the provided ID value. Use --list to see ID values.')
        self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')

    # options that apply to more than one action
    if self.action in ['init', 'info']:
        self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")

    if self.action not in ("delete", "import", "init", "login", "setup"):
        # NOTE: while the option type=str, the default is a list, and the
        # callback will set the value to a list.
        self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
                               help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
                               ' file (/etc/ansible/roles if not configured)', type='str')

    if self.action in ("init", "install"):
        self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def parse(self):
    ''' create an options parser for bin/ansible '''
    self.parser = CLI.base_parser(
        usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
        epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    )

    # common options shared by every action
    self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
    self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
                           help='Ignore SSL certificate validation errors.')
    # per-action usage/options are added before the base parse runs
    self.set_action()
    super(GalaxyCLI, self).parse()
    display.verbosity = self.options.verbosity
    self.galaxy = Galaxy(self.options)
def run(self):
    super(GalaxyCLI, self).run()
    # The API client needs the parsed options, so it is built after parse().
    self.api = GalaxyAPI(self.galaxy)
    self.execute()
def exit_without_ignore(self, rc=1):
    """
    Abort with an error unless --ignore-errors was specified.

    NOTE(review): the rc parameter is accepted for compatibility but is
    never used in this body — the failure path raises instead of exiting.
    """
    if self.options.ignore_errors:
        return
    raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
    """Render a role_info dict as a sorted, tab-indented block of text."""
    lines = [u"", u"Role: %s" % to_text(role_info['name'])]
    lines.append(u"\tdescription: %s" % role_info.get('description', ''))

    for field in sorted(role_info):
        if field in self.SKIP_INFO_KEYS:
            continue
        value = role_info[field]
        if isinstance(value, dict):
            # Nested dicts get one extra level of indentation.
            lines.append(u"\t%s:" % (field))
            lines.extend(u"\t\t%s: %s" % (subkey, value[subkey])
                         for subkey in sorted(value)
                         if subkey not in self.SKIP_INFO_KEYS)
        else:
            lines.append(u"\t%s: %s" % (field, value))
    return u'\n'.join(lines)
############################
# execute actions
############################
def execute_init(self):
    """
    creates the skeleton framework of a role that complies with the galaxy metadata format.
    """
    init_path = self.options.init_path
    force = self.options.force
    role_skeleton = self.options.role_skeleton

    role_name = self.args.pop(0).strip() if self.args else None
    if not role_name:
        raise AnsibleOptionsError("- no role name specified for init")
    role_path = os.path.join(init_path, role_name)
    if os.path.exists(role_path):
        if os.path.isfile(role_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
        elif not force:
            raise AnsibleError("- the directory %s already exists."
                               "you can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % role_path)

    # Placeholder values templated into the skeleton's .j2 files.
    inject_data = dict(
        role_name=role_name,
        author='your name',
        description='your description',
        company='your company (optional)',
        license='license (GPLv2, CC-BY, etc)',
        issue_tracker_url='http://example.com/issue/tracker',
        min_ansible_version='1.2',
        role_type=self.options.role_type
    )

    # create role directory
    if not os.path.exists(role_path):
        os.makedirs(role_path)

    # A user-supplied skeleton uses the configured ignore list; the bundled
    # default skeleton only ignores its .git_keep placeholder files.
    if role_skeleton is not None:
        skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
    else:
        role_skeleton = self.galaxy.default_role_skeleton_path
        skeleton_ignore_expressions = ['^.*/.git_keep$']

    role_skeleton = os.path.expanduser(role_skeleton)
    skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]

    template_env = Environment(loader=FileSystemLoader(role_skeleton))

    for root, dirs, files in os.walk(role_skeleton, topdown=True):
        rel_root = os.path.relpath(root, role_skeleton)
        in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
        # Prune ignored directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]

        for f in files:
            filename, ext = os.path.splitext(f)
            if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                continue
            elif ext == ".j2" and not in_templates_dir:
                # Render .j2 files (outside templates/) with inject_data,
                # dropping the .j2 suffix in the output.
                src_template = os.path.join(rel_root, f)
                dest_file = os.path.join(role_path, rel_root, filename)
                template_env.get_template(src_template).stream(inject_data).dump(dest_file)
            else:
                # Everything else (including templates/ content) is copied verbatim.
                f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
                shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))

        for d in dirs:
            dir_path = os.path.join(role_path, rel_root, d)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)

    display.display("- %s was created successfully" % role_name)
def execute_info(self):
    """
    prints out detailed information about an installed role as well as info available from the galaxy API.
    """
    if len(self.args) == 0:
        # the user needs to specify a role
        raise AnsibleOptionsError("- you must specify a user/role name")

    roles_path = self.options.roles_path

    data = ''
    for role in self.args:
        role_info = {'path': roles_path}
        gr = GalaxyRole(self.galaxy, role)

        install_info = gr.install_info
        if install_info:
            if 'version' in install_info:
                # Rename 'version' so it does not collide with the galaxy
                # metadata merged in below.
                # Fix: the key was previously misspelled 'intalled_version'.
                install_info['installed_version'] = install_info['version']
                del install_info['version']
            role_info.update(install_info)

        remote_data = False
        if not self.options.offline:
            remote_data = self.api.lookup_role_by_name(role, False)

        if remote_data:
            role_info.update(remote_data)

        if gr.metadata:
            role_info.update(gr.metadata)

        req = RoleRequirement()
        role_spec = req.role_yaml_parse({'role': role})
        if role_spec:
            role_info.update(role_spec)

        data = self._display_role_info(role_info)
        # FIXME: This is broken in both 1.9 and 2.0 as
        # _display_role_info() always returns something
        if not data:
            data = u"\n- the role %s was not found" % role

    self.pager(data)
def execute_install(self):
    """
    uses the args list of roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
    """
    role_file = self.options.role_file

    if len(self.args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")

    no_deps = self.options.no_deps
    force = self.options.force

    # Worklist of roles still to install; dependencies get appended below.
    roles_left = []
    if role_file:
        try:
            f = open(role_file, 'r')
            if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                try:
                    required_roles = yaml.safe_load(f.read())
                except Exception as e:
                    raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)

                if required_roles is None:
                    raise AnsibleError("No roles found in file: %s" % role_file)

                for role in required_roles:
                    if "include" not in role:
                        role = RoleRequirement.role_yaml_parse(role)
                        display.vvv("found role %s in yaml file" % str(role))
                        if "name" not in role and "scm" not in role:
                            raise AnsibleError("Must specify name or src for role")
                        roles_left.append(GalaxyRole(self.galaxy, **role))
                    else:
                        # 'include' entries pull in another requirements file.
                        with open(role["include"]) as f_include:
                            try:
                                roles_left += [
                                    GalaxyRole(self.galaxy, **r) for r in
                                    (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
                                ]
                            except Exception as e:
                                msg = "Unable to load data from the include requirements file: %s %s"
                                raise AnsibleError(msg % (role_file, e))
            else:
                # Legacy plain-text format: one role per line.
                display.deprecated("going forward only the yaml format will be supported", version="2.6")
                # roles listed in a file, one per line
                for rline in f.readlines():
                    if rline.startswith("#") or rline.strip() == '':
                        continue
                    display.debug('found role %s in text file' % str(rline))
                    role = RoleRequirement.role_yaml_parse(rline.strip())
                    roles_left.append(GalaxyRole(self.galaxy, **role))
            f.close()
        except (IOError, OSError) as e:
            raise AnsibleError('Unable to open %s: %s' % (role_file, str(e)))
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in self.args:
            role = RoleRequirement.role_yaml_parse(rname.strip())
            roles_left.append(GalaxyRole(self.galaxy, **role))

    for role in roles_left:
        # only process roles in roles files when names matches if given
        if role_file and self.args and role.name not in self.args:
            display.vvv('Skipping role %s' % role.name)
            continue

        display.vvv('Processing role %s ' % role.name)

        # query the galaxy API for the role data
        if role.install_info is not None:
            if role.install_info['version'] != role.version or force:
                if force:
                    display.display('- changing role %s from %s to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    role.remove()
                else:
                    display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    continue
            else:
                if not force:
                    display.display('- %s is already installed, skipping.' % str(role))
                    continue

        try:
            installed = role.install()
        except AnsibleError as e:
            display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
            self.exit_without_ignore()
            continue

        # install dependencies, if we want them
        if not no_deps and installed:
            if not role.metadata:
                display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
            else:
                role_dependencies = role.metadata.get('dependencies') or []
                for dep in role_dependencies:
                    display.debug('Installing dep %s' % dep)
                    dep_req = RoleRequirement()
                    dep_info = dep_req.role_yaml_parse(dep)
                    dep_role = GalaxyRole(self.galaxy, **dep_info)
                    if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                        # we know we can skip this, as it's not going to
                        # be found on galaxy.ansible.com
                        continue
                    if dep_role.install_info is None:
                        # Queue the dependency onto the same worklist being iterated.
                        if dep_role not in roles_left:
                            display.display('- adding dependency: %s' % str(dep_role))
                            roles_left.append(dep_role)
                        else:
                            display.display('- dependency %s already pending installation.' % dep_role.name)
                    else:
                        if dep_role.install_info['version'] != dep_role.version:
                            display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
                                            (str(dep_role), role.name, dep_role.install_info['version']))
                        else:
                            display.display('- dependency %s is already installed, skipping.' % dep_role.name)

        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()

    return 0
def execute_remove(self):
    """
    removes the list of roles passed as arguments from the local system.
    """
    if not self.args:
        raise AnsibleOptionsError('- you must specify at least one role to remove.')

    for role_name in self.args:
        role = GalaxyRole(self.galaxy, role_name)
        try:
            # remove() reports whether anything was actually deleted.
            removed = role.remove()
            message = ('- successfully removed %s' % role_name) if removed \
                else ('- %s is not installed, skipping.' % role_name)
            display.display(message)
        except Exception as e:
            raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))

    return 0
def execute_list(self):
    """
    lists the roles installed on the local system or matches a single role passed as an argument.
    """
    if len(self.args) > 1:
        raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")

    def role_version(gr):
        # Installed version string, or a placeholder when unknown/missing.
        info = gr.install_info
        version = info.get("version", None) if info else None
        return version or "(unknown version)"

    if len(self.args) == 1:
        # show only the requested role, if it exists
        name = self.args.pop()
        gr = GalaxyRole(self.galaxy, name)
        if gr.metadata:
            display.display("- %s, %s" % (name, role_version(gr)))
        else:
            display.display("- the role %s was not found" % name)
    else:
        # show all valid roles found under each configured roles path
        path_found = False
        for path in self.options.roles_path:
            role_path = os.path.expanduser(path)
            if not os.path.exists(role_path):
                display.warning("- the configured path %s does not exist." % role_path)
                continue
            elif not os.path.isdir(role_path):
                display.warning("- the configured path %s, exists, but it is not a directory." % role_path)
                continue
            path_found = True
            for path_file in os.listdir(role_path):
                gr = GalaxyRole(self.galaxy, path_file)
                if gr.metadata:
                    display.display("- %s, %s" % (path_file, role_version(gr)))
        if not path_found:
            raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
    return 0
def execute_search(self):
    ''' searches for roles on the Ansible Galaxy server'''
    page_size = 1000
    search = None

    if self.args:
        # Join positional terms in the order given; draining self.args is
        # part of the original method's observable behavior.
        terms = list(self.args)
        del self.args[:]
        search = '+'.join(terms)

    if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
        raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")

    response = self.api.search_roles(search, platforms=self.options.platforms,
                                     tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)

    if response['count'] == 0:
        display.display("No roles match your search.", color=C.COLOR_ERROR)
        return True

    data = [u'']
    if response['count'] > page_size:
        data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
    else:
        data.append(u"Found %d roles matching your search:" % response['count'])

    # Column width: widest "username.name" among the results.
    name_len = max(len(role['username'] + '.' + role['name']) for role in response['results'])
    format_str = u" %%-%ds %%s" % name_len
    data.append(u'')
    data.append(format_str % (u"Name", u"Description"))
    data.append(format_str % (u"----", u"-----------"))
    data.extend(format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])
                for role in response['results'])

    self.pager(u'\n'.join(data))
    return True
def execute_login(self):
    """
    verify user's identify via Github and retrieve an auth token from Ansible Galaxy.
    """
    # Authenticate with github and retrieve a token
    if self.options.token is None:
        if C.GALAXY_TOKEN:
            github_token = C.GALAXY_TOKEN
        else:
            # NOTE: 'login' is only bound on this branch; the cleanup below
            # is guarded by the same pair of conditions.
            login = GalaxyLogin(self.galaxy)
            github_token = login.create_github_token()
    else:
        github_token = self.options.token

    galaxy_response = self.api.authenticate(github_token)

    if self.options.token is None and C.GALAXY_TOKEN is None:
        # Remove the temporary github token we created above
        login.remove_github_token()

    # Store the Galaxy token
    token = GalaxyToken()
    token.set(galaxy_response['token'])

    display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
    return 0
def execute_import(self):
    """ used to import a role into Ansible Galaxy """

    # Map Galaxy task-message severities onto display colours.
    colors = {
        'INFO': 'normal',
        'WARNING': C.COLOR_WARN,
        'ERROR': C.COLOR_ERROR,
        'SUCCESS': C.COLOR_OK,
        'FAILED': C.COLOR_ERROR,
    }

    if len(self.args) < 2:
        raise AnsibleError("Expected a github_username and github_repository. Use --help.")

    # Positional args are popped from the end: repo was supplied last, user first.
    github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
    github_user = to_text(self.args.pop(), errors='surrogate_or_strict')

    if self.options.check_status:
        task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
    else:
        # Submit an import request
        task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)

        if len(task) > 1:
            # found multiple roles associated with github_user/github_repo
            display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                            color='yellow')
            display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
            for t in task:
                display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
            display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                            color=C.COLOR_CHANGED)
            return 0
        # found a single role as expected
        display.display("Successfully submitted import request %d" % task[0]['id'])
        if not self.options.wait:
            display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
            display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

    if self.options.check_status or self.options.wait:
        # Get the status of the import
        msg_list = []
        finished = False
        while not finished:
            task = self.api.get_import_task(task_id=task[0]['id'])
            # Only print messages we have not shown already.
            for msg in task[0]['summary_fields']['task_messages']:
                if msg['id'] not in msg_list:
                    display.display(msg['message_text'], color=colors[msg['message_type']])
                    msg_list.append(msg['id'])
            if task[0]['state'] in ['SUCCESS', 'FAILED']:
                finished = True
            else:
                # Poll every 10 seconds until the server reports a terminal state.
                time.sleep(10)

    return 0
def execute_setup(self):
    """ Setup an integration from Github or Travis for Ansible Galaxy roles"""

    if self.options.setup_list:
        # List existing integration secrets
        secrets = self.api.list_secrets()
        if len(secrets) == 0:
            # None found
            display.display("No integrations found.")
            return 0
        display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
        display.display("---------- ---------- ----------", color=C.COLOR_OK)
        for secret in secrets:
            display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
                                                   secret['github_repo']), color=C.COLOR_OK)
        return 0

    if self.options.remove_id:
        # Remove a secret
        # NOTE(review): message below reads "not longer work" — should be "no longer work".
        self.api.remove_secret(self.options.remove_id)
        display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
        return 0

    if len(self.args) < 4:
        raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")

    # Positional args are popped in reverse order of how they were supplied.
    secret = self.args.pop()
    github_repo = self.args.pop()
    github_user = self.args.pop()
    source = self.args.pop()

    resp = self.api.add_secret(source, github_user, github_repo, secret)
    display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
    return 0
def execute_delete(self):
    """ Delete a role from Ansible Galaxy. """

    if len(self.args) < 2:
        raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")

    # Popped in reverse order of how they were supplied on the command line.
    github_repo = self.args.pop()
    github_user = self.args.pop()
    resp = self.api.delete_role(github_user, github_repo)

    if len(resp['deleted_roles']) > 1:
        display.display("Deleted the following roles:")
        display.display("ID User Name")
        display.display("------ --------------- ----------")
        for role in resp['deleted_roles']:
            # NOTE(review): roles are accessed by attribute (role.id) while the
            # response itself is accessed by key — confirm deleted_roles items
            # really are objects, not dicts.
            display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))

    display.display(resp['status'])
    return True
| konstruktoid/ansible-upstream | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 32,475 | [
"Galaxy"
] | 21eec9714de6e9ef1eafcd12512aade116b27fc157bab944cbb128393df2e82b |
#!/usr/bin/env python
# Summarize alignments from SAM/BAM files: per-alignment reference span
# (aln.tlen), read span from XS/XE tags, subread length parsed from the
# query name, and per-ZMW summed ("processed") and maximum ("insert")
# aligned lengths.  Python 2 syntax (bare print statements).
import argparse
import pysam
import numpy as np
import sys
import pdb
import matplotlib.pyplot as plt

ap = argparse.ArgumentParser(description="Summarize various aspects of alignments in SAM format.")
ap.add_argument("sam", help="Input.", nargs="+")
# NOTE(review): default is an int but no type=int is given, so a value passed
# on the command line arrives as a str and `aln.flag & args.F` would raise.
ap.add_argument("-F", help="Exclude these flages.", default=256)
# NOTE(review): -f, --plotInsert and --plotReadlen are parsed but never used.
ap.add_argument("-f", help="Include these flags.", default=0xFFFF)
ap.add_argument("-v", help="Verbose output.", action='store_true', default=False)
ap.add_argument("--plotInsert", help="Plot insert size distribution to this file", default=None)
ap.add_argument("--plotReadlen", help="Plot read length distribution to this file", default=None)
args = ap.parse_args()

nReads = 0
lengths = []     # reference alignment spans (aln.tlen)
rLengths = []    # read spans from XE-XS tags
subLengths = []  # subread lengths parsed from the query name
alnIndex = 0;
prl = {}  # ZMW id -> summed aligned length ("processed" length)
fl = {}   # ZMW id -> max aligned length (proxy for insert length)
for samFile in args.sam:
    # Choose text vs binary mode from the file name.
    if (samFile.find('.sam') >= 0):
        mode='r'
    else:
        # bam file
        mode='rb'
    f = pysam.Samfile(samFile,mode)
    sys.stderr.write(samFile + "\n")
    for aln in f.fetch():
        # Skip alignments carrying any excluded flag bits.
        if (aln.flag & args.F != 0):
            continue
        xe = 0
        xs = 0
        # Query names look like movie/zmw/start_end; the first two fields
        # identify the ZMW, the third encodes the subread coordinates.
        zmwId = '/'.join(aln.qname.split('/')[0:2])
        tmp = aln.qname.split('/')[2].split('_')
        subreadStart = int(tmp[0])
        subreadEnd = int(tmp[1])
        subLengths.append(subreadEnd - subreadStart)
        for t in aln.tags:
            if (t[0]=='XS'):
                xs = t[1]
            if (t[0] =='XE'):
                xe = t[1]
        if (xe != 0):
            rLengths.append(xe - xs)
        if (args.v == True):
            print "xlen: " +str(xe - xs)
            print "len: " + str(aln.tlen)
            print "flag: " + str(aln.flag)
            print aln
        lengths.append(aln.tlen)
        if (zmwId not in prl):
            prl[zmwId] = 0
        prl[zmwId] += aln.tlen
        if (zmwId not in fl):
            fl[zmwId] = 0
        if (fl[zmwId] < aln.tlen):
            fl[zmwId] = aln.tlen
        # Progress indicator every 10000 alignments.
        if (alnIndex % 10000 == 0 and alnIndex > 0):
            sys.stderr.write("processed {}\n".format(alnIndex))
        alnIndex += 1
    f.close()

# Sort each distribution so medians / 95th percentiles can be indexed directly.
polyLengths= np.asarray(prl.values())
insertLengths = np.asarray(fl.values())
polyLengths.sort()
insertLengths.sort()
npLengths = np.asarray(lengths)
nprLengths = np.asarray(rLengths)
npLengths.sort()
nprLengths.sort()
npsLengths = np.asarray(subLengths)
npsLengths.sort()

print "#Files {}".format(len(args.sam))
print "#Alignmetns {}".format(len(npLengths))
print "#Read Alignments {}".format(len(nprLengths))
print "#bases {}".format(sum(npLengths))
titles = ["RefAlignLength", "ReadAlignLength", "SubreadLength", "Proccessed", "Insert"]
counts = [len(npLengths), len(nprLengths), len(npsLengths), len(polyLengths), len(insertLengths)]
means = [npLengths.mean(), nprLengths.mean(), npsLengths.mean(), polyLengths.mean(), insertLengths.mean()]
# Medians use Python 2 integer division for the midpoint index.
medians = [npLengths[len(npLengths)/2], nprLengths[len(nprLengths)/2], npsLengths[len(npsLengths)/2], polyLengths[len(polyLengths)/2], insertLengths[len(insertLengths)/2]]
tops = [npLengths[int(len(npLengths)*0.95)], nprLengths[int(len(nprLengths)*0.95)], npsLengths[int(len(npsLengths)*0.95)], polyLengths[int(len(polyLengths)*.95)], insertLengths[int(len(insertLengths)*0.95)]]
total = [npLengths.sum(), nprLengths.sum(), npsLengths.sum(), polyLengths.sum(), insertLengths.sum()]
print "Type \t" + "\t".join(titles) + "\n"
print "N: \t" + "\t".join([str(m) for m in counts])
print "Mean: \t " + "\t".join(["{:.2f}".format(m) for m in means])
print "Median: \t" + "\t".join([str(m) for m in medians])
print "95th: \t" + "\t".join([str(i) for i in tops])
print "Total: \t" + "\t".join([str(s) for s in total])
"pysam"
] | 68fbf08bc800fc2aad58dcd61728a073c8b54d4d1dd8221ee824fd9f36754751 |
from __future__ import division, print_function, absolute_import
import sys
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``sids`` package."""
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('sids', parent_package, top_path)
    # Register every sub-package shipped with sids.
    for subpackage in ('helper', 'es', 'k', 'siesta'):
        cfg.add_subpackage(subpackage)
    return cfg
if __name__ == '__main__':
    # Allow building this sub-package standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| zerothi/siesta-es | sids/setup.py | Python | gpl-3.0 | 547 | [
"SIESTA"
] | cf2e92fe1c8b79fb68dee0884fcfbb181f630e8978165531cbf4f9e2bbb5a7ce |
#!/usr/bin/env python3
"""
Template by pypi-mobans
"""
import os
import sys
import codecs
import locale
import platform
from shutil import rmtree
from setuptools import Command, setup, find_packages
from setuptools import __version__ as setuptools_version
from pkg_resources import parse_version
import pkg_resources
try:
import _markerlib.markers
except ImportError:
_markerlib = None
# Interpreter-version switches used by the mbcs/locale workarounds below.
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
PY33 = sys.version_info < (3, 4)

# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
# This work around is only if a project supports Python < 3.4

# Work around for locale not being set
try:
    lc = locale.getlocale()
    pf = platform.system()
    if pf != "Windows" and lc == (None, None):
        locale.setlocale(locale.LC_ALL, "C.UTF-8")
except (ValueError, UnicodeError, locale.Error):
    locale.setlocale(locale.LC_ALL, "en_US.UTF-8")

# Package metadata consumed by the setup() call at the bottom of the file.
NAME = "moban"
AUTHOR = "chfw"
VERSION = "0.8.2"
EMAIL = "wangc_2011@hotmail.com"
LICENSE = "MIT"
ENTRY_POINTS = {
    "console_scripts": [
        "moban = moban.main:main"
    ],
}
DESCRIPTION = (
    "General purpose static text generator"
)
URL = "https://github.com/moremoban/moban"
DOWNLOAD_URL = "%s/archive/0.8.2.tar.gz" % URL
FILES = ["README.rst", "CONTRIBUTORS.rst", "CHANGELOG.rst"]
KEYWORDS = [
    "python",
    "jinja2",
    "moban",
]
CLASSIFIERS = [
    "Topic :: Software Development :: Libraries",
    "Programming Language :: Python",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
]
PYTHON_REQUIRES = ">=3.6"
INSTALL_REQUIRES = [
    "jinja2>=2.7.1",
    "lml>=0.0.9",
    "appdirs>=1.4.3",
    "crayons>= 0.1.0",
    "fs>=2.4.11",
    "jinja2-fsloader>=0.2.0",
    "moban-jinja2-github",
]
SETUP_COMMANDS = {}
PACKAGES = find_packages(exclude=["ez_setup", "examples", "tests", "tests.*"])
# Per-interpreter extras for the YAML backend.
EXTRAS_REQUIRE = {
    ":python_version == '3.7'": ["ruamel.yaml>=0.15.42"],
    ":python_version != '3.4' and python_version < '3.7'": ["ruamel.yaml>=0.15.5"],
    ":python_version == '3.8'": ["ruamel.yaml>=0.15.98"],
}
# You do not need to read beyond this line

# Shell commands and messages used by the custom `publish` command below.
PUBLISH_COMMAND = "{0} setup.py sdist bdist_wheel upload -r pypi".format(sys.executable)
GS_COMMAND = ("gs moban v0.8.2 " +
              "Find 0.8.2 in changelog for more details")
NO_GS_MESSAGE = ("Automatic github release is disabled. " +
                 "Please install gease to enable it.")
UPLOAD_FAILED_MSG = (
    'Upload failed. please run "%s" yourself.' % PUBLISH_COMMAND)
HERE = os.path.abspath(os.path.dirname(__file__))
class PublishCommand(Command):
    """Custom ``setup.py publish``: build the package, release on github/pypi."""

    description = "Build and publish the package on github and pypi"
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status("Removing previous builds...")
            # Single try block: the first missing directory aborts the cleanup,
            # matching the best-effort behaviour of the original sequence.
            for stale in ("dist", "build", "moban.egg-info"):
                rmtree(os.path.join(HERE, stale))
        except OSError:
            pass

        self.status("Building Source and Wheel (universal) distribution...")
        run_status = True
        if has_gease():
            run_status = os.system(GS_COMMAND) == 0
        else:
            self.status(NO_GS_MESSAGE)
        # Only attempt the pypi upload when the github release succeeded.
        if run_status and os.system(PUBLISH_COMMAND) != 0:
            self.status(UPLOAD_FAILED_MSG)

        sys.exit()


SETUP_COMMANDS.update({
    "publish": PublishCommand
})
def has_gease():
    """Return True when the github release helper ``gease`` is importable.

    Visit http://github.com/moremoban/gease for more info.
    """
    try:
        __import__("gease")
    except ImportError:
        return False
    return True
def read_files(*files):
    """Concatenate the given files (via :func:`read`), each newline-terminated."""
    return "".join(read(name) + "\n" for name in files)
def read(afile):
    """Return *afile* (relative to HERE) as UTF-8 text with test code stripped."""
    path = os.path.join(HERE, afile)
    with codecs.open(path, "r", "utf-8") as handle:
        return "".join(filter_out_test_code(handle))
def filter_out_test_code(file_handle):
    """Yield lines from *file_handle*, dropping ``.. testcode:`` blocks and
    any line containing a Sphinx substitution (``|version|`` / ``|today|``).

    A test-code block ends at the first non-blank, non-indented line, which
    is itself yielded.
    """
    in_test_block = False
    for raw in file_handle.readlines():
        if raw.startswith(".. testcode:"):
            in_test_block = True
        elif in_test_block:
            # Indented and blank lines still belong to the test-code block.
            if not raw.startswith(" ") and raw.strip():
                in_test_block = False
                yield raw
        elif not any(kw in raw for kw in ("|version|", "|today|")):
            yield raw
# _markerlib.default_environment() obtains its data from _VARS
# and wraps it in another dict, but _markerlib_evaluate writes
# to the dict while it is iterating the keys, causing an error
# on Python 3 only.
# Replace _markerlib.default_environment to return a custom dict
# that has all the necessary markers, and ignores any writes.
class Python3MarkerDict(dict):
    """A dict that silently ignores writes; ``pop`` reads without removing."""

    def __setitem__(self, key, value):
        # Deliberately drop the assignment so marker evaluation cannot
        # mutate the environment.
        return None

    def pop(self, i=-1):
        # Non-destructive: return the stored value, keep the entry.
        return self[i]
if _markerlib and sys.version_info[0] == 3:
    env = _markerlib.markers._VARS
    # Mirror dotted marker names (e.g. "python.version") under underscore
    # aliases so both spellings resolve during marker evaluation.
    for key in list(env.keys()):
        new_key = key.replace(".", "_")
        if new_key != key:
            env[new_key] = env[key]
    # Freeze the environment: writes during evaluation are silently ignored.
    _markerlib.markers._VARS = Python3MarkerDict(env)

    def default_environment():
        # Return the patched, write-proof marker environment.
        return _markerlib.markers._VARS

    _markerlib.default_environment = default_environment
# Avoid the very buggy pkg_resources.parser, which does not consistently
# recognise the markers needed by this setup.py
# See https://github.com/pypa/packaging/issues/72 for details
# Change this to setuptools 20.10.0 to support all markers.
if pkg_resources:
    if parse_version(setuptools_version) < parse_version("18.5"):
        MarkerEvaluation = pkg_resources.MarkerEvaluation
        # Remove the broken parser so the _markerlib-based evaluator is used
        # for every marker evaluation path.
        del pkg_resources.parser
        pkg_resources.evaluate_marker = MarkerEvaluation._markerlib_evaluate
        MarkerEvaluation.evaluate_marker = MarkerEvaluation._markerlib_evaluate
if __name__ == "__main__":
    # Wire the metadata constants, custom publish command and long
    # description (built from the README/changelog files) into setuptools.
    setup(
        test_suite="tests",
        name=NAME,
        author=AUTHOR,
        version=VERSION,
        author_email=EMAIL,
        description=DESCRIPTION,
        url=URL,
        download_url=DOWNLOAD_URL,
        long_description=read_files(*FILES),
        license=LICENSE,
        keywords=KEYWORDS,
        python_requires=PYTHON_REQUIRES,
        extras_require=EXTRAS_REQUIRE,
        tests_require=["nose"],
        install_requires=INSTALL_REQUIRES,
        packages=PACKAGES,
        include_package_data=True,
        zip_safe=False,
        entry_points=ENTRY_POINTS,
        classifiers=CLASSIFIERS,
        cmdclass=SETUP_COMMANDS
    )
| chfw/moban | setup.py | Python | mit | 7,396 | [
"VisIt"
] | 583557caeeac657e06a9be61b39e9e94fe346647ec4a6658ff4ce5453f5902a8 |
import numpy as np
from ase import Atoms
from ase.visualize.vtk.sources import vtkAtomSource, vtkForceSource, \
vtkVelocitySource
from ase.visualize.vtk.cell import vtkUnitCellModule, vtkAxesModule
from ase.visualize.vtk.grid import vtkAtomicPositions
from ase.visualize.vtk.module import vtkModuleAnchor, vtkGlyphModule
# -------------------------------------------------------------------
class vtkAtoms(vtkModuleAnchor, vtkAtomicPositions):
    """Provides fundamental representation for ``Atoms``-specific data in VTK.

    The ``vtkAtoms`` class plots atoms during simulations, extracting the
    relevant information from the list of atoms. It is created using
    the list of atoms as an argument to the constructor. Then one or more
    visualization modules can be attached using add_module(name, module).

    Example:

    >>> va = vtkAtoms(atoms)
    >>> va.add_forces()
    >>> va.add_axes()
    >>> XXX va.add_to_renderer(vtk_ren)

    """

    def __init__(self, atoms, scale=1):
        """Construct a fundamental VTK-representation of atoms.

        atoms: Atoms object or list of Atoms objects
            The atoms to be plotted.

        scale = 1: float or int
            Relative scaling of all Atoms-specific visualization.

        """
        assert isinstance(atoms, Atoms)
        self.atoms = atoms
        self.scale = scale

        vtkModuleAnchor.__init__(self)
        vtkAtomicPositions.__init__(self, self.atoms.get_positions(),
                                    vtkUnitCellModule(self.atoms))

        # Force/velocity glyph modules are created lazily by add_forces /
        # add_velocities (mutually exclusive, see those methods).
        self.force = None
        self.velocity = None

        # One glyph module per chemical species present in the atoms.
        symbols = self.atoms.get_chemical_symbols()
        for symbol in np.unique(symbols):
            # Construct mask for all atoms with this symbol
            mask = np.array(symbols) == symbol
            if mask.all():
                subset = None
            else:
                subset = np.argwhere(mask).ravel()
            # Get relevant VTK unstructured grid
            vtk_ugd = self.get_unstructured_grid(subset)
            # Create atomic glyph source for this symbol
            glyph_source = vtkAtomSource(symbol, self.scale)
            # Create glyph module and anchor it
            self.add_module(symbol, vtkGlyphModule(vtk_ugd, glyph_source))

    def has_forces(self):
        # True once add_forces() has attached a force glyph module.
        return self.force is not None

    def has_velocities(self):
        # True once add_velocities() has attached a velocity glyph module.
        return self.velocity is not None

    def add_cell(self):
        """Add a box outline of the cell using atoms.get_cell(). The existing
        ``vtkUnitCellModule`` is added to the module anchor under ``cell``."""

        self.add_module('cell', self.cell)

    def add_axes(self):
        """Add an orientation indicator for the cartesian axes. An appropriate
        ``vtkAxesModule`` is added to the module anchor under ``axes``."""

        self.add_module('axes', vtkAxesModule(self.cell))

    def add_forces(self):
        """Add force vectors for the atoms using atoms.get_forces(). A
        ``vtkGlyphModule`` is added to the module anchor under ``force``."""

        if self.has_forces():
            raise RuntimeError('Forces already present.')
        elif self.has_velocities():
            raise NotImplementedError('Can\'t add forces due to velocities.')

        # Add forces to VTK unstructured grid as vector data
        vtk_fda = self.add_vector_property(self.atoms.get_forces(), 'force')

        # Calculate max norm of the forces
        fmax = vtk_fda.GetMaxNorm()

        # Get relevant VTK unstructured grid
        vtk_ugd = self.get_unstructured_grid()

        self.force = vtkGlyphModule(vtk_ugd, vtkForceSource(fmax, self.scale),
                                    scalemode='vector', colormode=None)
        self.add_module('force', self.force)

    def add_velocities(self):
        """Add velocity vectors for the atoms using atoms.get_velocities(). A
        ``vtkGlyphModule`` is added to the module anchor under ``velocity``."""

        if self.has_velocities():
            raise RuntimeError('Velocities already present.')
        elif self.has_forces():
            raise NotImplementedError('Can\'t add velocities due to forces.')

        # Add velocities to VTK unstructured grid as vector data
        vtk_vda = self.add_vector_property(self.atoms.get_velocities(), 'velocity')

        # Calculate max norm of the velocities
        vmax = vtk_vda.GetMaxNorm()

        # Get relevant VTK unstructured grid
        vtk_ugd = self.get_unstructured_grid()

        self.velocity = vtkGlyphModule(vtk_ugd, vtkVelocitySource(vmax, self.scale),
                                       scalemode='vector', colormode=None)
        self.add_module('velocity', self.velocity)
| grhawk/ASE | tools/ase/visualize/vtk/atoms.py | Python | gpl-2.0 | 4,730 | [
"ASE",
"VTK"
] | 2b492a0ca0470e4839cb423a2afb3443f419a0ff57c23181c02f71291a973031 |
####################################################################################################
# #
# RUN SCRIPT to perform an MD simulation in Sire with OpenMM #
# #
# author: Julien Michel #
# author: Gaetano Calabro #
# author: Antonia Mey <antonia.mey@ed.ac.uk> #
# #
####################################################################################################
####################################################################################################
#
# IMPORTS
#
####################################################################################################
import os
import re
import sys
from Sire.Base import *
# Make sure that the OPENMM_PLUGIN_DIR enviroment variable is set correctly if unset.
try:
# The user has already set the plugin location.
os.environ["OPENMM_PLUGIN_DIR"]
except KeyError:
# Set to the default location of the bundled OpenMM package.
os.environ["OPENMM_PLUGIN_DIR"] = getLibDir() + "/plugins"
from Sire.IO import *
from Sire.Mol import *
from Sire.CAS import *
from Sire.System import *
from Sire.Move import *
from Sire.MM import *
from Sire.FF import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Maths import *
from Sire.Qt import *
from Sire.ID import *
from Sire.Config import *
from Sire.Analysis import *
from Sire.Tools.DCDFile import *
from Sire.Tools import Parameter, resolveParameters
import Sire.Stream
import time
import numpy as np
MIN_MASSES = {'C': 5.96, 'N': 7.96}
HMR_MIN = 1.0
HMR_MAX = 4.0
####################################################################################################
#
# Config file parameters
#
####################################################################################################
gpu = Parameter("gpu", 0, """The device ID of the GPU on which to run the simulation.""")
rf_dielectric = Parameter("reaction field dielectric", 78.3,
"""Dielectric constant to use if the reaction field cutoff method is used.""")
temperature = Parameter("temperature", 25 * celsius, """Simulation temperature""")
pressure = Parameter("pressure", 1 * atm, """Simulation pressure""")
topfile = Parameter("topfile", "SYSTEM.top",
"""File name of the topology file containing the system to be simulated.""")
crdfile = Parameter("crdfile", "SYSTEM.crd",
"""File name of the coordinate file containing the coordinates of the
system to be simulated.""")
s3file = Parameter("s3file", "SYSTEM.s3",
"""Filename for the system state file. The system state after topology and and coordinates
were loaded are saved in this file.""")
restart_file = Parameter("restart file", "sim_restart.s3",
"""Filename of the restart file to use to save progress during the simulation.""")
dcd_root = Parameter("dcd root", "traj", """Root of the filename of the output DCD trajectory files.""")
nmoves = Parameter("nmoves", 1000, """Number of Molecular Dynamics moves to be performed during the simulation.""")
debug_seed = Parameter("debug seed", 0, """Debugging seed number seed. Set this if you
want to reproduce a single cycle. Don't use this seed for production simulations
since the same seed will be used for all cycles! A value of zero means that a unique
seed will be generated for each cycle.""")
ncycles = Parameter("ncycles", 1,
"""The number of MD cycles. The total elapsed time will be nmoves*ncycles*timestep""")
maxcycles = Parameter("maxcycles",99999,
"""The maximum number of MD cycles to carry out. Useful to restart simulations from a checkpoint""")
ncycles_per_snap = Parameter("ncycles_per_snap", 1, """Number of cycles between saving snapshots""")
save_coords = Parameter("save coordinates", True, """Whether or not to save coordinates.""")
buffered_coords_freq = Parameter("buffered coordinates frequency", 1,
"""The number of time steps between saving of coordinates during
a cycle of MD. 0 disables buffering.""")
minimal_coordinate_saving = Parameter("minimal coordinate saving", False, "Reduce the number of coordiantes writing for states"
"with lambda in ]0,1[")
time_to_skip = Parameter("time to skip", 0 * picosecond, """Time to skip in picoseconds""")
minimise = Parameter("minimise", False, """Whether or not to perform minimization before the simulation.""")
minimise_tol = Parameter("minimise tolerance", 1, """Tolerance used to know when minimization is complete.""")
minimise_max_iter = Parameter("minimise maximum iterations", 1000, """Maximum number of iterations for minimization.""")
equilibrate = Parameter("equilibrate", False , """Whether or not to perform equilibration before dynamics.""")
equil_iterations = Parameter("equilibration iterations", 2000, """Number of equilibration steps to perform.""")
equil_timestep = Parameter("equilibration timestep", 0.5 * femtosecond, """Timestep to use during equilibration.""")
combining_rules = Parameter("combining rules", "arithmetic",
"""Combining rules to use for the non-bonded interactions.""")
timestep = Parameter("timestep", 2 * femtosecond, """Timestep for the dynamics simulation.""")
platform = Parameter("platform", "CUDA", """Which OpenMM platform should be used to perform the dynamics.""")
precision = Parameter("precision", "mixed", """The floating point precision model to use during dynamics.""")
constraint = Parameter("constraint", "hbonds", """The constraint model to use during dynamics.""")
cutoff_type = Parameter("cutoff type", "cutoffperiodic", """The cutoff method to use during the simulation.""")
cutoff_dist = Parameter("cutoff distance", 10 * angstrom,
"""The cutoff distance to use for the non-bonded interactions.""")
integrator_type = Parameter("integrator", "leapfrogverlet", """The integrator to use for dynamics.""")
inverse_friction = Parameter("inverse friction", 0.1 * picosecond,
"""Inverse friction time for the Langevin thermostat.""")
andersen = Parameter("thermostat", True,
"""Whether or not to use the Andersen thermostat (needed for NVT or NPT simulation).""")
barostat = Parameter("barostat", True, """Whether or not to use a barostat (needed for NPT simulation).""")
andersen_frequency = Parameter("andersen frequency", 10.0, """Collision frequency in units of (1/ps)""")
barostat_frequency = Parameter("barostat frequency", 25,
"""Number of steps before attempting box changes if using the barostat.""")
lj_dispersion = Parameter("lj dispersion", False, """Whether or not to calculate and include the LJ dispersion term.""")
cmm_removal = Parameter("center of mass frequency", 10,
"""Frequency of which the system center of mass motion is removed.""")
center_solute = Parameter("center solute", False,
"""Whether or not to centre the centre of geometry of the solute in the box.""")
use_restraints = Parameter("use restraints", False, """Whether or not to use harmonic restaints on the solute atoms.""")
k_restraint = Parameter("restraint force constant", 100.0, """Force constant to use for the harmonic restaints.""")
heavy_mass_restraint = Parameter("heavy mass restraint", 1.10,
"""Only restrain solute atoms whose mass is greater than this value.""")
unrestrained_residues = Parameter("unrestrained residues", ["WAT", "HOH"],
"""Names of residues that are never restrained.""")
freeze_residues = Parameter("freeze residues", False, """Whether or not to freeze certain residues.""")
frozen_residues = Parameter("frozen residues", ["LGR", "SIT", "NEG", "POS"],
"""List of residues to freeze if 'freeze residues' is True.""")
use_distance_restraints = Parameter("use distance restraints", False,
"""Whether or not to use restraints distances between pairs of atoms.""")
distance_restraints_dict = Parameter("distance restraints dictionary", {},
"""Dictionary of pair of atoms whose distance is restrained, and restraint
parameters. Syntax is {(atom0,atom1):(reql, kl, Dl)} where atom0, atom1 are atomic
indices. reql the equilibrium distance. Kl the force constant of the restraint.
D the flat bottom radius. WARNING: PBC distance checks not implemented, avoid
restraining pair of atoms that may diffuse out of the box.""")
hydrogen_mass_repartitioning_factor = \
Parameter('hydrogen mass repartitioning factor', 1.0,
f'If larger than {HMR_MIN} (maximum is {HMR_MAX}), all hydrogen '
'atoms in the molecule will have their mass increased by this '
'factor. The atomic mass of the heavy atom bonded to the '
'hydrogen is decreased to keep the total mass constant '
'(except when this would lead to a heavy atom to be lighter '
'than a minimum mass).')
## Free energy specific keywords
morphfile = Parameter("morphfile", "MORPH.pert",
"""Name of the morph file containing the perturbation to apply to the system.""")
lambda_val = Parameter("lambda_val", 0.0,
"""Value of the lambda parameter at which to evaluate free energy gradients.""")
delta_lambda = Parameter("delta_lambda", 0.001,
"""Value of the lambda interval used to evaluate free energy gradients by finite difference.""")
lambda_array = Parameter("lambda array",[] ,
"""Array with all lambda values lambda_val needs to be part of the array. """)
shift_delta = Parameter("shift delta", 2.0,
"""Value of the Lennard-Jones soft-core parameter.""")
coulomb_power = Parameter("coulomb power", 0,
"""Value of the Coulombic soft-core parameter.""")
energy_frequency = Parameter("energy frequency", 1,
"""The number of time steps between evaluation of free energy gradients.""")
simfile = Parameter("outdata_file", "simfile.dat", """Filename that records all output needed for the free energy analysis""")
perturbed_resnum = Parameter("perturbed residue number",1,"""The residue number of the molecule to morph.""")
verbose = Parameter("verbose", False, """Print debug output""")
####################################################################################################
#
# Helper functions
#
####################################################################################################
def setupDCD(system):
    r"""
    Create a DCD trajectory writer with a fresh, uniquely numbered filename.

    Parameters:
    ----------
    system : sire system
        sire system to be saved
    Return:
    ------
    trajectory : trajectory
    """
    # Count existing .dcd files in the working directory so the new
    # trajectory gets the next index in the sequence.
    files = os.listdir(os.getcwd())
    dcds = []
    for f in files:
        if f.endswith(".dcd"):
            dcds.append(f)

    dcds.sort()

    index = len(dcds) + 1

    dcd_filename = dcd_root.val + "%0009d" % index + ".dcd"
    # An intermediate lambda (strictly between 0 and 1) is a soft-core state.
    softcore_almbda = True
    if lambda_val.val == 1.0 or lambda_val.val == 0.0:
        softcore_almbda = False
    if minimal_coordinate_saving.val and softcore_almbda:
        # Minimal saving for intermediate lambdas: one frame over the whole run.
        interval = ncycles.val*nmoves.val
        Trajectory = DCDFile(dcd_filename, system[MGName("all")], system.property("space"), timestep.val, interval)
    else:
        Trajectory = DCDFile(dcd_filename, system[MGName("all")], system.property("space"), timestep.val,
                             interval=buffered_coords_freq.val * ncycles_per_snap.val)

    return Trajectory
def writeSystemData(system, moves, Trajectory, block, softcore_lambda=False):
    # Write trajectory frames (buffered frames if buffering is enabled), the
    # latest PDB snapshot and the move statistics for MD cycle `block`.
    # For soft-core lambdas only the first and last cycles are written.
    if softcore_lambda:
        if block == ncycles.val or block == 1:
            Trajectory.writeModel(system[MGName("all")], system.property("space"))
    else:
        if block % ncycles_per_snap.val == 0:
            if buffered_coords_freq.val > 0:
                # Collect the buffered box dimensions stored as system
                # properties named "buffered_space*".
                dimensions = {}
                sysprops = system.propertyKeys()
                for prop in sysprops:
                    if prop.startswith("buffered_space"):
                        dimensions[str(prop)] = system.property(prop)
                Trajectory.writeBufferedModels(system[MGName("all")], dimensions)
            else:
                Trajectory.writeModel(system[MGName("all")], system.property("space"))

            # Write a PDB coordinate file each cycle.
            pdb = PDB2(system)
            pdb.writeToFile("latest.pdb")

    # Overwrite the running move statistics each call.
    moves_file = open("moves.dat", "w")
    print("%s" % moves, file=moves_file)
    moves_file.close()
def getSolute(system):
    """Find the solute molecule based on the perturbed residue number.

    Args:
        system (system): The Sire system

    Returns:
        molecule: Molecule matching perturbed residue number assumed to be solvent
    """

    # Search the system for a single molcule containing a residue
    # matching the perturbed_resnum.val.

    # Create the query string.
    query = f"mol with resnum {perturbed_resnum.val}"

    # Perform the search.
    search = system.search(query)

    # Make sure there is only one result.
    if len(search) != 1:
        msg = ("FATAL! Could not find a solute to perturb with residue "
               f"number {perturbed_resnum.val} in the input! Check the value of "
               "your config keyword 'perturbed residue number' The system should "
               "contain a single molecule with this residue number.")
        raise Exception(msg)

    # Return the matching molecule, i.e. the solute.
    return search[0]
def centerSolute(system, space):
    # Translate every molecule in the system so that the solute's centre of
    # geometry sits at the centre of the simulation box (or at the origin for
    # a non-periodic space).
    if space.isPeriodic():
        # Periodic box.
        try:
            box_center = space.dimensions() / 2
        # TriclincBox.
        # NOTE(review): bare except — relies on dimensions() failing for a
        # triclinic space; any other error is silently treated the same way.
        except:
            box_center = 0.5*(space.vector0() + space.vector1() + space.vector2())
    else:
        box_center = Vector(0.0, 0.0, 0.0)

    solute = getSolute(system)
    assert(solute.hasProperty('perturbations'))

    solute_cog = CenterOfGeometry(solute).point()

    delta = box_center - solute_cog

    # Apply the same rigid translation to all molecules to preserve
    # relative geometry.
    molNums = system.molNums()
    for molnum in molNums:
        mol = system.molecule(molnum)[0].molecule()
        molcoords = mol.property("coordinates")
        molcoords.translate(delta)
        mol = mol.edit().setProperty("coordinates", molcoords).commit()
        system.update(mol)

    return system
def createSystem(molecules):
    # Build a Sire System from loaded molecules, partitioning them into the
    # groups "molecules" (multi-atom species) and "ions" (single atoms),
    # with "all" containing both.
    #print("Applying flexibility and zmatrix templates...")
    print("Creating the system...")

    moleculeNumbers = molecules.molNums()
    moleculeList = []

    for moleculeNumber in moleculeNumbers:
        molecule = molecules.molecule(moleculeNumber)[0].molecule()
        moleculeList.append(molecule)

    molecules = MoleculeGroup("molecules")
    ions = MoleculeGroup("ions")

    for molecule in moleculeList:
        natoms = molecule.nAtoms()
        # A single-atom molecule is treated as an ion.
        if natoms == 1:
            ions.add(molecule)
        else:
            molecules.add(molecule)

    all = MoleculeGroup("all")
    all.add(molecules)
    all.add(ions)

    # Add these groups to the System
    system = System()
    system.add(all)
    system.add(molecules)
    system.add(ions)

    return system
def setupForcefields(system, space):
    """Create and attach all force fields for a standard (non-alchemical)
    simulation: inter/intra CLJ terms for molecules and ions, bonded terms
    and optional positional restraints. Also sets the global space,
    switching function and combining rules, defines the total energy
    component and adds a monitor averaging it.

    Parameters
    ----------
    system : Sire System
        System holding the 'all', 'molecules' and 'ions' groups.
    space : Sire Space
        Simulation space stored as the system's "space" property.

    Returns
    -------
    system : Sire System
    """
    print("Creating force fields... ")
    all = system[MGName("all")]
    molecules = system[MGName("molecules")]
    ions = system[MGName("ions")]
    # - first solvent-solvent coulomb/LJ (CLJ) energy
    internonbondedff = InterCLJFF("molecules:molecules")
    if (cutoff_type.val != "nocutoff"):
        internonbondedff.setUseReactionField(True)
        internonbondedff.setReactionFieldDielectric(rf_dielectric.val)
    internonbondedff.add(molecules)
    # Ion-ion CLJ energy.
    inter_ions_nonbondedff = InterCLJFF("ions:ions")
    if (cutoff_type.val != "nocutoff"):
        inter_ions_nonbondedff.setUseReactionField(True)
        inter_ions_nonbondedff.setReactionFieldDielectric(rf_dielectric.val)
    inter_ions_nonbondedff.add(ions)
    # Ion-molecule CLJ energy (group 0 = ions, group 1 = molecules).
    inter_ions_molecules_nonbondedff = InterGroupCLJFF("ions:molecules")
    if (cutoff_type.val != "nocutoff"):
        inter_ions_molecules_nonbondedff.setUseReactionField(True)
        inter_ions_molecules_nonbondedff.setReactionFieldDielectric(rf_dielectric.val)
    inter_ions_molecules_nonbondedff.add(ions, MGIdx(0))
    inter_ions_molecules_nonbondedff.add(molecules, MGIdx(1))
    # Now solute bond, angle, dihedral energy
    intrabondedff = InternalFF("molecules-intrabonded")
    intrabondedff.add(molecules)
    # Now solute intramolecular CLJ energy
    intranonbondedff = IntraCLJFF("molecules-intranonbonded")
    if (cutoff_type.val != "nocutoff"):
        intranonbondedff.setUseReactionField(True)
        intranonbondedff.setReactionFieldDielectric(rf_dielectric.val)
    intranonbondedff.add(molecules)
    # solute restraint energy
    #
    # We restrain atoms based on the contents of the property "restrainedatoms"
    #
    restraintff = RestraintFF("restraint")
    if use_restraints.val:
        molnums = molecules.molecules().molNums()
        for molnum in molnums:
            mol = molecules.molecule(molnum)[0].molecule()
            try:
                mol_restrained_atoms = propertyToAtomNumVectorList(mol.property("restrainedatoms"))
            except UserWarning as error:
                # Molecules without the property are simply not restrained;
                # any other Sire error is re-raised.
                error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
                if error_type == "SireBase::missing_property":
                    continue
                else:
                    raise error
            for restrained_line in mol_restrained_atoms:
                atnum = restrained_line[0]
                restraint_atom = mol.select(atnum)
                restraint_coords = restrained_line[1]
                # Force constant is stored as kcal/mol/A^2.
                restraint_k = restrained_line[2] * kcal_per_mol / (angstrom * angstrom)
                restraint = DistanceRestraint.harmonic(restraint_atom, restraint_coords, restraint_k)
                restraintff.add(restraint)
    # Here is the list of all forcefields
    forcefields = [internonbondedff, intrabondedff, intranonbondedff, inter_ions_nonbondedff,
                   inter_ions_molecules_nonbondedff, restraintff]
    for forcefield in forcefields:
        system.add(forcefield)
    # Global simulation properties shared by all force fields.
    system.setProperty("space", space)
    system.setProperty("switchingFunction", CHARMMSwitchingFunction(cutoff_dist.val))
    system.setProperty("combiningRules", VariantProperty(combining_rules.val))
    # Total energy is the sum of every force field's total component.
    total_nrg = internonbondedff.components().total() + \
        intranonbondedff.components().total() + intrabondedff.components().total() + \
        inter_ions_nonbondedff.components().total() + inter_ions_molecules_nonbondedff.components().total() + \
        restraintff.components().total()
    e_total = system.totalComponent()
    system.setComponent(e_total, total_nrg)
    # Add a monitor that calculates the average total energy and average energy
    # deltas - we will collect both a mean average and an zwanzig average
    system.add("total_energy", MonitorComponent(e_total, Average()))
    return system
def setupMoves(system, debug_seed, GPUS):
    """Create the MD move set for a standard simulation using the
    OpenMMMDIntegrator, configured from the global simulation parameters.

    Parameters
    ----------
    system : Sire System
        System whose 'all' group will be propagated.
    debug_seed : int
        Non-zero value fixes the RNG seed for reproducible runs.
    GPUS :
        OpenMM device index (converted to str before use).

    Returns
    -------
    moves : WeightedMoves
        Contains a single weighted MD move.
    """
    print("Setting up moves...")
    molecules = system[MGName("all")]
    Integrator_OpenMM = OpenMMMDIntegrator(molecules)
    Integrator_OpenMM.setPlatform(platform.val)
    Integrator_OpenMM.setConstraintType(constraint.val)
    Integrator_OpenMM.setCutoffType(cutoff_type.val)
    Integrator_OpenMM.setIntegrator(integrator_type.val)
    Integrator_OpenMM.setFriction(inverse_friction.val)  # Only meaningful for Langevin/Brownian integrators
    Integrator_OpenMM.setPrecision(precision.val)
    Integrator_OpenMM.setTimetoSkip(time_to_skip.val)
    Integrator_OpenMM.setDeviceIndex(str(GPUS))
    Integrator_OpenMM.setLJDispersion(lj_dispersion.val)
    if cutoff_type.val != "nocutoff":
        Integrator_OpenMM.setCutoffDistance(cutoff_dist.val)
    if cutoff_type.val == "cutoffperiodic":
        Integrator_OpenMM.setFieldDielectric(rf_dielectric.val)
    Integrator_OpenMM.setCMMremovalFrequency(cmm_removal.val)
    Integrator_OpenMM.setBufferFrequency(buffered_coords_freq.val)
    if use_restraints.val:
        Integrator_OpenMM.setRestraint(True)
    # Andersen thermostat settings (temperature control).
    if andersen.val:
        Integrator_OpenMM.setTemperature(temperature.val)
        Integrator_OpenMM.setAndersen(andersen.val)
        Integrator_OpenMM.setAndersenFrequency(andersen_frequency.val)
    # Monte Carlo barostat settings (pressure control).
    if barostat.val:
        Integrator_OpenMM.setPressure(pressure.val)
        Integrator_OpenMM.setMCBarostat(barostat.val)
        Integrator_OpenMM.setMCBarostatFrequency(barostat_frequency.val)
    #print Integrator_OpenMM.getDeviceIndex()
    Integrator_OpenMM.initialise()
    mdmove = MolecularDynamics(molecules, Integrator_OpenMM, timestep.val,
                               {"velocity generator": MaxwellBoltzmann(temperature.val)})
    print("Created a MD move that uses OpenMM for all molecules on %s " % GPUS)
    moves = WeightedMoves()
    moves.add(mdmove, 1)
    # Choose a random seed for Sire if a debugging seed hasn't been set.
    if debug_seed == 0:
        seed = RanGenerator().randInt(100000, 1000000)
    else:
        seed = debug_seed
        print("Using debugging seed number %d " % debug_seed)
    moves.setGenerator(RanGenerator(seed))
    return moves
def atomNumListToProperty(list):
    """Pack a list of AtomNum objects into a Properties object, keyed by
    the stringified position of each entry."""
    prop = Properties()
    for index, atomnum in enumerate(list):
        prop.setProperty(str(index), VariantProperty(atomnum.value()))
    return prop
def atomNumVectorListToProperty(list):
    """Serialise (AtomNum, Vector, k) restraint tuples into a Properties
    object, storing the atom number, x/y/z reference coordinates and the
    force constant per entry, plus the total count as 'nrestrainedatoms'."""
    prop = Properties()
    for index, (atomnum, coords, force_k) in enumerate(list):
        prop.setProperty("AtomNum(%d)" % index, VariantProperty(atomnum.value()))
        prop.setProperty("x(%d)" % index, VariantProperty(coords.x()))
        prop.setProperty("y(%d)" % index, VariantProperty(coords.y()))
        prop.setProperty("z(%d)" % index, VariantProperty(coords.z()))
        prop.setProperty("k(%d)" % index, VariantProperty(force_k.val))
    prop.setProperty("nrestrainedatoms", VariantProperty(len(list)))
    return prop
def linkbondVectorListToProperty(list):
    """Serialise (atom0, atom1, reql, kl, dl) link-bond restraint tuples
    into a Properties object, plus the number of links as 'nbondlinks'."""
    prop = Properties()
    for index, (atom0, atom1, reql, kl, dl) in enumerate(list):
        prop.setProperty("AtomNum0(%d)" % index, VariantProperty(atom0))
        prop.setProperty("AtomNum1(%d)" % index, VariantProperty(atom1))
        prop.setProperty("reql(%d)" % index, VariantProperty(reql))
        prop.setProperty("kl(%d)" % index, VariantProperty(kl))
        prop.setProperty("dl(%d)" % index, VariantProperty(dl))
    prop.setProperty("nbondlinks", VariantProperty(len(list)))
    return prop
def propertyToAtomNumList(prop):
    """Inverse of atomNumListToProperty: read stringified indices 0, 1, ...
    out of *prop* until a key is missing, returning the AtomNum list."""
    atomnums = []
    index = 0
    try:
        # Sire raises when a key is absent; that terminates the scan.
        while True:
            atomnums.append(AtomNum(prop[str(index)].toInt()))
            index += 1
    except:
        pass
    return atomnums
def propertyToAtomNumVectorList(prop):
    """Inverse of atomNumVectorListToProperty: rebuild the list of
    (AtomNum, Vector, k) restraint tuples stored in *prop*."""
    restraints = []
    index = 0
    try:
        # A missing key raises, which ends the scan.
        while True:
            atomnum = AtomNum(prop["AtomNum(%d)" % index].toInt())
            coords = Vector(prop["x(%d)" % index].toDouble(),
                            prop["y(%d)" % index].toDouble(),
                            prop["z(%d)" % index].toDouble())
            force_k = prop["k(%d)" % index].toDouble()
            restraints.append((atomnum, coords, force_k))
            index += 1
    except:
        pass
    return restraints
def setupRestraints(system):
    """Attach a 'restrainedatoms' property to each molecule, listing every
    heavy atom (mass >= heavy_mass_restraint) that is not in an
    unrestrained residue, together with its reference coordinates and the
    force constant k_restraint."""
    molecules = system[MGName("all")].molecules()
    for molnum in molecules.molNums():
        mol = molecules.molecule(molnum)[0].molecule()
        atoms = mol.atoms()
        restrained = []
        #
        # This will apply a restraint to every atom that is
        # A) NOT a hydrogen
        # B) NOT in an unrestrained residue.
        #
        for idx in range(mol.nAtoms()):
            atom = atoms[idx]
            # Skip atoms in residues the user chose not to restrain.
            if atom.residue().name().value() in unrestrained_residues.val:
                continue
            # Skip light atoms (hydrogens).
            if atom.property("mass").value() < heavy_mass_restraint.val:
                continue
            restrained.append((atom.number(), atom.property("coordinates"), k_restraint))
        if restrained:
            mol = mol.edit().setProperty("restrainedatoms", atomNumVectorListToProperty(restrained)).commit()
            system.update(mol)
    return system
def setupDistanceRestraints(system, restraints=None):
    """Attach receptor-ligand distance restraints to the solute as a
    'linkbonds' property.

    Parameters
    ----------
    system : Sire System
    restraints : dict or None
        Mapping {(atom0_idx, atom1_idx): (reql, kl, dl)} using 0-based
        atom indices. If None, restraints are read from the
        distance_restraints_dict user parameter.

    Returns
    -------
    system : Sire System
        System whose solute carries the serialised restraint list.
    """
    prop_list = []
    molecules = system[MGName("all")].molecules()
    if restraints is None:
        #dic_items = list(distance_restraints_dict.val.items())
        dic_items = list(dict(distance_restraints_dict.val).items())
    else:
        dic_items = list(restraints.items())
    molecules = system[MGName("all")].molecules()
    moleculeNumbers = molecules.molNums()
    for moleculeNumber in moleculeNumbers:
        mol = molecules.molecule(moleculeNumber)[0].molecule()
        atoms_mol = mol.atoms()
        natoms_mol = mol.nAtoms()
        # Scan every atom; when either end of a restraint pair is found the
        # restraint tuple is recorded (duplicates filtered below).
        for j in range(0, natoms_mol):
            at = atoms_mol[j]
            atnumber = at.number()
            for k in range(len(dic_items)):
                if dic_items[k][0][0] == dic_items[k][0][1]:
                    print ("Error! It is not possible to place a distance restraint on the same atom")
                    sys.exit(-1)
                # Restraint dict indices are 0-based; atom numbers 1-based.
                if atnumber.value() - 1 in dic_items[k][0]:
                    print (at)
                    # atom0index atom1index, reql, kl, dl
                    prop_list.append((
                        dic_items[k][0][0] + 1, dic_items[k][0][1] + 1, dic_items[k][1][0], dic_items[k][1][1],
                        dic_items[k][1][2]))
    # Both atoms of a pair append the same tuple; keep unique entries only.
    unique_prop_list = []
    [unique_prop_list.append(item) for item in prop_list if item not in unique_prop_list]
    print (unique_prop_list)
    # The solute will store all the information related to the receptor-ligand restraints
    solute = getSolute(system)
    solute = solute.edit().setProperty("linkbonds", linkbondVectorListToProperty(unique_prop_list)).commit()
    system.update(solute)
    return system
def freezeResidues(system):
    """Freeze every atom belonging to a residue listed in frozen_residues by
    setting its mass to zero (OpenMM treats zero-mass particles as fixed;
    Sire itself is unaware of the freezing).

    Returns
    -------
    system : Sire System
        System with the frozen atoms' masses zeroed.
    """
    molecules = system[MGName("all")].molecules()
    molnums = molecules.molNums()
    for molnum in molnums:
        mol = molecules.molecule(molnum)[0].molecule()
        nats = mol.nAtoms()
        for x in range(0, nats):
            # BUGFIX: re-read the atom from the *current* molecule each
            # iteration. The old code captured mol.atoms() once, so when a
            # molecule had several frozen atoms each edit started from the
            # stale molecule and previous mass edits were lost — only the
            # last frozen atom per molecule actually ended up frozen.
            at = mol.atoms()[x]
            atnumber = at.number()
            if at.residue().name().value() in frozen_residues.val:
                print("Freezing %s %s %s " % (at, atnumber, at.residue().name().value() ))
                mol = at.edit().setProperty("mass", 0 * g_per_mol).molecule()
                system.update(mol)
    return system
def repartitionMasses(system, hmassfactor=4.0):
    """
    Apply hydrogen mass repartitioning (HMR): multiply each hydrogen mass
    by *hmassfactor* and subtract the added mass from the bonded heavy
    atom(s), so the total mass of each molecule is conserved.

    Parameters
    ----------
    system : Sire System
    hmassfactor : float
        Scale factor for hydrogen masses; must lie in [HMR_MIN, HMR_MAX].

    Returns
    -------
    system : Sire System
        System with updated per-atom masses.
    """
    if not (HMR_MIN <= hmassfactor <= HMR_MAX):
        print(f'The HMR factor must be between {HMR_MIN} and {HMR_MAX} '
              f'and not {hmassfactor}')
        sys.exit(-1)
    print ("Applying Hydrogen Mass repartition to input using a factor of %s " % hmassfactor)
    molecules = system[MGName("all")].molecules()
    molnums = molecules.molNums()
    for molnum in molnums:
        mol = molecules.molecule(molnum)[0].molecule()
        nats = mol.nAtoms()
        atoms = mol.atoms()
        # Monoatomic species (ions) carry no connectivity property.
        if nats == 1:
            connect = None
        else:
            connect = mol.property("connectivity")
        atom_masses = {}
        #
        # First pass. Initialise changes in atom_masses to effect
        #
        for x in range(0,nats):
            at = atoms[x]
            atidx = at.index()
            atom_masses[atidx.value()] = 0 * g_per_mol
        total_delta = 0.0 * g_per_mol
        #
        # Second pass. Decide how the mass of each atom should change.
        #
        for x in range(0,nats):
            at = atoms[x]
            atidx = at.index()
            atmass = at.property("mass")
            # units are in g_per_mol
            if (atmass.value() < 1.1):
                # Atoms with a mass < 1.1 g_per_mol are assumed to be hydrogen atoms
                atmass = at.property("mass")
                deltamass = atmass * hmassfactor - atmass
                #print("Increasing mass %s by %s " % (at, deltamass))
                total_delta += deltamass
                atom_masses[atidx.value()] = deltamass
                # Skip monoatomic systems without connectivity property
                if connect is None:
                    continue
                bonds = connect.getBonds(atidx)
                # Get list of atoms that share one bond with this atom. Ignore all atoms that have a
                # mass < 1.1 g_mol in the ORIGINAL atoms list
                # For each atom this was bonded to, substract delta_mass / nbonded
                bonded_atoms = []
                for bond in bonds:
                    at0 = mol.select(bond.atom0()).index()
                    at1 = mol.select(bond.atom1()).index()
                    if at0 == atidx:
                        heavyatidx = at1
                    else:
                        heavyatidx = at0
                    if heavyatidx in bonded_atoms:
                        continue
                    heavyat = mol.select(heavyatidx)
                    heavyat_mass = heavyat.property("mass")
                    # units are in g_per_mol
                    if heavyat_mass.value() < 1.1:
                        continue
                    bonded_atoms.append(heavyatidx)
                # Subtract the hydrogen's mass gain from its heavy neighbours.
                for bonded_atom in bonded_atoms:
                    #print("Increasing mass %s by %s " % (mol.select(bonded_atom), -deltamass))
                    total_delta += - deltamass
                    atom_masses[bonded_atom.value()] += - deltamass
        # Sanity check (g_per_mol)
        if total_delta.value() > 0.001:
            print ("WARNING ! The mass repartitioning algorithm is not conserving atomic masses for",
                   "molecule %s (total delta is %s). Report bug to a Sire developer." % (molnum,total_delta.value()) )
            sys.exit(-1)
        # Now that have worked out mass changes per molecule, update molecule
        for x in range(0,nats):
            at = atoms[x]
            atidx = at.index()
            atmass = at.property("mass")
            newmass = atmass + atom_masses[atidx.value()]
            # Sanity check. Note this is likely to occur if hmassfactor > 4
            if (newmass.value() < 0.0):
                print ("""WARNING ! The mass of atom %s is less than zero after hydrogen mass repartitioning.
                This should not happen ! Decrease hydrogen mass repartitioning factor in your cfg file
                and try again.""" % atidx)
                sys.exit(-1)
            mol = mol.edit().atom(atidx).setProperty("mass", newmass )[0].molecule()
        system.update(mol)
        #import pdb; pdb.set_trace()
    return system
def getDummies(molecule):
    """Return (to_dummies, from_dummies): atom selections that end as
    dummies ('final_ambertype' == 'du') and that start as dummies
    ('initial_ambertype' == 'du'). Either may be None when empty."""
    print ("Selecting dummy groups")
    from_dummies = None
    to_dummies = None
    atoms = molecule.atoms()
    for pos in range(molecule.nAtoms()):
        atom = atoms[pos]
        if atom.property("initial_ambertype") == "du":
            selection = molecule.selectAll(atom.index())
            from_dummies = selection if from_dummies is None else from_dummies + selection
        elif atom.property("final_ambertype") == "du":
            selection = molecule.selectAll(atom.index())
            to_dummies = selection if to_dummies is None else to_dummies + selection
    return to_dummies, from_dummies
def createSystemFreeEnergy(molecules):
    r"""creates the system for free energy calculation
    Parameters
    ----------
    molecules : Sire.molecules
        Sire object that contains a lot of information about molecules
    Returns
    -------
    system : Sire.system
        System holding the perturbed solute (split into hard / todummy /
        fromdummy groups), the solvent group and the combined groups
        required by the alchemical force fields.
    """
    print ("Create the System...")
    moleculeNumbers = molecules.molNums()
    moleculeList = []
    for moleculeNumber in moleculeNumbers:
        molecule = molecules.molecule(moleculeNumber)[0].molecule()
        moleculeList.append(molecule)
    # Scan input to find a molecule with passed residue number
    # The residue name of the first residue in this molecule is
    # used to name the solute. This is used later to match
    # templates in the flex/pert files.
    solute = None
    for molecule in moleculeList:
        if ( molecule.residue(ResIdx(0)).number() == ResNum(perturbed_resnum.val) ):
            solute = molecule
            moleculeList.remove(molecule)
            break
    if solute is None:
        msg = ("FATAL! Could not find a solute to perturb with residue "
               f"number {perturbed_resnum.val} in the input! Check the value of "
               "your config keyword 'perturbed residue number' The system should "
               "contain a single molecule with this residue number.")
        raise Exception(msg)
    #solute = moleculeList[0]
    # Rename the solute after its first residue so flex/pert templates can
    # be matched by name.
    lig_name = solute.residue(ResIdx(0)).name().value()
    solute = solute.edit().rename(lig_name).commit()
    perturbations_lib = PerturbationsLibrary(morphfile.val)
    solute = perturbations_lib.applyTemplate(solute)
    perturbations = solute.property("perturbations")
    lam = Symbol("lambda")
    initial = Perturbation.symbols().initial()
    final = Perturbation.symbols().final()
    # Linear interpolation between the initial and final states in lambda.
    solute = solute.edit().setProperty("perturbations",
                                       perturbations.recreate((1 - lam) * initial + lam * final)).commit()
    # We put atoms in three groups depending on what happens in the perturbation
    # non dummy to non dummy --> the hard group, uses a normal intermolecular FF
    # non dummy to dummy --> the todummy group, uses SoftFF with alpha = Lambda
    # dummy to non dummy --> the fromdummy group, uses SoftFF with alpha = 1 - Lambda
    # We start assuming all atoms are hard atoms. Then we call getDummies to find which atoms
    # start/end as dummies and update the hard, todummy and fromdummy groups accordingly
    solute_grp_ref = MoleculeGroup("solute_ref", solute)
    solute_grp_ref_hard = MoleculeGroup("solute_ref_hard")
    solute_grp_ref_todummy = MoleculeGroup("solute_ref_todummy")
    solute_grp_ref_fromdummy = MoleculeGroup("solute_ref_fromdummy")
    solute_ref_hard = solute.selectAllAtoms()
    solute_ref_todummy = solute_ref_hard.invert()
    solute_ref_fromdummy = solute_ref_hard.invert()
    to_dummies, from_dummies = getDummies(solute)
    # Move atoms that end as dummies from the hard to the todummy selection.
    if to_dummies is not None:
        ndummies = to_dummies.count()
        dummies = to_dummies.atoms()
        for x in range(0, ndummies):
            dummy_index = dummies[x].index()
            solute_ref_hard = solute_ref_hard.subtract(solute.select(dummy_index))
            solute_ref_todummy = solute_ref_todummy.add(solute.select(dummy_index))
    # Move atoms that start as dummies from the hard to the fromdummy selection.
    if from_dummies is not None:
        ndummies = from_dummies.count()
        dummies = from_dummies.atoms()
        for x in range(0, ndummies):
            dummy_index = dummies[x].index()
            solute_ref_hard = solute_ref_hard.subtract(solute.select(dummy_index))
            solute_ref_fromdummy = solute_ref_fromdummy.add(solute.select(dummy_index))
    solute_grp_ref_hard.add(solute_ref_hard)
    solute_grp_ref_todummy.add(solute_ref_todummy)
    solute_grp_ref_fromdummy.add(solute_ref_fromdummy)
    solutes = MoleculeGroup("solutes")
    solutes.add(solute)
    molecules = MoleculeGroup("molecules")
    molecules.add(solute)
    # 'solvent' is every molecule that is not the perturbed solute.
    solvent = MoleculeGroup("solvent")
    #for molecule in moleculeList[1:]:
    for molecule in moleculeList:
        molecules.add(molecule)
        solvent.add(molecule)
    all = MoleculeGroup("all")
    all.add(molecules)
    all.add(solvent)
    all.add(solutes)
    all.add(solute_grp_ref)
    all.add(solute_grp_ref_hard)
    all.add(solute_grp_ref_todummy)
    all.add(solute_grp_ref_fromdummy)
    # Add these groups to the System
    system = System()
    system.add(solutes)
    system.add(solute_grp_ref)
    system.add(solute_grp_ref_hard)
    system.add(solute_grp_ref_todummy)
    system.add(solute_grp_ref_fromdummy)
    system.add(molecules)
    system.add(solvent)
    system.add(all)
    return system
def setupForceFieldsFreeEnergy(system, space):
    r"""sets up the force field for the free energy calculation

    Creates internal and CLJ force fields for solvent and for the hard /
    todummy / fromdummy solute groups (soft-core where dummies are
    involved), wires their totals into the system energy, and adds the
    lambda-dependent alpha constraints for the soft force fields.

    Parameters
    ----------
    system : Sire.system
    space : Sire.space
    Returns
    -------
    system : Sire.system
    """
    print ("Creating force fields... ")
    solutes = system[MGName("solutes")]
    solute = system[MGName("solute_ref")]
    solute_hard = system[MGName("solute_ref_hard")]
    solute_todummy = system[MGName("solute_ref_todummy")]
    solute_fromdummy = system[MGName("solute_ref_fromdummy")]
    solvent = system[MGName("solvent")]
    all = system[MGName("all")]
    # ''solvent'' is actually every molecule that isn't perturbed !
    solvent_intraff = InternalFF("solvent_intraff")
    solvent_intraff.add(solvent)
    # Solute bond, angle, dihedral energy
    solute_intraff = InternalFF("solute_intraff")
    solute_intraff.add(solute)
    # Solvent-solvent coulomb/LJ (CLJ) energy
    solventff = InterCLJFF("solvent:solvent")
    if (cutoff_type.val != "nocutoff"):
        solventff.setUseReactionField(True)
        solventff.setReactionFieldDielectric(rf_dielectric.val)
    solventff.add(solvent)
    #Solvent intramolecular CLJ energy
    solvent_intraclj = IntraCLJFF("solvent_intraclj")
    if (cutoff_type.val != "nocutoff"):
        solvent_intraclj.setUseReactionField(True)
        solvent_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solvent_intraclj.add(solvent)
    # Solute intramolecular CLJ energy
    solute_hard_intraclj = IntraCLJFF("solute_hard_intraclj")
    if (cutoff_type.val != "nocutoff"):
        solute_hard_intraclj.setUseReactionField(True)
        solute_hard_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solute_hard_intraclj.add(solute_hard)
    # Soft-core FF for atoms that become dummies (alpha = lambda).
    solute_todummy_intraclj = IntraSoftCLJFF("solute_todummy_intraclj")
    solute_todummy_intraclj.setShiftDelta(shift_delta.val)
    solute_todummy_intraclj.setCoulombPower(coulomb_power.val)
    if (cutoff_type.val != "nocutoff"):
        solute_todummy_intraclj.setUseReactionField(True)
        solute_todummy_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solute_todummy_intraclj.add(solute_todummy)
    # Soft-core FF for atoms appearing from dummies (alpha = 1 - lambda).
    solute_fromdummy_intraclj = IntraSoftCLJFF("solute_fromdummy_intraclj")
    solute_fromdummy_intraclj.setShiftDelta(shift_delta.val)
    solute_fromdummy_intraclj.setCoulombPower(coulomb_power.val)
    if (cutoff_type.val != "nocutoff"):
        solute_fromdummy_intraclj.setUseReactionField(True)
        solute_fromdummy_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solute_fromdummy_intraclj.add(solute_fromdummy)
    # Cross terms between hard / todummy / fromdummy groups.
    solute_hard_todummy_intraclj = IntraGroupSoftCLJFF("solute_hard:todummy_intraclj")
    solute_hard_todummy_intraclj.setShiftDelta(shift_delta.val)
    solute_hard_todummy_intraclj.setCoulombPower(coulomb_power.val)
    if (cutoff_type.val != "nocutoff"):
        solute_hard_todummy_intraclj.setUseReactionField(True)
        solute_hard_todummy_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solute_hard_todummy_intraclj.add(solute_hard, MGIdx(0))
    solute_hard_todummy_intraclj.add(solute_todummy, MGIdx(1))
    solute_hard_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_hard:fromdummy_intraclj")
    solute_hard_fromdummy_intraclj.setShiftDelta(shift_delta.val)
    solute_hard_fromdummy_intraclj.setCoulombPower(coulomb_power.val)
    if (cutoff_type.val != "nocutoff"):
        solute_hard_fromdummy_intraclj.setUseReactionField(True)
        solute_hard_fromdummy_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solute_hard_fromdummy_intraclj.add(solute_hard, MGIdx(0))
    solute_hard_fromdummy_intraclj.add(solute_fromdummy, MGIdx(1))
    solute_todummy_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_todummy:fromdummy_intraclj")
    solute_todummy_fromdummy_intraclj.setShiftDelta(shift_delta.val)
    solute_todummy_fromdummy_intraclj.setCoulombPower(coulomb_power.val)
    if (cutoff_type.val != "nocutoff"):
        solute_todummy_fromdummy_intraclj.setUseReactionField(True)
        solute_todummy_fromdummy_intraclj.setReactionFieldDielectric(rf_dielectric.val)
    solute_todummy_fromdummy_intraclj.add(solute_todummy, MGIdx(0))
    solute_todummy_fromdummy_intraclj.add(solute_fromdummy, MGIdx(1))
    #Solute-solvent CLJ energy
    solute_hard_solventff = InterGroupCLJFF("solute_hard:solvent")
    if (cutoff_type.val != "nocutoff"):
        solute_hard_solventff.setUseReactionField(True)
        solute_hard_solventff.setReactionFieldDielectric(rf_dielectric.val)
    solute_hard_solventff.add(solute_hard, MGIdx(0))
    solute_hard_solventff.add(solvent, MGIdx(1))
    solute_todummy_solventff = InterGroupSoftCLJFF("solute_todummy:solvent")
    if (cutoff_type.val != "nocutoff"):
        solute_todummy_solventff.setUseReactionField(True)
        solute_todummy_solventff.setReactionFieldDielectric(rf_dielectric.val)
    solute_todummy_solventff.add(solute_todummy, MGIdx(0))
    solute_todummy_solventff.add(solvent, MGIdx(1))
    solute_fromdummy_solventff = InterGroupSoftCLJFF("solute_fromdummy:solvent")
    if (cutoff_type.val != "nocutoff"):
        solute_fromdummy_solventff.setUseReactionField(True)
        solute_fromdummy_solventff.setReactionFieldDielectric(rf_dielectric.val)
    solute_fromdummy_solventff.add(solute_fromdummy, MGIdx(0))
    solute_fromdummy_solventff.add(solvent, MGIdx(1))
    # TOTAL
    forcefields = [solute_intraff,
                   solute_hard_intraclj, solute_todummy_intraclj, solute_fromdummy_intraclj,
                   solute_hard_todummy_intraclj, solute_hard_fromdummy_intraclj,
                   solute_todummy_fromdummy_intraclj,
                   solvent_intraff,
                   solventff, solvent_intraclj,
                   solute_hard_solventff, solute_todummy_solventff, solute_fromdummy_solventff]
    for forcefield in forcefields:
        system.add(forcefield)
    system.setProperty("space", space)
    if (cutoff_type.val != "nocutoff"):
        system.setProperty("switchingFunction", CHARMMSwitchingFunction(cutoff_dist.val))
    else:
        system.setProperty("switchingFunction", NoCutoff())
    system.setProperty("combiningRules", VariantProperty(combining_rules.val))
    system.setProperty("coulombPower", VariantProperty(coulomb_power.val))
    system.setProperty("shiftDelta", VariantProperty(shift_delta.val))
    # TOTAL
    total_nrg = solute_intraff.components().total() + solute_hard_intraclj.components().total() + \
        solute_todummy_intraclj.components().total(0) + solute_fromdummy_intraclj.components().total(0) + \
        solute_hard_todummy_intraclj.components().total(
            0) + solute_hard_fromdummy_intraclj.components().total(0) + \
        solute_todummy_fromdummy_intraclj.components().total(0) + \
        solvent_intraff.components().total() + solventff.components().total() + \
        solvent_intraclj.components().total() + \
        solute_hard_solventff.components().total() + \
        solute_todummy_solventff.components().total(0) + \
        solute_fromdummy_solventff.components().total(0)
    e_total = system.totalComponent()
    lam = Symbol("lambda")
    system.setComponent(e_total, total_nrg)
    system.setConstant(lam, 0.0)
    system.add(PerturbationConstraint(solutes))
    # NON BONDED Alpha constraints for the soft force fields
    system.add(PropertyConstraint("alpha0", FFName("solute_todummy_intraclj"), lam))
    system.add(PropertyConstraint("alpha0", FFName("solute_fromdummy_intraclj"), 1 - lam))
    system.add(PropertyConstraint("alpha0", FFName("solute_hard:todummy_intraclj"), lam))
    system.add(PropertyConstraint("alpha0", FFName("solute_hard:fromdummy_intraclj"), 1 - lam))
    system.add(PropertyConstraint("alpha0", FFName("solute_todummy:fromdummy_intraclj"), Max(lam, 1 - lam)))
    system.add(PropertyConstraint("alpha0", FFName("solute_todummy:solvent"), lam))
    system.add(PropertyConstraint("alpha0", FFName("solute_fromdummy:solvent"), 1 - lam))
    # Fix lambda at the requested value for this run.
    system.setComponent(lam, lambda_val.val)
    # printEnergies( system.componentValues() )
    return system
def setupMovesFreeEnergy(system, debug_seed, GPUS, lam_val):
    """Create the MD move set for the alchemical free energy simulation,
    backed by an OpenMMFrEnergyST integrator configured from the global
    simulation parameters.

    Parameters
    ----------
    system : Sire System
        Alchemical system with solute_ref / hard / todummy / fromdummy groups.
    debug_seed : int
        Non-zero value fixes the RNG seed for reproducible runs.
    GPUS :
        OpenMM device index (converted to str before use).
    lam_val :
        Unused here; the alchemical value is read from the lambda_val
        parameter instead.

    Returns
    -------
    moves : WeightedMoves
        Contains a single weighted MD move.
    """
    print ("Setting up moves...")
    molecules = system[MGName("molecules")]
    solute = system[MGName("solute_ref")]
    solute_hard = system[MGName("solute_ref_hard")]
    solute_todummy = system[MGName("solute_ref_todummy")]
    solute_fromdummy = system[MGName("solute_ref_fromdummy")]
    Integrator_OpenMM = OpenMMFrEnergyST(molecules, solute, solute_hard, solute_todummy, solute_fromdummy)
    Integrator_OpenMM.setRandomSeed(debug_seed)
    Integrator_OpenMM.setIntegrator(integrator_type.val)
    Integrator_OpenMM.setFriction(inverse_friction.val)  # Only meaningful for Langevin/Brownian integrators
    Integrator_OpenMM.setPlatform(platform.val)
    Integrator_OpenMM.setConstraintType(constraint.val)
    Integrator_OpenMM.setCutoffType(cutoff_type.val)
    Integrator_OpenMM.setFieldDielectric(rf_dielectric.val)
    Integrator_OpenMM.setAlchemicalValue(lambda_val.val)
    Integrator_OpenMM.setAlchemicalArray(lambda_array.val)
    Integrator_OpenMM.setDeviceIndex(str(GPUS))
    Integrator_OpenMM.setCoulombPower(coulomb_power.val)
    Integrator_OpenMM.setShiftDelta(shift_delta.val)
    Integrator_OpenMM.setDeltatAlchemical(delta_lambda.val)
    Integrator_OpenMM.setPrecision(precision.val)
    Integrator_OpenMM.setTimetoSkip(time_to_skip.val)
    Integrator_OpenMM.setBufferFrequency(buffered_coords_freq.val)
    # BUGFIX: compare the parameter *value*, not the Parameter object.
    # The old test `cutoff_type != "nocutoff"` compared a Parameter to a
    # string and was therefore always True, so a cutoff distance was set
    # even when the user asked for no cutoff.
    if cutoff_type.val != "nocutoff":
        Integrator_OpenMM.setCutoffDistance(cutoff_dist.val)
    Integrator_OpenMM.setCMMremovalFrequency(cmm_removal.val)
    Integrator_OpenMM.setEnergyFrequency(energy_frequency.val)
    if use_restraints.val:
        Integrator_OpenMM.setRestraint(True)
    # Andersen thermostat settings (temperature control).
    if andersen.val:
        Integrator_OpenMM.setTemperature(temperature.val)
        Integrator_OpenMM.setAndersen(andersen.val)
        Integrator_OpenMM.setAndersenFrequency(andersen_frequency.val)
    # Monte Carlo barostat settings (pressure control).
    if barostat.val:
        Integrator_OpenMM.setPressure(pressure.val)
        Integrator_OpenMM.setMCBarostat(barostat.val)
        Integrator_OpenMM.setMCBarostatFrequency(barostat_frequency.val)
    # Choose a random seed for Sire if a debugging seed hasn't been set.
    if debug_seed == 0:
        seed = RanGenerator().randInt(100000, 1000000)
    else:
        seed = debug_seed
        print("Using debugging seed number %d " % debug_seed)
    # This calls the OpenMMFrEnergyST initialise function
    Integrator_OpenMM.initialise()
    velocity_generator = MaxwellBoltzmann(temperature.val)
    velocity_generator.setGenerator(RanGenerator(seed))
    mdmove = MolecularDynamics(molecules, Integrator_OpenMM, timestep.val,
                               {"velocity generator":velocity_generator})
    print("Created a MD move that uses OpenMM for all molecules on %s " % GPUS)
    moves = WeightedMoves()
    moves.add(mdmove, 1)
    moves.setGenerator(RanGenerator(seed))
    return moves
def clearBuffers(system):
    r"""Strip every 'buffered_' property from all molecules in the system.

    Parameters
    ----------
    system : Sire.system
        contains Sire system
    Returns
    -------
    system : Sire.system
        the system with buffered-coordinate properties removed
    """
    print ("Clearing buffers...")
    mols = system[MGName("all")].molecules()
    cleaned = MoleculeGroup("changedmols")
    for molnum in mols.molNums():
        molecule = mols.molecule(molnum)[0].molecule()
        editor = molecule.edit()
        for key in molecule.propertyKeys():
            # Buffered trajectory frames are stored under 'buffered_*'.
            if key.startswith("buffered_"):
                editor.removeProperty(PropertyName(key))
        cleaned.add(editor.commit())
    # Push all cleaned molecules back in one update.
    system.update(cleaned)
    return system
def getAllData(integrator, steps):
    """Collect per-sample simulation data from the integrator into one
    2D array.

    Parameters
    ----------
    integrator :
        Object exposing getGradients, getForwardMetropolis,
        getBackwardMetropolis, getEnergies and getReducedPerturbedEnergies
        accessors, each returning one entry per recorded sample.
    steps : sequence of int
        The MD step number of each sample; must match the accessor lengths.

    Returns
    -------
    numpy.ndarray
        Columns: step, energy, gradient, forward Metropolis, backward
        Metropolis[, reduced perturbed energies...]. Exits the process with
        status -1 if the accessor lengths disagree.
    """
    gradients = integrator.getGradients()
    f_metropolis = integrator.getForwardMetropolis()
    b_metropolis = integrator.getBackwardMetropolis()
    energies = integrator.getEnergies()
    reduced_pot_en = integrator.getReducedPerturbedEnergies()
    # All primary arrays must have exactly one entry per recorded step.
    lengths = [len(gradients), len(f_metropolis), len(b_metropolis),
               len(energies), len(steps)]
    if len(set(lengths)) != 1:
        print("Whoops somehow the data generated does not agree in their first dimensions...exiting now.")
        sys.exit(-1)
    if len(gradients) == len(reduced_pot_en):
        outdata = np.column_stack((steps, energies, gradients,
                                   f_metropolis, b_metropolis,
                                   reduced_pot_en))
    elif len(reduced_pot_en) == 0:
        # No lambda array was given, so no reduced perturbed energies exist.
        outdata = np.column_stack((steps, energies, gradients,
                                   f_metropolis, b_metropolis))
        print("Warning: you didn't specify a lambda array, no reduced perturbed energies can be written to file.")
    else:
        print("Whoops somehow the data generated does not agree in their first dimensions...exiting now.")
        sys.exit(-1)
    return outdata
def getAtomNearCOG( molecule ):
    """Return the atom of *molecule* closest to its centre of geometry.

    Raises
    ------
    ValueError
        If the molecule contains no atoms (the old code raised a confusing
        NameError in that case).
    """
    if molecule.nAtoms() == 0:
        raise ValueError("Cannot find the atom nearest the COG of an empty molecule")
    mol_centre = molecule.evaluate().center()
    atoms = molecule.atoms()
    nearest_atom = None
    # float('inf') instead of a 99999.0 sentinel: squared distances in a
    # large system can legitimately exceed any finite magic number.
    mindist = float("inf")
    for x in range(0, molecule.nAtoms()):
        atom = atoms[x]
        at_coords = atom.property('coordinates')
        # distance2 avoids the sqrt; the ordering is the same as distance.
        dist = Vector().distance2(at_coords, mol_centre)
        if dist < mindist:
            mindist = dist
            nearest_atom = atom
    return nearest_atom
def generateDistanceRestraintsDict(system):
    r"""Autogenerate a pair of receptor-ligand distance restraints tying
    the ligand's central atom to the nearest receptor CA atom and to the
    CA atom nearest the mirror of that position.

    Parameters
    ----------
    system : Sire.system
        contains Sire system
    Returns
    -------
    restraints : dict
        {(i0, i1): (r, kl, Dl), (i0, i2): (r, kl, Dl)} with 0-based atom
        indices, current separations r, force constant kl and tolerance Dl.
    """
    # Step 1) Assume ligand is first solute
    # Find atom nearest to COG
    molecules = system.molecules()
    molnums = molecules.molNums()
    solute = molecules.at(MolNum(1))[0].molecule()
    nearestcog_atom = getAtomNearCOG( solute )
    icoord = nearestcog_atom.property("coordinates")
    # Step 2) Find nearest 'CA' heavy atom in other solutes (skip water & ions)
    dmin = 9999999.0
    closest = None
    for molnum in molnums:
        molecule = molecules.molecule(molnum)[0].molecule()
        if molecule == solute:
            continue
        if molecule.residues()[0].name() == ResName("WAT"):
            continue
        #print (molecule)
        ca_atoms = molecule.selectAll(AtomName("CA"))
        for ca in ca_atoms:
            jcoord = ca.property("coordinates")
            d = Vector().distance(icoord,jcoord)
            if d < dmin:
                dmin = d
                closest = ca
    # Step 3) Compute position of 'mirror' CA. Find nearest CA atom to that point
    jcoord = closest.property("coordinates")
    # Reflect the closest CA through the ligand's central atom.
    mirror_coord = icoord-(jcoord-icoord)
    dmin = 9999999.0
    mirror_closest = None
    for molnum in molnums:
        molecule = molecules.molecule(molnum)[0].molecule()
        if molecule == solute:
            continue
        if molecule.residues()[0].name() == ResName("WAT"):
            continue
        #print (molecule)
        ca_atoms = molecule.selectAll(AtomName("CA"))
        for ca in ca_atoms:
            jcoord = ca.property("coordinates")
            d = Vector().distance(mirror_coord,jcoord)
            if d < dmin:
                dmin = d
                mirror_closest = ca
    #print (mirror_closest)
    # Step 4) Setup restraint parameters
    kl = 10.00  # kcal/mol/Angstrom^2
    Dl = 2.00  # Angstrom
    i0 = nearestcog_atom.index().value()
    i1 = closest.index().value()
    i2 = mirror_closest.index().value()
    r01 = Vector().distance(nearestcog_atom.property("coordinates"),closest.property("coordinates"))
    r02 = Vector().distance(nearestcog_atom.property("coordinates"),mirror_closest.property("coordinates"))
    restraints = { (i0, i1): (r01, kl, Dl), (i0,i2): (r02, kl, Dl) }
    #print restraints
    #distance_restraints_dict.val = restraints
    #distance_restraints_dict
    #import pdb; pdb.set_trace()
    return restraints
######## MAIN SCRIPTS #############
@resolveParameters
def run():
    """Set up and run a plain molecular dynamics simulation with somd.

    Configuration comes from module-level Parameter objects (resolved by the
    @resolveParameters decorator).  If no restart file exists, the system is
    built from the AMBER topology/coordinate input (optionally via a cached
    s3 stream), restraints and force fields are applied, and a restart file
    is written.  Otherwise the simulation state is reloaded from the restart
    file.  The requested number of cycles is then run, optionally preceded by
    minimisation and equilibration, and the restart file is re-saved.
    """
    try:
        host = os.environ['HOSTNAME']
    except KeyError:
        host = "unknown"
    print("\n### Running Molecular Dynamics simulation on %s ###" % host)
    if verbose.val:
        print("###================= Simulation Parameters=====================###")
        Parameter.printAll()
        print ("###===========================================================###\n")
    timer = QTime()
    timer.start()
    # Setup the system from scratch if no restart file is available
    print("###================Setting up calculation=====================###")
    if not os.path.exists(restart_file.val):
        print("New run. Loading input and creating restart")
        amber = Amber()
        # Prefer the cached binary s3 stream; fall back to parsing the AMBER
        # topology/coordinates and cache the result for next time.
        if os.path.exists(s3file.val):
            (molecules, space) = Sire.Stream.load(s3file.val)
        else:
            (molecules, space) = amber.readCrdTop(crdfile.val, topfile.val)
            Sire.Stream.save((molecules, space), s3file.val)
        system = createSystem(molecules)
        if center_solute.val:
            system = centerSolute(system, space)
        if use_restraints.val:
            system = setupRestraints(system)
        if use_distance_restraints.val:
            restraints = None
            if len(distance_restraints_dict.val) == 0:
                print ("Distance restraints have been activated, but none have been specified. Will autogenerate.")
                restraints = generateDistanceRestraintsDict(system)
                # Save restraints
                # NOTE(review): this prints the Parameter object itself, not
                # its .val — confirm the intended output is `restraints`.
                print ("Autogenerated distance restraints values: %s " % distance_restraints_dict)
                stream = open("restraints.cfg",'w')
                stream.write("distance restraints dictionary = %s\n" % restraints)
                stream.close()
            system = setupDistanceRestraints(system, restraints=restraints)
        if hydrogen_mass_repartitioning_factor.val > 1.0:
            system = repartitionMasses(system, hmassfactor=hydrogen_mass_repartitioning_factor.val)
        # Note that this just set the mass to zero which freezes residues in OpenMM but Sire doesn't known that
        if freeze_residues.val:
            system = freezeResidues(system)
        system = setupForcefields(system, space)
        if debug_seed.val != 0:
            print("Setting up the simulation with debugging seed %s" % debug_seed.val)
        moves = setupMoves(system, debug_seed.val, gpu.val)
        print("Saving restart")
        Sire.Stream.save([system, moves], restart_file.val)
    else:
        # Restarted run: reload the saved system/moves and re-bind the
        # integrator to the requested GPU device.
        system, moves = Sire.Stream.load(restart_file.val)
        move0 = moves.moves()[0]
        integrator = move0.integrator()
        integrator.setDeviceIndex(str(gpu.val))
        move0.setIntegrator(integrator)
        moves = WeightedMoves()
        moves.add(move0)
        print("Index GPU = %s " % moves.moves()[0].integrator().getDeviceIndex())
        print("Loaded a restart file on which we have performed %d moves." % moves.nMoves())
        #Maybe include a runtime error here!
        if minimise.val:
            print ('WARNING: You are trying to minimise from a restart! Revise your config file!')
        if equilibrate.val:
            print ('WARNING: You are trying to equilibrate from a restart! Revise your config file!')
    # Cycle numbering continues from the number of moves already performed.
    cycle_start = int(moves.nMoves() / nmoves.val) + 1
    cycle_end = cycle_start + ncycles.val
    if (save_coords.val):
        trajectory = setupDCD(system)
    mdmoves = moves.moves()[0]
    integrator = mdmoves.integrator()
    print ("###===========================================================###\n")
    if minimise.val:
        print("###=======================Minimisation========================###")
        print('Running minimisation.')
        if verbose.val:
            print ("Energy before the minimisation: " + str(system.energy()))
            print ('Tolerance for minimisation: ' + str(minimise_tol.val))
            print ('Maximum number of minimisation iterations: ' + str(minimise_max_iter.val))
        # Constraints are switched off during minimisation and restored after.
        integrator.setConstraintType("none")
        system = integrator.minimiseEnergy(system, minimise_tol.val, minimise_max_iter.val)
        system.mustNowRecalculateFromScratch()
        if verbose.val:
            print ("Energy after the minimization: " + str(system.energy()))
            print ("Energy minimization done.")
        integrator.setConstraintType(constraint.val)
        print("###===========================================================###\n", flush=True)
    if equilibrate.val:
        print("###======================Equilibration========================###")
        print ('Running equilibration.')
        # Here we anneal lambda (To be determined)
        if verbose.val:
            print ('Equilibration timestep ' + str(equil_timestep.val))
            print ('Number of equilibration steps: ' + str(equil_iterations.val))
        system = integrator.equilibrateSystem(system, equil_timestep.val, equil_iterations.val)
        system.mustNowRecalculateFromScratch()
        if verbose.val:
            print ("Energy after the equilibration: " + str(system.energy()))
            print ('Equilibration done.\n')
        print("###===========================================================###\n", flush=True)
    simtime=nmoves.val*ncycles.val*timestep.val
    print("###=======================somd run============================###")
    print ("Starting somd run...")
    print ("%s moves %s cycles, %s simulation time" %(nmoves.val, ncycles.val, simtime))
    s1 = timer.elapsed() / 1000.
    for i in range(cycle_start, cycle_end):
        print("\nCycle = ", i, flush=True )
        system = moves.move(system, nmoves.val, True)
        if (save_coords.val):
            writeSystemData(system, moves, trajectory, i)
    s2 = timer.elapsed() / 1000.
    print("Simulation took %d s " % ( s2 - s1))
    print("Saving restart")
    Sire.Stream.save([system, moves], restart_file.val)
@resolveParameters
def runFreeNrg():
    """Run a single-topology molecular dynamics free energy simulation.

    Configuration comes from module-level Parameter objects (resolved by the
    @resolveParameters decorator).  A fresh run builds the system from the
    AMBER input, writes the simulation-file header and a restart file; a
    restarted run reloads state from the restart file.  Each cycle performs
    the configured moves, appends energies/gradients to the output files and
    re-saves the restart; accumulated gradients are merged into gradients.s3.

    Fixes relative to the previous revision:
      - removed dead cycle_start/cycle_end assignments and the unused
        `restart` local in the restart branch (both were unconditionally
        recomputed/ignored below),
      - removed debug `if True:` leftovers around the minimisation prints
        (they always executed; the prints are now unconditional),
      - corrected the max-cycles message ("Maxinum" -> "Maximum",
        "maxcycle" -> "maxcycles" to match the actual parameter name).
    """
    #if (save_coords.val):
    #    buffer_freq = 500
    #else:
    #    buffer_freq = 0
    try:
        host = os.environ['HOSTNAME']
    except KeyError:
        host = "unknown"
    print("### Running Single Topology Molecular Dynamics Free Energy on %s ###" % host)
    if verbose.val:
        print("###================= Simulation Parameters=====================###")
        Parameter.printAll()
        print ("###===========================================================###\n")
    timer = QTime()
    timer.start()
    # Energies are appended in binary so restarts extend the existing file.
    outfile = open(simfile.val, "ab")
    lam_str = "%7.5f" % lambda_val.val
    simtime=nmoves.val*ncycles.val*timestep.val
    # Setup the system from scratch if no restart file is available
    print("###================Setting up calculation=====================###")
    if not os.path.exists(restart_file.val):
        print("New run. Loading input and creating restart")
        print("lambda is %s" % lambda_val.val)
        amber = Amber()
        if os.path.exists(s3file.val):
            (molecules, space) = Sire.Stream.load(s3file.val)
        else:
            (molecules, space) = amber.readCrdTop(crdfile.val, topfile.val)
            Sire.Stream.save((molecules, space), s3file.val)
        system = createSystemFreeEnergy(molecules)
        if (center_solute.val):
            system = centerSolute(system, space)
        if use_restraints.val:
            system = setupRestraints(system)
        if use_distance_restraints.val:
            restraints = None
            if len(distance_restraints_dict.val) == 0:
                print ("Distance restraints have been activated, but none have been specified. Will autogenerate.")
                restraints = generateDistanceRestraintsDict(system)
                # Save restraints
                print ("Autogenerated distance restraints values: %s " % distance_restraints_dict)
                stream = open("restraints.cfg",'w')
                stream.write("distance restraints dictionary = %s\n" % restraints)
                stream.close()
            system = setupDistanceRestraints(system, restraints=restraints)
            #import pdb; pdb.set_trace()
        if hydrogen_mass_repartitioning_factor.val > 1.0:
            system = repartitionMasses(system, hmassfactor=hydrogen_mass_repartitioning_factor.val)
        # Note that this just set the mass to zero which freezes residues in OpenMM but Sire doesn't known that
        if freeze_residues.val:
            system = freezeResidues(system)
        system = setupForceFieldsFreeEnergy(system, space)
        if debug_seed.val != 0:
            print("Setting up the simulation with debugging seed %s" % debug_seed.val)
        moves = setupMovesFreeEnergy(system, debug_seed.val, gpu.val, lambda_val.val)
        print("Saving restart")
        Sire.Stream.save([system, moves], restart_file.val)
        # Write the header of the simulation data file.
        print("Setting up sim file. ")
        outfile.write(bytes("#This file was generated on "+time.strftime("%c")+"\n", "UTF-8"))
        outfile.write(bytes("#Using the somd command, of the molecular library Sire version <%s> \n" %Sire.__version__,"UTF-8"))
        outfile.write(bytes("#For more information visit: https://github.com/michellab/Sire\n#\n","UTF-8"))
        outfile.write(bytes("#General information on simulation parameters:\n", "UTF-8"))
        outfile.write(bytes("#Simulation used %s moves, %s cycles and %s of simulation time \n" %(nmoves.val,
                                                                                                 ncycles.val, simtime), "UTF-8"))
        outfile.write(bytes("#Generating lambda is\t\t " + lam_str+"\n", "UTF-8"))
        outfile.write(bytes("#Alchemical array is\t\t "+ str(lambda_array.val) +"\n", "UTF-8"))
        outfile.write(bytes("#Generating temperature is \t"+str(temperature.val)+"\n", "UTF-8"))
        outfile.write(bytes("#Energy was saved every "+str(energy_frequency.val)+ " steps \n#\n#\n", "UTF-8"))
        outfile.write(bytes("# %8s %25s %25s %25s %25s %25s" % ("[step]", "[potential kcal/mol]", "[gradient kcal/mol]",
                                                                "[forward Metropolis]", "[backward Metropolis]", "[u_kl]\n"),
                            "UTF-8"))
    else:
        # Restarted run: reload the saved system/moves and re-bind the
        # integrator to the requested GPU device.
        system, moves = Sire.Stream.load(restart_file.val)
        move0 = moves.moves()[0]
        integrator = move0.integrator()
        integrator.setDeviceIndex(str(gpu.val))
        move0.setIntegrator(integrator)
        moves = WeightedMoves()
        moves.add(move0)
        print("Index GPU = %s " % moves.moves()[0].integrator().getDeviceIndex())
        print("Loaded a restart file on which we have performed %d moves." % moves.nMoves())
    # Cycle numbering continues from the number of moves already performed,
    # capped at maxcycles.
    cycle_start = int(moves.nMoves() / nmoves.val) + 1
    if cycle_start > maxcycles.val:
        print("Maximum number of cycles reached (%s). If you wish to extend the simulation increase the value of the parameter maxcycles." % maxcycles.val)
        sys.exit(-1)
    cycle_end = cycle_start + ncycles.val
    if (cycle_end > maxcycles.val):
        cycle_end = maxcycles.val + 1
    # Line-buffered append so gradients survive an interrupted run.
    outgradients = open("gradients.dat", "a", 1)
    outgradients.write("# lambda_val.val %s\n" % lam_str)
    if (save_coords.val):
        trajectory = setupDCD(system)
    mdmoves = moves.moves()[0]
    integrator = mdmoves.integrator()
    print ("###===========================================================###\n")
    if minimise.val:
        print("###=======================Minimisation========================###")
        print('Running minimisation.')
        # These diagnostics are printed unconditionally (previously wrapped in
        # a debug `if True:` that ignored verbose.val).
        print ("Energy before the minimisation: " + str(system.energy()))
        print ('Tolerance for minimisation: ' + str(minimise_tol.val))
        print ('Maximum number of minimisation iterations: ' + str(minimise_max_iter.val))
        system = integrator.minimiseEnergy(system, minimise_tol.val, minimise_max_iter.val)
        system.mustNowRecalculateFromScratch()
        print ("Energy after the minimization: " + str(system.energy()))
        print ("Energy minimization done.")
        print("###===========================================================###\n")
    if equilibrate.val:
        print("###======================Equilibration========================###")
        print ('Running lambda equilibration to lambda=%s.' %lambda_val.val)
        # Here we anneal lambda (To be determined)
        if verbose.val:
            print ('Equilibration timestep ' + str(equil_timestep.val))
            print ('Number of equilibration steps: ' + str(equil_iterations.val))
        system = integrator.annealSystemToLambda(system, equil_timestep.val, equil_iterations.val)
        system.mustNowRecalculateFromScratch()
        if verbose.val:
            print ("Energy after the annealing: " + str(system.energy()))
            print ('Lambda annealing done.\n')
        print("###===========================================================###\n")
    print("###====================somd-freenrg run=======================###")
    print ("Starting somd-freenrg run...")
    print ("%s moves %s cycles, %s simulation time" %(nmoves.val, ncycles.val, simtime))
    # With minimal coordinate saving, intermediate lambda windows are flagged
    # as softcore when writing system data.
    softcore_lambda = False
    if minimal_coordinate_saving.val:
        if lambda_val.val == 1.0 or lambda_val.val == 0.0:
            softcore_lambda = False
        else:
            softcore_lambda = True
    grads = {}
    grads[lambda_val.val] = AverageAndStddev()
    s1 = timer.elapsed() / 1000.
    for i in range(cycle_start, cycle_end):
        print("\nCycle = ", i, "\n")
        system = moves.move(system, nmoves.val, True)
        if save_coords.val:
            writeSystemData(system, moves, trajectory, i, softcore_lambda)
        mdmoves = moves.moves()[0]
        integrator = mdmoves.integrator()
        #saving all data
        beg = (nmoves.val*(i-1))
        end = nmoves.val*(i-1)+nmoves.val
        steps = list(range(beg, end, energy_frequency.val))
        outdata = getAllData(integrator, steps)
        gradients = integrator.getGradients()
        fmt =" ".join(["%8d"] + ["%25.8e"] + ["%25.8e"] + ["%25.8e"] + ["%25.8e"] + ["%25.15e"]*(len(lambda_array.val)))
        np.savetxt(outfile, outdata, fmt=fmt)
        mean_gradient = np.average(gradients)
        outgradients.write("%5d %20.10f\n" % (i, mean_gradient))
        for gradient in gradients:
            #grads[lambda_val.val].accumulate(gradients[i-1])
            grads[lambda_val.val].accumulate(gradient)
        # Save restart
        print("Backing up previous restart")
        # NOTE(review): shell `cp` is not portable to Windows; consider
        # shutil.copy if cross-platform support is required.
        cmd = "cp %s %s.previous" % (restart_file.val, restart_file.val)
        os.system(cmd)
        print ("Saving new restart")
        Sire.Stream.save([system, moves], restart_file.val)
    s2 = timer.elapsed() / 1000.
    outgradients.flush()
    outfile.flush()
    outgradients.close()
    outfile.close()
    print("Simulation took %d s " % ( s2 - s1))
    print("###===========================================================###\n")
    # Merge this run's gradient statistics into the cumulative gradients.s3.
    if os.path.exists("gradients.s3"):
        siregrads = Sire.Stream.load("gradients.s3")
    else:
        siregrads = Gradients()
    siregrads = siregrads + Gradients(grads)
    Sire.Stream.save(siregrads, "gradients.s3")
    if buffered_coords_freq.val > 0:
        system = clearBuffers(system)
        # Necessary to write correct restart
        system.mustNowRecalculateFromScratch()
if __name__ == '__main__':
runFreeNrg()
| michellab/Sire | wrapper/Tools/OpenMMMD.py | Python | gpl-2.0 | 69,390 | [
"Amber",
"OpenMM",
"VisIt"
] | 9f0bfd1ce22f7ed110d30eba7d4187629fdc70c4244c5a6573f5237337df6d90 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
Provides an implementation of the uniform placement model.
In the uniform placement model, a simulation area of a given size is chosen and
a given number of nodes are placed over it with the uniform probability
distribution.
"""
from sim2net.placement._placement import Placement
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class Uniform(Placement):
    """
    This class implements the uniform placement model, in which a given
    number of nodes are placed over a simulation area with the uniform
    probability distribution.
    """

    def __init__(self, area, nodes_number):
        """
        *Parameters*:
            - **area**: an object representing the simulation area;
            - **nodes_number** (`int`): a number of nodes to place over the
              simulation area.

        *Raises*:
            - **ValueError**: raised when the number of nodes is less or equal
              to 0, or when the given value of the *area* parameter is `None`.
        """
        if area is None:
            raise ValueError('Parameter "area": a simulation area object'
                             ' expected but "None" value given!')
        super(Uniform, self).__init__(Uniform.__name__)
        self.__area = area
        check_argument_type(Uniform.__name__, 'nodes_number', int,
                            nodes_number, self.logger)
        if nodes_number <= 0:
            raise ValueError('Parameter "nodes_number": the number of nodes'
                             ' cannot be less or equal to zero but %d given!' %
                             int(nodes_number))
        self.__nodes_number = int(nodes_number)

    def get_placement(self):
        """
        Generates uniform placement coordinates for the given number of
        nodes.

        *Returns*:
            A list of tuples with horizontal and vertical coordinates, one
            tuple per node, or `None` if the placement object has not been
            initialized.
        """
        if self.__area is None or self.__nodes_number is None:
            return None
        # Redraw all coordinates until no two nodes occupy the same position.
        while True:
            horizontal_coordinates = \
                [self.random_generator.uniform(0, self.__area.width)
                 for coordinate in range(0, self.__nodes_number)]
            vertical_coordinates = \
                [self.random_generator.uniform(0, self.__area.height)
                 for coordinate in range(0, self.__nodes_number)]
            if Placement.position_conflict(horizontal_coordinates,
                                           vertical_coordinates) == -1:
                break
        self.logger.debug('Initial placement coordinates has been'
                          ' generated: %d nodes within %dx%d %s simulation'
                          ' area' %
                          (self.__nodes_number, self.__area.width,
                           self.__area.height,
                           self.__area.__class__.__name__.lower()))
        # Materialize the pairs: on Python 3 `zip` is a one-shot iterator,
        # which would contradict the documented list-of-tuples contract.
        return list(zip(horizontal_coordinates, vertical_coordinates))
| mkalewski/sim2net | sim2net/placement/uniform.py | Python | mit | 3,617 | [
"VisIt"
] | c20d9e4ca211024b59dac3552b549d89d8fc0ab20b710f6fb898e2f0bfa147c1 |
"""Tests for QHA calculations."""
import os
import numpy as np
from phonopy import PhonopyQHA
current_dir = os.path.dirname(os.path.abspath(__file__))
ev_vs_v = np.array(
[
[140.030000, -42.132246],
[144.500000, -42.600974],
[149.060000, -42.949142],
[153.720000, -43.188162],
[158.470000, -43.326751],
[163.320000, -43.375124],
[168.270000, -43.339884],
[173.320000, -43.230619],
[178.470000, -43.054343],
[183.720000, -42.817825],
[189.070000, -42.527932],
]
)
temperatures = np.arange(0, 2101, 10)
tprop_file = os.path.join(current_dir, "tprop.dat")
cv, entropy, fe_phonon = np.loadtxt(tprop_file).reshape(3, 211, 11)
thermal_expansion = np.array(
[
0.0,
-0.6332219,
5.6139850,
9.6750859,
11.8141234,
13.0844083,
13.9458837,
14.5977009,
15.1336183,
15.6020829,
16.0296249,
]
)
helmholtz_volume = np.array(
[
-41.5839894,
-41.6004724,
-41.6770546,
-41.8127769,
-42.0001647,
-42.2311973,
-42.4992712,
-42.7992502,
-43.1271352,
-43.4797635,
-43.8545876,
]
)
volume_temperature = np.array(
[
164.4548783,
164.4442152,
164.4847063,
164.6142652,
164.7929816,
164.9990617,
165.2226063,
165.4587889,
165.7050586,
165.9599810,
166.2227137,
]
)
gibbs_temperature = np.array(
[
-42.8932829,
-42.9039937,
-42.9721912,
-43.1059496,
-43.2954558,
-43.5308843,
-43.8047360,
-44.1114190,
-44.4466860,
-44.8072307,
-45.1904183,
]
)
bulkmodulus_temperature = np.array(
[
87.4121501,
87.2126795,
86.5084539,
85.5863262,
84.5997708,
83.5933127,
82.5823028,
81.5733203,
80.5697051,
79.5733964,
78.5856509,
]
)
cp_temperature = np.array(
[
0.0000000,
61.6615825,
128.3828570,
161.0031288,
176.4325115,
184.6087521,
189.4345190,
192.5460752,
194.6985008,
196.2812400,
197.5052927,
]
)
cp_temperature_polyfit = np.array(
[
0.0000000,
61.7161021,
128.3966796,
160.9982814,
176.4240892,
184.6003622,
189.4249754,
192.5323933,
194.6826330,
196.2629828,
197.4862251,
]
)
gruneisen_temperature = np.array(
[
0.0000000,
-0.0886154,
0.3748304,
0.5106203,
0.5637079,
0.5910095,
0.6080069,
0.6201481,
0.6297201,
0.6378298,
0.6450508,
]
)
def test_QHA_Si():
    """Test of QHA calculation by Si."""
    vol_idx = list(range(11))
    qha = PhonopyQHA(
        volumes=ev_vs_v[vol_idx, 0],
        electronic_energies=ev_vs_v[vol_idx, 1],
        eos="vinet",
        temperatures=temperatures,
        free_energy=fe_phonon[:, vol_idx],
        cv=cv[:, vol_idx],
        entropy=entropy[:, vol_idx],
        t_max=1000,
        verbose=True,
    )
    # Sample the temperature grid every 100 K from 0 K to 1000 K.
    t_sample = list(range(0, 101, 10))
    # Bulk modulus of the static (phonon-free) EOS fit.
    np.testing.assert_almost_equal(qha.bulk_modulus, 0.5559133052877888)
    # Thermal expansion (reference table is in units of 1e-6 / K).
    np.testing.assert_allclose(
        [qha.thermal_expansion[i] for i in t_sample],
        thermal_expansion * 1e-6,
        atol=1e-5,
    )
    # Helmholtz free energy at the first (smallest) volume.
    np.testing.assert_allclose(
        qha.helmholtz_volume[t_sample, 0], helmholtz_volume, atol=1e-5
    )
    # Equilibrium volume versus temperature.
    np.testing.assert_allclose(
        qha.volume_temperature[t_sample], volume_temperature, atol=1e-5
    )
    # Gibbs free energy versus temperature.
    np.testing.assert_allclose(
        qha.gibbs_temperature[t_sample], gibbs_temperature, atol=1e-5
    )
    # Bulk modulus versus temperature.
    np.testing.assert_allclose(
        qha.bulk_modulus_temperature[t_sample],
        bulkmodulus_temperature,
        atol=1e-5,
    )
    # Cp from a numerical second derivative (looser tolerance).
    np.testing.assert_allclose(
        np.array(qha.heat_capacity_P_numerical)[t_sample],
        cp_temperature,
        atol=0.01,
    )
    # Cp from polynomial fits of Cv and S.
    np.testing.assert_allclose(
        np.array(qha.heat_capacity_P_polyfit)[t_sample],
        cp_temperature_polyfit,
        atol=1e-5,
    )
    # Grueneisen parameter versus temperature.
    np.testing.assert_allclose(
        np.array(qha.gruneisen_temperature)[t_sample],
        gruneisen_temperature,
        atol=1e-5,
    )
def _print_values(values):
    """Print the first value on its own line, then the rest five per line."""
    print("%.7f," % values[0])
    rows = np.reshape(values[1:], (-1, 5))
    for row in rows:
        rendered = ""
        for v in row:
            rendered += "%.7f, " % v
        print(rendered)
| atztogo/phonopy | test/qha/test_QHA.py | Python | bsd-3-clause | 5,043 | [
"phonopy"
] | 4d08c8c0335aa449b6575e8820e451c49d4f792a7c733d14720b83339203fb6d |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Fragment analysis based on parsed ADF data."""
import logging
import random
import numpy
numpy.inv = numpy.linalg.inv
from cclib.method.calculationmethod import Method
class FragmentAnalysis(Method):
    """Convert a molecule's basis functions from atomic-based to fragment MO-based"""
    def __init__(self, data, progress=None, loglevel=logging.INFO,
                 logname="FragmentAnalysis of"):
        """Initialize the fragment analysis.

        Args:
            data: parsed calculation data for the whole molecule.
            progress: optional progress reporter passed to the base Method.
            loglevel: logging level for the internal logger.
            logname: prefix used for log messages.
        """
        super().__init__(data, progress, loglevel, logname)
        # Set to True once calculate() has completed successfully.
        self.parsed = False
    def __str__(self):
        """Return a string representation of the object."""
        return "Fragment molecule basis of %s" % (self.data)
    def __repr__(self):
        """Return a representation of the object."""
        return 'Fragment molecular basis("%s")' % (self.data)
    def calculate(self, fragments, cupdate=0.05):
        """Transform the molecule's MO coefficients into the fragment MO basis.

        Builds a block-diagonal matrix from the fragments' MO coefficients,
        inverts it, and applies it to the whole-molecule coefficients.  Sets
        self.mocoeffs, self.fonames, self.nbasis, self.homos and (when an AO
        overlap matrix is available) self.fooverlaps / self.fooverlaps2.

        Args:
            fragments: list of parsed fragment calculations, in the same atom
                order as the whole-molecule calculation.
            cupdate: progress update granularity (unused in this body —
                presumably kept for interface compatibility; verify).

        Returns:
            True on success, False when the fragments are inconsistent with
            the molecule (basis size, electron counts, atom order/elements).
        """
        nFragBasis = 0
        nFragAlpha = 0
        nFragBeta = 0
        self.fonames = []
        # Two sets of MO coefficients means an unrestricted calculation.
        unrestricted = ( len(self.data.mocoeffs) == 2 )
        self.logger.info("Creating attribute fonames[]")
        # Collect basis info on the fragments.
        for j in range(len(fragments)):
            nFragBasis += fragments[j].nbasis
            nFragAlpha += fragments[j].homos[0] + 1
            if unrestricted and len(fragments[j].homos) == 1:
                nFragBeta += fragments[j].homos[0] + 1 #assume restricted fragment
            elif unrestricted and len(fragments[j].homos) == 2:
                nFragBeta += fragments[j].homos[1] + 1 #assume unrestricted fragment
            #assign fonames based on fragment name and MO number
            for i in range(fragments[j].nbasis):
                if hasattr(fragments[j],"name"):
                    self.fonames.append("%s_%i"%(fragments[j].name,i+1))
                else:
                    self.fonames.append("noname%i_%i"%(j,i+1))
        nBasis = self.data.nbasis
        nAlpha = self.data.homos[0] + 1
        if unrestricted:
            nBeta = self.data.homos[1] + 1
        # Check to make sure calcs have the right properties.
        if nBasis != nFragBasis:
            self.logger.error("Basis functions don't match")
            return False
        if nAlpha != nFragAlpha:
            self.logger.error("Alpha electrons don't match")
            return False
        if unrestricted and nBeta != nFragBeta:
            self.logger.error("Beta electrons don't match")
            return False
        # A single geometry is expected; multiple frames suggest the parsed
        # file was a geometry optimization, not a single-point calculation.
        if len(self.data.atomcoords) != 1:
            self.logger.warning("Molecule calc appears to be an optimization")
        for frag in fragments:
            if len(frag.atomcoords) != 1:
                msg = "One or more fragment appears to be an optimization"
                self.logger.warning(msg)
                break
        # Verify that fragment atoms line up, in order, with molecule atoms.
        last = 0
        for frag in fragments:
            size = frag.natom
            if self.data.atomcoords[0][last:last+size].tolist() != \
                    frag.atomcoords[0].tolist():
                self.logger.error("Atom coordinates aren't aligned")
                return False
            if self.data.atomnos[last:last+size].tolist() != \
                    frag.atomnos.tolist():
                self.logger.error("Elements don't match")
                return False
            last += size
        # And let's begin!
        self.mocoeffs = []
        self.logger.info("Creating mocoeffs in new fragment MO basis: mocoeffs[]")
        for spin in range(len(self.data.mocoeffs)):
            blockMatrix = numpy.zeros((nBasis,nBasis), "d")
            pos = 0
            # Build up block-diagonal matrix from fragment mocoeffs.
            # Need to switch ordering from [mo,ao] to [ao,mo].
            for i in range(len(fragments)):
                size = fragments[i].nbasis
                # Restricted fragments provide one coefficient set for both spins.
                if len(fragments[i].mocoeffs) == 1:
                    temp = numpy.transpose(fragments[i].mocoeffs[0])
                    blockMatrix[pos:pos+size, pos:pos+size] = temp
                else:
                    temp = numpy.transpose(fragments[i].mocoeffs[spin])
                    blockMatrix[pos:pos+size, pos:pos+size] = temp
                pos += size
            # Invert and mutliply to result in fragment MOs as basis.
            # (numpy.inv is a module-level alias for numpy.linalg.inv.)
            iBlockMatrix = numpy.inv(blockMatrix)
            temp = numpy.transpose(self.data.mocoeffs[spin])
            results = numpy.transpose(numpy.dot(iBlockMatrix, temp))
            self.mocoeffs.append(results)
            # Transform the AO overlap matrix into the fragment-orbital basis.
            if hasattr(self.data, "aooverlaps"):
                tempMatrix = numpy.dot(self.data.aooverlaps, blockMatrix)
                tBlockMatrix = numpy.transpose(blockMatrix)
                if spin == 0:
                    self.fooverlaps = numpy.dot(tBlockMatrix, tempMatrix)
                    self.logger.info("Creating fooverlaps: array[x,y]")
                elif spin == 1:
                    self.fooverlaps2 = numpy.dot(tBlockMatrix, tempMatrix)
                    self.logger.info("Creating fooverlaps (beta): array[x,y]")
            else:
                self.logger.warning("Overlap matrix missing")
        self.parsed = True
        self.nbasis = nBasis
        self.homos = self.data.homos
        return True
| cclib/cclib | cclib/method/fragments.py | Python | bsd-3-clause | 5,375 | [
"ADF",
"cclib"
] | 2a8de76181c3aefb81ce4461e48852d7dea5d7d4a98eb215db701bf0186edab9 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkPDBReader(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkPDBReader object."""

    def __init__(self, module_manager):
        # Delegate all wiring to the generic VTK-object mixin: no inputs,
        # a single 'vtkPDB' output, and the wrapped reader instance.
        wrapped_reader = vtk.vtkPDBReader()
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            wrapped_reader, 'Reading vtkPDB.',
            (), ('vtkPDB',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkPDBReader.py | Python | bsd-3-clause | 464 | [
"VTK"
] | 886b02382737797e05e7e04bf69fa351c760a1a5113af1382eec7c995c2eaad4 |
# MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import numpy as np
import pandas as pd
from math import log10
from sklearn.decomposition import PCA, KernelPCA
from rdkit import Chem
from rdkit.Chem import AllChem
# Conditional import of deepchem
__deepchem_imported__ = True
from deepchem.feat import Featurizer, CoulombMatrix, CoulombMatrixEig, ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
try:
import deepchem as dc
except ImportError:
__deepchem_imported__ = False
# ===================================================================================================
#
# This class reads in the training/test CSV file.
# The class also generates molecular properties from the SMILES.
# See the rdkit documentation for further info.
#
# ===================================================================================================
class OSMGenerateData(object):
"""Generate molecular properties"""
    def __init__(self, args, log):
        """Load the molecular data and derive all modelling properties.

        Args:
            args: parsed runtime arguments; must provide dataFilename,
                dragonFilename, coulombFlag and varFlag (plus whatever the
                read_* helpers consume).
            log: logger used for progress and error reporting.
        """
        self.log = log
        self.args = args
        # Raw training/test data and the Dragon QSAR descriptor tables.
        self.data = self.read_csv(self.args.dataFilename)
        self.dragon = self.read_dragon(self.args.dragonFilename)
        self.dragon_fields = self.read_dragon_fields("DragonFields.csv")
        # Rank cutoff used by get_truncated_dragon_fields().
        self.dragon_truncation_rank = 100
        self.trunc_dragon = self.truncated_dragon()
        self.max_atoms = self.check_smiles(self.data)
        if self.args.coulombFlag:
            self.generate_coulomb_matrices()
        self.generate_conv_mol()
        self.generate_fields()
        if self.args.varFlag: # If the "--vars" flag is specified then list the variables and exit.
            self.log.info("The Available Modelling Variables are:")
            self.display_variables()
            sys.exit()
def display_variables(self):
for column in self.data.columns:
msg = "Variable: {:20s} Dimension: {}".format(column, self.get_dimension(column))
self.log.info(msg)
def get_dragon_headers(self):
# Note this returns 1666 columns in the same order that the columns are in the pandas array.
# The first column is a SMILE and is removed.
field_list = list(self.dragon.columns.values)
field_list.pop(0)
return field_list
    def get_dragon_fields(self):
        """Return the Dragon field metadata sorted into Dragon header order.

        Inner-joins the header names onto the field metadata table and drops
        duplicate FIELD rows.  Exits the program if any header name has no
        matching metadata row (i.e. was lost in the join).
        """
        # Sorts field info into dragon header order and returns pandas data frame.
        sorted_dragon_fields = pd.DataFrame(pd.Series(self.get_dragon_headers()), columns=["FIELD"])
        sorted_dragon_fields = pd.merge(sorted_dragon_fields, self.dragon_fields, how="inner", on=["FIELD"])
        sorted_dragon_fields = sorted_dragon_fields.drop_duplicates(subset=["FIELD"], keep="first")
        after_merge = list(sorted_dragon_fields["FIELD"])
        # Headers with no metadata are silently dropped by the inner join;
        # report each one and abort, since the field order would be wrong.
        missing_list = list(set(self.get_dragon_headers()) - set(after_merge))
        for missing in missing_list:
            self.log.error("Dropped FIELD ID: %s after sort and merge with Dragon headers", missing)
        if len(missing_list) > 0:
            sys.exit()
        return sorted_dragon_fields
    def get_truncated_dragon_headers(self):
        # Accessor for the truncated Dragon table built in __init__.
        # NOTE(review): despite the name, this returns self.trunc_dragon
        # (the truncated_dragon() result), not a list of header names.
        return self.trunc_dragon
def get_truncated_dragon_fields(self):
return self.dragon_fields.loc[self.dragon_fields["RANK"] <= self.dragon_truncation_rank]
    def get_data(self):
        """Accessor for the molecular property DataFrame."""
        return self.data
def get_dimension(self, column_name):
if self.data[column_name].shape[0] == 0:
self.log.error("Unexpected Error column %s contains no rows", column_name)
sys.exit()
if isinstance(self.data[column_name][0], np.ndarray):
shape = self.data[column_name][0].shape
return shape
else:
return 1
def generate_fields(self):
self.log.info("Calculating QSAR fields, may take a few moments....")
# Add the finger print columns
self.generate_fingerprints()
# Add the potency classes
self.generate_potency_class(200)
self.generate_potency_class(500)
self.generate_potency_class(1000)
# Add pEC50
self.generate_pEC50()
# ION_ACTIVE class reduces ION_ACTIVITY to 2 classes ["ACTIVE", "INACTIVE"]
self.generate_ion_active()
    def generate_coulomb_matrices(self):
        """Generate a Coulomb matrix, its flattened form and its eigenvalue
        spectrum for each molecule, then inner-join them onto self.data.

        Molecules for which conformer embedding fails (even after retrying
        with ignoreSmoothingFailures) are skipped and later reported as
        dropped.  Exits if deepchem is unavailable.
        """
        if not __deepchem_imported__:
            self.log.error("deepchem was not imported or available in this execution environment")
            sys.exit()
        self.log.info("Generating Coulomb Matrices, may take a few moments ...")
        matrix_featurizer = CoulombMatrix(self.max_atoms, randomize=False, n_samples=1)
        eigen_featurizer = CoulombMatrixEig(self.max_atoms)
        matrices = []
        smiles = []
        arrays = []
        eigenarrays = []
        num_confs = 1
        for index, row in self.data.iterrows():
            mol = Chem.MolFromSmiles(row["SMILE"])
            # NOTE(review): Chem.AddHs returns a *new* molecule; as written
            # this call is a no-op on `mol` — likely intended
            # `mol = Chem.AddHs(mol)`.  Confirm before changing, since it
            # would alter the embedded geometries.
            Chem.AddHs(mol)
            ids = AllChem.EmbedMultipleConfs(mol, numConfs=num_confs)
            if len(ids) != num_confs:
                # Retry the embedding, tolerating smoothing failures.
                ids = AllChem.EmbedMultipleConfs(mol, numConfs=num_confs, ignoreSmoothingFailures=True)
            if len(ids) != num_confs:
                self.log.warning("Coulomb Matrix - unable to generate %d conformer(s) for smile: %s"
                                 , num_confs, row["SMILE"])
            if len(ids) == num_confs:
                for id in ids:
                    AllChem.UFFOptimizeMolecule(mol, confId=id)
                    matrix = matrix_featurizer.coulomb_matrix(mol)
                    matrices.append(matrix)
                    arrays.append(matrix[0].flatten())
                    smiles.append(row["SMILE"])
                    eigenvalues = eigen_featurizer.featurize([mol])
                    eigenarrays.append(eigenvalues[0].flatten())
        pd_dict = { "SMILE": smiles, "COULOMB": matrices, "COULOMB_ARRAY": arrays, "COULOMB_EIGEN" : eigenarrays }
        coulomb_frame = pd.DataFrame(pd_dict)
        before_ids = list(self.data["ID"])
        # Inner join keeps only successfully featurized molecules; report
        # every molecule that was dropped by the join.
        self.data = pd.merge(self.data, coulomb_frame, how="inner", on=["SMILE"])
        self.data = self.data.drop_duplicates(subset=["SMILE"], keep="first")
        after_ids = list(self.data["ID"])
        missing_list = list(set(before_ids) - set(after_ids))
        for missing in missing_list:
            self.log.warning("Dropped molecule ID: %s after join with Coulomb Matrix data", missing)
    def generate_conv_mol(self):
        """Build deepchem graph-convolution features for every molecule.

        The resulting conv_array is currently only printed, not stored on
        self — presumably work in progress; verify before relying on it.
        """
        self.log.info("Generating Molecular Convolutions, may take a few moments ...")
        mol_list = []
        for index, row in self.data.iterrows():
            mol = Chem.MolFromSmiles(row["SMILE"])
            # NOTE(review): Chem.AddHs returns a new molecule; this call as
            # written does not modify `mol` — likely intended
            # `mol = Chem.AddHs(mol)`.
            Chem.AddHs(mol)
            # AllChem.UFFOptimizeMolecule(mol, confId=id)
            mol_list.append(mol)
        conv_array = ConvMolFeaturizer().featurize(mol_list)
        # NOTE(review): debug diagnostics left in; conv_array is discarded.
        print("len(conv_array)",len(conv_array),
              "type(conv_array)", type(conv_array),
              "type(conv_array[0]", type(conv_array[0]),
              "get_atom_features", conv_array[0].get_atom_features().shape,)
def generate_potency_class(self, nMol):
        """Add a binary potency-classification column named "EC50_<nMol>".

        A molecule is labelled "ACTIVE" when its EC50 scaled by 1000
        (presumably a uM -> nM conversion -- TODO confirm units against the
        data source) is at or below the nMol threshold, otherwise "INACTIVE".
        Null or non-positive EC50 values are carried through unchanged.
        """
        column_name = "EC50_{}".format(int(nMol))

        def _classify(ec50):
            # Null / non-positive potencies cannot be classified; pass them through.
            if pd.isnull(ec50) or ec50 <= 0:
                return ec50
            return "ACTIVE" if ec50 * 1000 <= nMol else "INACTIVE"

        potency_class = [_classify(ec50) for ec50 in self.data["EC50"]]
        self.data[column_name] = pd.Series(potency_class, index=self.data.index)
def generate_pEC50(self):
        """Add a "pEC50" column holding log10(EC50) for positive potencies.

        Null or non-positive EC50 values are carried through unchanged.
        NOTE(review): pEC50 is conventionally -log10(EC50 in molar); this
        computes log10(EC50) directly -- confirm that is intended downstream.
        """
        def _transform(ec50):
            # Cannot take the log of a null/non-positive value; keep it as-is.
            return ec50 if pd.isnull(ec50) or ec50 <= 0 else log10(ec50)

        pEC50_list = [_transform(value) for value in self.data["EC50"]]
        self.data["pEC50"] = pd.Series(pEC50_list, index=self.data.index)
def generate_ion_active(self):
        """Add an "ION_ACTIVE" column normalising ION_ACTIVITY to ACTIVE/INACTIVE/NaN.

        Any non-null label other than exactly "ACTIVE" becomes "INACTIVE";
        missing labels become NaN.
        """
        column_name = "ION_ACTIVE"
        ion_active = []
        for ion in self.data["ION_ACTIVITY"]:
            if pd.isnull(ion):
                # BUG FIX (compatibility): the np.NaN alias was removed in NumPy 2.0;
                # np.nan is the supported spelling in every NumPy version.
                ion_active.append(np.nan)
            else:
                # Anything other than an explicit "ACTIVE" label is INACTIVE.
                klass = "ACTIVE" if ion == "ACTIVE" else "INACTIVE"
                ion_active.append(klass)
        self.data[column_name] = pd.Series(ion_active, index=self.data.index)
def generate_fingerprints(self):
        """Attach the standard set of bit-vector fingerprint columns to self.data.

        Each entry pairs a column name with the RDKit fingerprint function used
        to populate it; columns are added in the order listed.
        """
        fingerprint_specs = [
            ("MORGAN1024", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 4, nBits=1024)),
            ("MORGAN2048_1", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 1, nBits=2048)),
            ("MORGAN2048_2", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)),
            ("MORGAN2048_3", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=2048)),
            ("MORGAN2048_4", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048)),
            ("MORGAN2048_5", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 5, nBits=2048)),
            ("MORGAN2048_6", lambda mol: AllChem.GetMorganFingerprintAsBitVect(mol, 6, nBits=2048)),
            ("TOPOLOGICAL2048", lambda mol: AllChem.GetHashedTopologicalTorsionFingerprintAsBitVect(mol)),
            ("MACCFP", lambda mol: AllChem.GetMACCSKeysFingerprint(mol)),
        ]
        for column_name, finger_printer in fingerprint_specs:
            self.add_bitvect_fingerprint(self.data, finger_printer, column_name)
# Read the Dragon data as a pandas object with 2 fields, [SMILE, np.ndarray] and join (merge)
# on "SMILE" with the OSMData data.
def read_dragon(self, file_name):
        """Read the EDragon QSAR CSV and inner-join its descriptor arrays onto self.data.

        The file must have a "SMILE" column; every other column is packed into a
        float64 numpy array stored per-row in a new "DRAGON" column. Molecules
        that do not survive the inner join are logged. Exits the process on a
        read failure. Returns the raw EDragon data frame.
        """
        self.log.info("Loading EDragon QSAR file: %s ...", file_name)
        try:
            dragon_data_frame = pd.read_csv(file_name, low_memory=False)
            new_frame = pd.DataFrame(dragon_data_frame, columns=["SMILE"])
            # BUG FIX (compatibility): drop(label, 1) used the deprecated positional
            # axis argument, and DataFrame.as_matrix() was removed from pandas;
            # .values is the equivalent supported spelling.
            data_frame = dragon_data_frame.drop("SMILE", axis=1)
            narray = data_frame.values.astype(np.float64)
            narray_list = list(narray)
            new_frame["DRAGON"] = pd.Series(narray_list, index=data_frame.index)
            before_ids = list(self.data["ID"])
            self.data = pd.merge(self.data, new_frame, how="inner", on=["SMILE"])
            self.data = self.data.drop_duplicates(subset=["SMILE"], keep="first")
            after_ids = list(self.data["ID"])
            # Report every molecule lost in the inner join.
            for missing in set(before_ids) - set(after_ids):
                self.log.warning("Dropped molecule ID: %s after join with DRAGON data", missing)
        except IOError:
            self.log.error('Problem reading EDragon file %s, Check "--data", ""--dir" and --help" flags.', file_name)
            sys.exit()
        self.log.info("Read %d records from file %s", data_frame.shape[0], file_name)
        return dragon_data_frame
def read_dragon_fields(self, file_name):
        """Read the ranked EDragon field file from the work directory.

        Exits the process if the file cannot be read; otherwise returns the
        parsed data frame.
        """
        path_name = os.path.join(self.args.workDirectory, file_name)
        self.log.info("Loading EDragon Ranked Fields: %s ...", path_name)
        try:
            field_frame = pd.read_csv(path_name, low_memory=False)
        except IOError:
            self.log.error('Problem reading EDragon Field file %s', path_name)
            sys.exit()
        self.log.info("Read %d records from file %s", field_frame.shape[0], path_name)
        return field_frame
def truncated_dragon(self):
        """Join a truncated (ranked-field) subset of the Dragon descriptors onto self.data.

        Uses get_truncated_dragon_fields() to pick the descriptor columns, packs
        them into per-row float64 numpy arrays in a "TRUNC_DRAGON" column, and
        inner-joins on SMILE. Returns the list of field names used.
        """
        trunc_fields = self.get_truncated_dragon_fields()
        field_list = trunc_fields["FIELD"].tolist()
        new_frame = pd.DataFrame(self.dragon, columns=["SMILE"])
        data_frame = pd.DataFrame(self.dragon, columns=field_list)
        # BUG FIX (compatibility): DataFrame.as_matrix() was removed from pandas;
        # .values is the equivalent supported spelling.
        narray = data_frame.values.astype(np.float64)
        narray_list = list(narray)
        new_frame["TRUNC_DRAGON"] = pd.Series(narray_list, index=data_frame.index)
        before_ids = list(self.data["ID"])
        self.data = pd.merge(self.data, new_frame, how="inner", on=["SMILE"])
        self.data = self.data.drop_duplicates(subset=["SMILE"], keep="first")
        after_ids = list(self.data["ID"])
        # Report every molecule lost in the inner join.
        for missing in set(before_ids) - set(after_ids):
            self.log.warning("Dropped molecule ID: %s after join with TRUNC_DRAGON data", missing)
        return field_list
# Read CSV File into a pandas data frame
def read_csv(self, file_name):
        """Load the molecule data CSV, verifying the mandatory columns are present.

        Exits the process when the file is unreadable or a mandatory column is
        missing; otherwise returns the parsed data frame.
        """
        self.log.info("Loading data file: %s ...", file_name)
        mandatory_fields = ["EC50", "SMILE", "ID", "CLASS", "ION_ACTIVITY"]
        try:
            data_frame = pd.read_csv(file_name)
            if not set(mandatory_fields).issubset(set(data_frame)):
                self.log.error("Mandatory data fields %s absent.", ",".join(mandatory_fields))
                self.log.error("File %s contains fields %s.", file_name, ",".join(data_frame))
                sys.exit()
        except IOError:
            self.log.error('Problem reading data file %s, Check the "--data", ""--dir" and --help" flags.', file_name)
            sys.exit()
        self.log.info("Read %d records from file %s", data_frame.shape[0], file_name)
        return data_frame
# Generate the molecular fingerprints..
def add_bitvect_fingerprint(self, data_frame, finger_printer, column_name):
        """ Generate molecular fingerprints as a numpy array of floats"""
        fingerprints = []
        for _, row in data_frame.iterrows():
            molecule = Chem.MolFromSmiles(row["SMILE"])
            bit_vector = finger_printer(molecule)
            # Expand the RDKit bit vector into a float numpy array.
            fingerprints.append(np.array([int(bit) for bit in bit_vector], dtype=float))
        # Store a list of numpy.arrays
        data_frame[column_name] = pd.Series(fingerprints, index=data_frame.index)
def check_smiles(self, data_frame):
        """Sanitize every SMILE in the frame and return the maximum atom count.

        Records whose SMILE cannot be sanitized are dropped from the frame
        (in place) with a warning. The maximum atom count is used later when
        sizing Coulomb matrices.
        """
        max_atoms = 0
        for index, row in data_frame.iterrows():
            mol = Chem.MolFromSmiles(row["SMILE"])
            try:
                result = Chem.SanitizeMol(mol)
                if result != Chem.SanitizeFlags.SANITIZE_NONE:
                    sanitized_smile = Chem.MolToSmiles(mol)
                    self.log.warning("Sanitized SMILE %s, Compound ID:%s", sanitized_smile, row["ID"])
                    # BUG FIX (compatibility): DataFrame.set_value() was removed from
                    # pandas; .at is the supported scalar setter.
                    data_frame.at[index, "SMILE"] = sanitized_smile
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate.
                self.log.warning("Unable to Sanitize SMILE %s, Compound ID:%s", row["SMILE"] , row["ID"])
                self.log.warning("Record Deleted. OSM_QSAR attempts to continue ....")
                data_frame.drop(index, inplace=True)
                # BUG FIX: mol is unusable (possibly None) here; previously control
                # fell through to mol.GetNumAtoms() and crashed on invalid SMILEs.
                continue
            num_atoms = mol.GetNumAtoms()
            if num_atoms > max_atoms:
                max_atoms = num_atoms
        self.log.info("Maximum Molecular Atoms: %d", max_atoms)
        return max_atoms
| kellerberrin/OSM-QSAR | OSMProperties.py | Python | mit | 16,637 | [
"RDKit"
] | 31cb3ce0b62bc159398ddc05939d7656159cdf13377f733688888271406ea4be |
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule.modulestore import Location
from xmodule.partitions.partitions import UserPartition
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList
import json
from xblock.fields import Scope, List, String, Dict, Boolean, Integer
from .fields import Date
from xmodule.modulestore.locator import CourseLocator
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
class StringOrDate(Date):
    """A Date field that tolerates plain strings.

    Values that fail to convert as dates round-trip unchanged as strings.
    """
    def from_json(self, value):
        """
        Parse an optional metadata key containing a time or a string:
        if present, assume it's a string if it doesn't parse.
        """
        try:
            result = super(StringOrDate, self).from_json(value)
        except ValueError:
            return value

        if result is None:
            return value
        else:
            return result

    def to_json(self, value):
        """
        Convert a time struct or string to a string.
        """
        try:
            result = super(StringOrDate, self).to_json(value)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only genuine conversion failures should fall back.
            return value

        if result is None:
            return value
        else:
            return result
# Shared lxml parser for course XML: DTD handling disabled, comments and
# ignorable whitespace stripped so parsed trees are stable to round-trip.
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
                                 remove_comments=True, remove_blank_text=True)
# Module-level cache of fetched textbook tables of contents,
# keyed by TOC URL -> (parsed XML tree, fetch timestamp).
_cached_toc = {}
class Textbook(object):
    """A course textbook, identified by its title and the base URL of its assets.

    The table of contents is fetched lazily from `<book_url>toc.xml` and cached
    in the module-level `_cached_toc` for 10 minutes.
    """
    def __init__(self, title, book_url):
        self.title = title
        self.book_url = book_url

    @lazy
    def start_page(self):
        # First page is taken from the first top-level TOC entry.
        return int(self.table_of_contents[0].attrib['page'])

    @lazy
    def end_page(self):
        # The last page should be the last element in the table of contents,
        # but it may be nested. So recurse all the way down the last element
        last_el = self.table_of_contents[-1]
        while last_el.getchildren():
            last_el = last_el[-1]

        return int(last_el.attrib['page'])

    @lazy
    def table_of_contents(self):
        """
        Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url

        Returns XML tree representation of the table of contents
        """
        toc_url = self.book_url + 'toc.xml'

        # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
        # course modules have a very short lifespan and are constantly being created and torn down.
        # Since this module in the __init__() method does a synchronous call to AWS to get the TOC
        # this is causing a big performance problem. So let's be a bit smarter about this and cache
        # each fetch and store in-mem for 10 minutes.
        # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
        # rewrite to use the traditional Django in-memory cache.
        try:
            # see if we already fetched this
            if toc_url in _cached_toc:
                (table_of_contents, timestamp) = _cached_toc[toc_url]
                # BUG FIX: was datetime.now(UTC) -- passing the UTC *class* rather than
                # an instance raised TypeError and silently disabled every cache hit.
                age = datetime.now(UTC()) - timestamp
                # expire every 10 minutes; total_seconds() (not .seconds, which
                # ignores whole days) gives the true age.
                if age.total_seconds() < 600:
                    return table_of_contents
        except Exception:
            pass

        # Get the table of contents from S3
        log.info("Retrieving textbook table of contents from %s" % toc_url)
        try:
            r = requests.get(toc_url)
        except Exception as err:
            msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
            log.error(msg)
            raise Exception(msg)

        # TOC is XML. Parse it
        try:
            table_of_contents = etree.fromstring(r.text)
        except Exception as err:
            msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
            log.error(msg)
            raise Exception(msg)

        # BUG FIX: the cache was read above but never written, so every access
        # re-fetched the TOC. Store the parsed tree with a fetch timestamp.
        _cached_toc[toc_url] = (table_of_contents, datetime.now(UTC()))
        return table_of_contents

    def __eq__(self, other):
        return (self.title == other.title and
                self.book_url == other.book_url)

    def __ne__(self, other):
        return not self == other
class TextbookList(List):
    """XBlock List field holding Textbook entries serialized as (title, book_url) pairs."""
    def from_json(self, values):
        """Deserialize [(title, book_url), ...] into Textbook objects, skipping failures."""
        textbooks = []
        for title, book_url in values:
            try:
                textbooks.append(Textbook(title, book_url))
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt.
                # If we can't get to S3 (e.g. on a train with no internet), don't break
                # the rest of the courseware.
                log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
                continue

        return textbooks

    def to_json(self, values):
        """Serialize back to (title, book_url) pairs; unrecognized entries are dropped."""
        json_data = []
        for val in values:
            if isinstance(val, Textbook):
                json_data.append((val.title, val.book_url))
            elif isinstance(val, tuple):
                json_data.append(val)
            else:
                continue
        return json_data
class UserPartitionList(List):
    """Special List class for listing UserPartitions"""
    def from_json(self, values):
        # Rebuild each UserPartition from its JSON representation.
        return [UserPartition.from_json(entry) for entry in values]

    def to_json(self, values):
        # Serialize each UserPartition back to JSON.
        return [partition.to_json() for partition in values]
class CourseFields(object):
    """Declarative XBlock field definitions mixed into the course descriptor.

    Each field's `scope` controls where the value persists (Scope.content vs
    Scope.settings) and each `default` applies to every course that has not
    overridden the field, so changes here affect existing courses.
    """
    lti_passports = List(help="LTI tools passports as id:client_key:client_secret", scope=Scope.settings)
    textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course",
                             default=[], scope=Scope.content)
    # This is should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        help="List of user partitions of this course into groups, used e.g. for experiments",
        default=[],
        scope=Scope.settings
    )
    wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content)
    enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings)
    enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
    # Default start is far in the future so an unscheduled course is invisible.
    start = Date(help="Start time when this module is visible",
                 default=datetime(2030, 1, 1, tzinfo=UTC()),
                 scope=Scope.settings)
    end = Date(help="Date that this class ends", scope=Scope.settings)
    advertised_start = String(help="Date that this course is advertised to start", scope=Scope.settings)
    # Default grading policy: four assignment types plus a single "Pass" cutoff.
    grading_policy = Dict(help="Grading policy definition for this class",
                          default={"GRADER": [
                              {
                                  "type": "Homework",
                                  "min_count": 12,
                                  "drop_count": 2,
                                  "short_label": "HW",
                                  "weight": 0.15
                              },
                              {
                                  "type": "Lab",
                                  "min_count": 12,
                                  "drop_count": 2,
                                  "weight": 0.15
                              },
                              {
                                  "type": "Midterm Exam",
                                  "short_label": "Midterm",
                                  "min_count": 1,
                                  "drop_count": 0,
                                  "weight": 0.3
                              },
                              {
                                  "type": "Final Exam",
                                  "short_label": "Final",
                                  "min_count": 1,
                                  "drop_count": 0,
                                  "weight": 0.4
                              }
                          ],
                              "GRADE_CUTOFFS": {
                                  "Pass": 0.5
                              }},
                          scope=Scope.content)
    show_calculator = Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings)
    display_name = String(help="Display name for this module", default="Empty", display_name="Display Name", scope=Scope.settings)
    course_edit_method = String(help="Method with which this course is edited.", default="Studio", scope=Scope.settings)
    show_chat = Boolean(help="Whether to show the chat widget in this course", default=False, scope=Scope.settings)
    tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
    end_of_course_survey_url = String(help="Url for the end-of-course survey", scope=Scope.settings)
    discussion_blackouts = List(help="List of pairs of start/end dates for discussion blackouts", scope=Scope.settings)
    discussion_topics = Dict(help="Map of topics names to ids", scope=Scope.settings)
    discussion_sort_alpha = Boolean(scope=Scope.settings, default=False, help="Sort forum categories and subcategories alphabetically.")
    announcement = Date(help="Date this course is announced", scope=Scope.settings)
    cohort_config = Dict(help="Dictionary defining cohort configuration", scope=Scope.settings)
    is_new = Boolean(help="Whether this course should be flagged as new", scope=Scope.settings)
    no_grade = Boolean(help="True if this course isn't graded", default=False, scope=Scope.settings)
    disable_progress_graph = Boolean(help="True if this course shouldn't display the progress graph", default=False, scope=Scope.settings)
    pdf_textbooks = List(help="List of dictionaries containing pdf_textbook configuration", scope=Scope.settings)
    html_textbooks = List(help="List of dictionaries containing html_textbook configuration", scope=Scope.settings)
    remote_gradebook = Dict(scope=Scope.settings)
    allow_anonymous = Boolean(scope=Scope.settings, default=True)
    allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False)
    advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings)
    has_children = True
    # Studio "getting started" checklists shown to course authors; the checked
    # state is per-course UI data persisted in Scope.settings.
    checklists = List(scope=Scope.settings,
                      default=[
                          {"short_description": "Getting Started With Studio",
                           "items": [{"short_description": "Add Course Team Members",
                                      "long_description": "Grant your collaborators permission to edit your course so you can work together.",
                                      "is_checked": False,
                                      "action_url": "ManageUsers",
                                      "action_text": "Edit Course Team",
                                      "action_external": False},
                                     {"short_description": "Set Important Dates for Your Course",
                                      "long_description": "Establish your course's student enrollment and launch dates on the Schedule and Details page.",
                                      "is_checked": False,
                                      "action_url": "SettingsDetails",
                                      "action_text": "Edit Course Details & Schedule",
                                      "action_external": False},
                                     {"short_description": "Draft Your Course's Grading Policy",
                                      "long_description": "Set up your assignment types and grading policy even if you haven't created all your assignments.",
                                      "is_checked": False,
                                      "action_url": "SettingsGrading",
                                      "action_text": "Edit Grading Settings",
                                      "action_external": False},
                                     {"short_description": "Explore the Other Studio Checklists",
                                      "long_description": "Discover other available course authoring tools, and find help when you need it.",
                                      "is_checked": False,
                                      "action_url": "",
                                      "action_text": "",
                                      "action_external": False}]},
                          {"short_description": "Draft a Rough Course Outline",
                           "items": [{"short_description": "Create Your First Section and Subsection",
                                      "long_description": "Use your course outline to build your first Section and Subsection.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False},
                                     {"short_description": "Set Section Release Dates",
                                      "long_description": "Specify the release dates for each Section in your course. Sections become visible to students on their release dates.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False},
                                     {"short_description": "Designate a Subsection as Graded",
                                      "long_description": "Set a Subsection to be graded as a specific assignment type. Assignments within graded Subsections count toward a student's final grade.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False},
                                     {"short_description": "Reordering Course Content",
                                      "long_description": "Use drag and drop to reorder the content in your course.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False},
                                     {"short_description": "Renaming Sections",
                                      "long_description": "Rename Sections by clicking the Section name from the Course Outline.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False},
                                     {"short_description": "Deleting Course Content",
                                      "long_description": "Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is no Undo function.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False},
                                     {"short_description": "Add an Instructor-Only Section to Your Outline",
                                      "long_description": "Some course authors find using a section for unsorted, in-progress work useful. To do this, create a section and set the release date to the distant future.",
                                      "is_checked": False,
                                      "action_url": "CourseOutline",
                                      "action_text": "Edit Course Outline",
                                      "action_external": False}]},
                          {"short_description": "Explore edX's Support Tools",
                           "items": [{"short_description": "Explore the Studio Help Forum",
                                      "long_description": "Access the Studio Help forum from the menu that appears when you click your user name in the top right corner of Studio.",
                                      "is_checked": False,
                                      "action_url": "http://help.edge.edx.org/",
                                      "action_text": "Visit Studio Help",
                                      "action_external": True},
                                     {"short_description": "Enroll in edX 101",
                                      "long_description": "Register for edX 101, edX's primer for course creation.",
                                      "is_checked": False,
                                      "action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
                                      "action_text": "Register for edX 101",
                                      "action_external": True},
                                     {"short_description": "Download the Studio Documentation",
                                      "long_description": "Download the searchable Studio reference documentation in PDF form.",
                                      "is_checked": False,
                                      "action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
                                      "action_text": "Download Documentation",
                                      "action_external": True}]},
                          {"short_description": "Draft Your Course About Page",
                           "items": [{"short_description": "Draft a Course Description",
                                      "long_description": "Courses on edX have an About page that includes a course video, description, and more. Draft the text students will read before deciding to enroll in your course.",
                                      "is_checked": False,
                                      "action_url": "SettingsDetails",
                                      "action_text": "Edit Course Schedule & Details",
                                      "action_external": False},
                                     {"short_description": "Add Staff Bios",
                                      "long_description": "Showing prospective students who their instructor will be is helpful. Include staff bios on the course About page.",
                                      "is_checked": False,
                                      "action_url": "SettingsDetails",
                                      "action_text": "Edit Course Schedule & Details",
                                      "action_external": False},
                                     {"short_description": "Add Course FAQs",
                                      "long_description": "Include a short list of frequently asked questions about your course.",
                                      "is_checked": False,
                                      "action_url": "SettingsDetails",
                                      "action_text": "Edit Course Schedule & Details",
                                      "action_external": False},
                                     {"short_description": "Add Course Prerequisites",
                                      "long_description": "Let students know what knowledge and/or skills they should have before they enroll in your course.",
                                      "is_checked": False,
                                      "action_url": "SettingsDetails",
                                      "action_text": "Edit Course Schedule & Details",
                                      "action_external": False}]}
                      ])
    info_sidebar_name = String(scope=Scope.settings, default='Course Handouts')
    show_timezone = Boolean(
        help="True if timezones should be shown on dates in the courseware. Deprecated in favor of due_date_display_format.",
        scope=Scope.settings, default=True
    )
    due_date_display_format = String(
        help="Format supported by strftime for displaying due dates. Takes precedence over show_timezone.",
        scope=Scope.settings, default=None
    )
    enrollment_domain = String(help="External login method associated with user accounts allowed to register in course",
                               scope=Scope.settings)
    certificates_show_before_end = Boolean(help="True if students may download certificates before course end",
                                           scope=Scope.settings,
                                           default=False)
    course_image = String(
        help="Filename of the course image",
        scope=Scope.settings,
        # Ensure that courses imported from XML keep their image
        default="images_course_image.jpg"
    )

    ## Course level Certificate Name overrides.
    cert_name_short = String(
        help="Sitewide name of completion statements given to students (short).",
        scope=Scope.settings,
        default=""
    )
    cert_name_long = String(
        help="Sitewide name of completion statements given to students (long).",
        scope=Scope.settings,
        default=""
    )

    # An extra property is used rather than the wiki_slug/number because
    # there are courses that change the number for different runs. This allows
    # courses to share the same css_class across runs even if they have
    # different numbers.
    #
    # TODO get rid of this as soon as possible or potentially build in a robust
    # way to add in course-specific styling. There needs to be a discussion
    # about the right way to do this, but arjun will address this ASAP. Also
    # note that the courseware template needs to change when this is removed.
    css_class = String(help="DO NOT USE THIS", scope=Scope.settings, default="")

    # TODO: This is a quick kludge to allow CS50 (and other courses) to
    # specify their own discussion forums as external links by specifying a
    # "discussion_link" in their policy JSON file. This should later get
    # folded in with Syllabus, Course Info, and additional Custom tabs in a
    # more sensible framework later.
    discussion_link = String(help="DO NOT USE THIS", scope=Scope.settings)

    # TODO: same as above, intended to let internal CS50 hide the progress tab
    # until we get grade integration set up.
    # Explicit comparison to True because we always want to return a bool.
    hide_progress_tab = Boolean(help="DO NOT USE THIS", scope=Scope.settings)

    display_organization = String(help="An optional display string for the course organization that will get rendered in the LMS",
                                  scope=Scope.settings)
    display_coursenumber = String(help="An optional display string for the course number that will get rendered in the LMS",
                                  scope=Scope.settings)
    max_student_enrollments_allowed = Integer(help="Limit the number of students allowed to enroll in this course.",
                                              scope=Scope.settings)
    allow_public_wiki_access = Boolean(help="Whether to allow an unenrolled user to view the Wiki",
                                       default=False,
                                       scope=Scope.settings)
class CourseDescriptor(CourseFields, SequenceDescriptor):
module_class = SequenceModule
    def __init__(self, *args, **kwargs):
        """
        Expects the same arguments as XModuleDescriptor.__init__
        """
        super(CourseDescriptor, self).__init__(*args, **kwargs)
        # i18n translation function, used below for the default discussion topic name.
        _ = self.runtime.service(self, "i18n").ugettext
        # Derive a wiki slug from the course identifier when none was configured.
        if self.wiki_slug is None:
            if isinstance(self.location, Location):
                self.wiki_slug = self.location.course
            elif isinstance(self.location, CourseLocator):
                self.wiki_slug = self.id.offering or self.display_name
        if self.due_date_display_format is None and self.show_timezone is False:
            # For existing courses with show_timezone set to False (and no due_date_display_format specified),
            # set the due_date_display_format to what would have been shown previously (with no timezone).
            # Then remove show_timezone so that if the user clears out the due_date_display_format,
            # they get the default date display.
            self.due_date_display_format = "DATE_TIME"
            delattr(self, 'show_timezone')
        # NOTE: relies on the modulestore to call set_grading_policy() right after
        # init. (Modulestore is in charge of figuring out where to load the policy from)
        # NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
        # disable the syllabus content for courses that do not provide a syllabus
        if self.system.resources_fs is None:
            self.syllabus_present = False
        else:
            self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
        # Parsed grading policy cache; populated via the raw_grader/grade_cutoffs setters.
        self._grading_policy = {}
        self.set_grading_policy(self.grading_policy)
        # Seed a default (translated) discussion topic keyed by this course's html id.
        if self.discussion_topics == {}:
            self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
        if not getattr(self, "tabs", []):
            CourseTabList.initialize_default(self)
    def set_grading_policy(self, course_policy):
        """
        The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
        missing, it reverts to the default.
        """
        if course_policy is None:
            course_policy = {}
        # Load the global settings as a dictionary
        # NOTE(review): this is the live XBlock field value; update() below mutates
        # it in place, and the raw_grader/grade_cutoffs setters write it back.
        grading_policy = self.grading_policy
        # BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
        # defective grading policy values (but not None)
        if 'GRADER' not in grading_policy:
            grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
        if 'GRADE_CUTOFFS' not in grading_policy:
            grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
        # Override any global settings with the course settings
        grading_policy.update(course_policy)
        # Here is where we should parse any configurations, so that we can fail early
        # Use setters so that side effecting to .definitions works
        self.raw_grader = grading_policy['GRADER']  # used for cms access
        self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except (IOError):
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """Instantiate the course from its XML, then locate and apply its grading policy."""
        instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
        # bleh, have to parse the XML here to just pull out the url_name attribute
        # I don't think it's stored anywhere in the instance.
        course_file = StringIO(xml_data.encode('ascii', 'ignore'))
        xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
        policy_dir = None
        url_name = xml_obj.get('url_name', xml_obj.get('slug'))
        if url_name:
            policy_dir = 'policies/' + url_name
        # Try to load grading policy
        paths = ['grading_policy.json']
        if policy_dir:
            # A run-specific policy file takes precedence over the course-level one.
            paths = [policy_dir + '/grading_policy.json'] + paths
        try:
            policy = json.loads(cls.read_grading_policy(paths, system))
        except ValueError:
            system.error_tracker("Unable to decode grading policy as json")
            policy = {}
        # now set the current instance. set_grading_policy() will apply some inheritance rules
        instance.set_grading_policy(policy)
        return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
textbook_xml_object = etree.Element('textbook')
for textbook in self.textbooks:
textbook_xml_object.set('title', textbook.title)
textbook_xml_object.set('book_url', textbook.book_url)
xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
if self.end is None:
return False
return datetime.now(UTC()) > self.end
def may_certify(self):
"""
Return True if it is acceptable to show the student a certificate download link
"""
return self.certificates_show_before_end or self.has_ended()
def has_started(self):
return datetime.now(UTC()) > self.start
    @property
    def grader(self):
        # Build a grader object from the raw GRADER configuration on each access.
        return grader_from_conf(self.raw_grader)
    @property
    def raw_grader(self):
        """The raw GRADER configuration list (as stored in the policy), used by the CMS."""
        # force the caching of the xblock value so that it can detect the change
        # pylint: disable=pointless-statement
        self.grading_policy['GRADER']
        return self._grading_policy['RAW_GRADER']

    @raw_grader.setter
    def raw_grader(self, value):
        # NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
        # Keep the parsed cache and the persisted grading_policy field in sync.
        self._grading_policy['RAW_GRADER'] = value
        self.grading_policy['GRADER'] = value
    @property
    def grade_cutoffs(self):
        """Mapping of grade name (e.g. "Pass") to the minimum fraction required."""
        return self._grading_policy['GRADE_CUTOFFS']

    @grade_cutoffs.setter
    def grade_cutoffs(self, value):
        self._grading_policy['GRADE_CUTOFFS'] = value

        # XBlock fields don't update after mutation
        # (re-assign the whole dict so the field system notices the change)
        policy = self.grading_policy
        policy['GRADE_CUTOFFS'] = value
        self.grading_policy = policy
    @property
    def lowest_passing_grade(self):
        # The smallest cutoff value is the minimum fraction needed to pass at all.
        return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
    @property
    def is_newish(self):
        """
        Returns if the course has been flagged as new. If
        there is no flag, return a heuristic value considering the
        announcement and the start dates.
        """
        flag = self.is_new
        if flag is None:
            # Use a heuristic if the course has not been flagged
            announcement, start, now = self._sorting_dates()
            if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
                return True
            elif (now - start).days < 1:
                # The course has not started yet
                return True
            else:
                return False
        elif isinstance(flag, basestring):  # Python 2 str/unicode check
            # Accept a few human-friendly truthy spellings from course policy.
            return flag.lower() in ['true', 'yes', 'y']
        else:
            return bool(flag)
    @property
    def sorting_score(self):
        """
        Returns a number that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
        heuristic that takes into account the announcement and
        (advertised) start dates of the course if available.

        The lower the number the "newer" the course.
        """
        # Make courses that have an announcement date have a lower
        # score than courses that don't; older courses should have a
        # higher score.
        announcement, start, now = self._sorting_dates()
        scale = 300.0  # about a year
        if announcement:
            # Announced courses: score is always negative, decaying towards 0
            # as the announcement ages.
            days = (now - announcement).days
            score = -exp(-days / scale)
        else:
            # Unannounced courses: positive score growing with age since start.
            days = (now - start).days
            score = exp(days / scale)
        return score
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
announcement = self.announcement
if announcement is not None:
announcement = announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
    @lazy
    def grading_context(self):
        """
        This returns a dictionary with keys necessary for quickly grading
        a student. They are used by grades.grade()

        The grading context has two keys:
        graded_sections - This contains the sections that are graded, as
            well as all possible children modules that can affect the
            grading. This allows some sections to be skipped if the student
            hasn't seen any part of it.
            The format is a dictionary keyed by section-type. The values are
            arrays of dictionaries containing
                "section_descriptor" : The section descriptor
                "xmoduledescriptors" : An array of xmoduledescriptors that
                    could possibly be in the section, for any student
        all_descriptors - This contains a list of all xmodules that can
            effect grading a student. This is used to efficiently fetch
            all the xmodule state for a FieldDataCache without walking
            the descriptor tree again.
        """
        all_descriptors = []
        graded_sections = {}
        # Recursive depth-first walk over every descendant of a descriptor.
        def yield_descriptor_descendents(module_descriptor):
            for child in module_descriptor.get_children():
                yield child
                for module_descriptor in yield_descriptor_descendents(child):
                    yield module_descriptor
        # Course -> chapters (c) -> sections (s); only graded sections count.
        for c in self.get_children():
            for s in c.get_children():
                if s.graded:
                    xmoduledescriptors = list(yield_descriptor_descendents(s))
                    xmoduledescriptors.append(s)
                    # The xmoduledescriptors included here are only the ones that have scores.
                    section_description = {
                        'section_descriptor': s,
                        'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)
                    }
                    # Sections are grouped by their assignment format (e.g. "Homework").
                    section_format = s.format if s.format is not None else ''
                    graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
                    all_descriptors.extend(xmoduledescriptors)
                    all_descriptors.append(s)
        return {'graded_sections': graded_sections,
                'all_descriptors': all_descriptors, }
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
    @property
    def start_date_text(self):
        """
        Returns the desired text corresponding the course's start date. Prefers .advertised_start,
        then falls back to .start
        """
        i18n = self.runtime.service(self, "i18n")
        _ = i18n.ugettext
        strftime = i18n.strftime
        # Render an ISO-8601-ish string as a localized short date, or fall
        # back to title-casing the raw text (e.g. "fall 2013" -> "Fall 2013").
        def try_parse_iso_8601(text):
            try:
                result = Date().from_json(text)
                if result is None:
                    result = text.title()
                else:
                    result = strftime(result, "SHORT_DATE")
            except ValueError:
                result = text.title()
            return result
        if isinstance(self.advertised_start, basestring):
            return try_parse_iso_8601(self.advertised_start)
        elif self.start_date_is_still_default:
            # Translators: TBD stands for 'To Be Determined' and is used when a course
            # does not yet have an announced start date.
            return _('TBD')
        else:
            when = self.advertised_start or self.start
            return strftime(when, "SHORT_DATE")
    @property
    def start_date_is_still_default(self):
        """
        Checks if the start date set for the course is still default, i.e. .start has not been modified,
        and .advertised_start has not been set.
        """
        return self.advertised_start is None and self.start == CourseFields.start.default
    @property
    def end_date_text(self):
        """
        Returns the end date for the course formatted as a string.
        If the course does not have an end date set (course.end is None), an empty string will be returned.
        """
        if self.end is None:
            return ''
        else:
            # Localized short-date formatting via the runtime's i18n service.
            strftime = self.runtime.service(self, "i18n").strftime
            return strftime(self.end, "SHORT_DATE")
@property
def forum_posts_allowed(self):
date_proxy = Date()
try:
blackout_periods = [(date_proxy.from_json(start),
date_proxy.from_json(end))
for start, end
in self.discussion_blackouts]
now = datetime.now(UTC())
for start, end in blackout_periods:
if start <= now <= end:
return False
except:
log.exception("Error parsing discussion_blackouts for course {0}".format(self.id))
return True
    @property
    def number(self):
        """Course-number component of the location (the raw 'course' field)."""
        return self.location.course
    @property
    def display_number_with_default(self):
        """
        Return a display course number if it has been specified, otherwise return the 'course' that is in the location
        """
        if self.display_coursenumber:
            return self.display_coursenumber
        return self.number
    @property
    def org(self):
        """Organization component of the location."""
        return self.location.org
    @property
    def display_org_with_default(self):
        """
        Return a display organization if it has been specified, otherwise return the 'org' that is in the location
        """
        if self.display_organization:
            return self.display_organization
        return self.org
| morenopc/edx-platform | common/lib/xmodule/xmodule/course_module.py | Python | agpl-3.0 | 42,110 | [
"VisIt"
] | e3af1f1c458fe4c3fd343f2423fc626fa263043ade37f315b5c101efcfef449c |
#!/usr/bin/env python3
#  B a r a K u d a
#
#  Convert a raster image (RGB color or grey-scale B&W) into a netCDF file.
#
#  L. Brodeau, 2017
import sys
import numpy as nmp
from PIL import Image
import os
from netCDF4 import Dataset
import datetime
#l_fake_coor = True
l_fake_coor = False   # generate fake regular lon/lat coordinate vectors?
l_nemo_like = True    # use NEMO-style 'x'/'y' dimension names
narg = len(sys.argv)
if not narg in [2, 3]:
    print('Usage: '+sys.argv[0]+' <image> (<field divider for field>)'); sys.exit(0)
cf_im = sys.argv[1]
idiv = 1
if narg == 3: idiv = int(sys.argv[2])
print(idiv)
cfname, cfext = os.path.splitext(cf_im)
#(nj,ni) = nmp.shape(nav_lon)
# Output file: same basename as the image, extension replaced by .nc:
cf_nc = str.replace(os.path.basename(cf_im), cfext, '.nc')
# Opening Images:
print(' *** Opening image '+cf_im)
pic = Image.open(cf_im)
lcolor = False ; # if False it is a grey-scale (B&W) picture
vshape_pic = nmp.shape(pic)
if len(vshape_pic) == 3:
    (ny,nx,nrgb) = vshape_pic
    if nrgb != 3: print(' Problem #1 with your image, not what we expected!') ; sys.exit(0)
    lcolor = True ; # RGB color picture => 3 2D array
    print("\n It's a RGB color picture!\n")
elif len(vshape_pic) == 2:
    lcolor = False ; # grey-scale picture (true black and white) => 1 2D array
    (ny,nx) = vshape_pic
    nrgb = 1
    print("\n It's a grey-scale B&W picture!\n")
else:
    print(' Problem #2 with your image, not what we expected!') ; sys.exit(0)
print(" *** shape of pic: ", (ny,nx))
xpic = nmp.array(pic)
if l_fake_coor:
    # Prepare fake regular lon/lat coordinates spanning the whole globe:
    vlon = nmp.zeros(nx) ; dx = 360./float(nx)
    for ji in range(nx): vlon[ji] = (float(ji) + 0.5)*dx
    vlat = nmp.zeros(ny) ; dy = 180./float(ny)
    for jj in range(ny): vlat[jj] = -90 + (float(jj) + 0.5)*dy
    #print(vlat[:])
    #sys.exit(0)
f_out = Dataset(cf_nc, 'w', format='NETCDF4')
# Dimensions:
cdim_x = 'longitude'
cdim_y = 'latitude'
if l_nemo_like:
    cdim_x = 'x'
    cdim_y = 'y'
#if l_fake_coor:
#    cdim_x = 'lon'
#    cdim_y = 'lat'
f_out.createDimension(cdim_x, nx)
f_out.createDimension(cdim_y, ny)
#if l_nemo_like: f_out.createDimension('t', None)
if l_fake_coor:
    id_lon = f_out.createVariable('lon0','f4',(cdim_x,))
    id_lat = f_out.createVariable('lat0','f4',(cdim_y,))
    id_lon[:] = vlon[:]
    id_lat[:] = vlat[:]
if lcolor:
    # One netCDF variable per RGB channel; flipud because image row 0 is the
    # top of the picture while the netCDF y-axis grows upward.
    id_red = f_out.createVariable('red','f4',(cdim_y,cdim_x,))
    id_red.long_name = 'Red (of RGB)'
    id_green = f_out.createVariable('green','f4',(cdim_y,cdim_x,))
    id_green.long_name = 'Green (of RGB)'
    id_blue = f_out.createVariable('blue','f4',(cdim_y,cdim_x,))
    id_blue.long_name = 'Blue (of RGB)'
    id_red[:,:] = nmp.flipud(xpic[:,:,0])
    id_green[:,:] = nmp.flipud(xpic[:,:,1])
    id_blue[:,:] = nmp.flipud(xpic[:,:,2])
else:
    #if l_nemo_like:
    #    id_bw = f_out.createVariable('bw','i1',('t',cdim_y,cdim_x,))
    #    id_bw.long_name = 'Grey scale'
    #    #id_bw[0,:,:] = nmp.flipud(xpic[:,:]) / idiv
    #    id_bw[0,:,:] = 1 - (nmp.flipud(xpic[:,:]) + 1)/idiv
    #else:
    id_bw = f_out.createVariable('bw','i1',(cdim_y,cdim_x,))
    id_bw.long_name = 'Grey scale'
    # Invert/rescale the grey level using the user-supplied divider:
    id_bw[:,:] = 1 - (nmp.flipud(xpic[:,:]) + 1)/idiv
f_out.About = 'Image '+cf_im+' converted to netcdf.'
f_out.Author = 'Generated with image_to_netcdf.py of BARAKUDA (https://github.com/brodeau/barakuda)'
f_out.close()
print(cf_nc+' created!!!')
| brodeau/barakuda | python/exec/image_to_netcdf.py | Python | gpl-2.0 | 3,263 | [
"NetCDF"
] | 57772cb51d2e429c18c517a149137eeb82dc7f2ee9fc639ad5e448060b8e49a4 |
""" Collection of utilities for finding paths in the CS
"""
from urllib import parse
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.Helpers import Path
def getDIRACSetup():
    """Get DIRAC default setup name

    :return: str
    """
    # Read straight from the merged configuration (CS + local dirac.cfg).
    return gConfigurationData.extractOptionFromCFG("/DIRAC/Setup")
def divideFullName(entityName, componentName=None):
    """Convert component full name to tuple

    :param str entityName: component full name, e.g.: 'Framework/ProxyManager'
    :param str componentName: component name

    :return: tuple -- contain system and component name
    """
    trimmed = entityName.strip("/")
    # Shortcut: a bare system name plus an explicitly given component name.
    if componentName and trimmed and "/" not in trimmed:
        return (trimmed, componentName)
    # Otherwise the name itself must carry exactly two non-empty pieces.
    pieces = []
    for chunk in trimmed.split("/"):
        chunk = chunk.strip()
        if chunk:
            pieces.append(chunk)
    if len(pieces) == 2:
        return (pieces[0], pieces[1])
    raise RuntimeError("Service (%s) name must be with the form system/service" % trimmed)
def getSystemInstance(system, setup=False):
    """Find system instance name

    :param str system: system name
    :param str setup: setup name

    :return: str
    """
    # The instance is declared under /DIRAC/Setups/<setup>/<system>.
    optionPath = Path.cfgPath("/DIRAC/Setups", setup or getDIRACSetup(), system)
    instance = gConfigurationData.extractOptionFromCFG(optionPath)
    if not instance:
        raise RuntimeError("Option %s is not defined" % optionPath)
    return instance


def getSystemSection(system, instance=False, setup=False):
    """Get system section

    :param str system: system name
    :param str instance: instance name
    :param str setup: setup name

    :return: str -- system section path
    """
    # Accept "System/Component" style names too; only the system part is used.
    system, _ = divideFullName(system, "_")  # for backward compatibility
    return Path.cfgPath(
        "/Systems",
        system,
        instance or getSystemInstance(system, setup=setup),
    )
def getComponentSection(system, component=False, setup=False, componentCategory="Services"):
    """Function returns the path to the component.

    :param str system: system name or component name prefixed by the system in which it is placed.
                       e.g. 'WorkloadManagement/SandboxStoreHandler'
    :param str component: component name, e.g. 'SandboxStoreHandler'
    :param str setup: Name of the setup.
    :param str componentCategory: Category of the component, it can be:
                                  'Agents', 'Services', 'Executors' or 'Databases'.

    :return: Complete path to the component
    :rtype: str

    :raise RuntimeException: If in the system - the system part does not correspond to any known system in DIRAC.

    Examples:
        getComponentSection('WorkloadManagement/SandboxStoreHandler', setup='Production', componentCategory='Services')
        getComponentSection('WorkloadManagement', 'SandboxStoreHandler', 'Production')
    """
    system, component = divideFullName(system, component)
    return Path.cfgPath(getSystemSection(system, setup=setup), componentCategory, component)
def getAPISection(system, endpointName=False, setup=False):
    """Get API section in a system

    :param str system: system name
    :param str endpointName: endpoint name

    :return: str
    """
    return getComponentSection(system, component=endpointName, setup=setup, componentCategory="APIs")


def getServiceSection(system, serviceName=False, setup=False):
    """Get service section in a system

    :param str system: system name
    :param str serviceName: service name
    :param str setup: setup name

    :return: str
    """
    # componentCategory defaults to "Services" in getComponentSection.
    return getComponentSection(system, component=serviceName, setup=setup)


def getAgentSection(system, agentName=False, setup=False):
    """Get agent section in a system

    :param str system: system name
    :param str agentName: agent name
    :param str setup: setup name

    :return: str
    """
    return getComponentSection(system, component=agentName, setup=setup, componentCategory="Agents")


def getExecutorSection(system, executorName=None, component=False, setup=False):
    """Get executor section in a system

    :param str system: system name
    :param str executorName: executor name
    :param str setup: setup name

    :return: str
    """
    # NOTE(review): the `component` argument is accepted but never used here —
    # only `executorName` is forwarded. Presumably kept for signature
    # compatibility; confirm against callers before relying on it.
    return getComponentSection(system, component=executorName, setup=setup, componentCategory="Executors")


def getDatabaseSection(system, dbName=False, setup=False):
    """Get DB section in a system

    :param str system: system name
    :param str dbName: DB name
    :param str setup: setup name

    :return: str
    """
    return getComponentSection(system, component=dbName, setup=setup, componentCategory="Databases")


def getSystemURLSection(system, setup=False):
    """Get URLs section in a system

    :param str system: system name
    :param str setup: setup name

    :return: str
    """
    return Path.cfgPath(getSystemSection(system, setup=setup), "URLs")
def checkComponentURL(componentURL, system=None, component=None, pathMandatory=False):
    """Check component URL port and path. Set default ports for http scheme and raise if no port can be found.
    Set path if its mandatory or raise if its empty in this case.

    :param str componentURL: full URL, e.g.: dips://some-domain:3424/Framework/Service
    :param str system: system name
    :param str component: component name
    :param bool pathMandatory: raise error if the path could not be generated

    :return: str
    """
    parsed = parse.urlparse(componentURL)
    # Port: dips URLs must carry one explicitly; http/https get defaults.
    if not parsed.port:
        if parsed.scheme == "dips":
            raise RuntimeError("No port found for %s/%s URL!" % (system, component))
        defaultPort = 80 if parsed.scheme == "http" else 443
        parsed = parsed._replace(netloc="%s:%d" % (parsed.netloc, defaultPort))
    # Path: fill in "/<system>/<component>" when missing, or fail if required.
    if not parsed.path.strip("/"):
        if system and component:
            parsed = parsed._replace(path="/%s/%s" % (system, component))
        elif pathMandatory:
            raise RuntimeError("No path found for %s/%s URL!" % (system, component))
    return parsed.geturl()
def getSystemURLs(system, setup=False, failover=False):
    """Generate url.

    :param str system: system name or full name e.g.: Framework/ProxyManager
    :param str setup: DIRAC setup name, can be defined in dirac.cfg
    :param bool failover: to add failover URLs to end of result list

    :return: dict -- complete urls. e.g. [dips://some-domain:3424/Framework/Service]
    """
    urlDict = {}
    # One entry per service declared under the system's URLs section.
    for service in gConfigurationData.getOptionsFromCFG("%s/URLs" % getSystemSection(system, setup=setup)) or []:
        urlDict[service] = getServiceURLs(system, service, setup=setup, failover=failover)
    return urlDict
def getServiceURLs(system, service=None, setup=False, failover=False):
    """Generate url.

    :param str system: system name or full name e.g.: Framework/ProxyManager
    :param str service: service name, like 'ProxyManager'.
    :param str setup: DIRAC setup name, can be defined in dirac.cfg
    :param bool failover: to add failover URLs to end of result list

    :return: list -- complete urls. e.g. [dips://some-domain:3424/Framework/Service]
    """
    system, service = divideFullName(system, service)
    resList = []
    mainServers = None
    systemSection = getSystemSection(system, setup=setup)
    # Add failover URLs at the end of the list.
    # `failover` is reused as a truthiness flag: when requested, the loop
    # below visits both the plain URLs section and the FailoverURLs one.
    failover = "Failover" if failover else ""
    for fURLs in ["", "Failover"] if failover else [""]:
        urlList = []
        urls = List.fromChar(gConfigurationData.extractOptionFromCFG("%s/%sURLs/%s" % (systemSection, fURLs, service)))
        # Be sure that urls not None
        for url in urls or []:
            # Trying if we are refering to the list of main servers
            # which would be like dips://$MAINSERVERS$:1234/System/Component
            if "$MAINSERVERS$" in url:
                if not mainServers:
                    # Operations cannot be imported at the beginning because of a bootstrap problem
                    from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
                    mainServers = Operations(setup=setup).getValue("MainServers", [])
                if not mainServers:
                    raise Exception("No Main servers defined")
                # Expand the placeholder once per configured main server.
                for srv in mainServers:
                    _url = checkComponentURL(url.replace("$MAINSERVERS$", srv), system, service, pathMandatory=True)
                    if _url not in urlList:
                        urlList.append(_url)
                continue
            _url = checkComponentURL(url, system, service, pathMandatory=True)
            if _url not in urlList:
                urlList.append(_url)
        # Randomize list if needed
        resList.extend(List.randomize(urlList))
    return resList
def getServiceURL(system, service=None, setup=False):
    """Generate url.

    :param str system: system name or full name e.g.: Framework/ProxyManager
    :param str service: service name, like 'ProxyManager'.
    :param str setup: DIRAC setup name, can be defined in dirac.cfg

    :return: str -- complete list of urls. e.g. dips://some-domain:3424/Framework/Service, dips://..
    """
    system, service = divideFullName(system, service)
    urls = getServiceURLs(system, service=service, setup=setup)
    # Comma-joined so legacy callers can keep treating the result as a string.
    return ",".join(urls) if urls else ""


def getServiceFailoverURL(system, service=None, setup=False):
    """Get failover URLs for service

    :param str system: system name or full name, like 'Framework/Service'.
    :param str service: service name, like 'ProxyManager'.
    :param str setup: DIRAC setup name, can be defined in dirac.cfg

    :return: str -- complete list of urls
    """
    system, service = divideFullName(system, service)
    systemSection = getSystemSection(system, setup=setup)
    failovers = gConfigurationData.extractOptionFromCFG("%s/FailoverURLs/%s" % (systemSection, service))
    if not failovers:
        return ""
    return ",".join([checkComponentURL(u, system, service) for u in List.fromChar(failovers, ",") if u])
def getGatewayURLs(system="", service=None):
    """Get gateway URLs for service

    :param str system: system name or full name, like 'Framework/Service'.
    :param str service: service name, like 'ProxyManager'.

    :return: list or False
    """
    if system:
        system, service = divideFullName(system, service)
    # Gateways are configured per local site name.
    siteName = gConfigurationData.extractOptionFromCFG("/LocalSite/Site")
    if not siteName:
        return False
    gateways = gConfigurationData.extractOptionFromCFG("/DIRAC/Gateways/%s" % siteName)
    if not gateways:
        return False
    # Shuffle so clients spread load over the configured gateways.
    gateways = List.randomize(List.fromChar(gateways, ","))
    return [checkComponentURL(u, system, service) for u in gateways if u] if system and service else gateways
| DIRACGrid/DIRAC | src/DIRAC/ConfigurationSystem/Client/PathFinder.py | Python | gpl-3.0 | 10,836 | [
"DIRAC"
] | 802e1b2e1581cb037accdd97b1798b5441e8ff3e0885cae38f9757c30b52eb95 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a `status` column to Visit; existing rows are backfilled with 'complete'."""

    dependencies = [
        ('visit', '0011_location_is_primary'),
    ]

    operations = [
        migrations.AddField(
            model_name='visit',
            name='status',
            # choices use bytestrings (Django 1.x / Python 2 era schema);
            # the default is a plain str — stored values match the keys.
            field=models.CharField(default='complete', max_length=255, choices=[(b'noshow', b'No-Show'), (b'complete', b'Complete'), (b'saved', b'Saved')]),
            # preserve_default=False: the default is one-off, used only to
            # backfill existing rows during this migration.
            preserve_default=False,
        ),
    ]
| koebbe/homeworks | visit/migrations/0012_visit_status.py | Python | mit | 540 | [
"VisIt"
] | 1c3b30c6a79e45e715528a1dc4aaf219c8e16e40f3fe19a71bfd5d3262186527 |
"""
This module was copied from the scipy project.
In the process of copying, some methods were removed because they depended on
other parts of scipy (especially on compiled components), allowing seaborn to
have a simple and pure Python implementation. These include:
- integrate_gaussian
- integrate_box
- integrate_box_1d
- integrate_kde
- logpdf
- resample
Additionally, the numpy.linalg module was substituted for scipy.linalg,
and the examples section (with doctests) was removed from the docstring
The original scipy license is copied below:
Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# -------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
# -------------------------------------------------------------------------------
import numpy as np
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
from numpy import linalg
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
"""
    def __init__(self, dataset, bw_method=None, weights=None):
        self.dataset = atleast_2d(asarray(dataset))
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        # d: number of dimensions; n: number of data points.
        self.d, self.n = self.dataset.shape
        if weights is not None:
            self._weights = atleast_1d(weights).astype(float)
            # Normalize the weights so they sum to one.
            self._weights /= sum(self._weights)
            if self.weights.ndim != 1:
                raise ValueError("`weights` input should be one-dimensional.")
            if len(self._weights) != self.n:
                raise ValueError("`weights` input should be of length n")
            # Kish's effective sample size: 1 / sum(w_i^2) for normalized w.
            self._neff = 1/sum(self._weights**2)
        self.set_bandwidth(bw_method=bw_method)
    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different than
                     the dimensionality of the KDE.
        """
        points = atleast_2d(asarray(points))
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)
        output_dtype = np.common_type(self.covariance, points)
        result = zeros((m,), dtype=output_dtype)
        # Whiten both dataset and query points so the kernel becomes an
        # isotropic unit Gaussian in the transformed space.
        whitening = linalg.cholesky(self.inv_cov)
        scaled_dataset = dot(whitening, self.dataset)
        scaled_points = dot(whitening, points)
        # Loop over whichever axis is smaller to keep temporaries small.
        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = scaled_dataset[:, i, newaxis] - scaled_points
                energy = sum(diff * diff, axis=0) / 2.0
                result += self.weights[i]*exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = scaled_dataset - scaled_points[:, i, newaxis]
                energy = sum(diff * diff, axis=0) / 2.0
                result[i] = sum(exp(-energy)*self.weights, axis=0)
        result = result / self._norm_factor
        return result

    __call__ = evaluate
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth. This can be
            'scott', 'silverman', a scalar constant or a callable. If a
            scalar, this will be used directly as `kde.factor`. If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar. If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.

        Notes
        -----
        .. versionadded:: 0.11
        """
        if bw_method is None:
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
            # A bare number is used directly as the bandwidth factor.
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)
        self._compute_covariance()
    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                                   bias=False,
                                                   aweights=self.weights))
            self._data_inv_cov = linalg.inv(self._data_covariance)
        # Scale the cached data covariance by the squared bandwidth factor.
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        # Normalization constant sqrt(det(2*pi*Sigma)) of the Gaussian kernel.
        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))

    def pdf(self, x):
        """
        Evaluate the estimated pdf on a provided set of points.

        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
        docstring for more details.
        """
        return self.evaluate(x)
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
| mwaskom/seaborn | seaborn/external/kde.py | Python | bsd-3-clause | 13,806 | [
"Gaussian"
] | 5327f7c6a8f51b3949e039b0e3c0a0f1c8ad06c30b1ffc720190c9ed512bfaa0 |
# This module contains functions that output a cmd string for calling executables, should be broad/odyssey compatible
import os, sys, subprocess
script_path = os.path.dirname(__file__)
sys.path.append(script_path)
import myos
def start_java_cmd(mem_usage, additional_option):
    """Return the prefix of a java invocation: heap cap (-Xmx<mem>m), extra
    JVM options, and the trailing -jar switch."""
    return 'java -Xmx{0}m {1} -jar'.format(mem_usage, additional_option)
def fastx_trimmer(options, input_fn, output_fn):
    """Build the shell command for fastx_trimmer.

    ``options`` is a raw option string such as '-Q 33 -f 16'.
    """
    return 'fastx_trimmer {0} -i {1} -o {2};'.format(options, input_fn, output_fn)
def casava_quality_filter(input_fn, output_fn):
    """Build the shell command for the CASAVA quality filter perl script."""
    exe = 'perl /seq/epiprod/de/Cerebellum/code/casava_filter.pl'
    return '{0} -i {1} -o {2};'.format(exe, input_fn, output_fn)
def fastx_adaptor_filter(adaptor_seq, options, input_fn, output_fn):
    """Build the shell command for fastx_clipper (adaptor removal).

    ``options`` is a raw option string such as '-Q 33'.
    """
    return 'fastx_clipper -a {0} {1} -i {2} -o {3};'.format(
        adaptor_seq, options, input_fn, output_fn)
def fastq_quality_filter(options, input_fn, output_fn):
    """Build the shell command for fastq_quality_filter.

    ``options`` is a raw option string such as '-Q 33'.
    """
    return 'fastq_quality_filter {0} -i {1} -o {2};'.format(
        options, input_fn, output_fn)
def fastx_artifacts_filter(options, input_fn, output_fn):
    """Build the shell command for fastx_artifacts_filter.

    ``options`` is a raw option string such as '-Q 33'.
    """
    return 'fastx_artifacts_filter {0} -i {1} -o {2};'.format(
        options, input_fn, output_fn)
def fastqc(input_fns, output_dir):
    """Build the shell command for running FastQC.

    ``input_fns`` is a space-separated string of input files; reports are
    written to ``output_dir``. The FastQC install path depends on which
    server (broad/odyssey) we are running on.

    Raises:
        ValueError: if the server is unrecognized. The original code left
            ``en`` unbound in that case and failed later with a NameError.
    """
    server = myos.which_server()
    if server == 'broad':
        en = 'perl /broad/software/free/Linux/redhat_5_x86_64/pkgs/fastqc_0.10.1/FastQC/fastqc'
    elif server == 'odyssey':
        en = 'perl /n/dulacfs2/Users/dfernand/de/software/FastQC/fastqc'
    else:
        raise ValueError("No FastQC install configured for server '%s'" % server)
    cmd = '%s -o %s %s' % (en, output_dir, input_fns)
    return cmd
def trim_galore_filter(adapter_seq, options, input_fn, output_dir):
    """Build the shell command for running trim_galore adapter trimming.

    On odyssey the command is prefixed with the required cutadapt module
    load; the executable path differs per server.

    Raises:
        ValueError: if the server is unrecognized. The original code left
            ``cmd`` unbound in that case and failed later with a NameError.
    """
    server = myos.which_server()
    if server == 'broad':
        en = '/home/unix/dfernand/bin/trim_galore/trim_galore'
        cmd = '%s -o %s -a %s %s %s' % (en, output_dir, adapter_seq, options, input_fn)
    elif server == 'odyssey':
        en = '/n/dulacfs2/Users/dfernand/de/software/trim_galore_v0.3.3/trim_galore'
        cmd = 'module load centos6/cutadapt-1.2.1_python-2.7.3;%s -o %s -a %s %s %s' % (en, output_dir, adapter_seq, options, input_fn)
    else:
        raise ValueError("No trim_galore install configured for server '%s'" % server)
    return cmd
def bowtie_1_run(options, index_fn, input_fn, output_fn):
    """Build the command for mapping with bowtie 1 and name-sorting the output.

    NOTE: ``output_fn`` must always be a base name ending in .sorted;
    ``samtools sort`` appends the .bam extension itself.
    """
    exe = "/home/unix/dfernand/bin/bowtie-1.0.0/bowtie"
    mapping = " ".join([exe, options, index_fn, input_fn])
    return "%s | samtools view -bS - | samtools sort -n - %s" % (mapping, output_fn)
def phasedBam2bed(bam_in_fn, bed_p_fn, bed_m_fn):
    """Build the command for the nimrod phasedBam2bed executable.

    Splits a phased BAM into plus-strand (-p) and minus-strand (-m) BEDs.
    """
    exe = "/seq/epiprod/de/scripts/nimrod/samtoolsUtils/phasedBam2bed"
    return "{0} -b {1} -p {2} -m {3}".format(exe, bam_in_fn, bed_p_fn, bed_m_fn)
class bedops:
    """Command builders for the bedops suite (sort-bed, vcf2bed, bedmap).

    The server-specific module-load prefix is resolved once at
    construction and prepended to every generated command.
    """
    def __init__(self):
        self.server = myos.which_server()
        if self.server == 'broad':
            self.dep_cmd = myos.load_dependencies_cmd(['.bedops-2.0.0b'])
        elif self.server == 'odyssey':
            self.dep_cmd = myos.load_dependencies_cmd(['centos6/bedops-2.3.0'])

    def sortbed(self, bed_in_fn, bed_out_fn, options=''):
        """Command to sort a BED file; extra sort-bed options are optional."""
        if options:
            core = 'sort-bed {0} {1} > {2}'.format(options, bed_in_fn, bed_out_fn)
        else:
            core = 'sort-bed {0} > {1}'.format(bed_in_fn, bed_out_fn)
        return self.dep_cmd + core

    def vcf2bed(self, vcf_in_fn, vcf_out_fn):
        """Command to convert a VCF into a sorted BED via vcf2bed."""
        core = 'vcf2bed < {0} > {1}'.format(vcf_in_fn, vcf_out_fn)
        return self.dep_cmd + core

    def bedmap(self, bedmap_options, reference_in_fn, map_fn, out_fn):
        """Command to map `map_fn` intervals against `reference_in_fn`."""
        core = 'bedmap {0} {1} {2} > {3}'.format(
            bedmap_options, reference_in_fn, map_fn, out_fn)
        return self.dep_cmd + core
class igvtools:
    """Command builders for igvtools (index/sort).

    Resolves the jar location and, on odyssey, the module-load prefix
    for the current server at construction time.
    """
    def __init__(self):
        self.server = myos.which_server()
        if self.server == 'broad':
            self.igv_fn = '/home/unix/dfernand/bin/IGVTools/igvtools.jar'
        elif self.server == 'odyssey':
            self.dep_cmd = myos.load_dependencies_cmd(['bio/igvtools-2.2.2'])
            self.igv_fn = '/n/sw/igvtools-2.2.2/igvtools.jar'

    def _finalize(self, core):
        # Odyssey needs the module-load prefix; broad runs the jar directly.
        # Unknown servers fall through and return None, as before.
        if self.server == 'odyssey':
            return self.dep_cmd + core
        elif self.server == 'broad':
            return core

    def index(self, in_fn):
        """Command to build an IGV index for `in_fn`."""
        java_cmd = start_java_cmd('3000', '-Djava.awt.headless=true')
        return self._finalize('%s %s index %s' % (java_cmd, self.igv_fn, in_fn))

    def sort(self, in_fn, sort_fn):
        """Command to sort `in_fn` into `sort_fn` with igvtools."""
        java_cmd = start_java_cmd('3000', '-Djava.awt.headless=true')
        return self._finalize('%s %s sort %s %s'
                              % (java_cmd, self.igv_fn, in_fn, sort_fn))
| dfernan/de | myutils/execs_commands.py | Python | mit | 5,296 | [
"Bowtie"
] | 726ebd9e5676f2ad3e1e2dfb1069f25246d1a23aa592481fd9f9c0313c656a58 |
"""Minimal Python 2 & 3 shim around all Qt bindings
DOCUMENTATION
Qt.py was born in the film and visual effects industry to address
the growing need for the development of software capable of running
with more than one flavour of the Qt bindings for Python - PySide,
PySide2, PyQt4 and PyQt5.
1. Build for one, run with all
2. Explicit is better than implicit
3. Support co-existence
Default resolution order:
- PySide2
- PyQt5
- PySide
- PyQt4
Usage:
>> import sys
>> from Qt import QtWidgets
>> app = QtWidgets.QApplication(sys.argv)
>> button = QtWidgets.QPushButton("Hello World")
>> button.show()
>> app.exec_()
All members of PySide2 are mapped from other bindings, should they exist.
If no equivalent member exist, it is excluded from Qt.py and inaccessible.
The idea is to highlight members that exist across all supported binding,
and guarantee that code that runs on one binding runs on all others.
For more details, visit https://github.com/mottosso/Qt.py
LICENSE
See end of file for license (MIT, BSD) information.
"""
import os
import sys
import types
import shutil
import importlib
__version__ = "1.1.0.b1"
# Enable support for `from Qt import *`
__all__ = []
# Flags from environment variables
QT_VERBOSE = bool(os.getenv("QT_VERBOSE"))
QT_PREFERRED_BINDING = os.getenv("QT_PREFERRED_BINDING", "")
QT_SIP_API_HINT = os.getenv("QT_SIP_API_HINT")
# Reference to Qt.py
Qt = sys.modules[__name__]
Qt.QtCompat = types.ModuleType("QtCompat")
try:
long
except NameError:
# Python 3 compatibility
long = int
"""Common members of all bindings
This is where each member of Qt.py is explicitly defined.
It is based on a "lowest common denominator" of all bindings;
including members found in each of the 4 bindings.
Find or add excluded members in build_membership.py
"""
_common_members = {
"QtGui": [
"QAbstractTextDocumentLayout",
"QActionEvent",
"QBitmap",
"QBrush",
"QClipboard",
"QCloseEvent",
"QColor",
"QConicalGradient",
"QContextMenuEvent",
"QCursor",
"QDoubleValidator",
"QDrag",
"QDragEnterEvent",
"QDragLeaveEvent",
"QDragMoveEvent",
"QDropEvent",
"QFileOpenEvent",
"QFocusEvent",
"QFont",
"QFontDatabase",
"QFontInfo",
"QFontMetrics",
"QFontMetricsF",
"QGradient",
"QHelpEvent",
"QHideEvent",
"QHoverEvent",
"QIcon",
"QIconDragEvent",
"QIconEngine",
"QImage",
"QImageIOHandler",
"QImageReader",
"QImageWriter",
"QInputEvent",
"QInputMethodEvent",
"QIntValidator",
"QKeyEvent",
"QKeySequence",
"QLinearGradient",
"QMatrix2x2",
"QMatrix2x3",
"QMatrix2x4",
"QMatrix3x2",
"QMatrix3x3",
"QMatrix3x4",
"QMatrix4x2",
"QMatrix4x3",
"QMatrix4x4",
"QMouseEvent",
"QMoveEvent",
"QMovie",
"QPaintDevice",
"QPaintEngine",
"QPaintEngineState",
"QPaintEvent",
"QPainter",
"QPainterPath",
"QPainterPathStroker",
"QPalette",
"QPen",
"QPicture",
"QPictureIO",
"QPixmap",
"QPixmapCache",
"QPolygon",
"QPolygonF",
"QQuaternion",
"QRadialGradient",
"QRegExpValidator",
"QRegion",
"QResizeEvent",
"QSessionManager",
"QShortcutEvent",
"QShowEvent",
"QStandardItem",
"QStandardItemModel",
"QStatusTipEvent",
"QSyntaxHighlighter",
"QTabletEvent",
"QTextBlock",
"QTextBlockFormat",
"QTextBlockGroup",
"QTextBlockUserData",
"QTextCharFormat",
"QTextCursor",
"QTextDocument",
"QTextDocumentFragment",
"QTextFormat",
"QTextFragment",
"QTextFrame",
"QTextFrameFormat",
"QTextImageFormat",
"QTextInlineObject",
"QTextItem",
"QTextLayout",
"QTextLength",
"QTextLine",
"QTextList",
"QTextListFormat",
"QTextObject",
"QTextObjectInterface",
"QTextOption",
"QTextTable",
"QTextTableCell",
"QTextTableCellFormat",
"QTextTableFormat",
"QTransform",
"QValidator",
"QVector2D",
"QVector3D",
"QVector4D",
"QWhatsThisClickedEvent",
"QWheelEvent",
"QWindowStateChangeEvent",
"qAlpha",
"qBlue",
"qGray",
"qGreen",
"qIsGray",
"qRed",
"qRgb",
"qRgb",
],
"QtWidgets": [
"QAbstractButton",
"QAbstractGraphicsShapeItem",
"QAbstractItemDelegate",
"QAbstractItemView",
"QAbstractScrollArea",
"QAbstractSlider",
"QAbstractSpinBox",
"QAction",
"QActionGroup",
"QApplication",
"QBoxLayout",
"QButtonGroup",
"QCalendarWidget",
"QCheckBox",
"QColorDialog",
"QColumnView",
"QComboBox",
"QCommandLinkButton",
"QCommonStyle",
"QCompleter",
"QDataWidgetMapper",
"QDateEdit",
"QDateTimeEdit",
"QDesktopWidget",
"QDial",
"QDialog",
"QDialogButtonBox",
"QDirModel",
"QDockWidget",
"QDoubleSpinBox",
"QErrorMessage",
"QFileDialog",
"QFileIconProvider",
"QFileSystemModel",
"QFocusFrame",
"QFontComboBox",
"QFontDialog",
"QFormLayout",
"QFrame",
"QGesture",
"QGestureEvent",
"QGestureRecognizer",
"QGraphicsAnchor",
"QGraphicsAnchorLayout",
"QGraphicsBlurEffect",
"QGraphicsColorizeEffect",
"QGraphicsDropShadowEffect",
"QGraphicsEffect",
"QGraphicsEllipseItem",
"QGraphicsGridLayout",
"QGraphicsItem",
"QGraphicsItemGroup",
"QGraphicsLayout",
"QGraphicsLayoutItem",
"QGraphicsLineItem",
"QGraphicsLinearLayout",
"QGraphicsObject",
"QGraphicsOpacityEffect",
"QGraphicsPathItem",
"QGraphicsPixmapItem",
"QGraphicsPolygonItem",
"QGraphicsProxyWidget",
"QGraphicsRectItem",
"QGraphicsRotation",
"QGraphicsScale",
"QGraphicsScene",
"QGraphicsSceneContextMenuEvent",
"QGraphicsSceneDragDropEvent",
"QGraphicsSceneEvent",
"QGraphicsSceneHelpEvent",
"QGraphicsSceneHoverEvent",
"QGraphicsSceneMouseEvent",
"QGraphicsSceneMoveEvent",
"QGraphicsSceneResizeEvent",
"QGraphicsSceneWheelEvent",
"QGraphicsSimpleTextItem",
"QGraphicsTextItem",
"QGraphicsTransform",
"QGraphicsView",
"QGraphicsWidget",
"QGridLayout",
"QGroupBox",
"QHBoxLayout",
"QHeaderView",
"QInputDialog",
"QItemDelegate",
"QItemEditorCreatorBase",
"QItemEditorFactory",
"QKeyEventTransition",
"QLCDNumber",
"QLabel",
"QLayout",
"QLayoutItem",
"QLineEdit",
"QListView",
"QListWidget",
"QListWidgetItem",
"QMainWindow",
"QMdiArea",
"QMdiSubWindow",
"QMenu",
"QMenuBar",
"QMessageBox",
"QMouseEventTransition",
"QPanGesture",
"QPinchGesture",
"QPlainTextDocumentLayout",
"QPlainTextEdit",
"QProgressBar",
"QProgressDialog",
"QPushButton",
"QRadioButton",
"QRubberBand",
"QScrollArea",
"QScrollBar",
"QShortcut",
"QSizeGrip",
"QSizePolicy",
"QSlider",
"QSpacerItem",
"QSpinBox",
"QSplashScreen",
"QSplitter",
"QSplitterHandle",
"QStackedLayout",
"QStackedWidget",
"QStatusBar",
"QStyle",
"QStyleFactory",
"QStyleHintReturn",
"QStyleHintReturnMask",
"QStyleHintReturnVariant",
"QStyleOption",
"QStyleOptionButton",
"QStyleOptionComboBox",
"QStyleOptionComplex",
"QStyleOptionDockWidget",
"QStyleOptionFocusRect",
"QStyleOptionFrame",
"QStyleOptionGraphicsItem",
"QStyleOptionGroupBox",
"QStyleOptionHeader",
"QStyleOptionMenuItem",
"QStyleOptionProgressBar",
"QStyleOptionRubberBand",
"QStyleOptionSizeGrip",
"QStyleOptionSlider",
"QStyleOptionSpinBox",
"QStyleOptionTab",
"QStyleOptionTabBarBase",
"QStyleOptionTabWidgetFrame",
"QStyleOptionTitleBar",
"QStyleOptionToolBar",
"QStyleOptionToolBox",
"QStyleOptionToolButton",
"QStyleOptionViewItem",
"QStylePainter",
"QStyledItemDelegate",
"QSwipeGesture",
"QSystemTrayIcon",
"QTabBar",
"QTabWidget",
"QTableView",
"QTableWidget",
"QTableWidgetItem",
"QTableWidgetSelectionRange",
"QTapAndHoldGesture",
"QTapGesture",
"QTextBrowser",
"QTextEdit",
"QTimeEdit",
"QToolBar",
"QToolBox",
"QToolButton",
"QToolTip",
"QTreeView",
"QTreeWidget",
"QTreeWidgetItem",
"QTreeWidgetItemIterator",
"QUndoCommand",
"QUndoGroup",
"QUndoStack",
"QUndoView",
"QVBoxLayout",
"QWhatsThis",
"QWidget",
"QWidgetAction",
"QWidgetItem",
"QWizard",
"QWizardPage",
],
"QtCore": [
"QAbstractAnimation",
"QAbstractEventDispatcher",
"QAbstractItemModel",
"QAbstractListModel",
"QAbstractState",
"QAbstractTableModel",
"QAbstractTransition",
"QAnimationGroup",
"QBasicTimer",
"QBitArray",
"QBuffer",
"QByteArray",
"QByteArrayMatcher",
"QChildEvent",
"QCoreApplication",
"QCryptographicHash",
"QDataStream",
"QDate",
"QDateTime",
"QDir",
"QDirIterator",
"QDynamicPropertyChangeEvent",
"QEasingCurve",
"QElapsedTimer",
"QEvent",
"QEventLoop",
"QEventTransition",
"QFile",
"QFileInfo",
"QFileSystemWatcher",
"QFinalState",
"QGenericArgument",
"QGenericReturnArgument",
"QHistoryState",
"QIODevice",
"QLibraryInfo",
"QLine",
"QLineF",
"QLocale",
"QMargins",
"QMetaClassInfo",
"QMetaEnum",
"QMetaMethod",
"QMetaObject",
"QMetaProperty",
"QMetaType",
"QMimeData",
"QModelIndex",
"QMutex",
"QMutexLocker",
"QObject",
"QParallelAnimationGroup",
"QPauseAnimation",
"QPersistentModelIndex",
"QPluginLoader",
"QPoint",
"QPointF",
"QProcess",
"QProcessEnvironment",
"QPropertyAnimation",
"QReadLocker",
"QReadWriteLock",
"QRect",
"QRectF",
"QRegExp",
"QResource",
"QRunnable",
"QSemaphore",
"QSequentialAnimationGroup",
"QSettings",
"QSignalMapper",
"QSignalTransition",
"QSize",
"QSizeF",
"QSocketNotifier",
"QState",
"QStateMachine",
"QSysInfo",
"QSystemSemaphore",
"QTemporaryFile",
"QTextBoundaryFinder",
"QTextCodec",
"QTextDecoder",
"QTextEncoder",
"QTextStream",
"QTextStreamManipulator",
"QThread",
"QThreadPool",
"QTime",
"QTimeLine",
"QTimer",
"QTimerEvent",
"QTranslator",
"QUrl",
"QVariantAnimation",
"QWaitCondition",
"QWriteLocker",
"QXmlStreamAttribute",
"QXmlStreamAttributes",
"QXmlStreamEntityDeclaration",
"QXmlStreamEntityResolver",
"QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration",
"QXmlStreamReader",
"QXmlStreamWriter",
"Qt",
"QtCriticalMsg",
"QtDebugMsg",
"QtFatalMsg",
"QtMsgType",
"QtSystemMsg",
"QtWarningMsg",
"qAbs",
"qAddPostRoutine",
"qChecksum",
"qCritical",
"qDebug",
"qFatal",
"qFuzzyCompare",
"qIsFinite",
"qIsInf",
"qIsNaN",
"qIsNull",
"qRegisterResourceData",
"qUnregisterResourceData",
"qVersion",
"qWarning",
"qrand",
"qsrand",
],
"QtXml": [
"QDomAttr",
"QDomCDATASection",
"QDomCharacterData",
"QDomComment",
"QDomDocument",
"QDomDocumentFragment",
"QDomDocumentType",
"QDomElement",
"QDomEntity",
"QDomEntityReference",
"QDomImplementation",
"QDomNamedNodeMap",
"QDomNode",
"QDomNodeList",
"QDomNotation",
"QDomProcessingInstruction",
"QDomText",
"QXmlAttributes",
"QXmlContentHandler",
"QXmlDTDHandler",
"QXmlDeclHandler",
"QXmlDefaultHandler",
"QXmlEntityResolver",
"QXmlErrorHandler",
"QXmlInputSource",
"QXmlLexicalHandler",
"QXmlLocator",
"QXmlNamespaceSupport",
"QXmlParseException",
"QXmlReader",
"QXmlSimpleReader"
],
"QtHelp": [
"QHelpContentItem",
"QHelpContentModel",
"QHelpContentWidget",
"QHelpEngine",
"QHelpEngineCore",
"QHelpIndexModel",
"QHelpIndexWidget",
"QHelpSearchEngine",
"QHelpSearchQuery",
"QHelpSearchQueryWidget",
"QHelpSearchResultWidget"
],
"QtNetwork": [
"QAbstractNetworkCache",
"QAbstractSocket",
"QAuthenticator",
"QHostAddress",
"QHostInfo",
"QLocalServer",
"QLocalSocket",
"QNetworkAccessManager",
"QNetworkAddressEntry",
"QNetworkCacheMetaData",
"QNetworkConfiguration",
"QNetworkConfigurationManager",
"QNetworkCookie",
"QNetworkCookieJar",
"QNetworkDiskCache",
"QNetworkInterface",
"QNetworkProxy",
"QNetworkProxyFactory",
"QNetworkProxyQuery",
"QNetworkReply",
"QNetworkRequest",
"QNetworkSession",
"QSsl",
"QTcpServer",
"QTcpSocket",
"QUdpSocket"
],
"QtOpenGL": [
"QGL",
"QGLContext",
"QGLFormat",
"QGLWidget"
]
}
"""Misplaced members
These members from the original submodule are misplaced relative PySide2
"""
_misplaced_members = {
"pyside2": {
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
},
"pyqt5": {
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QStringListModel": "QtCore.QStringListModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
},
"pyside": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
},
"pyqt4": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
}
}
def _apply_site_config():
try:
import QtSiteConfig
except ImportError:
# If no QtSiteConfig module found, no modifications
# to _common_members are needed.
pass
else:
# Update _common_members with any changes made by QtSiteConfig
QtSiteConfig.update_members(_common_members)
def _new_module(name):
return types.ModuleType(__name__ + "." + name)
def _setup(module, extras):
    """Install common submodules

    Imports every submodule named in `_common_members` (plus `extras`,
    e.g. "QtUiTools" or "uic") from the given binding package and stores
    it on Qt under a leading-underscore alias. Common submodules also
    get a fresh, empty placeholder module on Qt which `_install` later
    populates member-by-member.
    """
    Qt.__binding__ = module.__name__

    for name in list(_common_members) + extras:
        try:
            # e.g. importlib.import_module("PySide2.QtCore")
            submodule = importlib.import_module(
                module.__name__ + "." + name)
        except ImportError:
            # Submodule not shipped with this binding; simply skip it.
            continue

        setattr(Qt, "_" + name, submodule)

        if name not in extras:
            # Store reference to original binding,
            # but don't store speciality modules
            # such as uic or QtUiTools
            setattr(Qt, name, _new_module(name))
def _wrapinstance(func, ptr, base=None):
    """Enable implicit cast of pointer to most suitable class

    This behaviour is available in sip per default.

    Based on http://nathanhorne.com/pyqtpyside-wrap-instance

    Usage:
        This mechanism kicks in under these circumstances.
        1. Qt.py is using PySide 1 or 2.
        2. A `base` argument is not provided.

    See :func:`QtCompat.wrapInstance()`

    Arguments:
        func (function): Original function
        ptr (long): Pointer to QObject in memory
        base (QObject, optional): Base class to wrap with. Defaults to QObject,
            which should handle anything.

    """
    assert isinstance(ptr, long), "Argument 'ptr' must be of type <long>"
    assert (base is None) or issubclass(base, Qt.QtCore.QObject), (
        "Argument 'base' must be of type <QObject>")

    if base is None:
        # No explicit base given: wrap as a plain QObject first, then use
        # the Qt meta-object system to find the most derived class that
        # Qt.QtWidgets actually exposes.
        q_object = func(long(ptr), Qt.QtCore.QObject)
        meta_object = q_object.metaObject()
        class_name = meta_object.className()
        super_class_name = meta_object.superClass().className()

        if hasattr(Qt.QtWidgets, class_name):
            base = getattr(Qt.QtWidgets, class_name)

        elif hasattr(Qt.QtWidgets, super_class_name):
            base = getattr(Qt.QtWidgets, super_class_name)

        else:
            # Neither the class nor its direct parent is exposed;
            # fall back to the least specific wrapper.
            base = Qt.QtCore.QObject

    return func(long(ptr), base)
def _reassign_misplaced_members(binding):
    """Parse `_misplaced_members` dict and remap
    values based on the underlying binding.

    :param str binding: Top level binding in _misplaced_members.
    """
    for orig, target in _misplaced_members[binding].items():
        orig_module, orig_member = orig.split(".")
        target_module, target_member = target.split(".")

        try:
            # The Qt.py-facing module the member should end up on.
            destination = getattr(Qt, target_module)
        except AttributeError:
            # Skip reassignment of non-existing members.
            # This can happen if a request was made to
            # rename a member that didn't exist, for example
            # if QtWidgets isn't available on the target platform.
            continue

        # Pull the member from the original binding's stored submodule.
        value = getattr(getattr(Qt, "_" + orig_module), orig_member)
        setattr(destination, target_member, value)
def _pyside2():
    """Initialise PySide2

    These functions serve to test the existence of a binding
    along with set it up in such a way that it aligns with
    the final step; adding members from the original binding
    to Qt.py
    """
    import PySide2 as module
    _setup(module, ["QtUiTools"])

    Qt.__binding_version__ = module.__version__

    try:
        import shiboken2
        # wrapInstance/getCppPointer bridge raw C++ pointers and Python
        # objects; optional because shiboken2 may not ship alongside
        # the binding.
        Qt.QtCompat.wrapInstance = (
            lambda ptr, base=None: _wrapinstance(
                shiboken2.wrapInstance, ptr, base)
        )
        Qt.QtCompat.getCppPointer = lambda object: \
            shiboken2.getCppPointer(object)[0]

    except ImportError:
        pass  # Optional

    if hasattr(Qt, "_QtUiTools"):
        Qt.QtCompat.loadUi = _loadUi

    if hasattr(Qt, "_QtCore"):
        Qt.__qt_version__ = Qt._QtCore.qVersion()
        Qt.QtCompat.translate = Qt._QtCore.QCoreApplication.translate

    if hasattr(Qt, "_QtWidgets"):
        # Qt5 API name, exposed via QtCompat for parity with the
        # Qt4 bindings' setResizeMode.
        Qt.QtCompat.setSectionResizeMode = \
            Qt._QtWidgets.QHeaderView.setSectionResizeMode

    _reassign_misplaced_members("pyside2")
def _pyside():
    """Initialise PySide"""
    import PySide as module
    _setup(module, ["QtUiTools"])

    Qt.__binding_version__ = module.__version__

    try:
        import shiboken
        # Pointer <-> object bridging; optional, as shiboken may not
        # be installed alongside PySide.
        Qt.QtCompat.wrapInstance = (
            lambda ptr, base=None: _wrapinstance(
                shiboken.wrapInstance, ptr, base)
        )
        Qt.QtCompat.getCppPointer = lambda object: \
            shiboken.getCppPointer(object)[0]

    except ImportError:
        pass  # Optional

    if hasattr(Qt, "_QtUiTools"):
        Qt.QtCompat.loadUi = _loadUi

    if hasattr(Qt, "_QtGui"):
        # PySide is Qt4-based: widget classes live in QtGui, so alias it
        # as QtWidgets for source compatibility with Qt5 bindings.
        setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
        setattr(Qt, "_QtWidgets", Qt._QtGui)

        Qt.QtCompat.setSectionResizeMode = Qt._QtGui.QHeaderView.setResizeMode

    if hasattr(Qt, "_QtCore"):
        Qt.__qt_version__ = Qt._QtCore.qVersion()
        QCoreApplication = Qt._QtCore.QCoreApplication
        # Qt4's translate takes an extra encoding argument; adapt it to
        # the Qt5-style four-argument signature used by Qt.py.
        Qt.QtCompat.translate = (
            lambda context, sourceText, disambiguation, n:
            QCoreApplication.translate(
                context,
                sourceText,
                disambiguation,
                QCoreApplication.CodecForTr,
                n
            )
        )

    _reassign_misplaced_members("pyside")
def _pyqt5():
    """Initialise PyQt5"""
    import PyQt5 as module
    _setup(module, ["uic"])

    try:
        import sip
        # Pointer <-> object bridging via sip; optional.
        Qt.QtCompat.wrapInstance = (
            lambda ptr, base=None: _wrapinstance(
                sip.wrapinstance, ptr, base)
        )
        Qt.QtCompat.getCppPointer = lambda object: \
            sip.unwrapinstance(object)

    except ImportError:
        pass  # Optional

    if hasattr(Qt, "_uic"):
        Qt.QtCompat.loadUi = _loadUi

    if hasattr(Qt, "_QtCore"):
        Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
        Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
        Qt.QtCompat.translate = Qt._QtCore.QCoreApplication.translate

    if hasattr(Qt, "_QtWidgets"):
        Qt.QtCompat.setSectionResizeMode = \
            Qt._QtWidgets.QHeaderView.setSectionResizeMode

    _reassign_misplaced_members("pyqt5")
def _pyqt4():
    """Initialise PyQt4

    Forces the sip API to version 2 (or the QT_SIP_API_HINT override)
    for the Qt classes that support it, so PyQt4 behaves like the
    other bindings, then installs the common submodules and
    compatibility shims.
    """
    import sip

    # Validation of environment variable. Prevents an error if
    # the variable is invalid since it's just a hint.
    try:
        hint = int(QT_SIP_API_HINT)
    except TypeError:
        hint = None  # Variable was None, i.e. not set.
    except ValueError:
        # Bug fix: the original raised this message without applying
        # the %-argument, leaving a literal "%s" in the error text.
        raise ImportError("QT_SIP_API_HINT=%s must be a 1 or 2"
                          % QT_SIP_API_HINT)

    for api in ("QString",
                "QVariant",
                "QDate",
                "QDateTime",
                "QTextStream",
                "QTime",
                "QUrl"):
        try:
            sip.setapi(api, hint or 2)
        except AttributeError:
            raise ImportError("PyQt4 < 4.6 isn't supported by Qt.py")
        except ValueError:
            actual = sip.getapi(api)
            if not hint:
                raise ImportError("API version already set to %d" % actual)
            else:
                # Having provided a hint indicates a soft constraint, one
                # that doesn't throw an exception.
                sys.stderr.write(
                    "Warning: API '%s' has already been set to %d.\n"
                    % (api, actual)
                )

    import PyQt4 as module
    _setup(module, ["uic"])

    try:
        import sip
        # Pointer <-> object bridging via sip; optional.
        Qt.QtCompat.wrapInstance = (
            lambda ptr, base=None: _wrapinstance(
                sip.wrapinstance, ptr, base)
        )
        Qt.QtCompat.getCppPointer = lambda object: \
            sip.unwrapinstance(object)

    except ImportError:
        pass  # Optional

    if hasattr(Qt, "_uic"):
        Qt.QtCompat.loadUi = _loadUi

    if hasattr(Qt, "_QtGui"):
        # PyQt4 is Qt4-based: widgets live in QtGui, so alias it as
        # QtWidgets for source compatibility with Qt5 bindings.
        setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
        setattr(Qt, "_QtWidgets", Qt._QtGui)

        Qt.QtCompat.setSectionResizeMode = \
            Qt._QtGui.QHeaderView.setResizeMode

    if hasattr(Qt, "_QtCore"):
        Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
        Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR

        QCoreApplication = Qt._QtCore.QCoreApplication
        # Qt4's translate takes an extra encoding argument; adapt it to
        # the Qt5-style four-argument signature used by Qt.py.
        Qt.QtCompat.translate = (
            lambda context, sourceText, disambiguation, n:
            QCoreApplication.translate(
                context,
                sourceText,
                disambiguation,
                QCoreApplication.CodecForTr,
                n)
        )

    _reassign_misplaced_members("pyqt4")
def _none():
    """Internal option (used in installer): install do-nothing mocks."""
    Mock = type("Mock", (), {"__getattr__": lambda Qt, attr: None})

    Qt.__binding__ = "None"
    Qt.__qt_version__ = "0.0.0"
    Qt.__binding_version__ = "0.0.0"
    Qt.QtCompat.loadUi = lambda uifile, baseinstance=None: None
    Qt.QtCompat.setSectionResizeMode = lambda *args, **kwargs: None

    # Both the public submodule and its underscore-prefixed alias
    # become inert mocks that return None for any attribute.
    for submodule in _common_members:
        for prefix in ("", "_"):
            setattr(Qt, prefix + submodule, Mock())
def _log(text):
    """Write `text` to stdout, but only when QT_VERBOSE is enabled."""
    if QT_VERBOSE:
        sys.stdout.write("%s\n" % text)
def _loadUi(uifile, baseinstance=None):
    """Dynamically load a user interface from the given `uifile`

    This function calls `uic.loadUi` if using PyQt bindings,
    else it implements a comparable binding for PySide.

    Documentation:
        http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi

    Arguments:
        uifile (str): Absolute path to Qt Designer file.
        baseinstance (QWidget): Instantiated QWidget or subclass thereof

    Return:
        baseinstance if `baseinstance` is not `None`. Otherwise
        return the newly created instance of the user interface.

    Raises:
        RuntimeError: if `baseinstance` already has a layout (the .ui
            file would try to install a second one).
        NotImplementedError: if neither a PyQt `uic` nor a PySide
            `QtUiTools` module is available.
    """
    if hasattr(baseinstance, "layout") and baseinstance.layout():
        message = ("QLayout: Attempting to add Layout to %s which "
                   "already has a layout")
        raise RuntimeError(message % (baseinstance))

    if hasattr(Qt, "_uic"):
        # PyQt: the native loader already supports a base instance.
        return Qt._uic.loadUi(uifile, baseinstance)

    elif hasattr(Qt, "_QtUiTools"):
        # Implement `PyQt5.uic.loadUi` for PySide(2)

        class _UiLoader(Qt._QtUiTools.QUiLoader):
            """Create the user interface in a base instance.

            Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
            create a new instance of the top-level widget, but creates the user
            interface in an existing instance of the top-level class if needed.

            This mimics the behaviour of `PyQt5.uic.loadUi`.

            """

            def __init__(self, baseinstance):
                super(_UiLoader, self).__init__(baseinstance)
                self.baseinstance = baseinstance

            def load(self, uifile, *args, **kwargs):
                from xml.etree.ElementTree import ElementTree

                # For whatever reason, if this doesn't happen then
                # reading an invalid or non-existing .ui file throws
                # a RuntimeError.
                etree = ElementTree()
                etree.parse(uifile)

                widget = Qt._QtUiTools.QUiLoader.load(
                    self, uifile, *args, **kwargs)

                # Workaround for PySide 1.0.9, see issue #208
                widget.parentWidget()

                return widget

            def createWidget(self, class_name, parent=None, name=""):
                """Called for each widget defined in ui file

                Overridden here to populate `baseinstance` instead.

                """

                if parent is None and self.baseinstance:
                    # Supposed to create the top-level widget,
                    # return the base instance instead
                    return self.baseinstance

                # For some reason, Line is not in the list of available
                # widgets, but works fine, so we have to special case it here.
                if class_name in self.availableWidgets() + ["Line"]:
                    # Create a new widget for child widgets
                    widget = Qt._QtUiTools.QUiLoader.createWidget(self,
                                                                  class_name,
                                                                  parent,
                                                                  name)

                else:
                    raise Exception("Custom widget '%s' not supported"
                                    % class_name)

                if self.baseinstance:
                    # Set an attribute for the new child widget on the base
                    # instance, just like PyQt5.uic.loadUi does.
                    setattr(self.baseinstance, name, widget)

                return widget

        widget = _UiLoader(baseinstance).load(uifile)
        # Connect on_<objectName>_<signal> style slots automatically,
        # matching PyQt's loadUi behaviour.
        Qt.QtCore.QMetaObject.connectSlotsByName(widget)

        return widget

    else:
        raise NotImplementedError("No implementation available for loadUi")
def _convert(lines):
"""Convert compiled .ui file from PySide2 to Qt.py
Arguments:
lines (list): Each line of of .ui file
Usage:
>> with open("myui.py") as f:
.. lines = _convert(f.readlines())
"""
def parse(line):
line = line.replace("from PySide2 import", "from Qt import QtCompat,")
line = line.replace("QtWidgets.QApplication.translate",
"QtCompat.translate")
return line
parsed = list()
for line in lines:
line = parse(line)
parsed.append(line)
return parsed
def _cli(args):
    """Qt.py command-line interface

    Currently only implements --convert, which rewrites a PySide2-compiled
    .ui Python module in place (after saving a *_backup copy) to import
    from Qt.py instead. --stdout/--stdin/--compile are reserved flags.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--convert",
                        help="Path to compiled Python module, e.g. my_ui.py")
    parser.add_argument("--compile",
                        help="Accept raw .ui file and compile with native "
                             "PySide2 compiler.")
    parser.add_argument("--stdout",
                        help="Write to stdout instead of file",
                        action="store_true")
    parser.add_argument("--stdin",
                        help="Read from stdin instead of file",
                        action="store_true")

    args = parser.parse_args(args)

    if args.stdout:
        raise NotImplementedError("--stdout")

    if args.stdin:
        raise NotImplementedError("--stdin")

    if args.compile:
        raise NotImplementedError("--compile")

    if args.convert:
        sys.stdout.write("#\n"
                         "# WARNING: --convert is an ALPHA feature.\n#\n"
                         "# See https://github.com/mottosso/Qt.py/pull/132\n"
                         "# for details.\n"
                         "#\n")

        #
        # ------> Read
        #
        with open(args.convert) as f:
            lines = _convert(f.readlines())

        # Keep an untouched copy next to the original before overwriting.
        backup = "%s_backup%s" % os.path.splitext(args.convert)
        sys.stdout.write("Creating \"%s\"..\n" % backup)
        shutil.copy(args.convert, backup)

        #
        # <------ Write
        #
        with open(args.convert, "w") as f:
            f.write("".join(lines))

        sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
def _install():
    """Resolve a Qt binding and populate Qt.py's submodules.

    Tries each candidate binding in order (honouring QT_PREFERRED_BINDING),
    then copies every `_common_members` entry from the chosen binding's
    submodules onto Qt.py's own placeholder submodules.

    Raises:
        ImportError: if none of the candidate bindings can be imported.
    """
    # Default order (customise order and content via QT_PREFERRED_BINDING)
    default_order = ("PySide2", "PyQt5", "PySide", "PyQt4")
    preferred_order = list(
        b for b in QT_PREFERRED_BINDING.split(os.pathsep) if b
    )

    order = preferred_order or default_order

    available = {
        "PySide2": _pyside2,
        "PyQt5": _pyqt5,
        "PySide": _pyside,
        "PyQt4": _pyqt4,
        "None": _none
    }

    _log("Order: '%s'" % "', '".join(order))

    # Allow site-level customization of the available modules.
    _apply_site_config()

    found_binding = False
    for name in order:
        _log("Trying %s" % name)

        try:
            available[name]()
            found_binding = True
            break
        except ImportError as e:
            # Binding not installed; try the next candidate.
            _log("ImportError: %s" % e)
        except KeyError:
            # Name in QT_PREFERRED_BINDING is not a known binding.
            _log("ImportError: Preferred binding '%s' not found." % name)

    if not found_binding:
        # If no binding was found, throw this error
        raise ImportError("No Qt binding were found.")

    # Install individual members
    for name, members in _common_members.items():
        try:
            their_submodule = getattr(Qt, "_%s" % name)
        except AttributeError:
            # Submodule unavailable in the chosen binding; skip.
            continue

        our_submodule = getattr(Qt, name)

        # Enable import *
        __all__.append(name)

        # Enable direct import of submodule,
        # e.g. import Qt.QtCore
        sys.modules[__name__ + "." + name] = our_submodule

        for member in members:
            # Accept that a submodule may miss certain members.
            try:
                their_member = getattr(their_submodule, member)
            except AttributeError:
                _log("'%s.%s' was missing." % (name, member))
                continue

            setattr(our_submodule, member, their_member)

    # Backwards compatibility
    Qt.QtCompat.load_ui = Qt.QtCompat.loadUi
_install()
# Setup Binding Enum states
Qt.IsPySide2 = Qt.__binding__ == 'PySide2'
Qt.IsPyQt5 = Qt.__binding__ == 'PyQt5'
Qt.IsPySide = Qt.__binding__ == 'PySide'
Qt.IsPyQt4 = Qt.__binding__ == 'PyQt4'
"""Augment QtCompat
QtCompat contains wrappers and added functionality
to the original bindings, such as the CLI interface
and otherwise incompatible members between bindings,
such as `QHeaderView.setSectionResizeMode`.
"""
Qt.QtCompat._cli = _cli
Qt.QtCompat._convert = _convert
# Enable command-line interface
if __name__ == "__main__":
_cli(sys.argv[1:])
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 Marcus Ottosson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# In PySide(2), loadUi does not exist, so we implement it
#
# `_UiLoader` is adapted from the qtpy project, which was further influenced
# by qt-helpers which was released under a 3-clause BSD license which in turn
# is based on a solution at:
#
# - https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# The License for this code is as follows:
#
# qt-helpers - a common front-end to various Qt modules
#
# Copyright (c) 2015, Chris Beaumont and Thomas Robitaille
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Glue project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Which itself was based on the solution at
#
# https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# which was released under the MIT license:
#
# Copyright (c) 2011 Sebastian Wiesner <lunaryorn@gmail.com>
# Modifications by Charl Botha <cpbotha@vxlabs.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files
# (the "Software"),to deal in the Software without restriction,
# including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| duncanskertchly/dm2skin | dependencies/Qt.py | Python | unlicense | 39,781 | [
"VisIt"
] | 2b7ad8537cbf1ee3abdb72a6a67b7d63e34b8bac6bb380750869779d5ee66ad5 |
"""Shared functionality useful across multiple structural variant callers.
Handles exclusion regions and preparing discordant regions.
"""
import collections
from contextlib import closing
import os
import numpy
import pybedtools
import pysam
import toolz as tz
import yaml
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.bam import callable
from bcbio.ngsalign import postalign
from bcbio.pipeline import shared, config_utils
from bcbio.provenance import do
from bcbio.variation import population
# ## Case/control
def find_case_control(items):
    """Split a population of samples into cases and controls.

    Samples whose affected status resolves to 1 are treated as controls;
    all other samples are treated as cases.

    Returns a tuple of (cases, controls) lists.
    """
    cases, controls = [], []
    for sample in items:
        if population.get_affected_status(sample) == 1:
            controls.append(sample)
        else:
            cases.append(sample)
    return cases, controls
# ## Prepare exclusion regions (repeats, telomeres, centromeres)
def _get_sv_exclude_file(items):
    """Retrieve the configured SV repeat BED file of regions to exclude.

    Implicitly returns None when no file is configured or the configured
    file does not exist on disk.
    """
    bed_file = utils.get_in(items[0], ("genome_resources", "variation", "sv_repeat"))
    if bed_file and os.path.exists(bed_file):
        return bed_file
def _get_variant_regions(items):
    """Retrieve variant regions defined in any of the input items.

    Samples flagged for whole-genome coverage do not contribute regions.
    Returns a list (rather than a lazy ``filter`` object) so callers can
    safely take ``len`` of the result under both Python 2 and Python 3.
    """
    candidates = [tz.get_in(("config", "algorithm", "variant_regions"), data)
                  for data in items
                  if tz.get_in(["config", "algorithm", "coverage_interval"], data) != "genome"]
    return [region for region in candidates if region is not None]
def has_variant_regions(items, base_file, chrom=None):
    """Determine if we should process this chromosome: needs variant regions defined."""
    if not chrom:
        # Without a chromosome restriction there is nothing to rule out.
        return True
    all_vrs = _get_variant_regions(items)
    if len(all_vrs) > 0:
        subset = shared.subset_variant_regions(tz.first(all_vrs), chrom, base_file, items)
        # Getting the bare chromosome back means no variant regions overlap it.
        if subset == chrom:
            return False
    return True
def remove_exclude_regions(orig_bed, base_file, items, remove_entire_feature=False):
    """Remove centromere and short end regions from an existing BED file of regions to target.

    Falls back to returning the original BED when the subtraction produces
    no output file.
    """
    out_bed = os.path.join("%s-noexclude.bed" % (utils.splitext_plus(base_file)[0]))
    exclude_bed = prepare_exclude_file(items, base_file)
    with file_transaction(items[0], out_bed) as tx_out_bed:
        kept = pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(exclude_bed),
                                                     A=remove_entire_feature)
        kept.saveas(tx_out_bed)
    return out_bed if utils.file_exists(out_bed) else orig_bed
def prepare_exclude_file(items, base_file, chrom=None):
    """Prepare a BED file for exclusion.

    Excludes high depth and centromere regions which contribute to long run times and
    false positive structural variant calls.

    items -- sample dictionaries sharing a reference genome
    base_file -- file name used to derive the output BED name
    chrom -- optional chromosome to restrict the exclusion file to
    """
    out_file = "%s-exclude%s.bed" % (utils.splitext_plus(base_file)[0], "-%s" % chrom if chrom else "")
    # Skip the work when a previous run already produced the (possibly bgzipped) output.
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with shared.bedtools_tmpdir(items[0]):
            # Get a bedtool for the full region if no variant regions
            want_bedtool = callable.get_ref_bedtool(tz.get_in(["reference", "fasta", "base"], items[0]),
                                                    items[0]["config"], chrom)
            if chrom:
                want_bedtool = pybedtools.BedTool(shared.subset_bed_by_chrom(want_bedtool.saveas().fn,
                                                                             chrom, items[0]))
            # Drop configured repeat regions from the regions we want to keep.
            sv_exclude_bed = _get_sv_exclude_file(items)
            if sv_exclude_bed and len(want_bedtool) > 0:
                want_bedtool = want_bedtool.subtract(sv_exclude_bed).saveas()
            # Also drop abnormally high depth regions, a source of false positive calls.
            want_bedtool = pybedtools.BedTool(shared.remove_highdepth_regions(want_bedtool.saveas().fn, items))
            with file_transaction(items[0], out_file) as tx_out_file:
                # The exclusion set is the complement of the regions we keep.
                full_bedtool = callable.get_ref_bedtool(tz.get_in(["reference", "fasta", "base"], items[0]),
                                                        items[0]["config"])
                if len(want_bedtool) > 0:
                    full_bedtool.subtract(want_bedtool).saveas(tx_out_file)
                else:
                    full_bedtool.saveas(tx_out_file)
    return out_file
def exclude_by_ends(in_file, exclude_file, data, in_params=None):
    """Exclude calls based on overlap of the ends with exclusion regions.

    Removes structural variants with either end being in a repeat: a large
    source of false positives.

    Parameters tuned based on removal of LCR overlapping false positives in DREAM
    synthetic 3 data.

    Returns a tuple of (filtered BED file, number of removed calls).
    """
    # Defaults: buffer size around each end, per-repeat and total overlap cutoffs.
    params = {"end_buffer": 50,
              "rpt_pct": 0.9,
              "total_rpt_pct": 0.2,
              "sv_pct": 0.5}
    if in_params:
        params.update(in_params)
    assert in_file.endswith(".bed")
    out_file = "%s-norepeats%s" % utils.splitext_plus(in_file)
    to_filter = collections.defaultdict(list)
    removed = 0
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            with shared.bedtools_tmpdir(data):
                # Collect repeat overlaps for both ends of every call.
                for coord, end_name in [(1, "end1"), (2, "end2")]:
                    base, ext = utils.splitext_plus(tx_out_file)
                    end_file = _create_end_file(in_file, coord, params, "%s-%s%s" % (base, end_name, ext))
                    to_filter = _find_to_filter(end_file, exclude_file, params, to_filter)
            with open(tx_out_file, "w") as out_handle:
                with open(in_file) as in_handle:
                    for line in in_handle:
                        # Key matches the name column written by _create_end_file.
                        key = "%s:%s-%s" % tuple(line.strip().split("\t")[:3])
                        total_rpt_size = sum(to_filter.get(key, [0]))
                        # Keep calls whose summed repeat overlap stays under the cutoff.
                        if total_rpt_size <= (params["total_rpt_pct"] * params["end_buffer"]):
                            out_handle.write(line)
                        else:
                            removed += 1
    return out_file, removed
def _find_to_filter(in_file, exclude_file, params, to_exclude):
    """Identify end regions that overlap the exclusion file.

    Records overlaps where the end is mostly inside a repeat, or where the
    end contains most of an entire repeat. Overlap sizes are appended per
    region name into ``to_exclude`` which is returned for accumulation.
    """
    ends = pybedtools.BedTool(in_file)
    repeats = pybedtools.BedTool(exclude_file)
    for feat in ends.intersect(repeats, wao=True, nonamecheck=True):
        (us_chrom, us_start, us_end, name,
         other_chrom, other_start, other_end, overlap) = feat.fields
        overlap_size = float(overlap)
        if overlap_size > 0:
            rpt_size = float(other_end) - float(other_start)
            rpt_pct = overlap_size / rpt_size
            end_pct = overlap_size / (float(us_end) - float(us_start))
            if end_pct > params["sv_pct"] or rpt_pct > params["rpt_pct"]:
                to_exclude[name].append(overlap_size)
    return to_exclude
def _create_end_file(in_file, coord, params, out_file):
with open(in_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
parts = line.strip().split("\t")
name = "%s:%s-%s" % tuple(parts[:3])
curpos = int(parts[coord])
if coord == 1:
start, end = curpos, curpos + params["end_buffer"]
else:
start, end = curpos - params["end_buffer"], curpos
if start > 0:
out_handle.write("\t".join([parts[0], str(start),
str(end), name])
+ "\n")
return out_file
def get_sv_chroms(items, exclude_file):
    """Retrieve chromosomes to process on, avoiding extra skipped chromosomes.

    A chromosome is skipped when the exclusion file covers it starting at
    position 0 for its full BAM-reported length.
    """
    # Map chromosome -> end of an exclusion region anchored at position 0.
    fully_excluded = {}
    for region in pybedtools.BedTool(exclude_file):
        if int(region.start) == 0:
            fully_excluded[region.chrom] = int(region.end)
    chroms = []
    with closing(pysam.Samfile(items[0]["work_bam"], "rb")) as work_bam:
        for chrom, length in zip(work_bam.references, work_bam.lengths):
            if fully_excluded.get(chrom, 0) < length:
                chroms.append(chrom)
    return chroms
# ## Read preparation
def _extract_split_and_discordants(in_bam, work_dir, data):
    """Retrieve de-duplicated, split-read and discordant alignments from a BAM file.

    Runs a samtools name sort piped into samblaster, which writes three BAM
    files into work_dir: the de-duplicated alignments plus the split-read and
    discordant subsets used by structural variant callers. All three outputs
    are indexed before returning.

    Returns a tuple of (dedup_file, sr_file, disc_file).
    """
    base = os.path.splitext(os.path.basename(in_bam))[0]
    dedup_file = os.path.join(work_dir, "%s-dedup.bam" % base)
    sr_file = os.path.join(work_dir, "%s-sr.bam" % base)
    disc_file = os.path.join(work_dir, "%s-disc.bam" % base)
    samtools = config_utils.get_program("samtools", data["config"])
    cores = utils.get_in(data, ("config", "algorithm", "num_cores"), 1)
    resources = config_utils.get_resources("samtools", data["config"])
    # Divide configured memory across sort threads to avoid over-allocation.
    mem = config_utils.adjust_memory(resources.get("memory", "2G"),
                                     3, "decrease").upper()
    # Bug fix: the dedup check was missing a `not`, so extraction re-ran
    # whenever the dedup output *did* exist. Re-run only when any of the
    # three outputs is missing.
    if not (utils.file_exists(sr_file) and utils.file_exists(disc_file)
            and utils.file_exists(dedup_file)):
        with tx_tmpdir(data) as tmpdir:
            with file_transaction(data, sr_file) as tx_sr_file:
                with file_transaction(data, disc_file) as tx_disc_file:
                    with file_transaction(data, dedup_file) as tx_dedup_file:
                        samblaster_cl = postalign.samblaster_dedup_sort(data, tx_dedup_file,
                                                                        tx_sr_file, tx_disc_file)
                        out_base = os.path.join(tmpdir, "%s-namesort" % base)
                        cmd = ("{samtools} sort -n -@ {cores} -m {mem} -O sam -T {out_base} {in_bam} | ")
                        cmd = cmd.format(**locals()) + samblaster_cl
                        do.run(cmd, "samblaster: split and discordant reads", data)
    for fname in [sr_file, disc_file, dedup_file]:
        bam.index(fname, data["config"])
    return dedup_file, sr_file, disc_file
def _find_existing_inputs(in_bam):
    """Check for pre-calculated split reads and discordants done as part of alignment streaming.

    Returns (in_bam, sr_file, disc_file) when both sidecar files exist,
    otherwise (None, None, None).
    """
    base = os.path.splitext(in_bam)[0]
    sr_file = "%s-sr.bam" % base
    disc_file = "%s-disc.bam" % base
    if not (utils.file_exists(sr_file) and utils.file_exists(disc_file)):
        return None, None, None
    return in_bam, sr_file, disc_file
def get_split_discordants(data, work_dir):
    """Retrieve full, split and discordant reads, potentially calculating with samblaster as needed."""
    align_bam = data["align_bam"]
    dedup_bam, sr_bam, disc_bam = _find_existing_inputs(align_bam)
    if dedup_bam:
        return dedup_bam, sr_bam, disc_bam
    # Prefer writing next to the alignments when that directory is writable.
    align_dir = os.path.dirname(align_bam)
    if os.access(align_dir, os.W_OK | os.X_OK):
        work_dir = align_dir
    return _extract_split_and_discordants(align_bam, work_dir, data)
def get_cur_batch(items):
    """Retrieve name of the batch shared between all items in a group.

    Returns the single batch name shared by every sample, None when the
    samples share no batch, and raises ValueError on ambiguous overlaps.
    """
    batches = []
    for data in items:
        batch = tz.get_in(["metadata", "batch"], data, [])
        batches.append(set(batch) if isinstance(batch, (list, tuple)) else set([batch]))
    # set.intersection replaces the bare `reduce` call, which is not a
    # builtin under Python 3.
    combo_batches = set.intersection(*batches) if batches else set()
    if len(combo_batches) == 1:
        return combo_batches.pop()
    elif len(combo_batches) == 0:
        return None
    else:
        raise ValueError("Found multiple overlapping batches: %s -- %s" % (combo_batches, batches))
def outname_from_inputs(in_files):
    """Derive a shared output base name from the common prefix of input files.

    Strips a trailing "chr" fragment plus any dangling separator characters
    left over from per-chromosome file naming.
    """
    base = os.path.commonprefix(in_files)
    if base.endswith("chr"):
        base = base[:-len("chr")]
    while base and base[-1] in "-_.":
        base = base[:-1]
    return base
# -- Insert size calculation
def insert_size_stats(dists):
    """Calculates mean/median and MAD from distances, avoiding outliers.

    MAD is the Median Absolute Deviation: http://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    med = numpy.median(dists)
    # Materialize as a list: under Python 3 `filter` returns a lazy iterator,
    # which numpy.median cannot reduce. Distances beyond 11x the raw median
    # (med + 10 * med) are treated as outliers and dropped.
    filter_dists = [x for x in dists if x < med + 10 * med]
    median = numpy.median(filter_dists)
    return {"mean": float(numpy.mean(filter_dists)), "std": float(numpy.std(filter_dists)),
            "median": float(median),
            "mad": float(numpy.median([abs(x - median) for x in filter_dists]))}
def calc_paired_insert_stats(in_bam, nsample=1000000):
    """Retrieve statistics for paired end read insert distances.

    Samples up to nsample properly paired first-of-pair reads from the
    start of the BAM file and summarises their absolute insert sizes.
    """
    dists = []
    with closing(pysam.Samfile(in_bam, "rb")) as in_pysam:
        for read in in_pysam:
            if read.is_proper_pair and read.is_read1:
                dists.append(abs(read.isize))
                if len(dists) >= nsample:
                    break
    return insert_size_stats(dists)
def calc_paired_insert_stats_save(in_bam, stat_file, nsample=1000000):
    """Calculate paired stats, saving to a file for re-runs."""
    # Re-use previously computed statistics when available.
    if utils.file_exists(stat_file):
        with open(stat_file) as in_handle:
            return yaml.safe_load(in_handle)
    stats = calc_paired_insert_stats(in_bam, nsample)
    with open(stat_file, "w") as out_handle:
        yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
    return stats
| elkingtonmcb/bcbio-nextgen | bcbio/structural/shared.py | Python | mit | 13,523 | [
"pysam"
] | 9d973310bda6213af19276335be0152849f1ce40ad9ac7396878ed9edad92460 |
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 Max Hantke, Filipe R.N.C. Maia, Tomas Ekeberg
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import sys,os
import numpy
import scipy.constants as constants
import logging
logger = logging.getLogger(__name__)
import condor.utils.log
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
from condor.utils.variation import Variation
from condor.utils.photon import Photon
from condor.utils.profile import Profile
class Source:
    """
    Class for an X-ray source

    Args:
      :wavelength (float): X-ray wavelength in unit meter

      :focus_diameter (float): Focus diameter (characteristic transverse dimension) in unit meter

      :pulse_energy (float): (Statistical mean of) pulse energy in unit Joule

    Kwargs:
      :profile_model (str): Model for the spatial illumination profile (default ``None``)

      .. note:: The (keyword) arguments ``focus_diameter`` and ``profile_model`` are passed on to the constructor of :class:`condor.utils.profile.Profile`.

      :pulse_energy_variation (str): Statistical variation of the pulse energy (default ``None``)

      :pulse_energy_spread (float): Statistical spread of the pulse energy in unit Joule (default ``None``)

      :pulse_energy_variation_n (int): Number of samples within the specified range (default ``None``)

      :polarization (str): Type of polarization can be either *vertical*, *horizontal*, *unpolarized*, or *ignore* (default ``ignore``)

      .. note:: The keyword arguments ``pulse_energy_variation``, ``pulse_energy_spread``, and ``pulse_energy_variation_n`` are passed on to :meth:`condor.source.Source.set_pulse_energy_variation`.
    """
    def __init__(self, wavelength, focus_diameter, pulse_energy, profile_model=None, pulse_energy_variation=None, pulse_energy_spread=None, pulse_energy_variation_n=None, polarization="ignore"):
        self.photon = Photon(wavelength=wavelength)
        self.pulse_energy_mean = pulse_energy
        self.set_pulse_energy_variation(pulse_energy_variation, pulse_energy_spread, pulse_energy_variation_n)
        self.profile = Profile(model=profile_model, focus_diameter=focus_diameter)
        if polarization not in ["vertical", "horizontal", "unpolarized", "ignore"]:
            # Bug fix: the message had a %s placeholder but no format argument.
            log_and_raise_error(logger, "polarization = \"%s\" is an invalid input for initialization of Source instance." % polarization)
            return
        self.polarization = polarization
        log_debug(logger, "Source configured")

    def get_conf(self):
        """
        Get configuration in form of a dictionary. Another identically configured Source instance can be initialised by:

        .. code-block:: python

          conf = S0.get_conf()        # S0: already existing Source instance
          S1 = condor.Source(**conf)  # S1: new Source instance with the same configuration as S0
        """
        conf = {}
        conf["source"] = {}
        conf["source"]["wavelength"] = self.photon.get_wavelength()
        conf["source"]["focus_diameter"] = self.profile.focus_diameter
        conf["source"]["pulse_energy"] = self.pulse_energy_mean
        conf["source"]["profile_model"] = self.profile.get_model()
        pevar = self._pulse_energy_variation.get_conf()
        conf["source"]["pulse_energy_variation"] = pevar["mode"]
        conf["source"]["pulse_energy_spread"] = pevar["spread"]
        conf["source"]["pulse_energy_variation_n"] = pevar["n"]
        conf["source"]["polarization"] = self.polarization
        return conf

    def set_pulse_energy_variation(self, pulse_energy_variation=None, pulse_energy_spread=None, pulse_energy_variation_n=None):
        """
        Set variation of the pulse energy

        Kwargs:
          :pulse_energy_variation (str): ``'normal'`` (random Gaussian), ``'uniform'`` (random uniform), ``'range'`` (equispaced around the mean) or ``None`` for no variation (default ``None``)

          :pulse_energy_spread (float): Statistical spread of the pulse energy in unit Joule (default ``None``)

          :pulse_energy_variation_n (int): Number of samples within the specified range; only takes effect with ``pulse_energy_variation='range'`` (default ``None``)
        """
        self._pulse_energy_variation = Variation(pulse_energy_variation, pulse_energy_spread, pulse_energy_variation_n, number_of_dimensions=1)

    def get_intensity(self, position, unit="ph/m2", pulse_energy=None):
        """
        Calculate the intensity at a given position in the focus

        Args:
          :position: Coordinates [*x*, *y*, *z*] of the position where the intensity shall be calculated

        Kwargs:
          :unit (str): Intensity unit; one of ``'ph/m2'``, ``'J/m2'``, ``'J/um2'``, ``'mJ/um2'``, ``'ph/um2'`` (default ``'ph/m2'``)

          :pulse_energy (float): Pulse energy of that particular pulse in unit Joule. If ``None`` the mean of the pulse energy will be used (default ``None``)
        """
        # Assuming
        # 1) Radially symmetric profile that is invariant along the beam axis within the sample volume
        # 2) The variation of intensity are on much larger scale than the dimension of the particle size (i.e. flat wavefront)
        r = numpy.sqrt(position[1]**2 + position[2]**2)
        I = (self.profile.get_radial())(r) * (pulse_energy if pulse_energy is not None else self.pulse_energy_mean)
        if unit == "J/m2":
            pass
        elif unit == "ph/m2":
            I /= self.photon.get_energy()
        elif unit == "J/um2":
            I *= 1.E-12
        elif unit == "mJ/um2":
            I *= 1.E-9
        elif unit == "ph/um2":
            I /= self.photon.get_energy()
            I *= 1.E-12
        else:
            log_and_raise_error(logger, "%s is not a valid unit." % unit)
            return
        return I

    def get_next(self):
        """
        Iterate the parameters of the Source instance and return them as a dictionary
        """
        return {"pulse_energy": self._get_next_pulse_energy(),
                "wavelength": self.photon.get_wavelength(),
                "photon_energy": self.photon.get_energy(),
                "photon_energy_eV": self.photon.get_energy_eV()}

    def _get_next_pulse_energy(self):
        # Draw a pulse energy from the configured variation model.
        p = self._pulse_energy_variation.get(self.pulse_energy_mean)
        # Non-random
        if self._pulse_energy_variation._mode in [None, "range"]:
            if p <= 0:
                log_and_raise_error(logger, "Pulse energy smaller-equals zero. Change your configuration.")
            else:
                return p
        # Random
        else:
            if p <= 0.:
                log_warning(logger, "Pulse energy smaller-equals zero. Try again.")
                # Bug fix: the redrawn value was previously discarded (the
                # recursive call lacked a `return`, yielding None).
                return self._get_next_pulse_energy()
            else:
                return p
| FXIhub/condor | condor/source.py | Python | bsd-2-clause | 9,648 | [
"Gaussian"
] | 60cb2497dfdaa946caaff768a98eafeb5d53896c37b2bfe0aabeee3f68c28d9f |
#!/usr/bin/env python
"""Module to create, configure and dispose separate database instances for individual OrthoMCL runs."""
from ConfigParser import SafeConfigParser
import MySQLdb
import collections
from datetime import datetime
import os
import shutil
import logging as log
from shared import resource_filename
__author__ = "Tim te Beek"
__copyright__ = "Copyright 2011, Netherlands Bioinformatics Centre"
__license__ = "MIT"
Credentials = collections.namedtuple('Credentials', ['host', 'port', 'user', 'passwd'])
def _get_root_credentials():
    """Retrieve MySQL credentials from orthomcl.config to an account that is allowed to create new databases."""
    credentials_file = resource_filename(__name__, 'credentials/orthomcl.cfg')
    # Seed the real configuration from the bundled sample on first use.
    if not os.path.exists(credentials_file):
        shutil.copy(credentials_file + '.sample', credentials_file)
        log.info('Copied .sample file to %s', credentials_file)
    parser = SafeConfigParser()
    parser.read(credentials_file)
    host = parser.get('mysql', 'host')
    port = parser.getint('mysql', 'port')
    user = parser.get('mysql', 'user')
    passwd = parser.get('mysql', 'pass')
    # The placeholder password can be overridden via the environment.
    if passwd == 'pass' and 'mysql_password' in os.environ:
        passwd = os.environ['mysql_password']
    return Credentials(host, port, user, passwd)
def create_database():
    """Create a timestamped orthomcl_* database, grant rights to the orthomcl user and return its name."""
    # Unique database name based on the current date & time.
    dbname = 'orthomcl_{t.year}_{t.month}_{t.day}_at_{t.hour}_{t.minute}_{t.second}'.format(t=datetime.today())
    dbhost, port, user, passwd = _get_root_credentials()
    # Grants target the external hostname unless MySQL runs locally.
    clhost = dbhost if dbhost in ['127.0.0.1', 'localhost'] else 'odose.nl'
    connection = MySQLdb.connect(host=dbhost, port=port, user=user, passwd=passwd)
    cursor = connection.cursor()
    cursor.execute('CREATE DATABASE ' + dbname)
    cursor.execute('GRANT ALL on {0}.* TO orthomcl@\'{1}\' IDENTIFIED BY \'pass\';'.format(dbname, clhost))
    connection.commit()
    cursor.close()
    connection.close()
    log.info('Created database %s as %s on %s', dbname, user, dbhost)
    return dbname
def get_configuration_file(run_dir, dbname, evalue_exponent):
    """Return OrthoMCL configuration file for generated database and evalue_exponent.

    run_dir - directory in which the configuration file is written
    dbname - unique unused database name
    evalue_exponent - BLAST similarities with Expect value exponents greater than this value are ignored"""
    # Only host & port of the root credentials are needed; OrthoMCL itself connects as the orthomcl user.
    host, port = _get_root_credentials()[:2]
    config = """# OrthoMCL configuration file for generated database
dbVendor=mysql 
dbConnectString=dbi:mysql:{dbname}:{host}:{port}
dbLogin=orthomcl
dbPassword=pass
similarSequencesTable=SimilarSequences
orthologTable=Ortholog
inParalogTable=InParalog
coOrthologTable=CoOrtholog
interTaxonMatchView=InterTaxonMatch
percentMatchCutoff=50
evalueExponentCutoff={evalue_exponent}
oracleIndexTblSpc=NONE""".format(dbname=dbname, host=host, port=port, evalue_exponent=evalue_exponent)

    # Write to file & return file
    config_file = os.path.join(run_dir, '{0}.cfg'.format(dbname))
    with open(config_file, mode='w') as write_handle:
        write_handle.write(config)
    return config_file
def delete_database(dbname):
    """Delete database after running OrthoMCL analysis."""
    host, port, user, passwd = _get_root_credentials()
    connection = MySQLdb.connect(host=host, port=port, user=user, passwd=passwd)
    cursor = connection.cursor()
    cursor.execute('DROP DATABASE ' + dbname)
    connection.commit()
    cursor.close()
    connection.close()
    log.info('Deleted database %s as %s from %s', dbname, user, host)
| ODoSE/odose.nl | orthomcl_database.py | Python | mit | 3,880 | [
"BLAST"
] | d4a2b092646dbfca02bfb56115d18154423f92610c3eea5603996c6bf6c7a9f0 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBiocgenerics(RPackage):
    """S4 generic functions used in Bioconductor.

    The package defines S4 generic functions used in Bioconductor."""

    # Upstream metadata; sources are fetched from the Bioconductor git mirror.
    homepage = "https://bioconductor.org/packages/BiocGenerics"
    git      = "https://git.bioconductor.org/packages/BiocGenerics.git"

    # NOTE(review): 0.34.0 pins an abbreviated commit hash, unlike the full
    # 40-character SHAs below -- confirm against the Bioconductor repository.
    version('0.34.0', commit='f7c2020')
    version('0.30.0', commit='fc7c3af4a5635a30988a062ed09332c13ca1d1a8')
    version('0.28.0', commit='041fc496504f2ab1d4d863fffb23372db214394b')
    version('0.26.0', commit='5b2a6df639e48c3cd53789e0b174aec9dda6b67d')
    version('0.24.0', commit='3db111e8c1f876267da89f4f0c5406a9d5c31cd1')
    version('0.22.1', commit='9c90bb8926885289d596a81ff318ee3745cbb6ad')

    # Bioconductor 0.30.0+ requires a matching minimum R release.
    depends_on('r@3.6.0:', when='@0.30.0:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-biocgenerics/package.py | Python | lgpl-2.1 | 986 | [
"Bioconductor"
] | d17ae77c0cd5aa76a8e96a012792aa1c084607f17faa9fbd8b34dba7d943a256 |
import logging
import os
import re
from tweakmark.formatters.base import FormatterBase
logger = logging.getLogger(__name__)
class TerminalFormatterConfig:
    """Configuration holder for terminal formatting.

    Previously the ``style`` and ``strict`` arguments were accepted but
    silently discarded; they are now stored so callers can inspect them.
    ``width`` keeps its original behavior (``None`` means "decide later").
    """

    def __init__(self, style=None, strict=True, width=None):
        # Fall back to an empty style mapping so attribute access is safe.
        self.style = style or {}
        self.strict = strict
        # Target output width in columns.
        self.width = width
class FormattingState():
    """Mutable state threaded through a single formatting pass.

    Output is collected at three granularities: tokens -> lines -> blocks
    (a block is a group of lines that belong together; blocks are joined
    by blank lines in the final output). Also tracks indentation stacks,
    ANSI formatting codes and a cursor position.
    """

    def __init__(self, formatter, width=80, padding_right=2):
        self.formatter = formatter
        self.width = width
        # Usable text width once the right padding is subtracted.
        self.max_text_width = width - padding_right
        self.text_width = width - padding_right
        self._indent_list = []
        self._section_indent_list = []
        self._special_indent_list = []
        self._cursor = 0
        self._formatting_stack = []
        self._blocks = []  # A block contains lines that belong to each other
        self._block_parts = []
        self.tokens = []
        self.staging = None  # Value for handlers to play with
        self._list_key_lengths = []
        self.token_split_pattern = re.compile(r' +')
        self.formatting = ''
        self.formatting_end = '\x1b[0m'

    # block
    # ----------------------------------------------------------------------------------------------
    def stash_block(self, block):
        """Append a fully rendered block."""
        self._blocks.append(block)

    # line
    # ----------------------------------------------------------------------------------------------
    def stash_line(self, line):
        """Buffer a rendered line for the block under construction."""
        self._block_parts.append(line)

    def stash_line_list(self, parts):
        """Buffer several rendered lines at once."""
        self._block_parts.extend(parts)

    def submit_lines(self):
        """Join the buffered lines into one block and reset the line buffer."""
        self._blocks.append('\n'.join(self._block_parts))
        self._block_parts = []

    # line part
    # ----------------------------------------------------------------------------------------------
    def stash_token(self, line_part):
        """Buffer a token for the line under construction."""
        self.tokens.append(line_part)

    def stash_token_list(self, line_part_list):
        """Buffer several tokens at once."""
        self.tokens.extend(line_part_list)

    def submit_tokens(self):
        """Concatenate buffered tokens behind the current indent into a line."""
        indent_string = self.indent_string
        self.stash_line(indent_string + ''.join(self.tokens))
        self.tokens = []

    # staging
    # ----------------------------------------------------------------------------------------------
    def clear_staging(self):
        self.staging = None

    def push_ansi_code(self, format):
        """Activate an additional ANSI SGR code."""
        self._formatting_stack.append(format)
        self.compile_formatting()

    def pop_ansi_code(self):
        """Deactivate the most recently pushed ANSI SGR code."""
        self._formatting_stack.pop()
        self.compile_formatting()

    def compile_formatting(self):
        # Join the non-empty codes once; an empty stack produces no escape code.
        joined = ';'.join(f for f in self._formatting_stack if f != '')
        self.formatting = '\x1b[{}m'.format(joined) if joined else ''

    def push_indent_string(self, string):
        """Add a per-item indent, shrinking the available text width."""
        self._indent_list.append(string)
        self.text_width -= len(string)

    def pop_indent_string(self):
        """Remove the innermost per-item indent, restoring text width."""
        string = self._indent_list.pop()
        self.text_width += len(string)

    def add_special_indent(self, string):
        """Queue a one-shot indent consumed by the next indent_string access."""
        self._special_indent_list.append(string)

    def _remove_special_indent(self, index):
        return self._special_indent_list.pop(index)

    def advance_cursor(self, number):
        self._cursor += number

    def set_cursor_relative(self, number):
        self._cursor = number

    def set_cursor_absolute(self, number):
        self._cursor = len(self.indent_string) + number

    def push_section_indent_string(self, string):
        """Set the section-level indent; width is relative to max_text_width."""
        self._section_indent_list.append(string)
        self.text_width = self.max_text_width - len(string)

    def pop_section_indent_string(self):
        """Drop the innermost section indent and recompute the text width."""
        self._section_indent_list.pop()
        remaining = self._section_indent_list[-1] if self._section_indent_list else ''
        # Bug fix: the width must be recomputed relative to max_text_width;
        # previously it was set to len(remaining), collapsing the usable width.
        self.text_width = self.max_text_width - len(remaining)

    @property
    def section_indent(self):
        if self._section_indent_list:
            return self._section_indent_list[-1]
        else:
            return ''

    # list
    # ----------------------------------------------------------------------------------------------
    def add_key_length(self, level, key_length):
        # TODO(review): this looks unfinished -- the condition appears
        # inverted (a new array is created when the level already exists)
        # and `key_length` is never stored. Behavior kept as-is.
        # arr is the last array for the current level
        if level < len(self.list_key_lengths):
            arr = []
            self._list_key_lengths.append(arr)
        else:
            arr = self._list_key_lengths[level][-1]

    @property
    def list_key_lengths(self):
        return self._list_key_lengths

    @property
    def blocks(self):
        return self._blocks

    @property
    def cursor(self):
        return self._cursor

    @property
    def indent_string(self):
        # NOTE: reading this property consumes one queued special indent.
        if self._special_indent_list:
            return self._remove_special_indent(0)
        elif self._section_indent_list:
            return self.section_indent + ''.join(self._indent_list)
        else:
            return ''.join(self._indent_list)

    @property
    def formatted(self):
        """The full output: all blocks joined by blank lines."""
        return '\n\n'.join(self.block_list)

    @property
    def block_list(self):
        return self._blocks
def ctermid_os():
    """Return the (lines, columns) size of the controlling terminal.

    Falls back to a conservative 24x80 whenever there is no controlling
    terminal or its size cannot be queried.
    """
    try:
        fd = os.open(os.ctermid(), os.O_RDONLY)
        try:
            size = os.get_terminal_size(fd)
        except OSError:
            return 24, 80
        else:
            return size.lines, size.columns
        finally:
            os.close(fd)
    except Exception:
        return 24, 80
class TerminalFormatter(FormatterBase):
    """Formatter that renders a document for display in a terminal."""

    def __init__(self, *args, width=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Fall back to the detected terminal width when none is given.
        self.width = width or ctermid_os()[1]

    def format(self, document):
        """Render the whole document as one blank-line-separated string."""
        return '\n\n'.join(self.format_as_list(document))

    def format_as_list(self, document):
        """Render the document and return the formatted blocks as a list."""
        state = FormattingState(self, width=self.width)
        # The root node itself carries no content; visit its children only.
        for node in document.root.children:
            node.visit(self, state)
        return state.block_list
| delins/tweakmark | tweakmark/formatters/terminal_formatter/formatter.py | Python | lgpl-3.0 | 5,894 | [
"VisIt"
] | c0799b8847b86f2dad139887dc7e7b48c5aa00e38178ad3253ff98d7edcc822d |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut
import espressomd
import numpy as np
from espressomd.magnetostatics import *
from tests_common import *
class MagnetostaticsInteractionsTests(ut.TestCase):
    """Checks magnetostatic interaction setters/getters via generated tests."""

    # Handle to espresso system
    system = espressomd.System()

    def setUp(self):
        """Create a small box with two dipolar particles to attach interactions to."""
        self.system.box_l = 10, 10, 10
        if not self.system.part.exists(0):
            self.system.part.add(id=0, pos=(0.1, 0.1, 0.1), dip=(1.3, 2.1, -6))
        if not self.system.part.exists(1):
            self.system.part.add(id=1, pos=(0, 0, 0), dip=(7.3, 6.1, -4))

    # Generated test methods are only attached when the matching feature
    # was compiled into the espressomd build.
    if "DP3M" in espressomd.features():
        test_DP3M = generate_test_for_class(DipolarP3M, dict(prefactor=1.0,
                                                             epsilon=0.0,
                                                             inter=1000,
                                                             mesh_off=[0.5, 0.5, 0.5],
                                                             r_cut=2.4,
                                                             mesh=[8, 8, 8],
                                                             cao=1,
                                                             alpha=12,
                                                             accuracy=0.01))

    if "DIPOLAR_DIRECT_SUM" in espressomd.features():
        test_DdsCpu = generate_test_for_class(
            DipolarDirectSumCpu, dict(prefactor=3.4))
        test_DdsRCpu = generate_test_for_class(
            DipolarDirectSumWithReplicaCpu, dict(prefactor=3.4, n_replica=2))
if __name__ == "__main__":
    # Show which compile-time features are available before running the suite.
    print("Features: ", espressomd.features())
    ut.main()
| tbereau/espresso | testsuite/python/magnetostaticInteractions.py | Python | gpl-3.0 | 2,692 | [
"ESPResSo"
] | f25c80d97e8c7f3b2156cf82f27c7fb19f9cb754c093d999c6ee7cfe1b4e5393 |
# Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" Custom script for Thunderbird 3."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.orca as orca
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.input_event as input_event
import orca.scripts.default as default
import orca.settings_manager as settings_manager
import orca.orca_state as orca_state
import orca.speech as speech
import orca.scripts.toolkits.Gecko as Gecko
from orca.orca_i18n import _
from .formatting import Formatting
from .speech_generator import SpeechGenerator
from .spellcheck import SpellCheck
from .script_utilities import Utilities
_settingsManager = settings_manager.getManager()
########################################################################
# #
# The Thunderbird script class. #
# #
########################################################################
class Script(Gecko.Script):
    """The script for Thunderbird."""

    def __init__(self, app):
        """Creates a new script for the given application.

        Arguments:
        - app: the application to create a script for.
        """

        # Store the last autocompleted string for the address fields
        # so that we're not too 'chatty'. See bug #533042.
        #
        self._lastAutoComplete = ""

        # Use an identity test, not equality, when probing for an unset
        # preference (fixes the non-idiomatic '== None' comparison).
        if _settingsManager.getSetting('sayAllOnLoad') is None:
            _settingsManager.setSetting('sayAllOnLoad', False)

        Gecko.Script.__init__(self, app)

    def setupInputEventHandlers(self):
        """Defines the input event handlers, replacing the presentation-mode
        toggle so it can be suppressed in editable messages."""

        Gecko.Script.setupInputEventHandlers(self)
        self.inputEventHandlers["togglePresentationModeHandler"] = \
            input_event.InputEventHandler(
                Script.togglePresentationMode,
                cmdnames.TOGGLE_PRESENTATION_MODE)

    def getFormatting(self):
        """Returns the formatting strings for this script."""
        return Formatting(self)

    def getSpeechGenerator(self):
        """Returns the speech generator for this script."""
        return SpeechGenerator(self)

    def getSpellCheck(self):
        """Returns the spellcheck support for this script."""
        return SpellCheck(self)

    def getUtilities(self):
        """Returns the utilites for this script."""
        return Utilities(self)

    def getAppPreferencesGUI(self):
        """Return a GtkGrid containing the application unique configuration
        GUI items for the current application."""

        grid = Gecko.Script.getAppPreferencesGUI(self)

        self.sayAllOnLoadCheckButton.set_active(
            _settingsManager.getSetting('sayAllOnLoad'))

        # Append the spellcheck preferences below the Gecko ones.
        spellcheck = self.spellcheck.getAppPreferencesGUI()
        grid.attach(spellcheck, 0, len(grid.get_children()), 1, 1)
        grid.show_all()

        return grid

    def getPreferencesFromGUI(self):
        """Returns a dictionary with the app-specific preferences."""

        prefs = Gecko.Script.getPreferencesFromGUI(self)
        prefs['sayAllOnLoad'] = self.sayAllOnLoadCheckButton.get_active()
        prefs.update(self.spellcheck.getPreferencesFromGUI())

        return prefs

    def doWhereAmI(self, inputEvent, basicOnly):
        """Performs the whereAmI operation.

        When the spellcheck dialog is active, present the error details
        instead of the generic where-am-I information.
        """

        if self.spellcheck.isActive():
            self.spellcheck.presentErrorDetails(not basicOnly)
            return

        Gecko.Script.doWhereAmI(self, inputEvent, basicOnly)

    def _useFocusMode(self, obj):
        """Force focus mode while composing a message; defer to Gecko
        otherwise."""
        if self.isEditableMessage(obj):
            return True

        return Gecko.Script._useFocusMode(self, obj)

    def togglePresentationMode(self, inputEvent):
        """Toggles browse/focus mode, except inside an editable message where
        leaving focus mode would break composition."""
        if self._inFocusMode and self.isEditableMessage(orca_state.locusOfFocus):
            return

        Gecko.Script.togglePresentationMode(self, inputEvent)

    def useStructuralNavigationModel(self):
        """Returns True if structural navigation should be enabled here."""

        if self.isEditableMessage(orca_state.locusOfFocus):
            return False

        return Gecko.Script.useStructuralNavigationModel(self)

    def onFocusedChanged(self, event):
        """Callback for object:state-changed:focused accessibility events."""

        if not event.detail1:
            return

        # A focus change invalidates any autocomplete state we were tracking.
        self._lastAutoComplete = ""
        self.pointOfReference['lastAutoComplete'] = None

        obj = event.source
        if self.spellcheck.isAutoFocusEvent(event):
            orca.setLocusOfFocus(event, event.source, False)
            self.updateBraille(orca_state.locusOfFocus)

        if self.spellcheck.isSuggestionsItem(event.source) \
           and self.spellcheck.isSuggestionsItem(orca_state.locusOfFocus):
            orca.setLocusOfFocus(event, event.source, False)
            self.updateBraille(orca_state.locusOfFocus)
            self.spellcheck.presentSuggestionListItem()
            return

        if not self.inDocumentContent(obj):
            default.Script.onFocusedChanged(self, event)
            return

        if self.isEditableMessage(obj):
            default.Script.onFocusedChanged(self, event)
            return

        role = obj.getRole()
        if role != pyatspi.ROLE_DOCUMENT_FRAME:
            Gecko.Script.onFocusedChanged(self, event)
            return

        # Only claim focus for the document frame if we have no caret
        # context yet (i.e. a freshly opened message).
        contextObj, contextOffset = self.getCaretContext()
        if contextObj:
            return

        orca.setLocusOfFocus(event, obj, notifyScript=False)

    def onBusyChanged(self, event):
        """Callback for object:state-changed:busy accessibility events."""

        obj = event.source
        if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME and not event.detail1:
            if self.inDocumentContent():
                self.speakMessage(obj.name)
                self._presentMessage(obj)

    def onCaretMoved(self, event):
        """Callback for object:text-caret-moved accessibility events."""

        if self.isEditableMessage(event.source):
            if event.detail1 == -1:
                return
            self.spellcheck.setDocumentPosition(event.source, event.detail1)

        Gecko.Script.onCaretMoved(self, event)

    def onChildrenChanged(self, event):
        """Callback for object:children-changed accessibility events."""

        # Bypass the Gecko handler; the default one is sufficient here.
        default.Script.onChildrenChanged(self, event)

    def onSelectionChanged(self, event):
        """Callback for object:state-changed:showing accessibility events."""

        # We present changes when the list has focus via focus-changed events.
        if event.source == self.spellcheck.getSuggestionsList():
            return

        parent = event.source.parent
        if parent and parent.getRole() == pyatspi.ROLE_COMBO_BOX \
           and not parent.getState().contains(pyatspi.STATE_FOCUSED):
            return

        Gecko.Script.onSelectionChanged(self, event)

    def onSensitiveChanged(self, event):
        """Callback for object:state-changed:sensitive accessibility events."""

        if event.source == self.spellcheck.getChangeToEntry() \
           and self.spellcheck.presentCompletionMessage():
            return

        Gecko.Script.onSensitiveChanged(self, event)

    def onShowingChanged(self, event):
        """Callback for object:state-changed:showing accessibility events."""

        # TODO - JD: Once there are separate scripts for the Gecko toolkit
        # and the Firefox browser, this method can be deleted. It's here
        # right now just to prevent the Gecko script from presenting non-
        # existent browsery autocompletes for Thunderbird.

        default.Script.onShowingChanged(self, event)

    def onTextDeleted(self, event):
        """Called whenever text is deleted from an object.

        Arguments:
        - event: the Event
        """

        obj = event.source
        parent = obj.parent

        try:
            role = event.source.getRole()
            parentRole = parent.getRole()
        except:
            return

        # Ignore status-bar label churn; it is not user-relevant.
        if role == pyatspi.ROLE_LABEL and parentRole == pyatspi.ROLE_STATUS_BAR:
            return

        Gecko.Script.onTextDeleted(self, event)

    def onTextInserted(self, event):
        """Callback for object:text-changed:insert accessibility events."""

        obj = event.source
        try:
            role = obj.getRole()
            parentRole = obj.parent.getRole()
        except:
            return

        if role == pyatspi.ROLE_LABEL and parentRole == pyatspi.ROLE_STATUS_BAR:
            return

        if len(event.any_data) > 1 and obj == self.spellcheck.getChangeToEntry():
            return

        isSystemEvent = event.type.endswith("system")

        # Try to stop unwanted chatter when a message is being replied to.
        # See bgo#618484.
        if isSystemEvent and self.isEditableMessage(obj):
            return

        # Speak the autocompleted text, but only if it is different
        # address so that we're not too "chatty." See bug #533042.
        if parentRole == pyatspi.ROLE_AUTOCOMPLETE:
            if len(event.any_data) == 1:
                default.Script.onTextInserted(self, event)
                return

            if self._lastAutoComplete and self._lastAutoComplete in event.any_data:
                return

            # Mozilla cannot seem to get their ":system" suffix right
            # to save their lives, so we'll add yet another sad hack.
            try:
                text = event.source.queryText()
            except:
                hasSelection = False
            else:
                hasSelection = text.getNSelections() > 0

            if hasSelection or isSystemEvent:
                speech.speak(event.any_data)
                self._lastAutoComplete = event.any_data
                self.pointOfReference['lastAutoComplete'] = hash(obj)
                return

        Gecko.Script.onTextInserted(self, event)

    def onTextSelectionChanged(self, event):
        """Callback for object:text-selection-changed accessibility events."""

        obj = event.source
        spellCheckEntry = self.spellcheck.getChangeToEntry()
        if obj == spellCheckEntry:
            return

        if self.isEditableMessage(obj) and self.spellcheck.isActive():
            text = obj.queryText()
            selStart, selEnd = text.getSelection(0)
            self.spellcheck.setDocumentPosition(obj, selStart)
            self.spellcheck.presentErrorDetails()
            return

        default.Script.onTextSelectionChanged(self, event)

    def onNameChanged(self, event):
        """Callback for object:property-change:accessible-name events."""

        if event.source.name == self.spellcheck.getMisspelledWord():
            self.spellcheck.presentErrorDetails()
            return

        obj = event.source

        # If the user has just deleted an open mail message, then we want to
        # try to speak the new name of the open mail message frame and also
        # present the first line of that message to be consistent with what
        # we do when a new message window is opened. See bug #540039 for more
        # details.
        #
        rolesList = [pyatspi.ROLE_DOCUMENT_FRAME,
                     pyatspi.ROLE_INTERNAL_FRAME,
                     pyatspi.ROLE_FRAME,
                     pyatspi.ROLE_APPLICATION]
        if self.utilities.hasMatchingHierarchy(event.source, rolesList):
            lastKey, mods = self.utilities.lastKeyAndModifiers()
            if lastKey == "Delete":
                speech.speak(obj.name)
                [obj, offset] = self.findFirstCaretContext(obj, 0)
                self.setCaretPosition(obj, offset)
                return

    def _presentMessage(self, documentFrame):
        """Presents the first line of the message, or the entire message,
        depending on the user's sayAllOnLoad setting."""

        [obj, offset] = self.findFirstCaretContext(documentFrame, 0)
        self.setCaretPosition(obj, offset)
        self.updateBraille(obj)

        if not _settingsManager.getSetting('sayAllOnLoad'):
            self.presentLine(obj, offset)
        elif _settingsManager.getSetting('enableSpeech'):
            self.sayAll(None)

    def sayCharacter(self, obj):
        """Speaks the character at the current caret position."""

        # Defer to the default script at the end of an editable message so
        # the trailing character is presented correctly.
        if self.isEditableMessage(obj):
            text = self.utilities.queryNonEmptyText(obj)
            if text and text.caretOffset + 1 >= text.characterCount:
                default.Script.sayCharacter(self, obj)
                return

        Gecko.Script.sayCharacter(self, obj)

    def getBottomOfFile(self):
        """Returns the object and last caret offset at the bottom of the
        document frame. Overridden here to handle editable messages.
        """

        # Pylint thinks that obj is an instance of a list. It most
        # certainly is not. Silly pylint.
        #
        # pylint: disable-msg=E1103
        #
        [obj, offset] = Gecko.Script.getBottomOfFile(self)
        if obj and obj.getState().contains(pyatspi.STATE_EDITABLE):
            offset += 1

        return [obj, offset]

    def toggleFlatReviewMode(self, inputEvent=None):
        """Toggles between flat review mode and focus tracking mode."""

        # If we're leaving flat review dump the cache. See bug 568658.
        #
        if self.flatReviewContext:
            pyatspi.clearCache()

        return default.Script.toggleFlatReviewMode(self, inputEvent)

    def isNonHTMLEntry(self, obj):
        """Checks for ROLE_ENTRY areas that are not part of an HTML
        document. See bug #607414.

        Returns True is this is something like the Subject: entry
        """

        result = obj and obj.getRole() == pyatspi.ROLE_ENTRY \
                 and not self.utilities.ancestorWithRole(
                     obj, [pyatspi.ROLE_DOCUMENT_FRAME], [pyatspi.ROLE_FRAME])
        return result

    def isEditableMessage(self, obj):
        """Returns True if this is a editable message."""

        if not obj:
            return False

        if not obj.getState().contains(pyatspi.STATE_EDITABLE):
            return False

        if self.isNonHTMLEntry(obj):
            return False

        return True

    def onWindowActivated(self, event):
        """Callback for window:activate accessibility events."""

        Gecko.Script.onWindowActivated(self, event)
        if not self.spellcheck.isCheckWindow(event.source):
            return

        # Entering the spellcheck dialog: present the current error and move
        # focus to the "change to" entry.
        self.spellcheck.presentErrorDetails()
        orca.setLocusOfFocus(None, self.spellcheck.getChangeToEntry(), False)
        self.updateBraille(orca_state.locusOfFocus)

    def onWindowDeactivated(self, event):
        """Callback for window:deactivate accessibility events."""

        Gecko.Script.onWindowDeactivated(self, event)
        self.spellcheck.deactivate()
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/scripts/apps/Thunderbird/script.py | Python | gpl-3.0 | 15,728 | [
"ORCA"
] | f51be77b61f8dc8d1a99d44cb1302fe453f37b3a3f7376ca7e67ab1c1de742e3 |
import os
import _iso
import numpy as np
from mpl_toolkits.basemap import Basemap, shiftgrid
from matplotlib.mlab import griddata
import matplotlib.colors as colors
from scipy.signal import medfilt2d
import netCDF4
import pyroms
import pyroms_toolbox
from bathy_smoother import *
# Grid dimension (interior rho points: Lm in xi, Mm in eta)
Lm = 140
Mm = 120

# Corner points (lon, lat) of the model domain.
lon0=117.5 ; lat0 = 41.
lon1=117.5 ; lat1 = 34.5
lon2 = 127. ; lat2 = 34.5
lon3 = 127. ; lat3 = 41.

# Lambert conformal projection centered on the Yellow Sea.
# NOTE(review): 'map' shadows the builtin; kept because all later calls use it.
map = Basemap(projection='lcc', lat_0=35., lat_1=30., lat_2=40, lon_0 =123, \
              width=2000000, height=2000000, resolution='i')

lonp = np.array([lon0, lon1, lon2, lon3])
latp = np.array([lat0, lat1, lat2, lat3])
beta = np.array([1, 1, 1, 1])

#generate the new grid
# Do this if you aren't going to move the grid corners interactively.
hgrd = pyroms.grid.Gridgen(lonp, latp, beta, (Mm+3, Lm+3), proj=map)

# Do this if you are going to use the Boundary Interactor
#map.drawcoastlines()
#xp, yp = map(lonp, latp)
#bry = pyroms.hgrid.BoundaryInteractor(xp, yp, beta, shp=(Mm+3,Lm+3), proj=map)
#hgrd=bry.grd

# Convert projected vertices back to lon/lat and build the curvilinear grid.
lonv, latv = map(hgrd.x_vert, hgrd.y_vert, inverse=True)
hgrd = pyroms.grid.CGrid_geo(lonv, latv, map)

# generate the mask
#for verts in map.coastsegs:
#    hgrd.mask_polygon(verts)
# alternate version from johan.navarro.padron
for xx,yy in map.coastpolygons:
    xa = np.array(xx, np.float32)
    ya = np.array(yy,np.float32)
    vv = np.zeros((xa.shape[0],2))
    vv[:, 0] = xa
    vv[:, 1] = ya
    hgrd.mask_polygon(vv,mask_value=0)

# Edit the land mask interactively.
#pyroms.grid.edit_mask_mesh(hgrd, proj=map)
#edit_mask_mesh_ij is a faster version using imshow... but no map projection.
coast = pyroms.utility.get_coast_from_map(map)
pyroms.grid.edit_mask_mesh_ij(hgrd, coast=coast)

#### Use the following to interpolate from etopo2 bathymetry.
# generate the bathy
# read in topo data (on a regular lat/lon grid)
# this topo come with basemap so you should have it on your laptop.
# just update datadir with the appropriate path
# you can get this data from matplolib svn with
# svn co https://matplotlib.svn.sourceforge.net/svnroot/matplotlib/trunk/htdocs/screenshots/data/"
datadir = 'data/'
topo = np.loadtxt(os.path.join(datadir, 'etopo20data.gz'))
lons = np.loadtxt(os.path.join(datadir, 'etopo20lons.gz'))
lats = np.loadtxt(os.path.join(datadir, 'etopo20lats.gz'))

# depth positive
topo = -topo

# fix minimum depth
hmin = 5
topo = pyroms_toolbox.change(topo, '<', hmin, hmin)

# interpolate new bathymetry
lon, lat = np.meshgrid(lons, lats)
h = griddata(lon.flat,lat.flat,topo.flat,hgrd.lon_rho,hgrd.lat_rho)

# insure that depth is always deeper than hmin
h = pyroms_toolbox.change(h, '<', hmin, hmin)

# set depth to hmin where masked
idx = np.where(hgrd.mask_rho == 0)
h[idx] = hmin

# save raw bathymetry
hraw = h.copy()

# check bathymetry roughness
RoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)
print 'Max Roughness value is: ', RoughMat.max()

# smooth the raw bathy using the direct iterative method from Martinho and Batteen (2006)
rx0_max = 0.35
h = bathy_smoothing.smoothing_Positive_rx0(hgrd.mask_rho, h, rx0_max)

# check bathymetry roughness again
RoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)
print 'Max Roughness value is: ', RoughMat.max()

# vertical coordinate (s-coordinate, Shchepetkin 2005 style stretching)
theta_b = 2
theta_s = 7.0
Tcline = 50
N = 30
vgrd = pyroms.vgrid.s_coordinate_4(h, theta_b, theta_s, Tcline, N, hraw=hraw)

# ROMS grid
grd_name = 'YELLOW'
grd = pyroms.grid.ROMS_Grid(grd_name, hgrd, vgrd)

# write grid to netcdf file
pyroms.grid.write_ROMS_grid(grd, filename='YELLOW_grd_v1.nc')
| kshedstrom/pyroms | examples/Yellow_Sea/make_YELLOW_grd_v1.py | Python | bsd-3-clause | 3,559 | [
"NetCDF"
] | 43688a711a1a84a94dcafb67dd1680b4bc2129c688652477d2615adcb2a79b92 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import re
def getOptionFilenames(options, base, extensions=None):
    """
    Return a list of filenames with correct absolute path.

    Inputs:
        options: The command-line options from argparse
        base: The base filenames to begin form (i.e., 'exodus')
        extensions: (Optional) Regular-expression pattern(s) used to pull
                    additional filenames from the 'arguments' command-line
                    option; a single pattern or a list of patterns.
    """
    # Avoid the shared-mutable-default pitfall and accept a single pattern.
    if extensions is None:
        extensions = []
    elif not isinstance(extensions, list):
        extensions = [extensions]

    # Complete list of files to open. Note: this appends to (mutates) the
    # list stored on the options object, preserving the original behavior.
    filenames = getattr(options, base)
    if extensions:
        for arg in options.arguments:
            for e in extensions:
                if re.match(e, arg):
                    filenames.append(arg)

    # Make all paths absolute, resolving relative names against start_dir.
    for i, fname in enumerate(filenames):
        if not os.path.isabs(fname):
            filenames[i] = os.path.abspath(os.path.join(options.start_dir, fname))

    return filenames
| nuclear-wizard/moose | python/peacock/utils/__init__.py | Python | lgpl-2.1 | 1,231 | [
"MOOSE"
] | 7aa1b1a03dec818eae6ceba5abd2ae7016868dab0e28166da5da8924b5dd3399 |
import os, re
from copy import copy
from difflib import unified_diff
from sqlalchemy import *
from sqlalchemy.orm import *
from model import *
from cStringIO import StringIO
from cgi import FieldStorage
from tw import framework
framework.default_view = 'mako'
#try:
import xml.etree.ElementTree as etree
#except ImportError:
# import cElementTree as etree
# Module-level handles; populated lazily by setup_database().
session = None
engine = None
connect = None

# Expected User mapper columns, pre-sorted for comparison in the test suite.
sorted_user_columns = ['_password', 'created', 'display_name', 'email_address',
                       'groups', 'password', 'sprox_id', 'town',
                       'user_name']
def remove_whitespace_nodes(node):
    """Return a copy of *node* in which whitespace-only ``text`` and ``tail``
    strings are replaced by the empty string, applied recursively to all
    descendant elements. The input tree is left unmodified."""
    # Rebuild the element rather than shallow-copying and poking the private
    # ``_children`` list: that hack breaks on the C-accelerated ElementTree,
    # and ``getchildren()`` was removed in Python 3.9.
    new_node = etree.Element(node.tag, dict(node.attrib))
    new_node.text = node.text
    new_node.tail = node.tail
    if new_node.text and new_node.text.strip() == '':
        new_node.text = ''
    if new_node.tail and new_node.tail.strip() == '':
        new_node.tail = ''
    for child in list(node):
        if child is not None:
            new_node.append(remove_whitespace_nodes(child))
    return new_node
def remove_namespace(doc):
    """Remove namespace in the passed document in place.

    Every tag of the form ``{uri}local`` is rewritten to plain ``local``.
    """
    # Element.getiterator() was removed in Python 3.9; iter() is the
    # portable spelling (available since Python 2.7).
    for elem in doc.iter():
        match = re.match(r'(\{.*\})(.*)', elem.tag)
        if match:
            elem.tag = match.group(2)
def replace_escape_chars(needle):
    """Normalize escape characters in *needle* before XML parsing."""
    # NOTE(review): the first argument to replace() appears to have been a
    # non-breaking space (or an entity) that was mangled into a plain space
    # in transit; as written this call is a no-op. Verify against the
    # original source before changing.
    needle = needle.replace(' ', ' ')
    return needle
def fix_xml(needle):
    """Normalize an XML snippet for comparison: collapse escape characters,
    strip whitespace-only text nodes, drop namespaces, then serialize back
    to a string."""
    cleaned = replace_escape_chars(needle)
    node = remove_whitespace_nodes(etree.fromstring(cleaned))
    remove_namespace(node)
    return etree.tostring(node)
def in_xml(needle, haystack):
    """Return True when the normalized *needle* XML occurs within the
    normalized *haystack* XML."""
    fixed_needle = fix_xml(needle)
    fixed_haystack = fix_xml(haystack)
    return fixed_needle in fixed_haystack
def eq_xml(needle, haystack):
    """Return True when *needle* and *haystack* are equal after XML
    normalization."""
    fixed_needle = fix_xml(needle)
    fixed_haystack = fix_xml(haystack)
    return fixed_needle == fixed_haystack
def assert_in_xml(needle, haystack):
    """Assert that the normalized *needle* XML occurs in *haystack*."""
    message = "%s not found in %s" % (needle, haystack)
    assert in_xml(needle, haystack), message
def assert_eq_xml(needle, haystack):
    """Assert that *needle* and *haystack* are equal after XML
    normalization."""
    message = "%s does not equal %s" % (needle, haystack)
    assert eq_xml(needle, haystack), message
# Guard flag: the engine/session are created only once per process.
database_setup=False
def setup_database():
    """Create (on first call) and return the SQLAlchemy session, engine and
    metadata. Subsequent calls return the already-created objects."""
    global session, engine, database_setup, connect, metadata
    #singletonizes things
    if not database_setup:
        # DBURL env var selects the backend; defaults to in-memory SQLite.
        engine = create_engine(os.environ.get('DBURL', 'sqlite://'), strategy="threadlocal")
        connect = engine.connect()
        # print 'testing on', engine
        metadata.bind = engine
        # Start every run from a freshly created schema.
        metadata.drop_all()
        metadata.create_all()
        Session = sessionmaker(bind=engine, autoflush=True, autocommit=False)
        session = Session()
        database_setup = True
    return session, engine, metadata
# Placeholder mirroring database_setup; not currently consulted.
records_setup = None
def setup_records(session):
    """Populate the test database with one user ('asdf'), four towns and
    five groups; the user lives in Arvada and joins every group.
    Returns the created User (Python 2 module: uses ``unicode``)."""
    session.expunge_all()
    user = User()
    user.user_name = u'asdf'
    user.email_address = u"asdf@asdf.com"
    user.password = u"asdf"
    session.add(user)
    arvada = Town(name=u'Arvada')
    session.add(arvada)
    session.flush()
    user.town = arvada
    session.add(Town(name=u'Denver'))
    session.add(Town(name=u'Golden'))
    session.add(Town(name=u'Boulder'))
    #test_table.insert(values=dict(BLOB=FieldStorage('asdf', StringIO()).value)).execute()
    #user_reference_table.insert(values=dict(user_id=user.user_id)).execute()
    # print user.user_id
    for i in range (5):
        group = Group(group_name=unicode(i))
        session.add(group)
        user.groups.append(group)
    session.flush()
    return user
def teardown_database():
    """Module-level tear-down hook; schema dropping is intentionally
    disabled so the singleton engine can be reused across tests."""
    # metadata.drop_all()
    pass
def _reassign_from_metadata():
    """Rebind the module-level table handles to the (re)reflected tables in
    ``metadata``; used after ``metadata.reflect()`` replaces the objects."""
    global visits_table, visit_identity_table, groups_table, town_table
    global users_table, permissions_table, user_group_table
    global group_permission_table, test_table

    visits_table = metadata.tables['visit']
    visit_identity_table = metadata.tables['visit_identity']
    groups_table = metadata.tables['tg_group']
    town_table = metadata.tables['town_table']
    users_table = metadata.tables['tg_user']
    permissions_table = metadata.tables['permission']
    user_group_table = metadata.tables['user_group']
    group_permission_table = metadata.tables['group_permission']
    test_table = metadata.tables['test_table']
def setup_reflection():
    """Re-reflect the schema from the live database and rebuild all ORM
    mappers against the reflected tables (classic, non-declarative style)."""
    #if os.environ.get('AUTOLOAD', False):
    metadata.clear()
    metadata.reflect()
    _reassign_from_metadata()

    # Mappers must be rebuilt because the Table objects were replaced.
    clear_mappers()
    tables = metadata.tables
    mapper(Town, tables['town_table'])
    mapper(Example, tables['test_table'])
    mapper(Visit, tables['visit'])
    mapper(VisitIdentity, tables['visit_identity'],
           properties=dict(users=relation(User, backref='visit_identity')))
    mapper(User, tables['tg_user'])
    mapper(Group, tables['tg_group'],
           properties=dict(users=relation(User,
                                          secondary=tables['user_group'],
                                          backref='groups')))
    mapper(Permission, tables['permission'],
           properties=dict(groups=relation(Group,
                                           secondary=tables['group_permission'],
                                           backref='permissions')))
class SproxTest(object):
    """Base class for sprox tests: seeds the shared database with the
    standard records before each test and rolls the session back after."""

    def setup(self):
        # Uses the module-level singletons created by setup_database().
        self.session = session
        self.engine = engine
        try:
            self.user = setup_records(session)
        # NOTE(review): bare except silently rolls back any seeding failure;
        # a narrower exception type would surface real errors.
        except:
            self.session.rollback()

    def teardown(self):
        # Discard everything a test changed so cases stay independent.
        self.session.rollback()
if __name__ == '__main__':
    # Fixed: the module defines setup_database() (snake_case); the previous
    # call to the undefined name setupDatabase() raised NameError whenever
    # this file was executed as a script.
    setup_database()
"VisIt"
] | 5dc790a787838924b5e4f883507869ddec47a14db1789867c026493d70cc13b9 |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy
def get_naive_bayes_models():
    """Return the three Naive Bayes classifiers paired with display names."""
    models = [GaussianNB(), MultinomialNB(), BernoulliNB()]
    names = ['Gaussian NB', 'Multinomial NB', 'Bernoulli NB']
    return models, names
def get_neural_network(hidden_layer_size=50):
    """Return a single-hidden-layer MLP classifier and its display name."""
    model = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
    return [model], ['MultiLayer Perceptron']
def get_ensemble_models():
    """Return a list of tree-based ensemble classifiers and their names."""
    models = [
        RandomForestClassifier(n_estimators=51, min_samples_leaf=5, min_samples_split=3),
        BaggingClassifier(n_estimators=71, random_state=42),
        ExtraTreesClassifier(n_estimators=57, random_state=42),
        AdaBoostClassifier(n_estimators=51, random_state=42),
        GradientBoostingClassifier(n_estimators=101, random_state=42),
    ]
    names = ['Random Forests', 'Bagging', 'Extra Trees', 'AdaBoost', 'Gradient Boost']
    return models, names
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    """Print the classification report and accuracy of a fitted model on the
    held-out test split (Python 2 print statements)."""
    print '--------- For Model : ', trained_model_name
    predicted_values = trained_model.predict(X_test)
    print metrics.classification_report(y_test,predicted_values)
    print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
    print "---------------------------------------\n"
# Load the Walmart trip-type training data and drop incomplete rows.
walmart_frame = pd.read_csv('train.csv')
label_encoder = LabelEncoder()
walmart_frame.dropna(inplace=True)

# Separate the target labels and remove columns not used as features.
class_labels = list(walmart_frame['TripType'].values)
del walmart_frame['TripType']
del walmart_frame['Upc']

# Label-encode every string-valued column.
# NOTE(review): a single LabelEncoder is reused (refit) per column, so the
# learned mapping of earlier columns is overwritten.
for column_name in walmart_frame.columns:
    if type(walmart_frame[column_name][0]) is str:
        walmart_frame[column_name] = label_encoder.fit_transform(walmart_frame[column_name].values)

# 80/20 train/test split, then fit and score each candidate model.
X_train,X_test,y_train,y_test = train_test_split(walmart_frame.values,class_labels,test_size=0.2,random_state=42)
#classifier_list, classifier_name_list = get_ensemble_models()
classifier_list, classifier_name_list = get_naive_bayes_models()
#classifier_list, classifier_name_list = get_neural_network(hidden_layer_size=21)
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
    classifier.fit(X_train,y_train)
    print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
| rupakc/Kaggle-Compendium | Walmart Trip Type Challenge/walmart-baseline.py | Python | mit | 2,893 | [
"Gaussian"
] | 23dc70e41332fb1789d5b5e2abf524c01e0db41a045ed09d551bf1247d7c1030 |
from __future__ import print_function
import copy
import warnings
import graphviz
import matplotlib.pyplot as plt
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """ Plots the population's average and best fitness. """
    # NOTE(review): plt is imported unconditionally at module level here, so
    # this guard can only trigger if the import block is made optional.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    stdev_fitness = np.array(statistics.get_fitness_stdev())

    # Average with a +/- one standard-deviation band, plus the per-generation best.
    plt.plot(generation, avg_fitness, 'b-', label="average")
    plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        # symlog tolerates non-positive fitness values, unlike plain log.
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()

    plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
    """ Plots the trains for a single spiking neuron.

    *spikes* is an iterable of (t, I, v, u, fired) tuples. Returns the
    matplotlib figure, or None when it was shown interactively.
    """
    t_values = [t for t, I, v, u, f in spikes]
    v_values = [v for t, I, v, u, f in spikes]
    u_values = [u for t, I, v, u, f in spikes]
    I_values = [I for t, I, v, u, f in spikes]
    f_values = [f for t, I, v, u, f in spikes]

    fig = plt.figure()
    # Four stacked panels: membrane potential, fired flag, recovery, current.
    plt.subplot(4, 1, 1)
    plt.ylabel("Potential (mv)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, v_values, "g-")

    if title is None:
        plt.title("Izhikevich's spiking neuron model")
    else:
        plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))

    plt.subplot(4, 1, 2)
    plt.ylabel("Fired")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, f_values, "r-")

    plt.subplot(4, 1, 3)
    plt.ylabel("Recovery (u)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, u_values, "r-")

    plt.subplot(4, 1, 4)
    plt.ylabel("Current (I)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, I_values, "r-o")

    if filename is not None:
        plt.savefig(filename)

    if view:
        plt.show()
        plt.close()
        # The figure was consumed by show(); signal that to the caller.
        fig = None

    return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
    """ Visualizes speciation throughout evolution. """
    # NOTE(review): as in plot_stats, plt cannot actually be None with the
    # current unconditional import.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    species_sizes = statistics.get_species_sizes()
    num_generations = len(species_sizes)
    # Transpose so each row is one species' size across generations.
    curves = np.array(species_sizes).T

    fig, ax = plt.subplots()
    ax.stackplot(range(num_generations), *curves)

    plt.title("Speciation")
    plt.ylabel("Size per Species")
    plt.xlabel("Generations")

    plt.savefig(filename)

    if view:
        plt.show()

    plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
             node_colors=None, fmt='svg'):
    """ Receives a genome and draws a neural network with arbitrary topology.

    Inputs are gray boxes, outputs blue circles; enabled connections are
    solid (green positive / red negative), disabled ones dotted. Returns the
    graphviz Digraph after rendering it to *filename*.
    """
    # Attributes for network nodes.
    if graphviz is None:
        warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
        return

    if node_names is None:
        node_names = {}

    assert type(node_names) is dict

    if node_colors is None:
        node_colors = {}

    assert type(node_colors) is dict

    node_attrs = {
        'shape': 'circle',
        'fontsize': '9',
        'height': '0.2',
        'width': '0.2'}

    dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)

    inputs = set()
    for k in config.genome_config.input_keys:
        inputs.add(k)
        name = node_names.get(k, str(k))
        input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
        dot.node(name, _attributes=input_attrs)

    outputs = set()
    for k in config.genome_config.output_keys:
        outputs.add(k)
        name = node_names.get(k, str(k))
        node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
        dot.node(name, _attributes=node_attrs)

    if prune_unused:
        # Walk backwards from the outputs, keeping only reachable nodes.
        # NOTE(review): this branch reads cg.in_node_id/cg.out_node_id while
        # the edge loop below unpacks cg.key — confirm both attribute forms
        # exist on the connection-gene class in use.
        connections = set()
        for cg in genome.connections.values():
            if cg.enabled or show_disabled:
                connections.add((cg.in_node_id, cg.out_node_id))

        used_nodes = copy.copy(outputs)
        pending = copy.copy(outputs)
        while pending:
            new_pending = set()
            for a, b in connections:
                if b in pending and a not in used_nodes:
                    new_pending.add(a)
                    used_nodes.add(a)
            pending = new_pending
    else:
        used_nodes = set(genome.nodes.keys())

    # Hidden nodes (anything used that is neither an input nor an output).
    for n in used_nodes:
        if n in inputs or n in outputs:
            continue

        attrs = {'style': 'filled',
                 'fillcolor': node_colors.get(n, 'white')}
        dot.node(str(n), _attributes=attrs)

    for cg in genome.connections.values():
        if cg.enabled or show_disabled:
            #if cg.input not in used_nodes or cg.output not in used_nodes:
            #    continue
            input, output = cg.key
            a = node_names.get(input, str(input))
            b = node_names.get(output, str(output))
            style = 'solid' if cg.enabled else 'dotted'
            color = 'green' if cg.weight > 0 else 'red'
            # Edge thickness scales with the connection weight magnitude.
            width = str(0.1 + abs(cg.weight / 5.0))
            dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})

    dot.render(filename, view=view)

    return dot
| CodeReclaimers/neat-python | examples/xor/visualize.py | Python | bsd-3-clause | 5,915 | [
"NEURON"
] | 35e6d9dc43bff65bea1f6f087598d840bb977c864f80e4808e294c827e37bd93 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/export.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import copy
import csv
import datetime
import io
import ipaddress
import json
import logging
import os
import re
import shutil
import tarfile
import xml.etree.ElementTree as ET
from king_phisher.errors import KingPhisherInputValidationError
from boltons import iterutils
import geojson
from smoke_zephyr.utilities import escape_single_quote
from smoke_zephyr.utilities import unescape_single_quote
__all__ = [
    'campaign_to_xml',
    'convert_value',
    'message_data_to_kpm',
    'treeview_liststore_to_csv'
]

# File names used for the optional binary members inside a .kpm archive.
KPM_ARCHIVE_FILES = {
    'attachment_file': 'message_attachment.bin',
    'target_file': 'target_file.csv'
}

# Matches {{ inline_image('path') }} template tags, capturing the quoted
# path (single- or double-quoted, with backslash escapes) in group 1.
KPM_INLINE_IMAGE_REGEXP = re.compile(r"""{{\s*inline_image\(\s*(('(?:[^'\\]|\\.)+')|("(?:[^"\\]|\\.)+"))\s*\)\s*}}""")
logger = logging.getLogger('KingPhisher.Client.export')
def message_template_to_kpm(template):
	"""
	Rewrite each inline image tag in *template* so it references only the
	file's base name, collecting the original file paths so they can be
	added to the archive.

	:param str template: The message template text to process.
	:return: The rewritten template and the list of referenced file paths.
	:rtype: tuple
	"""
	referenced_files = []
	position = 0
	while True:
		match = KPM_INLINE_IMAGE_REGEXP.search(template[position:])
		if match is None:
			break
		original_path = unescape_single_quote(match.group(1)[1:-1])
		referenced_files.append(original_path)
		replacement = "{{{{ inline_image('{0}') }}}}".format(escape_single_quote(os.path.basename(original_path)))
		start = position + match.start()
		end = position + match.end()
		template = template[:start] + replacement + template[end:]
		# resume scanning just past the replacement to avoid rematching it
		position = start + len(replacement)
	return template, referenced_files
def message_template_from_kpm(template, files):
	"""
	Rewrite each inline image tag in *template*, expanding base file names
	to the full extracted paths provided in *files*. Tags whose file is not
	available are left unmodified.

	:param str template: The message template text to process.
	:param list files: The full paths of the available image files.
	:return: The rewritten template.
	:rtype: str
	"""
	path_by_name = dict(zip(map(os.path.basename, files), files))
	position = 0
	while True:
		match = KPM_INLINE_IMAGE_REGEXP.search(template[position:])
		if match is None:
			break
		referenced_name = unescape_single_quote(match.group(1)[1:-1])
		full_path = path_by_name.get(referenced_name)
		start = position + match.start()
		end = position + match.end()
		if not full_path:
			# no matching extracted file, skip past this tag unchanged
			position = end
			continue
		replacement = "{{{{ inline_image('{0}') }}}}".format(escape_single_quote(full_path))
		template = template[:start] + replacement + template[end:]
		position = start + len(replacement)
	return template
def convert_value(table_name, key, value):
	"""
	Perform any conversions necessary to neatly display the data in XML
	format. Datetime values are converted to their ISO-8601 representation
	and all other non-None values are converted to strings.

	:param str table_name: The table name that the key and value pair are from.
	:param str key: The data key.
	:param value: The data value to convert.
	:return: The converted value, or None if *value* was None.
	:rtype: str
	"""
	if isinstance(value, datetime.datetime):
		value = value.isoformat()
	# use an identity check per PEP 8; != None can misfire on objects with
	# custom __eq__ implementations
	if value is not None:
		value = str(value)
	return value
def campaign_to_xml(rpc, campaign_id, xml_file):
	"""
	Load all information for a particular campaign and dump it to an XML
	file.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str xml_file: The destination file for the XML data.
	"""
	root = ET.Element('king_phisher')
	# record metadata describing this export
	metadata = ET.SubElement(root, 'metadata')
	ET.SubElement(metadata, 'timestamp').text = datetime.datetime.utcnow().isoformat()
	ET.SubElement(metadata, 'utctime').text = 'True'
	ET.SubElement(metadata, 'version').text = '1.2'
	# dump the campaign row itself
	campaign = ET.SubElement(root, 'campaign')
	campaign_info = rpc.remote_table_row('campaigns', campaign_id)
	for key, value in campaign_info._asdict().items():
		ET.SubElement(campaign, key).text = convert_value('campaigns', key, value)
	# dump each related table which has a campaign_id field
	related_tables = ('landing_pages', 'messages', 'visits', 'credentials', 'deaddrop_deployments', 'deaddrop_connections')
	for table_name in related_tables:
		table_element = ET.SubElement(campaign, table_name)
		# each row element is tagged with the singular form of the table name
		row_tag = table_name[:-1]
		for row in rpc.remote_table(table_name, query_filter={'campaign_id': campaign_id}):
			row_element = ET.SubElement(table_element, row_tag)
			for key, value in row._asdict().items():
				ET.SubElement(row_element, key).text = convert_value(table_name, key, value)
	ET.ElementTree(root).write(xml_file, encoding='utf-8', xml_declaration=True)
def campaign_visits_to_geojson(rpc, campaign_id, geojson_file):
	"""
	Export the geo location information for all the visits of a campaign into
	the `GeoJSON <http://geojson.org/>`_ format.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str geojson_file: The destination file for the GeoJSON data.
	"""
	ips_for_georesolution = {}
	ip_counter = collections.Counter()
	for visit in rpc.remote_table('visits', query_filter={'campaign_id': campaign_id}):
		# count every visit per IP, even those skipped for geo resolution
		ip_counter.update((visit.visitor_ip,))
		visitor_ip = ipaddress.ip_address(visit.visitor_ip)
		# only public IPv4 addresses are submitted for geo resolution
		if not isinstance(visitor_ip, ipaddress.IPv4Address):
			continue
		if visitor_ip.is_loopback or visitor_ip.is_private:
			continue
		# track the earliest first_visit timestamp seen for each address
		if not visitor_ip in ips_for_georesolution:
			ips_for_georesolution[visitor_ip] = visit.first_visit
		elif ips_for_georesolution[visitor_ip] > visit.first_visit:
			ips_for_georesolution[visitor_ip] = visit.first_visit
	# order the addresses by their earliest visit time
	ips_for_georesolution = [ip for (ip, _) in sorted(ips_for_georesolution.items(), key=lambda x: x[1])]
	locations = {}
	# resolve in batches of 50 to bound the size of each RPC request
	for ip_addresses in iterutils.chunked(ips_for_georesolution, 50):
		locations.update(rpc.geoip_lookup_multi(ip_addresses))
	points = []
	for ip, location in locations.items():
		# skip addresses which did not resolve to usable coordinates
		if not (location.coordinates and location.coordinates[0] and location.coordinates[1]):
			continue
		points.append(geojson.Feature(geometry=location, properties={'count': ip_counter[ip], 'ip-address': ip}))
	feature_collection = geojson.FeatureCollection(points)
	with open(geojson_file, 'w') as file_h:
		json.dump(feature_collection, file_h, indent=2, separators=(',', ': '))
def message_data_from_kpm(target_file, dest_dir):
	"""
	Retrieve the stored details describing a message from a previously exported
	file.

	:param str target_file: The file to load as a message archive.
	:param str dest_dir: The directory to extract data and attachment files to.
	:return: The restored details from the message config.
	:rtype: dict
	:raises KingPhisherInputValidationError: If the archive is not a valid tar
		file or data it is expected to contain is missing.
	"""
	if not tarfile.is_tarfile(target_file):
		logger.warning('the file is not recognized as a valid tar archive')
		raise KingPhisherInputValidationError('file is not in the correct format')
	tar_h = tarfile.open(target_file)
	member_names = tar_h.getnames()
	# members stored under the attachments/ directory within the archive
	attachment_member_names = [n for n in member_names if n.startswith('attachments' + os.path.sep)]
	# helper returning a file-like object for a named archive member
	tar_get_file = lambda name: tar_h.extractfile(tar_h.getmember(name))
	attachments = []
	# the message configuration is mandatory for a valid kpm archive
	if not 'message_config.json' in member_names:
		logger.warning('the kpm archive is missing the message_config.json file')
		raise KingPhisherInputValidationError('data is missing from the message archive')
	message_config = tar_get_file('message_config.json').read()
	message_config = json.loads(message_config)
	if attachment_member_names:
		# extract each attachment file into dest_dir/attachments
		attachment_dir = os.path.join(dest_dir, 'attachments')
		if not os.path.isdir(attachment_dir):
			os.mkdir(attachment_dir)
		for file_name in attachment_member_names:
			tarfile_h = tar_get_file(file_name)
			file_name = os.path.basename(file_name)
			file_path = os.path.join(attachment_dir, file_name)
			with open(file_path, 'wb') as file_h:
				shutil.copyfileobj(tarfile_h, file_h)
			attachments.append(file_path)
		logger.debug("extracted {0} attachment file{1} from the archive".format(len(attachments), 's' if len(attachments) > 1 else ''))
	for config_name, file_name in KPM_ARCHIVE_FILES.items():
		if not file_name in member_names:
			# a missing member is only an error when the configuration
			# still references it
			if config_name in message_config:
				logger.warning("the kpm archive is missing the {0} file".format(file_name))
				raise KingPhisherInputValidationError('data is missing from the message archive')
			continue
		if not message_config.get(config_name):
			logger.warning("the kpm message configuration is missing the {0} setting".format(config_name))
			raise KingPhisherInputValidationError('data is missing from the message archive')
		# extract the member and point the configuration at its new path
		tarfile_h = tar_get_file(file_name)
		file_path = os.path.join(dest_dir, message_config[config_name])
		with open(file_path, 'wb') as file_h:
			shutil.copyfileobj(tarfile_h, file_h)
		message_config[config_name] = file_path
	if 'message_content.html' in member_names:
		if not 'html_file' in message_config:
			logger.warning('the kpm message configuration is missing the html_file setting')
			raise KingPhisherInputValidationError('data is missing from the message archive')
		tarfile_h = tar_get_file('message_content.html')
		file_path = os.path.join(dest_dir, message_config['html_file'])
		with open(file_path, 'wb') as file_h:
			# expand inline image tags to the extracted attachment paths
			file_h.write(message_template_from_kpm(tarfile_h.read(), attachments))
		message_config['html_file'] = file_path
	elif 'html_file' in message_config:
		logger.warning('the kpm archive is missing the message_content.html file')
		raise KingPhisherInputValidationError('data is missing from the message archive')
	return message_config
def message_data_to_kpm(message_config, target_file):
	"""
	Save details describing a message to the target file as a bzip2
	compressed tar (KPM) archive.

	:param dict message_config: The message details from the client configuration.
	:param str target_file: The file to write the data to.
	"""
	# operate on a shallow copy so the caller's configuration is not modified
	message_config = copy.copy(message_config)
	epoch = datetime.datetime.utcfromtimestamp(0)
	# seconds since the epoch, used as the mtime for generated archive members
	mtime = (datetime.datetime.utcnow() - epoch).total_seconds()
	tar_h = tarfile.open(target_file, 'w:bz2')
	for config_name, file_name in KPM_ARCHIVE_FILES.items():
		if os.access(message_config.get(config_name, ''), os.R_OK):
			# archive the referenced file and keep only its base name in the
			# stored configuration
			tar_h.add(message_config[config_name], arcname=file_name)
			message_config[config_name] = os.path.basename(message_config[config_name])
			continue
		if len(message_config.get(config_name, '')):
			logger.info("the specified {0} '{1}' is not readable, the setting will be removed".format(config_name, message_config[config_name]))
		# drop settings which reference unreadable or missing files
		if config_name in message_config:
			del message_config[config_name]
	if os.access(message_config.get('html_file', ''), os.R_OK):
		template = open(message_config['html_file'], 'rb').read()
		message_config['html_file'] = os.path.basename(message_config['html_file'])
		# rewrite inline image tags to base names, collecting referenced files
		template, attachments = message_template_to_kpm(template)
		logger.debug("identified {0} attachment file{1} to be archived".format(len(attachments), 's' if len(attachments) > 1 else ''))
		for attachment in attachments:
			if os.access(attachment, os.R_OK):
				tar_h.add(attachment, arcname=os.path.join('attachments', os.path.basename(attachment)))
		# add the rewritten template from an in-memory buffer
		template_strio = io.BytesIO()
		template_strio.write(template)
		tarinfo_h = tarfile.TarInfo(name='message_content.html')
		tarinfo_h.mtime = mtime
		tarinfo_h.size = template_strio.tell()
		template_strio.seek(os.SEEK_SET)
		tar_h.addfile(tarinfo=tarinfo_h, fileobj=template_strio)
	else:
		if len(message_config.get('html_file', '')):
			logger.info("the specified html_file '{0}' is not readable, the setting will be removed".format(message_config['html_file']))
		if 'html_file' in message_config:
			del message_config['html_file']
	# store the (possibly modified) message configuration as JSON
	# NOTE(review): writing the str returned by json.dumps into a BytesIO
	# only works on Python 2 where str is bytes -- confirm if Python 3
	# support is required here
	msg_strio = io.BytesIO()
	msg_strio.write(json.dumps(message_config, sort_keys=True, indent=4))
	tarinfo_h = tarfile.TarInfo(name='message_config.json')
	tarinfo_h.mtime = mtime
	tarinfo_h.size = msg_strio.tell()
	msg_strio.seek(os.SEEK_SET)
	tar_h.addfile(tarinfo=tarinfo_h, fileobj=msg_strio)
	tar_h.close()
	return
def treeview_liststore_to_csv(treeview, target_file):
	"""
	Convert a treeview object to a CSV file. The CSV column names are loaded
	from the treeview.

	:param treeview: The treeview to load the information from.
	:type treeview: :py:class:`Gtk.TreeView`
	:param str target_file: The destination file for the CSV data.
	:return: The number of rows that were written.
	:rtype: int
	"""
	output_h = open(target_file, 'wb')
	writer = csv.writer(output_h, quoting=csv.QUOTE_ALL)
	# the header is the UID column followed by the treeview's column titles
	header = ['UID']
	header.extend(column.get_property('title') for column in treeview.get_columns())
	writer.writerow(header)
	column_indexes = range(len(header))
	model = treeview.get_model()
	row_iter = model.get_iter_first()
	rows_written = 0
	while row_iter:
		writer.writerow([model.get_value(row_iter, index) for index in column_indexes])
		rows_written += 1
		row_iter = model.iter_next(row_iter)
	output_h.close()
	return rows_written
| zigitax/king-phisher | king_phisher/client/export.py | Python | bsd-3-clause | 13,884 | [
"VisIt"
] | 86cfe448278240b800e7fa5f0ac12b1c8be31af5c0f213a38cef426f470d32b6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.